The Basted Turkey Release (aka 2.1.130)
[davej-history.git] / kernel / sched.c
1 /*
2 * linux/kernel/sched.c
4 * Copyright (C) 1991, 1992 Linus Torvalds
6 * 1996-04-21 Modified by Ulrich Windl to make NTP work
7 * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
8 * make semaphores SMP safe
9 * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
13 * 'sched.c' is the main kernel file. It contains scheduling primitives
14 * (sleep_on, wakeup, schedule etc) as well as a number of simple system
15 * call functions (type getpid()), which just extract a field from
16 * the current task
19 #include <linux/mm.h>
20 #include <linux/kernel_stat.h>
21 #include <linux/fdreg.h>
22 #include <linux/delay.h>
23 #include <linux/interrupt.h>
24 #include <linux/smp_lock.h>
25 #include <linux/init.h>
27 #include <asm/io.h>
28 #include <asm/uaccess.h>
29 #include <asm/pgtable.h>
30 #include <asm/mmu_context.h>
32 #include <linux/timex.h>
35 * kernel variables
38 unsigned securebits = SECUREBITS_DEFAULT; /* systemwide security settings */
40 long tick = (1000000 + HZ/2) / HZ; /* timer interrupt period */
42 /* The current time */
43 volatile struct timeval xtime __attribute__ ((aligned (16)));
45 /* Don't completely fail for HZ > 500. */
46 int tickadj = 500/HZ ? : 1; /* microsecs */
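/*
 * Worked numbers (illustrative, assuming HZ == 100): tick works out to
 * (1000000 + 50) / 100 == 10000 microseconds per timer interrupt, and
 * tickadj to 500/100 == 5 microseconds of slew per tick.
 */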
48 DECLARE_TASK_QUEUE(tq_timer);
49 DECLARE_TASK_QUEUE(tq_immediate);
50 DECLARE_TASK_QUEUE(tq_scheduler);
53 * phase-lock loop variables
55 /* TIME_ERROR prevents overwriting the CMOS clock */
56 int time_state = TIME_ERROR; /* clock synchronization status */
57 int time_status = STA_UNSYNC; /* clock status bits */
58 long time_offset = 0; /* time adjustment (us) */
59 long time_constant = 2; /* pll time constant */
60 long time_tolerance = MAXFREQ; /* frequency tolerance (ppm) */
61 long time_precision = 1; /* clock precision (us) */
62 long time_maxerror = MAXPHASE; /* maximum error (us) */
63 long time_esterror = MAXPHASE; /* estimated error (us) */
64 long time_phase = 0; /* phase offset (scaled us) */
65 long time_freq = ((1000000 + HZ/2) % HZ - HZ/2) << SHIFT_USEC; /* frequency offset (scaled ppm) */
66 long time_adj = 0; /* tick adjust (scaled 1 / HZ) */
67 long time_reftime = 0; /* time at last adjustment (s) */
69 long time_adjust = 0;
70 long time_adjust_step = 0;
72 unsigned long event = 0;
74 extern int do_setitimer(int, struct itimerval *, struct itimerval *);
75 unsigned int * prof_buffer = NULL;
76 unsigned long prof_len = 0;
77 unsigned long prof_shift = 0;
79 extern void mem_use(void);
81 unsigned long volatile jiffies=0;
84 * Init task must be ok at boot for the ix86 as we will check its signals
85 * via the SMP irq return path.
88 struct task_struct * task[NR_TASKS] = {&init_task, };
90 struct kernel_stat kstat = { 0 };
92 void scheduling_functions_start_here(void) { }
94 static inline void reschedule_idle(struct task_struct * p)
98 * For SMP, we try to see if the CPU the task used
99 * to run on is idle..
101 #if 0
103 * Disable this for now. Ingo has some interesting
104 * code that looks too complex, and I have some ideas,
105 * but in the meantime.. One problem is that "wakeup()"
106 * can be (and is) called before we've even initialized
107 * SMP completely, so..
109 #ifdef __SMP__
110 int want_cpu = p->processor;
113 * Don't even try to find another CPU for us if the task
114 * ran on this one before..
116 if (want_cpu != smp_processor_id()) {
117 struct task_struct **idle = task;
118 int i = smp_num_cpus;
120 do {
121 struct task_struct *tsk = *idle;
122 idle++;
123 /* Something like this.. */
124 if (tsk->has_cpu && tsk->processor == want_cpu) {
125 tsk->need_resched = 1;
126 smp_send_reschedule(want_cpu);
127 return;
129 } while (--i > 0);
131 #endif
132 #endif
133 if (p->policy != SCHED_OTHER || p->counter > current->counter + 3)
134 current->need_resched = 1;
138 * Careful!
140 * This has to add the process to the _beginning_ of the
141 * run-queue, not the end. See the comment about "This is
142 * subtle" in the scheduler proper..
144 static inline void add_to_runqueue(struct task_struct * p)
146 struct task_struct *next = init_task.next_run;
148 p->prev_run = &init_task;
149 init_task.next_run = p;
150 p->next_run = next;
151 next->prev_run = p;
154 static inline void del_from_runqueue(struct task_struct * p)
156 struct task_struct *next = p->next_run;
157 struct task_struct *prev = p->prev_run;
159 nr_running--;
160 next->prev_run = prev;
161 prev->next_run = next;
162 p->next_run = NULL;
163 p->prev_run = NULL;
166 static inline void move_last_runqueue(struct task_struct * p)
168 struct task_struct *next = p->next_run;
169 struct task_struct *prev = p->prev_run;
171 /* remove from list */
172 next->prev_run = prev;
173 prev->next_run = next;
174 /* add back to list */
175 p->next_run = &init_task;
176 prev = init_task.prev_run;
177 init_task.prev_run = p;
178 p->prev_run = prev;
179 prev->next_run = p;
182 static inline void move_first_runqueue(struct task_struct * p)
184 struct task_struct *next = p->next_run;
185 struct task_struct *prev = p->prev_run;
187 /* remove from list */
188 next->prev_run = prev;
189 prev->next_run = next;
190 /* add back to list */
191 p->prev_run = &init_task;
192 next = init_task.next_run;
193 init_task.next_run = p;
194 p->next_run = next;
195 next->prev_run = p;
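/*
 * Illustrative note: the run-queue is a circular doubly-linked list
 * threaded through next_run/prev_run, with init_task acting as the list
 * head.  For example, after add_to_runqueue(p) on an otherwise empty
 * queue, init_task.next_run == p and p->next_run == &init_task.
 */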
199 * The tasklist_lock protects the linked list of processes.
201 * The scheduler lock is protecting against multiple entry
202 * into the scheduling code, and doesn't need to worry
203 * about interrupts (because interrupts cannot call the
204 * scheduler).
206 * The run-queue lock locks the parts that actually access
207 * and change the run-queues, and have to be interrupt-safe.
209 spinlock_t scheduler_lock = SPIN_LOCK_UNLOCKED; /* should be acquired first */
210 spinlock_t runqueue_lock = SPIN_LOCK_UNLOCKED; /* second */
211 rwlock_t tasklist_lock = RW_LOCK_UNLOCKED; /* third */
214 * Wake up a process. Put it on the run-queue if it's not
215 * already there. The "current" process is always on the
216 * run-queue (except when the actual re-schedule is in
217 * progress), and as such you're allowed to do the simpler
218 * "current->state = TASK_RUNNING" to mark yourself runnable
219 * without the overhead of this.
221 void wake_up_process(struct task_struct * p)
223 unsigned long flags;
225 spin_lock_irqsave(&runqueue_lock, flags);
226 p->state = TASK_RUNNING;
227 if (!p->next_run) {
228 add_to_runqueue(p);
229 reschedule_idle(p);
230 nr_running++;
232 spin_unlock_irqrestore(&runqueue_lock, flags);
235 static void process_timeout(unsigned long __data)
237 struct task_struct * p = (struct task_struct *) __data;
239 wake_up_process(p);
243 * This is the function that decides how desirable a process is..
244 * You can weigh different processes against each other depending
245 * on what CPU they've run on lately etc to try to handle cache
246 * and TLB miss penalties.
248 * Return values:
249 * -1000: never select this
250 * 0: out of time, recalculate counters (but it might still be
251 * selected)
252 * +ve: "goodness" value (the larger, the better)
253 * +1000: realtime process, select this.
255 static inline int goodness(struct task_struct * p, struct task_struct * prev, int this_cpu)
257 int policy = p->policy;
258 int weight;
260 if (policy & SCHED_YIELD) {
261 p->policy = policy & ~SCHED_YIELD;
262 return 0;
266 * Realtime process, select the first one on the
267 * runqueue (taking priorities within processes
268 * into account).
270 if (policy != SCHED_OTHER)
271 return 1000 + p->rt_priority;
274 * Give the process a first-approximation goodness value
275 * according to the number of clock-ticks it has left.
277 * Don't do any other calculations if the time slice is
278 * over..
280 weight = p->counter;
281 if (weight) {
283 #ifdef __SMP__
284 /* Give a largish advantage to the same processor... */
285 /* (this is equivalent to penalizing other processors) */
286 if (p->processor == this_cpu)
287 weight += PROC_CHANGE_PENALTY;
288 #endif
290 /* .. and a slight advantage to the current thread */
291 if (p->mm == prev->mm)
292 weight += 1;
293 weight += p->priority;
296 return weight;
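/*
 * Worked example (illustrative numbers): a SCHED_OTHER task with
 * counter == 5 and priority == DEF_PRIORITY that shares its mm with
 * "prev" scores 5 + 1 + DEF_PRIORITY; on SMP, PROC_CHANGE_PENALTY is
 * added on top if it last ran on this_cpu.  A task whose counter has
 * reached 0 scores 0; when every runnable task scores 0, schedule()
 * recalculates all the counters.
 */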
300 * Event timer code
302 #define TVN_BITS 6
303 #define TVR_BITS 8
304 #define TVN_SIZE (1 << TVN_BITS)
305 #define TVR_SIZE (1 << TVR_BITS)
306 #define TVN_MASK (TVN_SIZE - 1)
307 #define TVR_MASK (TVR_SIZE - 1)
309 struct timer_vec {
310 int index;
311 struct timer_list *vec[TVN_SIZE];
314 struct timer_vec_root {
315 int index;
316 struct timer_list *vec[TVR_SIZE];
319 static struct timer_vec tv5 = { 0 };
320 static struct timer_vec tv4 = { 0 };
321 static struct timer_vec tv3 = { 0 };
322 static struct timer_vec tv2 = { 0 };
323 static struct timer_vec_root tv1 = { 0 };
325 static struct timer_vec * const tvecs[] = {
326 (struct timer_vec *)&tv1, &tv2, &tv3, &tv4, &tv5
329 #define NOOF_TVECS (sizeof(tvecs) / sizeof(tvecs[0]))
331 static unsigned long timer_jiffies = 0;
333 static inline void insert_timer(struct timer_list *timer,
334 struct timer_list **vec, int idx)
336 if ((timer->next = vec[idx]))
337 vec[idx]->prev = timer;
338 vec[idx] = timer;
339 timer->prev = (struct timer_list *)&vec[idx];
342 static inline void internal_add_timer(struct timer_list *timer)
345 * must be cli-ed when calling this
347 unsigned long expires = timer->expires;
348 unsigned long idx = expires - timer_jiffies;
350 if (idx < TVR_SIZE) {
351 int i = expires & TVR_MASK;
352 insert_timer(timer, tv1.vec, i);
353 } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
354 int i = (expires >> TVR_BITS) & TVN_MASK;
355 insert_timer(timer, tv2.vec, i);
356 } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
357 int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
358 insert_timer(timer, tv3.vec, i);
359 } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
360 int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
361 insert_timer(timer, tv4.vec, i);
362 } else if ((signed long) idx < 0) {
363 /* can happen if you add a timer with expires == jiffies,
364 * or you set a timer to go off in the past
366 insert_timer(timer, tv1.vec, tv1.index);
367 } else if (idx <= 0xffffffffUL) {
368 int i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
369 insert_timer(timer, tv5.vec, i);
370 } else {
371 /* Can only get here on architectures with 64-bit jiffies */
372 timer->next = timer->prev = timer;
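/*
 * Bucket arithmetic, illustrated (hypothetical values): with TVR_BITS == 8
 * and TVN_BITS == 6, a timer due in fewer than 256 jiffies lands in tv1,
 * indexed by the low 8 bits of "expires"; one due in, say, 10000 jiffies
 * (256 <= idx < 16384) lands in tv2, indexed by bits 8..13 of "expires".
 * cascade_timers() below later re-feeds such a bucket through
 * internal_add_timer(), sorting it down into tv1 slots.
 */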
376 spinlock_t timerlist_lock = SPIN_LOCK_UNLOCKED;
378 void add_timer(struct timer_list *timer)
380 unsigned long flags;
382 spin_lock_irqsave(&timerlist_lock, flags);
383 internal_add_timer(timer);
384 spin_unlock_irqrestore(&timerlist_lock, flags);
387 static inline int detach_timer(struct timer_list *timer)
389 struct timer_list *prev = timer->prev;
390 if (prev) {
391 struct timer_list *next = timer->next;
392 prev->next = next;
393 if (next)
394 next->prev = prev;
395 return 1;
397 return 0;
400 void mod_timer(struct timer_list *timer, unsigned long expires)
402 unsigned long flags;
404 spin_lock_irqsave(&timerlist_lock, flags);
405 timer->expires = expires;
406 detach_timer(timer);
407 internal_add_timer(timer);
408 spin_unlock_irqrestore(&timerlist_lock, flags);
411 int del_timer(struct timer_list * timer)
413 int ret;
414 unsigned long flags;
416 spin_lock_irqsave(&timerlist_lock, flags);
417 ret = detach_timer(timer);
418 timer->next = timer->prev = 0;
419 spin_unlock_irqrestore(&timerlist_lock, flags);
420 return ret;
423 #ifdef __SMP__
425 #define idle_task (task[cpu_number_map[this_cpu]])
426 #define can_schedule(p) (!(p)->has_cpu)
428 #else
430 #define idle_task (&init_task)
431 #define can_schedule(p) (1)
433 #endif
435 signed long schedule_timeout(signed long timeout)
437 struct timer_list timer;
438 unsigned long expire;
441 * PARANOID.
443 if (current->state == TASK_UNINTERRUPTIBLE)
445 printk(KERN_WARNING "schedule_timeout: task not interruptible "
446 "from %p\n", __builtin_return_address(0));
448 * We don't want to interrupt a non-interruptible task
449 * and risk causing corruption. Better a deadlock ;-).
451 timeout = MAX_SCHEDULE_TIMEOUT;
455 * Here we start for real.
457 switch (timeout)
459 case MAX_SCHEDULE_TIMEOUT:
461 * These two special cases exist purely for the caller's
462 * convenience. Nothing more. We could take
463 * MAX_SCHEDULE_TIMEOUT from one of the negative values,
464 * but I'd like to return a valid offset (>=0) to allow
465 * the caller to do whatever it wants with the retval.
467 schedule();
468 goto out;
469 default:
471 * Another bit of PARANOID. Note that the retval will be
472 * 0, since no piece of the kernel is supposed to check
473 * for a negative retval of schedule_timeout() (it
474 * should never happen anyway). You just have the printk()
475 * that will tell you if something has gone wrong, and where.
477 if (timeout < 0)
479 printk(KERN_ERR "schedule_timeout: wrong timeout "
480 "value %lx from %p\n", timeout,
481 __builtin_return_address(0));
482 goto out;
486 expire = timeout + jiffies;
488 init_timer(&timer);
489 timer.expires = expire;
490 timer.data = (unsigned long) current;
491 timer.function = process_timeout;
493 add_timer(&timer);
494 schedule();
495 del_timer(&timer);
497 timeout = expire - jiffies;
499 out:
500 return timeout < 0 ? 0 : timeout;
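/*
 * Typical use (illustrative sketch): the caller sets its own state first
 * and then asks for a timeout in jiffies, e.g.
 *
 *	current->state = TASK_INTERRUPTIBLE;
 *	remaining = schedule_timeout(HZ);	(roughly one second)
 *
 * The return value is the number of jiffies left if we woke early (for
 * instance on a signal), or 0 once the full timeout has expired.
 */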
504 * 'schedule()' is the scheduler function. It's a very simple and nice
505 * scheduler: it's not perfect, but certainly works for most things.
507 * The goto is "interesting".
509 * NOTE!! Task 0 is the 'idle' task, which gets called when no other
510 * tasks can run. It can not be killed, and it cannot sleep. The 'state'
511 * information in task[0] is never used.
513 asmlinkage void schedule(void)
515 struct task_struct * prev, * next;
516 int this_cpu;
518 prev = current;
519 this_cpu = prev->processor;
520 if (in_interrupt())
521 goto scheduling_in_interrupt;
522 release_kernel_lock(prev, this_cpu);
524 /* Do "administrative" work here while we don't hold any locks */
525 if (bh_active & bh_mask)
526 do_bottom_half();
527 run_task_queue(&tq_scheduler);
529 spin_lock(&scheduler_lock);
530 spin_lock_irq(&runqueue_lock);
532 /* move an exhausted RR process to be last.. */
533 prev->need_resched = 0;
534 if (!prev->counter && prev->policy == SCHED_RR) {
535 prev->counter = prev->priority;
536 move_last_runqueue(prev);
539 switch (prev->state) {
540 case TASK_INTERRUPTIBLE:
541 if (signal_pending(prev)) {
542 prev->state = TASK_RUNNING;
543 break;
545 default:
546 del_from_runqueue(prev);
547 case TASK_RUNNING:
550 struct task_struct * p = init_task.next_run;
552 * This is subtle.
553 * Note how we can enable interrupts here, even
554 * though interrupts can add processes to the run-
555 * queue. This is because any new processes will
556 * be added to the front of the queue, so "p" above
557 * is a safe starting point.
558 * run-queue deletion and re-ordering is protected by
559 * the scheduler lock
561 spin_unlock_irq(&runqueue_lock);
562 #ifdef __SMP__
563 prev->has_cpu = 0;
564 #endif
567 * Note! New tasks may appear on the run-queue during this, as
568 * interrupts are enabled. However, they will be put at the front of the
569 * list, so our list starting at "p" is essentially fixed.
571 /* this is the scheduler proper: */
573 int c = -1000;
574 next = idle_task;
575 while (p != &init_task) {
576 if (can_schedule(p)) {
577 int weight = goodness(p, prev, this_cpu);
578 if (weight > c)
579 c = weight, next = p;
581 p = p->next_run;
584 /* Do we need to re-calculate counters? */
585 if (!c) {
586 struct task_struct *p;
587 read_lock(&tasklist_lock);
588 for_each_task(p)
589 p->counter = (p->counter >> 1) + p->priority;
590 read_unlock(&tasklist_lock);
595 #ifdef __SMP__
596 next->has_cpu = 1;
597 next->processor = this_cpu;
598 #endif
600 if (prev != next) {
601 kstat.context_swtch++;
602 get_mmu_context(next);
603 switch_to(prev,next);
606 spin_unlock(&scheduler_lock);
609 * At this point "prev" is "current", as we just
610 * switched into it (from an even more "previous"
611 * prev)
613 reacquire_kernel_lock(prev);
614 return;
616 scheduling_in_interrupt:
617 printk("Scheduling in interrupt\n");
618 *(int *)0 = 0;
622 rwlock_t waitqueue_lock = RW_LOCK_UNLOCKED;
625 * wake_up doesn't wake up stopped processes - they have to be awakened
626 * with signals or similar.
628 * Note that we only need a read lock for the wait queue (and thus do not
629 * have to protect against interrupts), as the actual removal from the
630 * queue is handled by the process itself.
632 void __wake_up(struct wait_queue **q, unsigned int mode)
634 struct wait_queue *next;
636 read_lock(&waitqueue_lock);
637 if (q && (next = *q)) {
638 struct wait_queue *head;
640 head = WAIT_QUEUE_HEAD(q);
641 while (next != head) {
642 struct task_struct *p = next->task;
643 next = next->next;
644 if (p->state & mode)
645 wake_up_process(p);
648 read_unlock(&waitqueue_lock);
652 * Semaphores are implemented using a two-way counter:
653 * The "count" variable is decremented for each process
654 * that tries to sleep, while the "waking" variable is
655 * incremented when the "up()" code goes to wake up waiting
656 * processes.
658 * Notably, the inline "up()" and "down()" functions can
659 * efficiently test if they need to do any extra work (up
660 * needs to do something only if count was negative before
661 * the increment operation).
663 * waking_non_zero() (from asm/semaphore.h) must execute
664 * atomically.
666 * When __up() is called, the count was negative before
667 * incrementing it, and we need to wake up somebody.
669 * This routine adds one to the count of processes that need to
670 * wake up and exit. ALL waiting processes actually wake up but
671 * only the one that gets to the "waking" field first will gate
672 * through and acquire the semaphore. The others will go back
673 * to sleep.
675 * Note that these functions are only called when there is
676 * contention on the lock, and as such all this is the
677 * "non-critical" part of the whole semaphore business. The
678 * critical part is the inline stuff in <asm/semaphore.h>
679 * where we want to avoid any extra jumps and calls.
681 void __up(struct semaphore *sem)
683 wake_one_more(sem);
684 wake_up(&sem->wait);
688 * Perform the "down" function. Return zero for semaphore acquired,
689 * return negative for signalled out of the function.
691 * If called from __down, the return is ignored and the wait loop is
692 * not interruptible. This means that a task waiting on a semaphore
693 * using "down()" cannot be killed until someone does an "up()" on
694 * the semaphore.
696 * If called from __down_interruptible, the return value gets checked
697 * upon return. If the return value is negative then the task continues
698 * with the negative value in the return register (it can be tested by
699 * the caller).
701 * Either form may be used in conjunction with "up()".
704 static inline int __do_down(struct semaphore * sem, int task_state)
706 struct task_struct *tsk = current;
707 struct wait_queue wait = { tsk, NULL };
708 int ret = 0;
710 tsk->state = task_state;
711 add_wait_queue(&sem->wait, &wait);
714 * Ok, we're set up. sem->count is known to be less than zero
715 * so we must wait.
717 * We can let go the lock for purposes of waiting.
718 * We re-acquire it after awaking so as to protect
719 * all semaphore operations.
721 * If "up()" is called before we call waking_non_zero() then
722 * we will catch it right away. If it is called later then
723 * we will have to go through a wakeup cycle to catch it.
725 * Multiple waiters contend for the semaphore lock to see
726 * who gets to gate through and who has to wait some more.
728 for (;;) {
729 if (waking_non_zero(sem)) /* are we waking up? */
730 break; /* yes, exit loop */
732 if (task_state == TASK_INTERRUPTIBLE && signal_pending(tsk)) {
733 ret = -EINTR; /* interrupted */
734 atomic_inc(&sem->count); /* give up on down operation */
735 break;
738 schedule();
739 tsk->state = task_state;
741 tsk->state = TASK_RUNNING;
742 remove_wait_queue(&sem->wait, &wait);
743 return ret;
746 void __down(struct semaphore * sem)
748 __do_down(sem,TASK_UNINTERRUPTIBLE);
751 int __down_interruptible(struct semaphore * sem)
753 return __do_down(sem,TASK_INTERRUPTIBLE);
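/*
 * Usage sketch (illustrative): callers normally go through the inline
 * down(&sem), down_interruptible(&sem) and up(&sem) wrappers in
 * <asm/semaphore.h>; those drop into __down(), __down_interruptible()
 * and __up() above only when the atomic fast path sees contention.
 */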
756 #define SLEEP_ON_VAR \
757 unsigned long flags; \
758 struct wait_queue wait;
760 #define SLEEP_ON_HEAD \
761 wait.task = current; \
762 write_lock_irqsave(&waitqueue_lock, flags); \
763 __add_wait_queue(p, &wait); \
764 write_unlock(&waitqueue_lock);
766 #define SLEEP_ON_TAIL \
767 write_lock_irq(&waitqueue_lock); \
768 __remove_wait_queue(p, &wait); \
769 write_unlock_irqrestore(&waitqueue_lock, flags);
771 void interruptible_sleep_on(struct wait_queue **p)
773 SLEEP_ON_VAR
775 current->state = TASK_INTERRUPTIBLE;
777 SLEEP_ON_HEAD
778 schedule();
779 SLEEP_ON_TAIL
782 long interruptible_sleep_on_timeout(struct wait_queue **p, long timeout)
784 SLEEP_ON_VAR
786 current->state = TASK_INTERRUPTIBLE;
788 SLEEP_ON_HEAD
789 timeout = schedule_timeout(timeout);
790 SLEEP_ON_TAIL
792 return timeout;
795 void sleep_on(struct wait_queue **p)
797 SLEEP_ON_VAR
799 current->state = TASK_UNINTERRUPTIBLE;
801 SLEEP_ON_HEAD
802 schedule();
803 SLEEP_ON_TAIL
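/*
 * Usage sketch (illustrative, with a hypothetical wait queue "wq"): a
 * driver sleeps with interruptible_sleep_on(&wq) and its interrupt
 * handler later calls wake_up_interruptible(&wq); since wake-ups can be
 * spurious, the sleeper should re-check its condition in a loop.
 */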
806 void scheduling_functions_end_here(void) { }
808 static inline void cascade_timers(struct timer_vec *tv)
810 /* cascade all the timers from tv up one level */
811 struct timer_list *timer;
812 timer = tv->vec[tv->index];
814 * We are removing _all_ timers from the list, so we don't have to
815 * detach them individually, just clear the list afterwards.
817 while (timer) {
818 struct timer_list *tmp = timer;
819 timer = timer->next;
820 internal_add_timer(tmp);
822 tv->vec[tv->index] = NULL;
823 tv->index = (tv->index + 1) & TVN_MASK;
826 static inline void run_timer_list(void)
828 spin_lock_irq(&timerlist_lock);
829 while ((long)(jiffies - timer_jiffies) >= 0) {
830 struct timer_list *timer;
831 if (!tv1.index) {
832 int n = 1;
833 do {
834 cascade_timers(tvecs[n]);
835 } while (tvecs[n]->index == 1 && ++n < NOOF_TVECS);
837 while ((timer = tv1.vec[tv1.index])) {
838 void (*fn)(unsigned long) = timer->function;
839 unsigned long data = timer->data;
840 detach_timer(timer);
841 timer->next = timer->prev = NULL;
842 spin_unlock_irq(&timerlist_lock);
843 fn(data);
844 spin_lock_irq(&timerlist_lock);
846 ++timer_jiffies;
847 tv1.index = (tv1.index + 1) & TVR_MASK;
849 spin_unlock_irq(&timerlist_lock);
853 static inline void run_old_timers(void)
855 struct timer_struct *tp;
856 unsigned long mask;
858 for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
859 if (mask > timer_active)
860 break;
861 if (!(mask & timer_active))
862 continue;
863 if (time_after(tp->expires, jiffies))
864 continue;
865 timer_active &= ~mask;
866 tp->fn();
867 sti();
871 spinlock_t tqueue_lock;
873 void tqueue_bh(void)
875 run_task_queue(&tq_timer);
878 void immediate_bh(void)
880 run_task_queue(&tq_immediate);
883 unsigned long timer_active = 0;
884 struct timer_struct timer_table[32];
887 * Hmm.. Changed this, as the GNU make sources (load.c) seem to
888 * imply that avenrun[] is the standard name for this kind of thing.
889 * Nothing else seems to be standardized: the fractional size etc
890 * all seem to differ on different machines.
892 unsigned long avenrun[3] = { 0,0,0 };
895 * Nr of active tasks - counted in fixed-point numbers
897 static unsigned long count_active_tasks(void)
899 struct task_struct *p;
900 unsigned long nr = 0;
902 read_lock(&tasklist_lock);
903 for_each_task(p) {
904 if ((p->state == TASK_RUNNING ||
905 p->state == TASK_UNINTERRUPTIBLE ||
906 p->state == TASK_SWAPPING))
907 nr += FIXED_1;
909 read_unlock(&tasklist_lock);
910 return nr;
913 static inline void calc_load(unsigned long ticks)
915 unsigned long active_tasks; /* fixed-point */
916 static int count = LOAD_FREQ;
918 count -= ticks;
919 if (count < 0) {
920 count += LOAD_FREQ;
921 active_tasks = count_active_tasks();
922 CALC_LOAD(avenrun[0], EXP_1, active_tasks);
923 CALC_LOAD(avenrun[1], EXP_5, active_tasks);
924 CALC_LOAD(avenrun[2], EXP_15, active_tasks);
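/*
 * Illustrative figures (assuming the usual FSHIFT of 11, FIXED_1 == 2048):
 * three runnable tasks give active_tasks == 3*2048, and every LOAD_FREQ
 * (5 second) interval CALC_LOAD decays avenrun[] exponentially towards
 * that value; /proc/loadavg reports avenrun[i] scaled back down by 2048.
 */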
929 * this routine handles the overflow of the microsecond field
931 * The tricky bits of code to handle the accurate clock support
932 * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
933 * They were originally developed for SUN and DEC kernels.
934 * All the kudos should go to Dave for this stuff.
937 static void second_overflow(void)
939 long ltemp;
941 /* Bump the maxerror field */
942 time_maxerror += time_tolerance >> SHIFT_USEC;
943 if ( time_maxerror > MAXPHASE )
944 time_maxerror = MAXPHASE;
947 * Leap second processing. If in leap-insert state at
948 * the end of the day, the system clock is set back one
949 * second; if in leap-delete state, the system clock is
950 * set ahead one second. The microtime() routine or
951 * external clock driver will ensure that reported time
952 * is always monotonic. The ugly divides should be
953 * replaced.
955 switch (time_state) {
957 case TIME_OK:
958 if (time_status & STA_INS)
959 time_state = TIME_INS;
960 else if (time_status & STA_DEL)
961 time_state = TIME_DEL;
962 break;
964 case TIME_INS:
965 if (xtime.tv_sec % 86400 == 0) {
966 xtime.tv_sec--;
967 time_state = TIME_OOP;
968 printk("Clock: inserting leap second 23:59:60 UTC\n");
970 break;
972 case TIME_DEL:
973 if ((xtime.tv_sec + 1) % 86400 == 0) {
974 xtime.tv_sec++;
975 time_state = TIME_WAIT;
976 printk("Clock: deleting leap second 23:59:59 UTC\n");
978 break;
980 case TIME_OOP:
981 time_state = TIME_WAIT;
982 break;
984 case TIME_WAIT:
985 if (!(time_status & (STA_INS | STA_DEL)))
986 time_state = TIME_OK;
990 * Compute the phase adjustment for the next second. In
991 * PLL mode, the offset is reduced by a fixed factor
992 * times the time constant. In FLL mode the offset is
993 * used directly. In either mode, the maximum phase
994 * adjustment for each second is clamped so as to spread
995 * the adjustment over not more than the number of
996 * seconds between updates.
998 if (time_offset < 0) {
999 ltemp = -time_offset;
1000 if (!(time_status & STA_FLL))
1001 ltemp >>= SHIFT_KG + time_constant;
1002 if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
1003 ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
1004 time_offset += ltemp;
1005 time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
1006 } else {
1007 ltemp = time_offset;
1008 if (!(time_status & STA_FLL))
1009 ltemp >>= SHIFT_KG + time_constant;
1010 if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
1011 ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
1012 time_offset -= ltemp;
1013 time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
1017 * Compute the frequency estimate and additional phase
1018 * adjustment due to frequency error for the next
1019 * second. When the PPS signal is engaged, gnaw on the
1020 * watchdog counter and update the frequency computed by
1021 * the pll and the PPS signal.
1023 pps_valid++;
1024 if (pps_valid == PPS_VALID) {
1025 pps_jitter = MAXTIME;
1026 pps_stabil = MAXFREQ;
1027 time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
1028 STA_PPSWANDER | STA_PPSERROR);
1030 ltemp = time_freq + pps_freq;
1031 if (ltemp < 0)
1032 time_adj -= -ltemp >>
1033 (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
1034 else
1035 time_adj += ltemp >>
1036 (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
1038 #if HZ == 100
1039 /* compensate for (HZ==100) != 128. Add 25% to get 125; => only 3% error */
1040 if (time_adj < 0)
1041 time_adj -= -time_adj >> 2;
1042 else
1043 time_adj += time_adj >> 2;
1044 #endif
1047 /* in the NTP reference this is called "hardclock()" */
1048 static void update_wall_time_one_tick(void)
1051 * Advance the phase; once it gets to one microsecond,
1052 * advance the tick by that much more.
1054 time_phase += time_adj;
1055 if (time_phase <= -FINEUSEC) {
1056 long ltemp = -time_phase >> SHIFT_SCALE;
1057 time_phase += ltemp << SHIFT_SCALE;
1058 xtime.tv_usec += tick + time_adjust_step - ltemp;
1060 else if (time_phase >= FINEUSEC) {
1061 long ltemp = time_phase >> SHIFT_SCALE;
1062 time_phase -= ltemp << SHIFT_SCALE;
1063 xtime.tv_usec += tick + time_adjust_step + ltemp;
1064 } else
1065 xtime.tv_usec += tick + time_adjust_step;
1067 if (time_adjust) {
1068 /* We are doing an adjtime thing.
1070 * Modify the value of the tick for next time.
1071 * Note that a positive delta means we want the clock
1072 * to run fast. This means that the tick should be bigger
1074 * Limit the amount of the step for *next* tick to be
1075 * in the range -tickadj .. +tickadj
1077 if (time_adjust > tickadj)
1078 time_adjust_step = tickadj;
1079 else if (time_adjust < -tickadj)
1080 time_adjust_step = -tickadj;
1081 else
1082 time_adjust_step = time_adjust;
1084 /* Reduce by this step the amount of time left */
1085 time_adjust -= time_adjust_step;
1087 else
1088 time_adjust_step = 0;
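/*
 * Example (illustrative figures, HZ == 100 so tickadj == 5): an adjtime()
 * of +1000 microseconds is applied as +5 us on each of the next 200
 * ticks, slewing the clock instead of stepping it.
 */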
1092 * Using a loop looks inefficient, but "ticks" is
1093 * usually just one (we shouldn't be losing ticks;
1094 * we're doing it this way mainly for interrupt
1095 * latency reasons, not because we think we'll
1096 * have lots of lost timer ticks).
1098 static void update_wall_time(unsigned long ticks)
1100 do {
1101 ticks--;
1102 update_wall_time_one_tick();
1103 } while (ticks);
1105 if (xtime.tv_usec >= 1000000) {
1106 xtime.tv_usec -= 1000000;
1107 xtime.tv_sec++;
1108 second_overflow();
1112 static inline void do_process_times(struct task_struct *p,
1113 unsigned long user, unsigned long system)
1115 long psecs;
1117 psecs = (p->times.tms_utime += user);
1118 psecs += (p->times.tms_stime += system);
1119 if (psecs / HZ > p->rlim[RLIMIT_CPU].rlim_cur) {
1120 /* Send SIGXCPU every second.. */
1121 if (!(psecs % HZ))
1122 send_sig(SIGXCPU, p, 1);
1123 /* and SIGKILL when we go over max.. */
1124 if (psecs / HZ > p->rlim[RLIMIT_CPU].rlim_max)
1125 send_sig(SIGKILL, p, 1);
1129 static inline void do_it_virt(struct task_struct * p, unsigned long ticks)
1131 unsigned long it_virt = p->it_virt_value;
1133 if (it_virt) {
1134 if (it_virt <= ticks) {
1135 it_virt = ticks + p->it_virt_incr;
1136 send_sig(SIGVTALRM, p, 1);
1138 p->it_virt_value = it_virt - ticks;
1142 static inline void do_it_prof(struct task_struct * p, unsigned long ticks)
1144 unsigned long it_prof = p->it_prof_value;
1146 if (it_prof) {
1147 if (it_prof <= ticks) {
1148 it_prof = ticks + p->it_prof_incr;
1149 send_sig(SIGPROF, p, 1);
1151 p->it_prof_value = it_prof - ticks;
1155 void update_one_process(struct task_struct *p,
1156 unsigned long ticks, unsigned long user, unsigned long system, int cpu)
1158 p->per_cpu_utime[cpu] += user;
1159 p->per_cpu_stime[cpu] += system;
1160 do_process_times(p, user, system);
1161 do_it_virt(p, user);
1162 do_it_prof(p, ticks);
1165 static void update_process_times(unsigned long ticks, unsigned long system)
1168 * SMP does this on a per-CPU basis elsewhere
1170 #ifndef __SMP__
1171 struct task_struct * p = current;
1172 unsigned long user = ticks - system;
1173 if (p->pid) {
1174 p->counter -= ticks;
1175 if (p->counter < 0) {
1176 p->counter = 0;
1177 p->need_resched = 1;
1179 if (p->priority < DEF_PRIORITY)
1180 kstat.cpu_nice += user;
1181 else
1182 kstat.cpu_user += user;
1183 kstat.cpu_system += system;
1185 update_one_process(p, ticks, user, system, 0);
1186 #endif
1189 volatile unsigned long lost_ticks = 0;
1190 static unsigned long lost_ticks_system = 0;
1192 static inline void update_times(void)
1194 unsigned long ticks;
1195 unsigned long flags;
1197 save_flags(flags);
1198 cli();
1200 ticks = lost_ticks;
1201 lost_ticks = 0;
1203 if (ticks) {
1204 unsigned long system;
1205 system = xchg(&lost_ticks_system, 0);
1207 calc_load(ticks);
1208 update_wall_time(ticks);
1209 restore_flags(flags);
1211 update_process_times(ticks, system);
1213 } else
1214 restore_flags(flags);
1217 static void timer_bh(void)
1219 update_times();
1220 run_old_timers();
1221 run_timer_list();
1224 void do_timer(struct pt_regs * regs)
1226 (*(unsigned long *)&jiffies)++;
1227 lost_ticks++;
1228 mark_bh(TIMER_BH);
1229 if (!user_mode(regs))
1230 lost_ticks_system++;
1231 if (tq_timer)
1232 mark_bh(TQUEUE_BH);
1235 #ifndef __alpha__
1238 * For backwards compatibility? This can be done in libc so Alpha
1239 * and all newer ports shouldn't need it.
1241 asmlinkage unsigned int sys_alarm(unsigned int seconds)
1243 struct itimerval it_new, it_old;
1244 unsigned int oldalarm;
1246 it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
1247 it_new.it_value.tv_sec = seconds;
1248 it_new.it_value.tv_usec = 0;
1249 do_setitimer(ITIMER_REAL, &it_new, &it_old);
1250 oldalarm = it_old.it_value.tv_sec;
1251 /* ehhh.. We can't return 0 if we have an alarm pending.. */
1252 /* And we'd better return too much than too little anyway */
1253 if (it_old.it_value.tv_usec)
1254 oldalarm++;
1255 return oldalarm;
1259 * The Alpha uses getxpid, getxuid, and getxgid instead. Maybe this
1260 * should be moved into arch/i386 instead?
1263 asmlinkage int sys_getpid(void)
1265 /* This is SMP safe - current->pid doesn't change */
1266 return current->pid;
1270 * This is not strictly SMP safe: p_opptr could change
1271 * from under us. However, rather than getting any lock
1272 * we can use an optimistic algorithm: get the parent
1273 * pid, and go back and check that the parent is still
1274 * the same. If it has changed (which is extremely unlikely
1275 * indeed), we just try again..
1277 * NOTE! This depends on the fact that even if we _do_
1278 * get an old value of "parent", we can happily dereference
1279 * the pointer: we just can't necessarily trust the result
1280 * until we know that the parent pointer is valid.
1282 * The "mb()" macro is a memory barrier - a synchronizing
1283 * event. It also makes sure that gcc doesn't optimize
1284 * away the necessary memory references.. The barrier doesn't
1285 * have to have all that strong semantics: on x86 we don't
1286 * really require a synchronizing instruction, for example.
1287 * The barrier is more important for code generation than
1288 * for any real memory ordering semantics (even if there is
1289 * a small window for a race, using the old pointer is
1290 * harmless for a while).
1292 asmlinkage int sys_getppid(void)
1294 int pid;
1295 struct task_struct * me = current;
1296 struct task_struct * parent;
1298 parent = me->p_opptr;
1299 for (;;) {
1300 pid = parent->pid;
1301 #if __SMP__
1303 struct task_struct *old = parent;
1304 mb();
1305 parent = me->p_opptr;
1306 if (old != parent)
1307 continue;
1309 #endif
1310 break;
1312 return pid;
1315 asmlinkage int sys_getuid(void)
1317 /* Only we change this so SMP safe */
1318 return current->uid;
1321 asmlinkage int sys_geteuid(void)
1323 /* Only we change this so SMP safe */
1324 return current->euid;
1327 asmlinkage int sys_getgid(void)
1329 /* Only we change this so SMP safe */
1330 return current->gid;
1333 asmlinkage int sys_getegid(void)
1335 /* Only we change this so SMP safe */
1336 return current->egid;
1340 * This has been replaced by sys_setpriority. Maybe it should be
1341 * moved into the arch dependent tree for those ports that require
1342 * it for backward compatibility?
1345 asmlinkage int sys_nice(int increment)
1347 unsigned long newprio;
1348 int increase = 0;
1351 * Setpriority might change our priority at the same moment.
1352 * We don't have to worry. Conceptually one call occurs first
1353 * and we have a single winner.
1356 newprio = increment;
1357 if (increment < 0) {
1358 if (!capable(CAP_SYS_NICE))
1359 return -EPERM;
1360 newprio = -increment;
1361 increase = 1;
1364 if (newprio > 40)
1365 newprio = 40;
1367 * do a "normalization" of the priority (traditionally
1368 * Unix nice values are -20 to 19; Linux doesn't really
1369 * use that kind of thing, but uses the length of the
1370 * timeslice instead, default 150 ms). The rounding is
1371 * why we want to avoid negative values.
1373 newprio = (newprio * DEF_PRIORITY + 10) / 20;
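/*
 * Worked example (assuming DEF_PRIORITY evaluates to 20, i.e. HZ == 100):
 * nice 10 gives newprio = (10*20 + 10)/20 == 10, and nice -20 gives
 * (20*20 + 10)/20 == 20, a full DEF_PRIORITY worth of timeslice change.
 */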
1374 increment = newprio;
1375 if (increase)
1376 increment = -increment;
1378 * Current->priority can change between this point
1379 * and the assignment. We are assigning, not doing adds/subs,
1380 * so that's ok. Conceptually a process might just instantaneously
1381 * read the value we stomp over. I don't think that is an issue
1382 * unless POSIX makes it one. If so we can loop on changes
1383 * to current->priority.
1385 newprio = current->priority - increment;
1386 if ((signed) newprio < 1)
1387 newprio = 1;
1388 if (newprio > DEF_PRIORITY*2)
1389 newprio = DEF_PRIORITY*2;
1390 current->priority = newprio;
1391 return 0;
1394 #endif
1396 static inline struct task_struct *find_process_by_pid(pid_t pid)
1398 struct task_struct *tsk = current;
1400 if (pid)
1401 tsk = find_task_by_pid(pid);
1402 return tsk;
1405 static int setscheduler(pid_t pid, int policy,
1406 struct sched_param *param)
1408 struct sched_param lp;
1409 struct task_struct *p;
1410 int retval;
1412 retval = -EINVAL;
1413 if (!param || pid < 0)
1414 goto out_nounlock;
1416 retval = -EFAULT;
1417 if (copy_from_user(&lp, param, sizeof(struct sched_param)))
1418 goto out_nounlock;
1421 * We play safe to avoid deadlocks.
1423 spin_lock(&scheduler_lock);
1424 spin_lock_irq(&runqueue_lock);
1425 read_lock(&tasklist_lock);
1427 p = find_process_by_pid(pid);
1429 retval = -ESRCH;
1430 if (!p)
1431 goto out_unlock;
1433 if (policy < 0)
1434 policy = p->policy;
1435 else {
1436 retval = -EINVAL;
1437 if (policy != SCHED_FIFO && policy != SCHED_RR &&
1438 policy != SCHED_OTHER)
1439 goto out_unlock;
1443 * Valid priorities for SCHED_FIFO and SCHED_RR are 1..99, valid
1444 * priority for SCHED_OTHER is 0.
1446 retval = -EINVAL;
1447 if (lp.sched_priority < 0 || lp.sched_priority > 99)
1448 goto out_unlock;
1449 if ((policy == SCHED_OTHER) != (lp.sched_priority == 0))
1450 goto out_unlock;
1452 retval = -EPERM;
1453 if ((policy == SCHED_FIFO || policy == SCHED_RR) &&
1454 !capable(CAP_SYS_NICE))
1455 goto out_unlock;
1456 if ((current->euid != p->euid) && (current->euid != p->uid) &&
1457 !capable(CAP_SYS_NICE))
1458 goto out_unlock;
1460 retval = 0;
1461 p->policy = policy;
1462 p->rt_priority = lp.sched_priority;
1463 if (p->next_run)
1464 move_first_runqueue(p);
1466 current->need_resched = 1;
1468 out_unlock:
1469 read_unlock(&tasklist_lock);
1470 spin_unlock_irq(&runqueue_lock);
1471 spin_unlock(&scheduler_lock);
1473 out_nounlock:
1474 return retval;
1477 asmlinkage int sys_sched_setscheduler(pid_t pid, int policy,
1478 struct sched_param *param)
1480 return setscheduler(pid, policy, param);
1483 asmlinkage int sys_sched_setparam(pid_t pid, struct sched_param *param)
1485 return setscheduler(pid, -1, param);
1488 asmlinkage int sys_sched_getscheduler(pid_t pid)
1490 struct task_struct *p;
1491 int retval;
1493 retval = -EINVAL;
1494 if (pid < 0)
1495 goto out_nounlock;
1497 read_lock(&tasklist_lock);
1499 retval = -ESRCH;
1500 p = find_process_by_pid(pid);
1501 if (!p)
1502 goto out_unlock;
1504 retval = p->policy;
1506 out_unlock:
1507 read_unlock(&tasklist_lock);
1509 out_nounlock:
1510 return retval;
1513 asmlinkage int sys_sched_getparam(pid_t pid, struct sched_param *param)
1515 struct task_struct *p;
1516 struct sched_param lp;
1517 int retval;
1519 retval = -EINVAL;
1520 if (!param || pid < 0)
1521 goto out_nounlock;
1523 read_lock(&tasklist_lock);
1524 p = find_process_by_pid(pid);
1525 retval = -ESRCH;
1526 if (!p)
1527 goto out_unlock;
1528 lp.sched_priority = p->rt_priority;
1529 read_unlock(&tasklist_lock);
1532 * This one might sleep; we cannot do it with a spinlock held ...
1534 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
1536 out_nounlock:
1537 return retval;
1539 out_unlock:
1540 read_unlock(&tasklist_lock);
1541 return retval;
1544 asmlinkage int sys_sched_yield(void)
1546 spin_lock(&scheduler_lock);
1547 spin_lock_irq(&runqueue_lock);
1548 if (current->policy == SCHED_OTHER)
1549 current->policy |= SCHED_YIELD;
1550 current->need_resched = 1;
1551 move_last_runqueue(current);
1552 spin_unlock_irq(&runqueue_lock);
1553 spin_unlock(&scheduler_lock);
1554 return 0;
1557 asmlinkage int sys_sched_get_priority_max(int policy)
1559 int ret = -EINVAL;
1561 switch (policy) {
1562 case SCHED_FIFO:
1563 case SCHED_RR:
1564 ret = 99;
1565 break;
1566 case SCHED_OTHER:
1567 ret = 0;
1568 break;
1570 return ret;
1573 asmlinkage int sys_sched_get_priority_min(int policy)
1575 int ret = -EINVAL;
1577 switch (policy) {
1578 case SCHED_FIFO:
1579 case SCHED_RR:
1580 ret = 1;
1581 break;
1582 case SCHED_OTHER:
1583 ret = 0;
1585 return ret;
1588 asmlinkage int sys_sched_rr_get_interval(pid_t pid, struct timespec *interval)
1590 struct timespec t;
1592 t.tv_sec = 0;
1593 t.tv_nsec = 150000;
1594 if (copy_to_user(interval, &t, sizeof(struct timespec)))
1595 return -EFAULT;
1596 return 0;
1599 asmlinkage int sys_nanosleep(struct timespec *rqtp, struct timespec *rmtp)
1601 struct timespec t;
1602 unsigned long expire;
1604 if(copy_from_user(&t, rqtp, sizeof(struct timespec)))
1605 return -EFAULT;
1607 if (t.tv_nsec >= 1000000000L || t.tv_nsec < 0 || t.tv_sec < 0)
1608 return -EINVAL;
1611 if (t.tv_sec == 0 && t.tv_nsec <= 2000000L &&
1612 current->policy != SCHED_OTHER)
1615 * Short delay requests up to 2 ms will be handled with
1616 * high precision by a busy wait for all real-time processes.
1618 * It's important on SMP not to do this holding locks.
1620 udelay((t.tv_nsec + 999) / 1000);
1621 return 0;
1624 expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec);
1626 current->state = TASK_INTERRUPTIBLE;
1627 expire = schedule_timeout(expire);
1629 if (expire) {
1630 if (rmtp) {
1631 jiffies_to_timespec(expire, &t);
1632 if (copy_to_user(rmtp, &t, sizeof(struct timespec)))
1633 return -EFAULT;
1635 return -EINTR;
1637 return 0;
1640 static void show_task(int nr,struct task_struct * p)
1642 unsigned long free = 0;
1643 int state;
1644 static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };
1646 printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
1647 state = p->state ? ffz(~p->state) + 1 : 0;
1648 if (((unsigned) state) < sizeof(stat_nam)/sizeof(char *))
1649 printk(stat_nam[state]);
1650 else
1651 printk(" ");
1652 #if (BITS_PER_LONG == 32)
1653 if (p == current)
1654 printk(" current ");
1655 else
1656 printk(" %08lX ", thread_saved_pc(&p->tss));
1657 #else
1658 if (p == current)
1659 printk(" current task ");
1660 else
1661 printk(" %016lx ", thread_saved_pc(&p->tss));
1662 #endif
1664 unsigned long * n = (unsigned long *) (p+1);
1665 while (!*n)
1666 n++;
1667 free = (unsigned long) n - (unsigned long)(p+1);
1669 printk("%5lu %5d %6d ", free, p->pid, p->p_pptr->pid);
1670 if (p->p_cptr)
1671 printk("%5d ", p->p_cptr->pid);
1672 else
1673 printk(" ");
1674 if (p->p_ysptr)
1675 printk("%7d", p->p_ysptr->pid);
1676 else
1677 printk(" ");
1678 if (p->p_osptr)
1679 printk(" %5d\n", p->p_osptr->pid);
1680 else
1681 printk("\n");
1684 struct signal_queue *q;
1685 char s[sizeof(sigset_t)*2+1], b[sizeof(sigset_t)*2+1];
1687 render_sigset_t(&p->signal, s);
1688 render_sigset_t(&p->blocked, b);
1689 printk(" sig: %d %s %s :", signal_pending(p), s, b);
1690 for (q = p->sigqueue; q ; q = q->next)
1691 printk(" %d", q->info.si_signo);
1692 printk(" X\n");
1696 char * render_sigset_t(sigset_t *set, char *buffer)
1698 int i = _NSIG, x;
1699 do {
1700 i -= 4, x = 0;
1701 if (sigismember(set, i+1)) x |= 1;
1702 if (sigismember(set, i+2)) x |= 2;
1703 if (sigismember(set, i+3)) x |= 4;
1704 if (sigismember(set, i+4)) x |= 8;
1705 *buffer++ = (x < 10 ? '0' : 'a' - 10) + x;
1706 } while (i >= 4);
1707 *buffer = 0;
1708 return buffer;
1711 void show_state(void)
1713 struct task_struct *p;
1715 #if (BITS_PER_LONG == 32)
1716 printk("\n"
1717 " free sibling\n");
1718 printk(" task PC stack pid father child younger older\n");
1719 #else
1720 printk("\n"
1721 " free sibling\n");
1722 printk(" task PC stack pid father child younger older\n");
1723 #endif
1724 read_lock(&tasklist_lock);
1725 for_each_task(p)
1726 show_task((p->tarray_ptr - &task[0]),p);
1727 read_unlock(&tasklist_lock);
1730 void __init sched_init(void)
1733 * We have to do a little magic to get the first
1734 * process right in SMP mode.
1736 int cpu=hard_smp_processor_id();
1737 int nr = NR_TASKS;
1739 init_task.processor=cpu;
1741 /* Init task array free list and pidhash table. */
1742 while(--nr > 0)
1743 add_free_taskslot(&task[nr]);
1745 for(nr = 0; nr < PIDHASH_SZ; nr++)
1746 pidhash[nr] = NULL;
1748 init_bh(TIMER_BH, timer_bh);
1749 init_bh(TQUEUE_BH, tqueue_bh);
1750 init_bh(IMMEDIATE_BH, immediate_bh);