kernel/sched.c (davej-history.git: Import 2.1.37pre7)
1 /*
2 * linux/kernel/sched.c
4 * Copyright (C) 1991, 1992 Linus Torvalds
6 * 1996-04-21 Modified by Ulrich Windl to make NTP work
7 * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
8 * make semaphores SMP safe
9 * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
13 * 'sched.c' is the main kernel file. It contains scheduling primitives
14 * (sleep_on, wakeup, schedule etc) as well as a number of simple system
15 * call functions (type getpid(), which just extracts a field from
16 * current-task)
19 #include <linux/signal.h>
20 #include <linux/sched.h>
21 #include <linux/timer.h>
22 #include <linux/kernel.h>
23 #include <linux/kernel_stat.h>
24 #include <linux/fdreg.h>
25 #include <linux/errno.h>
26 #include <linux/time.h>
27 #include <linux/ptrace.h>
28 #include <linux/delay.h>
29 #include <linux/interrupt.h>
30 #include <linux/tqueue.h>
31 #include <linux/resource.h>
32 #include <linux/mm.h>
33 #include <linux/smp.h>
34 #include <linux/smp_lock.h>
35 #include <linux/init.h>
37 #include <asm/system.h>
38 #include <asm/io.h>
39 #include <asm/uaccess.h>
40 #include <asm/pgtable.h>
41 #include <asm/mmu_context.h>
42 #include <asm/spinlock.h>
44 #include <linux/timex.h>
47 * kernel variables
50 int securelevel = 0; /* system security level */
52 long tick = (1000000 + HZ/2) / HZ; /* timer interrupt period */
53 volatile struct timeval xtime __attribute__ ((aligned (8))); /* The current time */
54 int tickadj = 500/HZ; /* microsecs */
56 DECLARE_TASK_QUEUE(tq_timer);
57 DECLARE_TASK_QUEUE(tq_immediate);
58 DECLARE_TASK_QUEUE(tq_scheduler);
61 * phase-lock loop variables
63 /* TIME_ERROR prevents overwriting the CMOS clock */
64 int time_state = TIME_ERROR; /* clock synchronization status */
65 int time_status = STA_UNSYNC; /* clock status bits */
66 long time_offset = 0; /* time adjustment (us) */
67 long time_constant = 2; /* pll time constant */
68 long time_tolerance = MAXFREQ; /* frequency tolerance (ppm) */
69 long time_precision = 1; /* clock precision (us) */
70 long time_maxerror = MAXPHASE; /* maximum error (us) */
71 long time_esterror = MAXPHASE; /* estimated error (us) */
72 long time_phase = 0; /* phase offset (scaled us) */
73 long time_freq = ((1000000 + HZ/2) % HZ - HZ/2) << SHIFT_USEC; /* frequency offset (scaled ppm) */
74 long time_adj = 0; /* tick adjust (scaled 1 / HZ) */
75 long time_reftime = 0; /* time at last adjustment (s) */
77 long time_adjust = 0;
78 long time_adjust_step = 0;
80 int need_resched = 0;
81 unsigned long event = 0;
83 extern int _setitimer(int, struct itimerval *, struct itimerval *);
84 unsigned int * prof_buffer = NULL;
85 unsigned long prof_len = 0;
86 unsigned long prof_shift = 0;
88 #define _S(nr) (1<<((nr)-1))
90 extern void mem_use(void);
92 unsigned long volatile jiffies=0;
95 * Init task must be ok at boot for the ix86 as we will check its signals
96 * via the SMP irq return path.
99 struct task_struct *last_task_used_math = NULL;
101 struct task_struct * task[NR_TASKS] = {&init_task, };
103 struct kernel_stat kstat = { 0 };
105 static inline void add_to_runqueue(struct task_struct * p)
107 if (p->counter > current->counter + 3)
108 need_resched = 1;
109 nr_running++;
110 (p->prev_run = init_task.prev_run)->next_run = p;
111 p->next_run = &init_task;
112 init_task.prev_run = p;
115 static inline void del_from_runqueue(struct task_struct * p)
117 struct task_struct *next = p->next_run;
118 struct task_struct *prev = p->prev_run;
120 nr_running--;
121 next->prev_run = prev;
122 prev->next_run = next;
123 p->next_run = NULL;
124 p->prev_run = NULL;
127 static inline void move_last_runqueue(struct task_struct * p)
129 struct task_struct *next = p->next_run;
130 struct task_struct *prev = p->prev_run;
132 /* remove from list */
133 next->prev_run = prev;
134 prev->next_run = next;
135 /* add back to list */
136 p->next_run = &init_task;
137 prev = init_task.prev_run;
138 init_task.prev_run = p;
139 p->prev_run = prev;
140 prev->next_run = p;
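/*
 * Illustration of what the three helpers above maintain: the run queue is
 * a circular, doubly linked list threaded through next_run/prev_run with
 * init_task as the anchor.  With two (hypothetical) runnable tasks A and B:
 *
 *   init_task -> A -> B -> init_task        (next_run direction)
 *
 * add_to_runqueue() splices the new task in next to init_task,
 * del_from_runqueue() only relinks the two neighbours, and
 * move_last_runqueue() is the two combined, so the anchor itself is never
 * unlinked.
 */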
144 * The tasklist_lock protects the linked list of processes.
146 * The scheduler lock protects against multiple entry
147 * into the scheduling code, and doesn't need to worry
148 * about interrupts (because interrupts cannot call the
149 * scheduler).
151 * The run-queue lock locks the parts that actually access
152 * and change the run-queues, and have to be interrupt-safe.
154 rwlock_t tasklist_lock = RW_LOCK_UNLOCKED;
155 spinlock_t scheduler_lock = SPIN_LOCK_UNLOCKED;
156 static spinlock_t runqueue_lock = SPIN_LOCK_UNLOCKED;
159 * Wake up a process. Put it on the run-queue if it's not
160 * already there. The "current" process is always on the
161 * run-queue (except when the actual re-schedule is in
162 * progress), and as such you're allowed to do the simpler
163 * "current->state = TASK_RUNNING" to mark yourself runnable
164 * without the overhead of this.
166 inline void wake_up_process(struct task_struct * p)
168 unsigned long flags;
170 spin_lock_irqsave(&runqueue_lock, flags);
171 p->state = TASK_RUNNING;
172 if (!p->next_run)
173 add_to_runqueue(p);
174 spin_unlock_irqrestore(&runqueue_lock, flags);
177 static void process_timeout(unsigned long __data)
179 struct task_struct * p = (struct task_struct *) __data;
181 p->timeout = 0;
182 wake_up_process(p);
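/*
 * Illustrative sketch (hypothetical driver code, not taken from this file):
 * the idiom that process_timeout() exists to support.  A task stores a
 * deadline in current->timeout before sleeping; schedule() below arms a
 * timer whose handler is process_timeout(), which clears the field and
 * wakes the task if the deadline passes first.
 */
static struct wait_queue *my_wq = NULL;		/* hypothetical wait queue */

static void my_wait_for_event(void)
{
	current->timeout = jiffies + HZ;	/* give up after roughly one second */
	interruptible_sleep_on(&my_wq);		/* TASK_INTERRUPTIBLE, so schedule() honours the timeout */
	if (!current->timeout)
		printk("my_wait_for_event: timed out\n");
	else
		current->timeout = 0;		/* woken early: discard the stale deadline */
}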
186 * This is the function that decides how desirable a process is..
187 * You can weigh different processes against each other depending
188 * on what CPU they've run on lately etc to try to handle cache
189 * and TLB miss penalties.
191 * Return values:
192 * -1000: never select this
193 * 0: out of time, recalculate counters (but it might still be
194 * selected)
195 * +ve: "goodness" value (the larger, the better)
196 * +1000: realtime process, select this.
198 static inline int goodness(struct task_struct * p, struct task_struct * prev, int this_cpu)
200 int weight;
203 * Realtime process, select the first one on the
204 * runqueue (taking priorities within processes
205 * into account).
207 if (p->policy != SCHED_OTHER)
208 return 1000 + p->rt_priority;
211 * Give the process a first-approximation goodness value
212 * according to the number of clock-ticks it has left.
214 * Don't do any other calculations if the time slice is
215 * over..
217 weight = p->counter;
218 if (weight) {
220 #ifdef __SMP__
221 /* Give a largish advantage to the same processor... */
222 /* (this is equivalent to penalizing other processors) */
223 if (p->processor == this_cpu)
224 weight += PROC_CHANGE_PENALTY;
225 #endif
227 /* .. and a slight advantage to the current process */
228 if (p == prev)
229 weight += 1;
232 return weight;
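/*
 * Worked example of the weights computed above: a SCHED_OTHER task with 5
 * ticks left on its slice scores 5, plus PROC_CHANGE_PENALTY if it last
 * ran on this CPU (SMP only), plus 1 if it is the task that was just
 * running.  A task whose slice is used up scores 0, and any real-time task
 * scores 1000 + rt_priority, so it always beats every SCHED_OTHER task
 * regardless of counters.
 */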
236 * Event timer code
238 #define TVN_BITS 6
239 #define TVR_BITS 8
240 #define TVN_SIZE (1 << TVN_BITS)
241 #define TVR_SIZE (1 << TVR_BITS)
242 #define TVN_MASK (TVN_SIZE - 1)
243 #define TVR_MASK (TVR_SIZE - 1)
245 struct timer_vec {
246 int index;
247 struct timer_list *vec[TVN_SIZE];
250 struct timer_vec_root {
251 int index;
252 struct timer_list *vec[TVR_SIZE];
255 static struct timer_vec tv5 = { 0 };
256 static struct timer_vec tv4 = { 0 };
257 static struct timer_vec tv3 = { 0 };
258 static struct timer_vec tv2 = { 0 };
259 static struct timer_vec_root tv1 = { 0 };
261 static struct timer_vec * const tvecs[] = {
262 (struct timer_vec *)&tv1, &tv2, &tv3, &tv4, &tv5
265 #define NOOF_TVECS (sizeof(tvecs) / sizeof(tvecs[0]))
267 static unsigned long timer_jiffies = 0;
269 static inline void insert_timer(struct timer_list *timer,
270 struct timer_list **vec, int idx)
272 if ((timer->next = vec[idx]))
273 vec[idx]->prev = timer;
274 vec[idx] = timer;
275 timer->prev = (struct timer_list *)&vec[idx];
278 static inline void internal_add_timer(struct timer_list *timer)
281 * must be cli-ed when calling this
283 unsigned long expires = timer->expires;
284 unsigned long idx = expires - timer_jiffies;
286 if (idx < TVR_SIZE) {
287 int i = expires & TVR_MASK;
288 insert_timer(timer, tv1.vec, i);
289 } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
290 int i = (expires >> TVR_BITS) & TVN_MASK;
291 insert_timer(timer, tv2.vec, i);
292 } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
293 int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
294 insert_timer(timer, tv3.vec, i);
295 } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
296 int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
297 insert_timer(timer, tv4.vec, i);
298 } else if (expires < timer_jiffies) {
299 /* can happen if you add a timer with expires == jiffies,
300 * or you set a timer to go off in the past
302 insert_timer(timer, tv1.vec, tv1.index);
303 } else if (idx <= 0xffffffffUL) {
304 int i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
305 insert_timer(timer, tv5.vec, i);
306 } else {
307 /* Can only get here on architectures with 64-bit jiffies */
308 timer->next = timer->prev = timer;
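/*
 * Worked example of the bucketing above, using the TVR_BITS=8/TVN_BITS=6
 * values defined earlier (TVR_SIZE=256, TVN_SIZE=64).  With
 * idx = expires - timer_jiffies:
 *
 *   idx =     100  ->  tv1, slot  expires        & 255    (idx < 2^8)
 *   idx =   1,000  ->  tv2, slot (expires >>  8) &  63    (idx < 2^14)
 *   idx = 100,000  ->  tv3, slot (expires >> 14) &  63    (idx < 2^20)
 *
 * cascade_timers() further down redistributes one slot of a coarser vector
 * into the finer ones each time tv1 wraps, so a timer is always in tv1 by
 * the time it is due to run.
 */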
312 static spinlock_t timerlist_lock = SPIN_LOCK_UNLOCKED;
314 void add_timer(struct timer_list *timer)
316 unsigned long flags;
318 spin_lock_irqsave(&timerlist_lock, flags);
319 internal_add_timer(timer);
320 spin_unlock_irqrestore(&timerlist_lock, flags);
323 static inline int detach_timer(struct timer_list *timer)
325 int ret = 0;
326 struct timer_list *next, *prev;
327 next = timer->next;
328 prev = timer->prev;
329 if (next) {
330 next->prev = prev;
332 if (prev) {
333 ret = 1;
334 prev->next = next;
336 return ret;
340 int del_timer(struct timer_list * timer)
342 int ret;
343 unsigned long flags;
345 spin_lock_irqsave(&timerlist_lock, flags);
346 ret = detach_timer(timer);
347 timer->next = timer->prev = 0;
348 spin_unlock_irqrestore(&timerlist_lock, flags);
349 return ret;
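/*
 * Example (illustrative sketch, hypothetical names): typical use of the
 * add_timer()/del_timer() interface above, mirroring what schedule() itself
 * does further down with process_timeout().
 */
static void my_timer_fn(unsigned long data)
{
	printk("my_timer_fn: fired, data=%lu\n", data);
}

static struct timer_list my_timer;

static void my_start_timer(void)
{
	init_timer(&my_timer);			/* from <linux/timer.h> */
	my_timer.expires = jiffies + 5*HZ;	/* about five seconds from now */
	my_timer.data = 42;
	my_timer.function = my_timer_fn;
	add_timer(&my_timer);
	/* del_timer(&my_timer) cancels it; the return value says whether
	   it was still pending */
}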
352 #ifdef __SMP__
354 #define idle_task (task[cpu_number_map[this_cpu]])
355 #define can_schedule(p) (!(p)->has_cpu)
357 #else
359 #define idle_task (&init_task)
360 #define can_schedule(p) (1)
362 #endif
365 * 'schedule()' is the scheduler function. It's a very simple and nice
366 * scheduler: it's not perfect, but certainly works for most things.
368 * The goto is "interesting".
370 * NOTE!! Task 0 is the 'idle' task, which gets called when no other
371 * tasks can run. It can not be killed, and it cannot sleep. The 'state'
372 * information in task[0] is never used.
374 asmlinkage void schedule(void)
376 int lock_depth;
377 struct task_struct * prev, * next;
378 unsigned long timeout;
379 int this_cpu;
381 need_resched = 0;
382 prev = current;
383 this_cpu = smp_processor_id();
384 if (local_irq_count[this_cpu])
385 goto scheduling_in_interrupt;
386 release_kernel_lock(prev, this_cpu, lock_depth);
387 if (bh_active & bh_mask)
388 do_bottom_half();
390 spin_lock(&scheduler_lock);
391 spin_lock_irq(&runqueue_lock);
393 /* move an exhausted RR process to be last.. */
394 if (!prev->counter && prev->policy == SCHED_RR) {
395 prev->counter = prev->priority;
396 move_last_runqueue(prev);
398 timeout = 0;
399 switch (prev->state) {
400 case TASK_INTERRUPTIBLE:
401 if (prev->signal & ~prev->blocked)
402 goto makerunnable;
403 timeout = prev->timeout;
404 if (timeout && (timeout <= jiffies)) {
405 prev->timeout = 0;
406 timeout = 0;
407 makerunnable:
408 prev->state = TASK_RUNNING;
409 break;
411 default:
412 del_from_runqueue(prev);
413 case TASK_RUNNING:
416 struct task_struct * p = init_task.next_run;
418 * This is subtle.
419 * Note how we can enable interrupts here, even
420 * though interrupts can add processes to the run-
421 * queue. This is because any new processes will
422 * be added to the front of the queue, so "p" above
423 * is a safe starting point.
424 * run-queue deletion and re-ordering is protected by
425 * the scheduler lock
427 spin_unlock_irq(&runqueue_lock);
428 #ifdef __SMP__
429 prev->has_cpu = 0;
430 #endif
433 * Note! there may appear new tasks on the run-queue during this, as
434 * interrupts are enabled. However, they will be put on front of the
435 * list, so our list starting at "p" is essentially fixed.
437 /* this is the scheduler proper: */
439 int c = -1000;
440 next = idle_task;
441 while (p != &init_task) {
442 if (can_schedule(p)) {
443 int weight = goodness(p, prev, this_cpu);
444 if (weight > c)
445 c = weight, next = p;
447 p = p->next_run;
450 /* Do we need to re-calculate counters? */
451 if (!c) {
452 struct task_struct *p;
453 read_lock(&tasklist_lock);
454 for_each_task(p)
455 p->counter = (p->counter >> 1) + p->priority;
456 read_unlock(&tasklist_lock);
461 #ifdef __SMP__
462 next->has_cpu = 1;
463 next->processor = this_cpu;
464 #endif
466 if (prev != next) {
467 struct timer_list timer;
469 kstat.context_swtch++;
470 if (timeout) {
471 init_timer(&timer);
472 timer.expires = timeout;
473 timer.data = (unsigned long) prev;
474 timer.function = process_timeout;
475 add_timer(&timer);
477 get_mmu_context(next);
478 switch_to(prev,next);
480 if (timeout)
481 del_timer(&timer);
483 spin_unlock(&scheduler_lock);
485 reacquire_kernel_lock(prev, smp_processor_id(), lock_depth);
486 return;
488 scheduling_in_interrupt:
489 printk("Scheduling in interrupt\n");
490 *(int *)0 = 0;
493 #ifndef __alpha__
496 * For backwards compatibility? This can be done in libc so Alpha
497 * and all newer ports shouldn't need it.
499 asmlinkage int sys_pause(void)
501 current->state = TASK_INTERRUPTIBLE;
502 schedule();
503 return -ERESTARTNOHAND;
506 #endif
508 rwlock_t waitqueue_lock = RW_LOCK_UNLOCKED;
511 * wake_up doesn't wake up stopped processes - they have to be awakened
512 * with signals or similar.
514 * Note that we only need a read lock for the wait queue (and thus do not
515 * have to protect against interrupts), as the actual removal from the
516 * queue is handled by the process itself.
518 void wake_up(struct wait_queue **q)
520 struct wait_queue *next;
522 read_lock(&waitqueue_lock);
523 if (q && (next = *q)) {
524 struct wait_queue *head;
526 head = WAIT_QUEUE_HEAD(q);
527 while (next != head) {
528 struct task_struct *p = next->task;
529 next = next->next;
530 if ((p->state == TASK_UNINTERRUPTIBLE) ||
531 (p->state == TASK_INTERRUPTIBLE))
532 wake_up_process(p);
535 read_unlock(&waitqueue_lock);
538 void wake_up_interruptible(struct wait_queue **q)
540 struct wait_queue *next;
542 read_lock(&waitqueue_lock);
543 if (q && (next = *q)) {
544 struct wait_queue *head;
546 head = WAIT_QUEUE_HEAD(q);
547 while (next != head) {
548 struct task_struct *p = next->task;
549 next = next->next;
550 if (p->state == TASK_INTERRUPTIBLE)
551 wake_up_process(p);
554 read_unlock(&waitqueue_lock);
558 * Semaphores are implemented using a two-way counter:
559 * The "count" variable is decremented for each process
560 * that tries to sleep, while the "waking" variable is
561 * incremented when the "up()" code goes to wake up waiting
562 * processes.
564 * Notably, the inline "up()" and "down()" functions can
565 * efficiently test if they need to do any extra work (up
566 * needs to do something only if count was negative before
567 * the increment operation).
569 * waking_non_zero() (from asm/semaphore.h) must execute
570 * atomically.
572 * When __up() is called, the count was negative before
573 * incrementing it, and we need to wake up somebody.
575 * This routine adds one to the count of processes that need to
576 * wake up and exit. ALL waiting processes actually wake up but
577 * only the one that gets to the "waking" field first will gate
578 * through and acquire the semaphore. The others will go back
579 * to sleep.
581 * Note that these functions are only called when there is
582 * contention on the lock, and as such all this is the
583 * "non-critical" part of the whole semaphore business. The
584 * critical part is the inline stuff in <asm/semaphore.h>
585 * where we want to avoid any extra jumps and calls.
587 void __up(struct semaphore *sem)
589 wake_one_more(sem);
590 wake_up(&sem->wait);
594 * Perform the "down" function. Return zero for semaphore acquired,
595 * return negative for signalled out of the function.
597 * If called from __down, the return is ignored and the wait loop is
598 * not interruptible. This means that a task waiting on a semaphore
599 * using "down()" cannot be killed until someone does an "up()" on
600 * the semaphore.
602 * If called from __down_interruptible, the return value gets checked
603 * upon return. If the return value is negative then the task continues
604 * with the negative value in the return register (it can be tested by
605 * the caller).
607 * Either form may be used in conjunction with "up()".
610 static inline int __do_down(struct semaphore * sem, int task_state)
612 struct task_struct *tsk = current;
613 struct wait_queue wait = { tsk, NULL };
614 int ret = 0;
616 tsk->state = task_state;
617 add_wait_queue(&sem->wait, &wait);
620 * Ok, we're set up. sem->count is known to be less than zero
621 * so we must wait.
623 * We can let go the lock for purposes of waiting.
624 * We re-acquire it after awaking so as to protect
625 * all semaphore operations.
627 * If "up()" is called before we call waking_non_zero() then
628 * we will catch it right away. If it is called later then
629 * we will have to go through a wakeup cycle to catch it.
631 * Multiple waiters contend for the semaphore lock to see
632 * who gets to gate through and who has to wait some more.
634 for (;;) {
635 if (waking_non_zero(sem)) /* are we waking up? */
636 break; /* yes, exit loop */
638 if ( task_state == TASK_INTERRUPTIBLE
639 && (tsk->signal & ~tsk->blocked) /* signalled */
641 ret = -EINTR; /* interrupted */
642 atomic_inc(&sem->count); /* give up on down operation */
643 break;
646 schedule();
647 tsk->state = task_state;
650 tsk->state = TASK_RUNNING;
651 remove_wait_queue(&sem->wait, &wait);
652 return ret;
655 void __down(struct semaphore * sem)
657 __do_down(sem,TASK_UNINTERRUPTIBLE);
660 int __down_interruptible(struct semaphore * sem)
662 return __do_down(sem,TASK_INTERRUPTIBLE);
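/*
 * Example (illustrative sketch, hypothetical names): how a driver uses the
 * semaphore slow paths implemented above.  The MUTEX initializer and the
 * inline down()/down_interruptible()/up() wrappers come from
 * <asm/semaphore.h>; only the contended case ends up in __do_down() and
 * __up().
 */
static struct semaphore my_sem = MUTEX;		/* binary semaphore, count starts at 1 */

static int my_exclusive_op(void)
{
	if (down_interruptible(&my_sem))
		return -EINTR;			/* a signal interrupted the wait */
	/* ... touch the shared resource ... */
	up(&my_sem);
	return 0;
}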
666 static inline void __sleep_on(struct wait_queue **p, int state)
668 unsigned long flags;
669 struct wait_queue wait = { current, NULL };
671 if (!p)
672 return;
673 current->state = state;
674 write_lock_irqsave(&waitqueue_lock, flags);
675 __add_wait_queue(p, &wait);
676 write_unlock(&waitqueue_lock);
677 schedule();
678 write_lock_irq(&waitqueue_lock);
679 __remove_wait_queue(p, &wait);
680 write_unlock_irqrestore(&waitqueue_lock, flags);
683 void interruptible_sleep_on(struct wait_queue **p)
685 __sleep_on(p,TASK_INTERRUPTIBLE);
688 void sleep_on(struct wait_queue **p)
690 __sleep_on(p,TASK_UNINTERRUPTIBLE);
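/*
 * Example (illustrative sketch, hypothetical names): the classic
 * producer/consumer pairing built on the primitives above.  The loop on the
 * consumer side matters because wake_up_interruptible() wakes every
 * interruptible sleeper on the queue, not just the one the condition was
 * meant for.
 */
static struct wait_queue *my_queue = NULL;
static volatile int my_condition = 0;

static void my_consumer(void)
{
	while (!my_condition)
		interruptible_sleep_on(&my_queue);
}

static void my_producer(void)			/* e.g. from an interrupt handler */
{
	my_condition = 1;
	wake_up_interruptible(&my_queue);
}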
693 static inline void cascade_timers(struct timer_vec *tv)
695 /* cascade all the timers from tv up one level */
696 struct timer_list *timer;
697 timer = tv->vec[tv->index];
699 * We are removing _all_ timers from the list, so we don't have to
700 * detach them individually, just clear the list afterwards.
702 while (timer) {
703 struct timer_list *tmp = timer;
704 timer = timer->next;
705 internal_add_timer(tmp);
707 tv->vec[tv->index] = NULL;
708 tv->index = (tv->index + 1) & TVN_MASK;
711 static inline void run_timer_list(void)
713 spin_lock_irq(&timerlist_lock);
714 while ((long)(jiffies - timer_jiffies) >= 0) {
715 struct timer_list *timer;
716 if (!tv1.index) {
717 int n = 1;
718 do {
719 cascade_timers(tvecs[n]);
720 } while (tvecs[n]->index == 1 && ++n < NOOF_TVECS);
722 while ((timer = tv1.vec[tv1.index])) {
723 void (*fn)(unsigned long) = timer->function;
724 unsigned long data = timer->data;
725 detach_timer(timer);
726 timer->next = timer->prev = NULL;
727 spin_unlock_irq(&timerlist_lock);
728 fn(data);
729 spin_lock_irq(&timerlist_lock);
731 ++timer_jiffies;
732 tv1.index = (tv1.index + 1) & TVR_MASK;
734 spin_unlock_irq(&timerlist_lock);
738 static inline void run_old_timers(void)
740 struct timer_struct *tp;
741 unsigned long mask;
743 for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
744 if (mask > timer_active)
745 break;
746 if (!(mask & timer_active))
747 continue;
748 if (tp->expires > jiffies)
749 continue;
750 timer_active &= ~mask;
751 tp->fn();
752 sti();
756 spinlock_t tqueue_lock;
758 void tqueue_bh(void)
760 run_task_queue(&tq_timer);
763 void immediate_bh(void)
765 run_task_queue(&tq_immediate);
768 unsigned long timer_active = 0;
769 struct timer_struct timer_table[32];
772 * Hmm.. Changed this, as the GNU make sources (load.c) seem to
773 * imply that avenrun[] is the standard name for this kind of thing.
774 * Nothing else seems to be standardized: the fractional size etc
775 * all seem to differ on different machines.
777 unsigned long avenrun[3] = { 0,0,0 };
780 * Nr of active tasks - counted in fixed-point numbers
782 static unsigned long count_active_tasks(void)
784 struct task_struct *p;
785 unsigned long nr = 0;
787 read_lock(&tasklist_lock);
788 for_each_task(p) {
789 if (p->pid &&
790 (p->state == TASK_RUNNING ||
791 p->state == TASK_UNINTERRUPTIBLE ||
792 p->state == TASK_SWAPPING))
793 nr += FIXED_1;
795 read_unlock(&tasklist_lock);
796 return nr;
799 static inline void calc_load(unsigned long ticks)
801 unsigned long active_tasks; /* fixed-point */
802 static int count = LOAD_FREQ;
804 count -= ticks;
805 if (count < 0) {
806 count += LOAD_FREQ;
807 active_tasks = count_active_tasks();
808 CALC_LOAD(avenrun[0], EXP_1, active_tasks);
809 CALC_LOAD(avenrun[1], EXP_5, active_tasks);
810 CALC_LOAD(avenrun[2], EXP_15, active_tasks);
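/*
 * Worked example of the averaging above.  CALC_LOAD (from <linux/sched.h>)
 * is an exponentially weighted moving average in FIXED_1 = 2048 fixed
 * point; with EXP_1 = 1884, two runnable tasks (active_tasks = 2 * FIXED_1
 * = 4096) and a previous avenrun[0] of 0:
 *
 *   avenrun[0] = (0 * 1884 + 4096 * (2048 - 1884)) >> 11 = 328
 *
 * i.e. about 0.16 load; recomputed every LOAD_FREQ (5 seconds) it converges
 * toward 2.0 while both tasks stay runnable.  (Constants quoted from the
 * 2.1-era <linux/sched.h>.)
 */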
815 * this routine handles the overflow of the microsecond field
817 * The tricky bits of code to handle the accurate clock support
818 * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
819 * They were originally developed for SUN and DEC kernels.
820 * All the kudos should go to Dave for this stuff.
823 static void second_overflow(void)
825 long ltemp;
827 /* Bump the maxerror field */
828 time_maxerror += time_tolerance >> SHIFT_USEC;
829 if ( time_maxerror > MAXPHASE )
830 time_maxerror = MAXPHASE;
833 * Leap second processing. If in leap-insert state at
834 * the end of the day, the system clock is set back one
835 * second; if in leap-delete state, the system clock is
836 * set ahead one second. The microtime() routine or
837 * external clock driver will ensure that reported time
838 * is always monotonic. The ugly divides should be
839 * replaced.
841 switch (time_state) {
843 case TIME_OK:
844 if (time_status & STA_INS)
845 time_state = TIME_INS;
846 else if (time_status & STA_DEL)
847 time_state = TIME_DEL;
848 break;
850 case TIME_INS:
851 if (xtime.tv_sec % 86400 == 0) {
852 xtime.tv_sec--;
853 time_state = TIME_OOP;
854 printk("Clock: inserting leap second 23:59:60 UTC\n");
856 break;
858 case TIME_DEL:
859 if ((xtime.tv_sec + 1) % 86400 == 0) {
860 xtime.tv_sec++;
861 time_state = TIME_WAIT;
862 printk("Clock: deleting leap second 23:59:59 UTC\n");
864 break;
866 case TIME_OOP:
867 time_state = TIME_WAIT;
868 break;
870 case TIME_WAIT:
871 if (!(time_status & (STA_INS | STA_DEL)))
872 time_state = TIME_OK;
876 * Compute the phase adjustment for the next second. In
877 * PLL mode, the offset is reduced by a fixed factor
878 * times the time constant. In FLL mode the offset is
879 * used directly. In either mode, the maximum phase
880 * adjustment for each second is clamped so as to spread
881 * the adjustment over not more than the number of
882 * seconds between updates.
884 if (time_offset < 0) {
885 ltemp = -time_offset;
886 if (!(time_status & STA_FLL))
887 ltemp >>= SHIFT_KG + time_constant;
888 if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
889 ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
890 time_offset += ltemp;
891 time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
892 } else {
893 ltemp = time_offset;
894 if (!(time_status & STA_FLL))
895 ltemp >>= SHIFT_KG + time_constant;
896 if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
897 ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
898 time_offset -= ltemp;
899 time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
903 * Compute the frequency estimate and additional phase
904 * adjustment due to frequency error for the next
905 * second. When the PPS signal is engaged, gnaw on the
906 * watchdog counter and update the frequency computed by
907 * the pll and the PPS signal.
909 pps_valid++;
910 if (pps_valid == PPS_VALID) {
911 pps_jitter = MAXTIME;
912 pps_stabil = MAXFREQ;
913 time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
914 STA_PPSWANDER | STA_PPSERROR);
916 ltemp = time_freq + pps_freq;
917 if (ltemp < 0)
918 time_adj -= -ltemp >>
919 (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
920 else
921 time_adj += ltemp >>
922 (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
924 #if HZ == 100
925 /* compensate for (HZ==100) != 128. Add 25% to get 125; => only 3% error */
926 if (time_adj < 0)
927 time_adj -= -time_adj >> 2;
928 else
929 time_adj += time_adj >> 2;
930 #endif
933 /* in the NTP reference this is called "hardclock()" */
934 static void update_wall_time_one_tick(void)
937 * Advance the phase; once it accumulates to one microsecond,
938 * advance the tick more.
940 time_phase += time_adj;
941 if (time_phase <= -FINEUSEC) {
942 long ltemp = -time_phase >> SHIFT_SCALE;
943 time_phase += ltemp << SHIFT_SCALE;
944 xtime.tv_usec += tick + time_adjust_step - ltemp;
946 else if (time_phase >= FINEUSEC) {
947 long ltemp = time_phase >> SHIFT_SCALE;
948 time_phase -= ltemp << SHIFT_SCALE;
949 xtime.tv_usec += tick + time_adjust_step + ltemp;
950 } else
951 xtime.tv_usec += tick + time_adjust_step;
953 if (time_adjust) {
954 /* We are doing an adjtime thing.
956 * Modify the value of the tick for next time.
957 * Note that a positive delta means we want the clock
958 * to run fast. This means that the tick should be bigger
960 * Limit the amount of the step for *next* tick to be
961 * in the range -tickadj .. +tickadj
963 if (time_adjust > tickadj)
964 time_adjust_step = tickadj;
965 else if (time_adjust < -tickadj)
966 time_adjust_step = -tickadj;
967 else
968 time_adjust_step = time_adjust;
970 /* Reduce by this step the amount of time left */
971 time_adjust -= time_adjust_step;
973 else
974 time_adjust_step = 0;
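/*
 * Worked example of the adjtime stepping above, assuming HZ = 100 so that
 * tickadj = 500/HZ = 5 microseconds (see the definition near the top of
 * this file): an adjtime() request of time_adjust = +1000 us is applied as
 * time_adjust_step = +5 us on each of the next 200 ticks, i.e. the wall
 * clock is slewed by 1 ms spread over about two seconds rather than being
 * stepped all at once.
 */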
978 * Using a loop looks inefficient, but "ticks" is
979 * usually just one (we shouldn't be losing ticks,
980 * we're doing it this way mainly for interrupt
981 * latency reasons, not because we think we'll
982 * have lots of lost timer ticks)
984 static void update_wall_time(unsigned long ticks)
986 do {
987 ticks--;
988 update_wall_time_one_tick();
989 } while (ticks);
991 if (xtime.tv_usec >= 1000000) {
992 xtime.tv_usec -= 1000000;
993 xtime.tv_sec++;
994 second_overflow();
998 static inline void do_process_times(struct task_struct *p,
999 unsigned long user, unsigned long system)
1001 long psecs;
1003 p->utime += user;
1004 p->stime += system;
1006 psecs = (p->stime + p->utime) / HZ;
1007 if (psecs > p->rlim[RLIMIT_CPU].rlim_cur) {
1008 /* Send SIGXCPU every second.. */
1009 if (psecs * HZ == p->stime + p->utime)
1010 send_sig(SIGXCPU, p, 1);
1011 /* and SIGKILL when we go over max.. */
1012 if (psecs > p->rlim[RLIMIT_CPU].rlim_max)
1013 send_sig(SIGKILL, p, 1);
1017 static inline void do_it_virt(struct task_struct * p, unsigned long ticks)
1019 unsigned long it_virt = p->it_virt_value;
1021 if (it_virt) {
1022 if (it_virt <= ticks) {
1023 it_virt = ticks + p->it_virt_incr;
1024 send_sig(SIGVTALRM, p, 1);
1026 p->it_virt_value = it_virt - ticks;
1030 static inline void do_it_prof(struct task_struct * p, unsigned long ticks)
1032 unsigned long it_prof = p->it_prof_value;
1034 if (it_prof) {
1035 if (it_prof <= ticks) {
1036 it_prof = ticks + p->it_prof_incr;
1037 send_sig(SIGPROF, p, 1);
1039 p->it_prof_value = it_prof - ticks;
1043 void update_one_process(struct task_struct *p,
1044 unsigned long ticks, unsigned long user, unsigned long system)
1046 do_process_times(p, user, system);
1047 do_it_virt(p, user);
1048 do_it_prof(p, ticks);
1051 static void update_process_times(unsigned long ticks, unsigned long system)
1054 * SMP does this on a per-CPU basis elsewhere
1056 #ifndef __SMP__
1057 struct task_struct * p = current;
1058 unsigned long user = ticks - system;
1059 if (p->pid) {
1060 p->counter -= ticks;
1061 if (p->counter < 0) {
1062 p->counter = 0;
1063 need_resched = 1;
1065 if (p->priority < DEF_PRIORITY)
1066 kstat.cpu_nice += user;
1067 else
1068 kstat.cpu_user += user;
1069 kstat.cpu_system += system;
1071 update_one_process(p, ticks, user, system);
1072 #endif
1075 volatile unsigned long lost_ticks = 0;
1076 static unsigned long lost_ticks_system = 0;
1078 static inline void update_times(void)
1080 unsigned long ticks;
1081 unsigned long flags;
1083 save_flags(flags);
1084 cli();
1086 ticks = lost_ticks;
1087 lost_ticks = 0;
1089 if (ticks) {
1090 unsigned long system;
1091 system = xchg(&lost_ticks_system, 0);
1093 calc_load(ticks);
1094 update_wall_time(ticks);
1095 restore_flags(flags);
1097 update_process_times(ticks, system);
1099 } else
1100 restore_flags(flags);
1103 static void timer_bh(void)
1105 update_times();
1106 run_old_timers();
1107 run_timer_list();
1110 void do_timer(struct pt_regs * regs)
1112 (*(unsigned long *)&jiffies)++;
1113 lost_ticks++;
1114 mark_bh(TIMER_BH);
1115 if (!user_mode(regs))
1116 lost_ticks_system++;
1117 if (tq_timer)
1118 mark_bh(TQUEUE_BH);
1121 #ifndef __alpha__
1124 * For backwards compatibility? This can be done in libc so Alpha
1125 * and all newer ports shouldn't need it.
1127 asmlinkage unsigned int sys_alarm(unsigned int seconds)
1129 struct itimerval it_new, it_old;
1130 unsigned int oldalarm;
1132 it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
1133 it_new.it_value.tv_sec = seconds;
1134 it_new.it_value.tv_usec = 0;
1135 _setitimer(ITIMER_REAL, &it_new, &it_old);
1136 oldalarm = it_old.it_value.tv_sec;
1137 /* ehhh.. We can't return 0 if we have an alarm pending.. */
1138 /* And we'd better return too much than too little anyway */
1139 if (it_old.it_value.tv_usec)
1140 oldalarm++;
1141 return oldalarm;
1145 * The Alpha uses getxpid, getxuid, and getxgid instead. Maybe this
1146 * should be moved into arch/i386 instead?
1149 asmlinkage int sys_getpid(void)
1151 /* This is SMP safe - current->pid doesn't change */
1152 return current->pid;
1156 * This is not strictly SMP safe: p_opptr could change
1157 * from under us. However, rather than getting any lock
1158 * we can use an optimistic algorithm: get the parent
1159 * pid, and go back and check that the parent is still
1160 * the same. If it has changed (which is extremely unlikely
1161 * indeed), we just try again..
1163 * NOTE! This depends on the fact that even if we _do_
1164 * get an old value of "parent", we can happily dereference
1165 * the pointer: we just can't necessarily trust the result
1166 * until we know that the parent pointer is valid.
1168 * The "mb()" macro is a memory barrier - a synchronizing
1169 * event. It also makes sure that gcc doesn't optimize
1170 * away the necessary memory references.. The barrier doesn't
1171 * have to have all that strong semantics: on x86 we don't
1172 * really require a synchronizing instruction, for example.
1173 * The barrier is more important for code generation than
1174 * for any real memory ordering semantics (even if there is
1175 * a small window for a race, using the old pointer is
1176 * harmless for a while).
1178 asmlinkage int sys_getppid(void)
1180 int pid;
1181 struct task_struct * me = current;
1182 struct task_struct * parent;
1184 parent = me->p_opptr;
1185 for (;;) {
1186 pid = parent->pid;
1187 #if __SMP__
1189 struct task_struct *old = parent;
1190 mb();
1191 parent = me->p_opptr;
1192 if (old != parent)
1193 continue;
1195 #endif
1196 break;
1198 return pid;
1201 asmlinkage int sys_getuid(void)
1203 /* Only we change this so SMP safe */
1204 return current->uid;
1207 asmlinkage int sys_geteuid(void)
1209 /* Only we change this so SMP safe */
1210 return current->euid;
1213 asmlinkage int sys_getgid(void)
1215 /* Only we change this so SMP safe */
1216 return current->gid;
1219 asmlinkage int sys_getegid(void)
1221 /* Only we change this so SMP safe */
1222 return current->egid;
1226 * This has been replaced by sys_setpriority. Maybe it should be
1227 * moved into the arch dependent tree for those ports that require
1228 * it for backward compatibility?
1231 asmlinkage int sys_nice(int increment)
1233 unsigned long newprio;
1234 int increase = 0;
1237 * Setpriority might change our priority at the same moment.
1238 * We don't have to worry. Conceptually one call occurs first
1239 * and we have a single winner.
1242 newprio = increment;
1243 if (increment < 0) {
1244 if (!suser())
1245 return -EPERM;
1246 newprio = -increment;
1247 increase = 1;
1250 if (newprio > 40)
1251 newprio = 40;
1253 * do a "normalization" of the priority (traditionally
1254 * unix nice values are -20..20, linux doesn't really
1255 * use that kind of thing, but uses the length of the
1256 * timeslice instead (default 150 msec). The rounding is
1257 * why we want to avoid negative values.
1259 newprio = (newprio * DEF_PRIORITY + 10) / 20;
1260 increment = newprio;
1261 if (increase)
1262 increment = -increment;
1264 * Current->priority can change between this point
1265 * and the assignment. We are assigning, not doing adds/subs,
1266 * so that's ok. Conceptually a process might just instantaneously
1267 * read the value we stomp over. I don't think that is an issue
1268 * unless POSIX makes it one. If so we can loop on changes
1269 * to current->priority.
1271 newprio = current->priority - increment;
1272 if ((signed) newprio < 1)
1273 newprio = 1;
1274 if (newprio > DEF_PRIORITY*2)
1275 newprio = DEF_PRIORITY*2;
1276 current->priority = newprio;
1277 return 0;
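/*
 * Worked example of the normalization above, assuming HZ = 100 so that
 * DEF_PRIORITY evaluates to 20: nice(10) gives newprio = 10, which maps to
 * (10 * 20 + 10) / 20 = 10, so current->priority drops from 20 to 10,
 * i.e. the default timeslice is halved.  nice(-10) (root only) raises it
 * by the same amount, and the final value is always clamped to the range
 * 1 .. 2*DEF_PRIORITY.
 */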
1280 #endif
1282 static inline struct task_struct *find_process_by_pid(pid_t pid)
1284 if (pid)
1285 return find_task_by_pid(pid);
1286 else
1287 return current;
1290 static int setscheduler(pid_t pid, int policy,
1291 struct sched_param *param)
1293 struct sched_param lp;
1294 struct task_struct *p;
1296 if (!param || pid < 0)
1297 return -EINVAL;
1299 if (copy_from_user(&lp, param, sizeof(struct sched_param)))
1300 return -EFAULT;
1302 p = find_process_by_pid(pid);
1303 if (!p)
1304 return -ESRCH;
1306 if (policy < 0)
1307 policy = p->policy;
1308 else if (policy != SCHED_FIFO && policy != SCHED_RR &&
1309 policy != SCHED_OTHER)
1310 return -EINVAL;
1313 * Valid priorities for SCHED_FIFO and SCHED_RR are 1..99, valid
1314 * priority for SCHED_OTHER is 0.
1316 if (lp.sched_priority < 0 || lp.sched_priority > 99)
1317 return -EINVAL;
1318 if ((policy == SCHED_OTHER) != (lp.sched_priority == 0))
1319 return -EINVAL;
1321 if ((policy == SCHED_FIFO || policy == SCHED_RR) && !suser())
1322 return -EPERM;
1323 if ((current->euid != p->euid) && (current->euid != p->uid) &&
1324 !suser())
1325 return -EPERM;
1327 p->policy = policy;
1328 p->rt_priority = lp.sched_priority;
1329 spin_lock(&scheduler_lock);
1330 spin_lock_irq(&runqueue_lock);
1331 if (p->next_run)
1332 move_last_runqueue(p);
1333 spin_unlock_irq(&runqueue_lock);
1334 spin_unlock(&scheduler_lock);
1335 need_resched = 1;
1336 return 0;
1339 asmlinkage int sys_sched_setscheduler(pid_t pid, int policy,
1340 struct sched_param *param)
1342 return setscheduler(pid, policy, param);
1345 asmlinkage int sys_sched_setparam(pid_t pid, struct sched_param *param)
1347 return setscheduler(pid, -1, param);
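/*
 * Example (illustrative sketch): how a user-space program reaches the
 * setscheduler() path above through the C library wrapper.  Priority 50 is
 * an arbitrary choice within the 1..99 range that setscheduler() accepts
 * for the real-time classes; pid 0 means the calling process, and the call
 * requires superuser rights.
 */
#include <sched.h>

int make_me_round_robin(void)
{
	struct sched_param p;

	p.sched_priority = 50;
	return sched_setscheduler(0, SCHED_RR, &p);	/* 0 on success, -1 + errno on failure */
}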
1350 asmlinkage int sys_sched_getscheduler(pid_t pid)
1352 struct task_struct *p;
1354 if (pid < 0)
1355 return -EINVAL;
1357 p = find_process_by_pid(pid);
1358 if (!p)
1359 return -ESRCH;
1361 return p->policy;
1364 asmlinkage int sys_sched_getparam(pid_t pid, struct sched_param *param)
1366 struct task_struct *p;
1367 struct sched_param lp;
1369 if (!param || pid < 0)
1370 return -EINVAL;
1372 p = find_process_by_pid(pid);
1373 if (!p)
1374 return -ESRCH;
1376 lp.sched_priority = p->rt_priority;
1377 return copy_to_user(param, &lp, sizeof(struct sched_param)) ? -EFAULT : 0;
1380 asmlinkage int sys_sched_yield(void)
1382 spin_lock(&scheduler_lock);
1383 spin_lock_irq(&runqueue_lock);
1384 move_last_runqueue(current);
1385 spin_unlock_irq(&runqueue_lock);
1386 spin_unlock(&scheduler_lock);
1387 need_resched = 1;
1388 return 0;
1391 asmlinkage int sys_sched_get_priority_max(int policy)
1393 int ret = -EINVAL;
1395 switch (policy) {
1396 case SCHED_FIFO:
1397 case SCHED_RR:
1398 ret = 99;
1399 break;
1400 case SCHED_OTHER:
1401 ret = 0;
1402 break;
1404 return ret;
1407 asmlinkage int sys_sched_get_priority_min(int policy)
1409 int ret = -EINVAL;
1411 switch (policy) {
1412 case SCHED_FIFO:
1413 case SCHED_RR:
1414 ret = 1;
1415 break;
1416 case SCHED_OTHER:
1417 ret = 0;
1419 return ret;
1422 asmlinkage int sys_sched_rr_get_interval(pid_t pid, struct timespec *interval)
1424 struct timespec t;
1426 t.tv_sec = 0;
1427 t.tv_nsec = 150000;
1428 if (copy_to_user(interval, &t, sizeof(struct timespec)))
1429 return -EFAULT;
1430 return 0;
1434 * change timespec to jiffies, trying to avoid the
1435 * most obvious overflows..
1437 static unsigned long timespectojiffies(struct timespec *value)
1439 unsigned long sec = (unsigned) value->tv_sec;
1440 long nsec = value->tv_nsec;
1442 if (sec > (LONG_MAX / HZ))
1443 return LONG_MAX;
1444 nsec += 1000000000L / HZ - 1;
1445 nsec /= 1000000000L / HZ;
1446 return HZ * sec + nsec;
1449 static void jiffiestotimespec(unsigned long jiffies, struct timespec *value)
1451 value->tv_nsec = (jiffies % HZ) * (1000000000L / HZ);
1452 value->tv_sec = jiffies / HZ;
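/*
 * Worked example of the two conversions above, assuming HZ = 100 (one
 * jiffy = 10 ms = 10,000,000 ns).  For a request of 1.005 s:
 *
 *   nsec = 5,000,000 + (10,000,000 - 1) = 14,999,999
 *   nsec / 10,000,000 = 1                      (fractions round up)
 *   timespectojiffies() = 100 * 1 + 1 = 101 jiffies
 *
 * jiffiestotimespec(101, &t) then yields 1.010 s, so the round trip can
 * only ever over-shoot, and by less than one tick.
 */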
1455 asmlinkage int sys_nanosleep(struct timespec *rqtp, struct timespec *rmtp)
1457 struct timespec t;
1458 unsigned long expire;
1460 if(copy_from_user(&t, rqtp, sizeof(struct timespec)))
1461 return -EFAULT;
1463 if (t.tv_nsec >= 1000000000L || t.tv_nsec < 0 || t.tv_sec < 0)
1464 return -EINVAL;
1467 if (t.tv_sec == 0 && t.tv_nsec <= 2000000L &&
1468 current->policy != SCHED_OTHER)
1471 * Short delay requests up to 2 ms will be handled with
1472 * high precision by a busy wait for all real-time processes.
1474 * It's important on SMP not to do this while holding locks.
1476 udelay((t.tv_nsec + 999) / 1000);
1477 return 0;
1480 expire = timespectojiffies(&t) + (t.tv_sec || t.tv_nsec) + jiffies;
1482 current->timeout = expire;
1483 current->state = TASK_INTERRUPTIBLE;
1484 schedule();
1486 if (expire > jiffies) {
1487 if (rmtp) {
1488 jiffiestotimespec(expire - jiffies -
1489 (expire > jiffies + 1), &t);
1490 if (copy_to_user(rmtp, &t, sizeof(struct timespec)))
1491 return -EFAULT;
1493 return -EINTR;
1495 return 0;
1498 static void show_task(int nr,struct task_struct * p)
1500 unsigned long free = 0;
1501 static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };
1503 printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
1504 if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
1505 printk(stat_nam[p->state]);
1506 else
1507 printk(" ");
1508 #if ((~0UL) == 0xffffffff)
1509 if (p == current)
1510 printk(" current ");
1511 else
1512 printk(" %08lX ", thread_saved_pc(&p->tss));
1513 #else
1514 if (p == current)
1515 printk(" current task ");
1516 else
1517 printk(" %016lx ", thread_saved_pc(&p->tss));
1518 #endif
1519 #if 0
1520 for (free = 1; free < PAGE_SIZE/sizeof(long) ; free++) {
1521 if (((unsigned long *)p->kernel_stack_page)[free])
1522 break;
1524 #endif
1525 printk("%5lu %5d %6d ", free*sizeof(long), p->pid, p->p_pptr->pid);
1526 if (p->p_cptr)
1527 printk("%5d ", p->p_cptr->pid);
1528 else
1529 printk(" ");
1530 if (p->p_ysptr)
1531 printk("%7d", p->p_ysptr->pid);
1532 else
1533 printk(" ");
1534 if (p->p_osptr)
1535 printk(" %5d\n", p->p_osptr->pid);
1536 else
1537 printk("\n");
1540 void show_state(void)
1542 struct task_struct *p;
1544 #if ((~0UL) == 0xffffffff)
1545 printk("\n"
1546 " free sibling\n");
1547 printk(" task PC stack pid father child younger older\n");
1548 #else
1549 printk("\n"
1550 " free sibling\n");
1551 printk(" task PC stack pid father child younger older\n");
1552 #endif
1553 read_lock(&tasklist_lock);
1554 for_each_task(p)
1555 show_task((p->tarray_ptr - &task[0]),p);
1556 read_unlock(&tasklist_lock);
1559 __initfunc(void sched_init(void))
1562 * We have to do a little magic to get the first
1563 * process right in SMP mode.
1565 int cpu=hard_smp_processor_id();
1566 int nr = NR_TASKS;
1568 init_task.processor=cpu;
1570 /* Init task array free list and pidhash table. */
1571 while(--nr > 0)
1572 add_free_taskslot(&task[nr]);
1574 for(nr = 0; nr < PIDHASH_SZ; nr++)
1575 pidhash[nr] = NULL;
1577 init_bh(TIMER_BH, timer_bh);
1578 init_bh(TQUEUE_BH, tqueue_bh);
1579 init_bh(IMMEDIATE_BH, immediate_bh);