/*
 *  linux/kernel/sched.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *		make semaphores SMP safe
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *  1997-09-10	Updated NTP code according to technical memorandum Jan '96
 *		"A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-11-19	Implemented schedule_timeout() and related stuff
 *  1998-12-24	Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *		serialize accesses to xtime/lost_ticks).
 *				Copyright (C) 1998  Andrea Arcangeli
 *  1998-12-28  Implemented better SMP scheduling by Ingo Molnar
 *  1999-03-10	Improved NTP compatibility by Ulrich Windl
 */

/*
 * 'sched.c' is the main kernel file. It contains scheduling primitives
 * (sleep_on, wakeup, schedule etc) as well as a number of simple system
 * call functions (type getpid()), which just extract a field from
 * current-task
 */
#include <linux/kernel_stat.h>
#include <linux/fdreg.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/init.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/semaphore-helper.h>

#include <linux/timex.h>
unsigned securebits = SECUREBITS_DEFAULT; /* systemwide security settings */

long tick = (1000000 + HZ/2) / HZ;	/* timer interrupt period */

/* The current time */
volatile struct timeval xtime __attribute__ ((aligned (16)));

/* Don't completely fail for HZ > 500.  */
int tickadj = 500/HZ ? : 1;		/* microsecs */

DECLARE_TASK_QUEUE(tq_timer);
DECLARE_TASK_QUEUE(tq_immediate);
DECLARE_TASK_QUEUE(tq_scheduler);
/*
 * phase-lock loop variables
 */
/* TIME_ERROR prevents overwriting the CMOS clock */
int time_state = TIME_OK;		/* clock synchronization status	*/
int time_status = STA_UNSYNC;		/* clock status bits		*/
long time_offset = 0;			/* time adjustment (us)		*/
long time_constant = 2;			/* pll time constant		*/
long time_tolerance = MAXFREQ;		/* frequency tolerance (ppm)	*/
long time_precision = 1;		/* clock precision (us)		*/
long time_maxerror = NTP_PHASE_LIMIT;	/* maximum error (us)		*/
long time_esterror = NTP_PHASE_LIMIT;	/* estimated error (us)		*/
long time_phase = 0;			/* phase offset (scaled us)	*/
long time_freq = ((1000000 + HZ/2) % HZ - HZ/2) << SHIFT_USEC;
					/* frequency offset (scaled ppm)*/
long time_adj = 0;			/* tick adjust (scaled 1 / HZ)	*/
long time_reftime = 0;			/* time at last adjustment (s)	*/

long time_adjust_step = 0;

unsigned long event = 0;

extern int do_setitimer(int, struct itimerval *, struct itimerval *);
unsigned int * prof_buffer = NULL;
unsigned long prof_len = 0;
unsigned long prof_shift = 0;

extern void mem_use(void);

unsigned long volatile jiffies = 0;
/*
 * Init task must be ok at boot for the ix86 as we will check its signals
 * via the SMP irq return path.
 */
struct task_struct * task[NR_TASKS] = {&init_task, };

/*
 * We align per-CPU scheduling data on cacheline boundaries,
 * to prevent cacheline ping-pong.
 */
static union {
	struct schedule_data {
		struct task_struct * curr;
		cycles_t last_schedule;
	} schedule_data;
	char __pad [SMP_CACHE_BYTES];
} aligned_data [NR_CPUS] __cacheline_aligned = { {{&init_task,0}}};

#define cpu_curr(cpu) aligned_data[(cpu)].schedule_data.curr

struct kernel_stat kstat = { 0 };

#ifdef __SMP__

#define idle_task(cpu) (task[cpu_number_map[(cpu)]])
#define can_schedule(p)	(!(p)->has_cpu)

#else

#define idle_task(cpu) (&init_task)
#define can_schedule(p) (1)

#endif
void scheduling_functions_start_here(void) { }

/*
 * This is the function that decides how desirable a process is..
 * You can weigh different processes against each other depending
 * on what CPU they've run on lately etc to try to handle cache
 * and TLB miss penalties.
 *
 * Return values:
 *	 -1000: never select this
 *	     0: out of time, recalculate counters (but it might still be
 *		selected)
 *	   +ve: "goodness" value (the larger, the better)
 *	 +1000: realtime process, select this.
 */
static inline int goodness (struct task_struct * prev,
			    struct task_struct * p, int this_cpu)
{
	int weight;

	/*
	 * Realtime process, select the first one on the
	 * runqueue (taking priorities within processes
	 * into account).
	 */
	if (p->policy != SCHED_OTHER) {
		weight = 1000 + p->rt_priority;
		goto out;
	}

	/*
	 * Give the process a first-approximation goodness value
	 * according to the number of clock-ticks it has left.
	 *
	 * Don't do any other calculations if the time slice is
	 * over..
	 */
	weight = p->counter;
	if (!weight)
		goto out;

#ifdef __SMP__
	/* Give a largish advantage to the same processor...   */
	/* (this is equivalent to penalizing other processors) */
	if (p->processor == this_cpu)
		weight += PROC_CHANGE_PENALTY;
#endif

	/* .. and a slight advantage to the current MM */
	if (p->mm == prev->mm)
		weight += 1;
	weight += p->priority;

out:
	return weight;
}
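
/*
 * Illustrative sketch (editor's addition, compiled out): how the weights
 * above compare in practice.  The tasks, priorities and helper name here
 * are invented for illustration and are not part of the original file.
 */
#if 0
static int goodness_example(struct task_struct *prev,
			    struct task_struct *rt_task,
			    struct task_struct *normal_task)
{
	/* SCHED_FIFO with rt_priority 50:  1000 + 50 = 1050.		*/
	int w_rt = goodness(prev, rt_task, 0);

	/* SCHED_OTHER with a full counter of 20 and priority 20 that	*/
	/* last ran on this CPU:  20 (+ PROC_CHANGE_PENALTY on SMP)	*/
	/* + 1 if it shares prev's mm + 20.				*/
	int w_normal = goodness(prev, normal_task, 0);

	return w_rt > w_normal;	/* a realtime task always wins */
}
#endif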
/*
 * subtle. We want to discard a yielded process only if it's being
 * considered for a reschedule. Wakeup-time 'queries' of the scheduling
 * state do not count. Another optimization we do: sched_yield()-ed
 * processes are runnable (and thus will be considered for scheduling)
 * right when they are calling schedule(). So the only place we need
 * to care about SCHED_YIELD is when we calculate the previous process'
 * goodness ...
 */
static inline int prev_goodness (struct task_struct * prev,
				 struct task_struct * p, int this_cpu)
{
	if (p->policy & SCHED_YIELD) {
		p->policy &= ~SCHED_YIELD;
		return 0;
	}
	return goodness(prev, p, this_cpu);
}

/*
 * the 'goodness value' of replacing a process on a given CPU.
 * positive value means 'replace', zero or negative means 'dont'.
 */
static inline int preemption_goodness (struct task_struct * prev,
				struct task_struct * p, int cpu)
{
	return goodness(prev, p, cpu) - goodness(prev, prev, cpu);
}
/*
 * If there is a dependency between p1 and p2,
 * don't be too eager to go into the slow schedule.
 * In particular, if p1 and p2 both want the kernel
 * lock, there is no point in trying to make them
 * extremely parallel..
 *
 * (No lock - lock_depth < 0)
 *
 * There are two additional metrics here:
 *
 * first, a 'cutoff' interval, currently 0-200 usecs on
 * x86 CPUs, depending on the size of the 'SMP-local cache'.
 * If the current process has longer average timeslices than
 * this, then we utilize the idle CPU.
 *
 * second, if the wakeup comes from a process context,
 * then the two processes are 'related'. (they form a
 * 'gang')
 *
 * An idle CPU is almost always a bad thing, thus we skip
 * the idle-CPU utilization only if both these conditions
 * are true. (ie. a 'process-gang' rescheduling with rather
 * high frequency should stay on the same CPU).
 *
 * [We can switch to something more finegrained in 2.3.]
 *
 * do not 'guess' if the to-be-scheduled task is RT.
 */
#define related(p1,p2) (((p1)->lock_depth >= 0) && (p2)->lock_depth >= 0) && \
	(((p2)->policy == SCHED_OTHER) && ((p1)->avg_slice < cacheflush_time))
static inline void reschedule_idle_slow(struct task_struct * p)
{
#ifdef __SMP__
/*
 * (see reschedule_idle() for an explanation first ...)
 *
 * Pass #2
 *
 * We try to find another (idle) CPU for this woken-up process.
 *
 * On SMP, we mostly try to see if the CPU the task used
 * to run on is idle.. but we will use another idle CPU too,
 * at this point we already know that this CPU is not
 * willing to reschedule in the near future.
 *
 * An idle CPU is definitely wasted, especially if this CPU is
 * running long-timeslice processes. The following algorithm is
 * pretty good at finding the best idle CPU to send this process
 * to.
 *
 * [We can try to preempt low-priority processes on other CPUs in
 * 2.3. Also we can try to use the avg_slice value to predict
 * 'likely reschedule' events even on other CPUs.]
 */
	int this_cpu = smp_processor_id(), target_cpu;
	struct task_struct *tsk, *target_tsk;
	int cpu, best_cpu, weight, best_weight, i;
	unsigned long flags;

	best_weight = 0; /* prevents negative weight */

	spin_lock_irqsave(&runqueue_lock, flags);

	/*
	 * shortcut if the woken up task's last CPU is
	 * idle now.
	 */
	best_cpu = p->processor;
	target_tsk = idle_task(best_cpu);
	if (cpu_curr(best_cpu) == target_tsk)
		goto send_now;

	target_tsk = NULL;
	for (i = 0; i < smp_num_cpus; i++) {
		cpu = cpu_logical_map(i);
		tsk = cpu_curr(cpu);
		if (related(tsk, p))
			goto out_no_target;
		weight = preemption_goodness(tsk, p, cpu);
		if (weight > best_weight) {
			best_weight = weight;
			target_tsk = tsk;
		}
	}

	/*
	 * found any suitable CPU?
	 */
	if (!target_tsk)
		goto out_no_target;

send_now:
	target_cpu = target_tsk->processor;
	target_tsk->need_resched = 1;
	spin_unlock_irqrestore(&runqueue_lock, flags);
	/*
	 * the APIC stuff can go outside of the lock because
	 * it uses no task information, only CPU#.
	 */
	if (target_cpu != this_cpu)
		smp_send_reschedule(target_cpu);
	return;
out_no_target:
	spin_unlock_irqrestore(&runqueue_lock, flags);
	return;
#else /* UP */
	int this_cpu = smp_processor_id();
	struct task_struct *tsk;

	tsk = cpu_curr(this_cpu);
	if (preemption_goodness(tsk, p, this_cpu) > 0)
		tsk->need_resched = 1;
#endif
}
static void reschedule_idle(struct task_struct * p)
{
#ifdef __SMP__
	int cpu = smp_processor_id();
	/*
	 * ("wakeup()" should not be called before we've initialized
	 * SMP completely.
	 * Basically a not-yet initialized SMP subsystem can be
	 * considered as a not-yet working scheduler, simply don't use
	 * it before it's up and running ...)
	 *
	 * SMP rescheduling is done in 2 passes:
	 *  - pass #1: faster: 'quick decisions'
	 *  - pass #2: slower: 'let's try and find a suitable CPU'
	 */

	/*
	 * Pass #1. (subtle. We might be in the middle of __switch_to, so
	 * to preserve scheduling atomicity we have to use cpu_curr)
	 */
	if ((p->processor == cpu) && related(cpu_curr(cpu), p))
		return;
#endif /* __SMP__ */
	/*
	 * Pass #2
	 */
	reschedule_idle_slow(p);
}
/*
 * This has to add the process to the _beginning_ of the
 * run-queue, not the end. See the comment about "This is
 * subtle" in the scheduler proper..
 */
static inline void add_to_runqueue(struct task_struct * p)
{
	struct task_struct *next = init_task.next_run;

	p->prev_run = &init_task;
	init_task.next_run = p;
	p->next_run = next;
	next->prev_run = p;
	nr_running++;
}

static inline void del_from_runqueue(struct task_struct * p)
{
	struct task_struct *next = p->next_run;
	struct task_struct *prev = p->prev_run;

	nr_running--;
	next->prev_run = prev;
	prev->next_run = next;
	p->next_run = NULL;
	p->prev_run = NULL;
}

static inline void move_last_runqueue(struct task_struct * p)
{
	struct task_struct *next = p->next_run;
	struct task_struct *prev = p->prev_run;

	/* remove from list */
	next->prev_run = prev;
	prev->next_run = next;
	/* add back to list */
	p->next_run = &init_task;
	prev = init_task.prev_run;
	init_task.prev_run = p;
	p->prev_run = prev;
	prev->next_run = p;
}

static inline void move_first_runqueue(struct task_struct * p)
{
	struct task_struct *next = p->next_run;
	struct task_struct *prev = p->prev_run;

	/* remove from list */
	next->prev_run = prev;
	prev->next_run = next;
	/* add back to list */
	p->prev_run = &init_task;
	next = init_task.next_run;
	init_task.next_run = p;
	p->next_run = next;
	next->prev_run = p;
}
/*
 * The tasklist_lock protects the linked list of processes.
 *
 * The scheduler lock is protecting against multiple entry
 * into the scheduling code, and doesn't need to worry
 * about interrupts (because interrupts cannot call the
 * scheduler).
 *
 * The run-queue lock locks the parts that actually access
 * and change the run-queues, and have to be interrupt-safe.
 */
spinlock_t runqueue_lock = SPIN_LOCK_UNLOCKED;  /* second */
rwlock_t tasklist_lock = RW_LOCK_UNLOCKED;	/* third */
/*
 * Wake up a process. Put it on the run-queue if it's not
 * already there.  The "current" process is always on the
 * run-queue (except when the actual re-schedule is in
 * progress), and as such you're allowed to do the simpler
 * "current->state = TASK_RUNNING" to mark yourself runnable
 * without the overhead of this.
 */
void wake_up_process(struct task_struct * p)
{
	unsigned long flags;

	/*
	 * We want the common case fall through straight, thus the goto.
	 */
	spin_lock_irqsave(&runqueue_lock, flags);
	p->state = TASK_RUNNING;
	if (p->next_run)
		goto out;
	add_to_runqueue(p);
	spin_unlock_irqrestore(&runqueue_lock, flags);

	reschedule_idle(p);
	return;
out:
	spin_unlock_irqrestore(&runqueue_lock, flags);
}
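
/*
 * Illustrative sketch (editor's addition, compiled out): the classic
 * pattern wake_up_process() pairs with.  "my_task"/"my_event" are
 * invented names, not part of this file.
 */
#if 0
static struct task_struct *my_task;
static volatile int my_event;

static void my_consumer(void)
{
	my_task = current;
	for (;;) {
		current->state = TASK_INTERRUPTIBLE;
		if (my_event)
			break;	/* re-check after setting state: no lost wakeup */
		schedule();
	}
	current->state = TASK_RUNNING;
}

static void my_producer(void)
{
	my_event = 1;
	wake_up_process(my_task);	/* puts my_task back on the run-queue */
}
#endif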
static void process_timeout(unsigned long __data)
{
	struct task_struct * p = (struct task_struct *) __data;

	wake_up_process(p);
}
/*
 * Event timer code
 */
#define TVN_BITS 6
#define TVR_BITS 8
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)

struct timer_vec {
	int index;
	struct timer_list *vec[TVN_SIZE];
};

struct timer_vec_root {
	int index;
	struct timer_list *vec[TVR_SIZE];
};

static struct timer_vec tv5 = { 0 };
static struct timer_vec tv4 = { 0 };
static struct timer_vec tv3 = { 0 };
static struct timer_vec tv2 = { 0 };
static struct timer_vec_root tv1 = { 0 };

static struct timer_vec * const tvecs[] = {
	(struct timer_vec *)&tv1, &tv2, &tv3, &tv4, &tv5
};

#define NOOF_TVECS (sizeof(tvecs) / sizeof(tvecs[0]))

static unsigned long timer_jiffies = 0;
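
/*
 * Illustrative sketch (editor's addition, compiled out): which vector a
 * timer lands in is a pure function of idx = expires - timer_jiffies.
 * With TVR_BITS == 8 and TVN_BITS == 6, tv1 covers the next 256 ticks,
 * one tick per slot; each tv2 slot spans 2^8 ticks, each tv3 slot 2^14,
 * each tv4 slot 2^20, each tv5 slot 2^26.  E.g. a timer 1000 ticks out
 * goes to tv2, slot (expires >> 8) & 63, and is cascaded into tv1 as
 * time advances.  The helper name is invented.
 */
#if 0
static int example_timer_level(unsigned long expires)
{
	unsigned long idx = expires - timer_jiffies;

	if (idx < TVR_SIZE)
		return 1;			/* tv1: expires & TVR_MASK */
	if (idx < 1UL << (TVR_BITS + TVN_BITS))
		return 2;			/* tv2: (expires >> 8) & TVN_MASK */
	if (idx < 1UL << (TVR_BITS + 2*TVN_BITS))
		return 3;			/* tv3 */
	if (idx < 1UL << (TVR_BITS + 3*TVN_BITS))
		return 4;			/* tv4 */
	return 5;				/* tv5 (full 32-bit range) */
}
#endif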
static inline void insert_timer(struct timer_list *timer,
				struct timer_list **vec, int idx)
{
	if ((timer->next = vec[idx]))
		vec[idx]->prev = timer;
	vec[idx] = timer;
	timer->prev = (struct timer_list *)&vec[idx];
}
static inline void internal_add_timer(struct timer_list *timer)
{
	/*
	 * must be cli-ed when calling this
	 */
	unsigned long expires = timer->expires;
	unsigned long idx = expires - timer_jiffies;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		insert_timer(timer, tv1.vec, i);
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		insert_timer(timer, tv2.vec, i);
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		insert_timer(timer, tv3.vec, i);
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		insert_timer(timer, tv4.vec, i);
	} else if ((signed long) idx < 0) {
		/* can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		insert_timer(timer, tv1.vec, tv1.index);
	} else if (idx <= 0xffffffffUL) {
		int i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		insert_timer(timer, tv5.vec, i);
	} else {
		/* Can only get here on architectures with 64-bit jiffies */
		timer->next = timer->prev = timer;
	}
}
spinlock_t timerlist_lock = SPIN_LOCK_UNLOCKED;

void add_timer(struct timer_list *timer)
{
	unsigned long flags;

	spin_lock_irqsave(&timerlist_lock, flags);
	if (timer->prev)
		goto bug;
	internal_add_timer(timer);
out:
	spin_unlock_irqrestore(&timerlist_lock, flags);
	return;

bug:
	printk("bug: kernel timer added twice at %p.\n",
			__builtin_return_address(0));
	goto out;
}
static inline int detach_timer(struct timer_list *timer)
{
	struct timer_list *prev = timer->prev;
	if (prev) {
		struct timer_list *next = timer->next;
		prev->next = next;
		if (next)
			next->prev = prev;
		return 1;
	}
	return 0;
}

void mod_timer(struct timer_list *timer, unsigned long expires)
{
	unsigned long flags;

	spin_lock_irqsave(&timerlist_lock, flags);
	timer->expires = expires;
	detach_timer(timer);
	internal_add_timer(timer);
	spin_unlock_irqrestore(&timerlist_lock, flags);
}

int del_timer(struct timer_list * timer)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&timerlist_lock, flags);
	ret = detach_timer(timer);
	timer->next = timer->prev = 0;
	spin_unlock_irqrestore(&timerlist_lock, flags);
	return ret;
}
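
/*
 * Illustrative sketch (editor's addition, compiled out): typical driver
 * usage of the one-shot timer API above.  "my_timer"/"my_timeout" are
 * invented names; init_timer() is the usual <linux/timer.h> initializer
 * of this era.
 */
#if 0
static struct timer_list my_timer;

static void my_timeout(unsigned long data)
{
	printk("device %lu timed out\n", data);
}

static void my_start(void)
{
	init_timer(&my_timer);			/* clears next/prev */
	my_timer.expires = jiffies + 2*HZ;	/* ~2 seconds from now */
	my_timer.data = 0;
	my_timer.function = my_timeout;
	add_timer(&my_timer);
}

static void my_stop(void)
{
	del_timer(&my_timer);	/* returns 1 if it was still pending */
}
#endif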
signed long schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative values
		 * but I'd like to return a valid offset (>=0) to allow
		 * the caller to do everything it wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0)
		{
			printk(KERN_ERR "schedule_timeout: wrong timeout "
			       "value %lx from %p\n", timeout,
			       __builtin_return_address(0));
			goto out;
		}
	}

	expire = timeout + jiffies;

	init_timer(&timer);
	timer.expires = expire;
	timer.data = (unsigned long) current;
	timer.function = process_timeout;

	add_timer(&timer);
	schedule();
	del_timer(&timer);

	timeout = expire - jiffies;

 out:
	return timeout < 0 ? 0 : timeout;
}
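
/*
 * Illustrative sketch (editor's addition, compiled out): the canonical
 * schedule_timeout() call sequence; the caller must set its task state
 * first, exactly as the sleep_on_timeout() helpers below do.
 */
#if 0
static void my_wait_a_second(void)
{
	signed long remaining;

	current->state = TASK_INTERRUPTIBLE;
	remaining = schedule_timeout(HZ);	/* sleep ~1 second */
	if (remaining)
		printk("woken early, %ld ticks left\n", remaining);
}
#endif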
/*
 * schedule_tail() is getting called from the fork return path. This
 * cleans up all remaining scheduler things, without impacting the
 * common case.
 */
static inline void __schedule_tail (struct task_struct *prev)
{
#ifdef __SMP__
	if ((prev->state == TASK_RUNNING) &&
			(prev != idle_task(smp_processor_id())))
		reschedule_idle(prev);
	wmb();
	prev->has_cpu = 0;
#endif /* __SMP__ */
}

void schedule_tail (struct task_struct *prev)
{
	__schedule_tail(prev);
}
/*
 *  'schedule()' is the scheduler function. It's a very simple and nice
 * scheduler: it's not perfect, but certainly works for most things.
 *
 * The goto is "interesting".
 *
 *   NOTE!!  Task 0 is the 'idle' task, which gets called when no other
 * tasks can run. It can not be killed, and it cannot sleep. The 'state'
 * information in task[0] is never used.
 */
asmlinkage void schedule(void)
{
	struct schedule_data * sched_data;
	struct task_struct *prev, *next, *p;
	int this_cpu, c;

	if (tq_scheduler)
		goto handle_tq_scheduler;
tq_scheduler_back:

	prev = current;
	this_cpu = prev->processor;

	if (in_interrupt())
		goto scheduling_in_interrupt;

	release_kernel_lock(prev, this_cpu);

	/* Do "administrative" work here while we don't hold any locks */
	if (bh_mask & bh_active)
		goto handle_bh;
handle_bh_back:

	/*
	 * 'sched_data' is protected by the fact that we can run
	 * only one process per CPU.
	 */
	sched_data = & aligned_data[this_cpu].schedule_data;

	spin_lock_irq(&runqueue_lock);

	/* move an exhausted RR process to be last.. */
	if (prev->policy == SCHED_RR)
		goto move_rr_last;
move_rr_back:

	switch (prev->state) {
		case TASK_INTERRUPTIBLE:
			if (signal_pending(prev)) {
				prev->state = TASK_RUNNING;
				break;
			}
		default:
			del_from_runqueue(prev);
		case TASK_RUNNING:
	}
	prev->need_resched = 0;

repeat_schedule:

	/*
	 * this is the scheduler proper:
	 */

	p = init_task.next_run;
	/* Default process to select.. */
	next = idle_task(this_cpu);
	c = -1000;
	if (prev->state == TASK_RUNNING)
		goto still_running;
still_running_back:

	/*
	 * This is subtle.
	 * Note how we can enable interrupts here, even
	 * though interrupts can add processes to the run-
	 * queue. This is because any new processes will
	 * be added to the front of the queue, so "p" above
	 * is a safe starting point.
	 * run-queue deletion and re-ordering is protected by
	 * the scheduler lock
	 */
/*
 * Note! there may appear new tasks on the run-queue during this, as
 * interrupts are enabled. However, they will be put on front of the
 * list, so our list starting at "p" is essentially fixed.
 */
	while (p != &init_task) {
		if (can_schedule(p)) {
			int weight = goodness(prev, p, this_cpu);
			if (weight > c)
				c = weight, next = p;
		}
		p = p->next_run;
	}

	/* Do we need to re-calculate counters? */
	if (!c)
		goto recalculate;
	/*
	 * from this point on nothing can prevent us from
	 * switching to the next task, save this fact in
	 * sched_data.
	 */
	sched_data->curr = next;
#ifdef __SMP__
	next->has_cpu = 1;
	next->processor = this_cpu;
#endif
	spin_unlock_irq(&runqueue_lock);

	if (prev == next)
		goto same_process;

#ifdef __SMP__
	/*
	 * maintain the per-process 'average timeslice' value.
	 * (this has to be recalculated even if we reschedule to
	 * the same process) Currently this is only used on SMP,
	 * and it's approximate, so we do not have to maintain
	 * it while holding the runqueue spinlock.
	 */
	{
		cycles_t t, this_slice;

		t = get_cycles();
		this_slice = t - sched_data->last_schedule;
		sched_data->last_schedule = t;

		/*
		 * Exponentially fading average calculation, with
		 * some weight so it doesn't get fooled easily by
		 * smaller irregularities.
		 */
		prev->avg_slice = (this_slice*1 + prev->avg_slice*1)/2;
	}

	/*
	 * We drop the scheduler lock early (it's a global spinlock),
	 * thus we have to lock the previous process from getting
	 * rescheduled during switch_to().
	 */
	prev->has_cpu = 1;
#endif /* __SMP__ */

	kstat.context_swtch++;
	get_mmu_context(next);
	switch_to(prev, next, prev);
	__schedule_tail(prev);

same_process:
	reacquire_kernel_lock(current);
	return;

recalculate:
	{
		struct task_struct *p;
		spin_unlock_irq(&runqueue_lock);
		read_lock(&tasklist_lock);
		for_each_task(p)
			p->counter = (p->counter >> 1) + p->priority;
		read_unlock(&tasklist_lock);
		spin_lock_irq(&runqueue_lock);
		goto repeat_schedule;
	}

still_running:
	c = prev_goodness(prev, prev, this_cpu);
	next = prev;
	goto still_running_back;

handle_bh:
	do_bottom_half();
	goto handle_bh_back;

handle_tq_scheduler:
	run_task_queue(&tq_scheduler);
	goto tq_scheduler_back;

move_rr_last:
	if (!prev->counter) {
		prev->counter = prev->priority;
		move_last_runqueue(prev);
	}
	goto move_rr_back;

scheduling_in_interrupt:
	printk("Scheduling in interrupt\n");
	*(int *)0 = 0;
	return;
}
void __wake_up(wait_queue_head_t *q, unsigned int mode)
{
	struct list_head *tmp, *head;
	struct task_struct *p;
	unsigned long flags;

	if (!q)
		goto out;

	wq_write_lock_irqsave(&q->lock, flags);

#if WAITQUEUE_DEBUG
	CHECK_MAGIC_WQHEAD(q);
#endif

	head = &q->task_list;
#if WAITQUEUE_DEBUG
	if (!head->next || !head->prev)
		WQ_BUG();
#endif
	tmp = head->next;
	while (tmp != head) {
		unsigned int state;
		wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list);

		tmp = tmp->next;

#if WAITQUEUE_DEBUG
		CHECK_MAGIC(curr->__magic);
#endif
		p = curr->task;
		state = p->state;
		if (state & mode) {
#if WAITQUEUE_DEBUG
			curr->__waker = (long)__builtin_return_address(0);
#endif
			wake_up_process(p);
			if (state & TASK_EXCLUSIVE)
				break;
		}
	}
	wq_write_unlock_irqrestore(&q->lock, flags);
out:
	return;
}
/*
 * Semaphores are implemented using a two-way counter:
 * The "count" variable is decremented for each process
 * that tries to sleep, while the "waking" variable is
 * incremented when the "up()" code goes to wake up waiting
 * processes.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up
 * needs to do something only if count was negative before
 * the increment operation).
 *
 * waking_non_zero() (from asm/semaphore.h) must execute
 * atomically.
 *
 * When __up() is called, the count was negative before
 * incrementing it, and we need to wake up somebody.
 *
 * This routine adds one to the count of processes that need to
 * wake up and exit.  ALL waiting processes actually wake up but
 * only the one that gets to the "waking" field first will gate
 * through and acquire the semaphore.  The others will go back
 * to sleep.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business.  The
 * critical part is the inline stuff in <asm/semaphore.h>
 * where we want to avoid any extra jumps and calls.
 */
void __up(struct semaphore *sem)
{
	wake_one_more(sem);
	wake_up(&sem->wait);
}
/*
 * Perform the "down" function.  Return zero for semaphore acquired,
 * return negative for signalled out of the function.
 *
 * If called from __down, the return is ignored and the wait loop is
 * not interruptible.  This means that a task waiting on a semaphore
 * using "down()" cannot be killed until someone does an "up()" on
 * the semaphore.
 *
 * If called from __down_interruptible, the return value gets checked
 * upon return.  If the return value is negative then the task continues
 * with the negative value in the return register (it can be tested by
 * signal handling code).
 *
 * Either form may be used in conjunction with "up()".
 */

#define DOWN_VAR				\
	struct task_struct *tsk = current;	\
	wait_queue_t wait;			\
	init_waitqueue_entry(&wait, tsk);

#define DOWN_HEAD(task_state)						\
									\
									\
	tsk->state = (task_state);					\
	add_wait_queue(&sem->wait, &wait);				\
									\
	/*								\
	 * Ok, we're set up.  sem->count is known to be less than zero	\
	 * so we must wait.						\
	 *								\
	 * We can let go the lock for purposes of waiting.		\
	 * We re-acquire it after awaking so as to protect		\
	 * all semaphore operations.					\
	 *								\
	 * If "up()" is called before we call waking_non_zero() then	\
	 * we will catch it right away.  If it is called later then	\
	 * we will have to go through a wakeup cycle to catch it.	\
	 *								\
	 * Multiple waiters contend for the semaphore lock to see	\
	 * who gets to gate through and who has to wait some more.	\
	 */								\
	for (;;) {

#define DOWN_TAIL(task_state)			\
		tsk->state = (task_state);	\
	}					\
	tsk->state = TASK_RUNNING;		\
	remove_wait_queue(&sem->wait, &wait);
void __down(struct semaphore * sem)
{
	DOWN_VAR
	DOWN_HEAD(TASK_UNINTERRUPTIBLE)
	if (waking_non_zero(sem))
		break;
	schedule();
	DOWN_TAIL(TASK_UNINTERRUPTIBLE)
}

int __down_interruptible(struct semaphore * sem)
{
	int ret = 0;
	DOWN_VAR
	DOWN_HEAD(TASK_INTERRUPTIBLE)

	ret = waking_non_zero_interruptible(sem, tsk);
	if (ret)
	{
		if (ret == 1)
			/* ret != 0 only if we get interrupted -arca */
			ret = 0;
		break;
	}
	schedule();
	DOWN_TAIL(TASK_INTERRUPTIBLE)
	return ret;
}

int __down_trylock(struct semaphore * sem)
{
	return waking_non_zero_trylock(sem);
}
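
/*
 * Illustrative sketch (editor's addition, compiled out): the inline
 * down()/up() fast paths in <asm/semaphore.h> only fall through to the
 * __down*() slow paths above on contention.  "my_sem" is an invented
 * name; DECLARE_MUTEX is assumed to be this era's count-of-1 initializer.
 */
#if 0
static DECLARE_MUTEX(my_sem);

static void my_critical_section(void)
{
	down(&my_sem);			/* may call __down() and sleep */
	/* ... exclusive access ... */
	up(&my_sem);			/* may call __up() to wake a waiter */
}

static int my_critical_section_killable(void)
{
	if (down_interruptible(&my_sem))	/* __down_interruptible() */
		return -EINTR;			/* interrupted by a signal */
	/* ... */
	up(&my_sem);
	return 0;
}
#endif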
#define	SLEEP_ON_VAR				\
	unsigned long flags;			\
	wait_queue_t wait;			\
	init_waitqueue_entry(&wait, current);

#define	SLEEP_ON_HEAD					\
	wq_write_lock_irqsave(&q->lock,flags);		\
	__add_wait_queue(q, &wait);			\
	wq_write_unlock(&q->lock);

#define	SLEEP_ON_TAIL						\
	wq_write_lock_irq(&q->lock);				\
	__remove_wait_queue(q, &wait);				\
	wq_write_unlock_irqrestore(&q->lock,flags);
void interruptible_sleep_on(wait_queue_head_t *q)
{
	SLEEP_ON_VAR

	current->state = TASK_INTERRUPTIBLE;

	SLEEP_ON_HEAD
	schedule();
	SLEEP_ON_TAIL
}

long interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
	SLEEP_ON_VAR

	current->state = TASK_INTERRUPTIBLE;

	SLEEP_ON_HEAD
	timeout = schedule_timeout(timeout);
	SLEEP_ON_TAIL

	return timeout;
}

void sleep_on(wait_queue_head_t *q)
{
	SLEEP_ON_VAR

	current->state = TASK_UNINTERRUPTIBLE;

	SLEEP_ON_HEAD
	schedule();
	SLEEP_ON_TAIL
}

long sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
	SLEEP_ON_VAR

	current->state = TASK_UNINTERRUPTIBLE;

	SLEEP_ON_HEAD
	timeout = schedule_timeout(timeout);
	SLEEP_ON_TAIL

	return timeout;
}
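
/*
 * Illustrative sketch (editor's addition, compiled out): sleep_on()
 * pairs with wake_up() on the same wait-queue head.  Note the
 * check-then-sleep race these helpers leave open, which is why robust
 * code builds on add_wait_queue()/schedule() directly.  Names are
 * invented; DECLARE_WAIT_QUEUE_HEAD is assumed from <linux/wait.h>
 * of this era.
 */
#if 0
static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static volatile int my_ready;

static void my_reader(void)
{
	while (!my_ready)
		interruptible_sleep_on_timeout(&my_wq, HZ);	/* poll ~1s */
}

static void my_writer(void)
{
	my_ready = 1;
	wake_up_interruptible(&my_wq);
}
#endif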
void scheduling_functions_end_here(void) { }

static inline void cascade_timers(struct timer_vec *tv)
{
	/* cascade all the timers from tv up one level */
	struct timer_list *timer;
	timer = tv->vec[tv->index];
	/*
	 * We are removing _all_ timers from the list, so we don't have to
	 * detach them individually, just clear the list afterwards.
	 */
	while (timer) {
		struct timer_list *tmp = timer;
		timer = timer->next;
		internal_add_timer(tmp);
	}
	tv->vec[tv->index] = NULL;
	tv->index = (tv->index + 1) & TVN_MASK;
}
static inline void run_timer_list(void)
{
	spin_lock_irq(&timerlist_lock);
	while ((long)(jiffies - timer_jiffies) >= 0) {
		struct timer_list *timer;
		if (!tv1.index) {
			int n = 1;
			do {
				cascade_timers(tvecs[n]);
			} while (tvecs[n]->index == 1 && ++n < NOOF_TVECS);
		}
		while ((timer = tv1.vec[tv1.index])) {
			void (*fn)(unsigned long) = timer->function;
			unsigned long data = timer->data;
			detach_timer(timer);
			timer->next = timer->prev = NULL;
			spin_unlock_irq(&timerlist_lock);
			fn(data);
			spin_lock_irq(&timerlist_lock);
		}
		++timer_jiffies;
		tv1.index = (tv1.index + 1) & TVR_MASK;
	}
	spin_unlock_irq(&timerlist_lock);
}
static inline void run_old_timers(void)
{
	struct timer_struct *tp;
	unsigned long mask;

	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (time_after(tp->expires, jiffies))
			continue;
		timer_active &= ~mask;
		tp->fn();
		sti();
	}
}

spinlock_t tqueue_lock;

void tqueue_bh(void)
{
	run_task_queue(&tq_timer);
}

void immediate_bh(void)
{
	run_task_queue(&tq_immediate);
}

unsigned long timer_active = 0;
struct timer_struct timer_table[32];
/*
 * Hmm.. Changed this, as the GNU make sources (load.c) seem to
 * imply that avenrun[] is the standard name for this kind of thing.
 * Nothing else seems to be standardized: the fractional size etc
 * all seem to differ on different machines.
 */
unsigned long avenrun[3] = { 0,0,0 };

/*
 * Nr of active tasks - counted in fixed-point numbers
 */
static unsigned long count_active_tasks(void)
{
	struct task_struct *p;
	unsigned long nr = 0;

	read_lock(&tasklist_lock);
	for_each_task(p) {
		if ((p->state == TASK_RUNNING ||
		     (p->state & TASK_UNINTERRUPTIBLE) ||
		     (p->state & TASK_SWAPPING)))
			nr += FIXED_1;
	}
	read_unlock(&tasklist_lock);
	return nr;
}
static inline void calc_load(unsigned long ticks)
{
	unsigned long active_tasks; /* fixed-point */
	static int count = LOAD_FREQ;

	count -= ticks;
	if (count < 0) {
		count += LOAD_FREQ;
		active_tasks = count_active_tasks();
		CALC_LOAD(avenrun[0], EXP_1, active_tasks);
		CALC_LOAD(avenrun[1], EXP_5, active_tasks);
		CALC_LOAD(avenrun[2], EXP_15, active_tasks);
	}
}
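
/*
 * Illustrative sketch (editor's addition, compiled out): CALC_LOAD (from
 * <linux/sched.h>) keeps avenrun[] in FSHIFT-bit fixed point, computing
 * load = (load*exp + n*(FIXED_1-exp)) >> FSHIFT.  Starting from load 0
 * with two runnable tasks, one 5-second step of the 1-minute average
 * yields 2*(FIXED_1-EXP_1).  The helper name is invented.
 */
#if 0
static unsigned long example_load_step(unsigned long load, int n_active)
{
	/* count_active_tasks() returns the task count scaled by FIXED_1 */
	unsigned long n = n_active * FIXED_1;

	CALC_LOAD(load, EXP_1, n);	/* decay toward n, 1-min constant */
	return load;
}
#endif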
/*
 * this routine handles the overflow of the microsecond field
 *
 * The tricky bits of code to handle the accurate clock support
 * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
 * They were originally developed for SUN and DEC kernels.
 * All the kudos should go to Dave for this stuff.
 */
static void second_overflow(void)
{
	long ltemp;

	/* Bump the maxerror field */
	time_maxerror += time_tolerance >> SHIFT_USEC;
	if ( time_maxerror > NTP_PHASE_LIMIT ) {
		time_maxerror = NTP_PHASE_LIMIT;
		time_status |= STA_UNSYNC;
	}

	/*
	 * Leap second processing. If in leap-insert state at
	 * the end of the day, the system clock is set back one
	 * second; if in leap-delete state, the system clock is
	 * set ahead one second. The microtime() routine or
	 * external clock driver will insure that reported time
	 * is always monotonic. The ugly divides should be
	 * replaced.
	 */
	switch (time_state) {

	case TIME_OK:
		if (time_status & STA_INS)
			time_state = TIME_INS;
		else if (time_status & STA_DEL)
			time_state = TIME_DEL;
		break;

	case TIME_INS:
		if (xtime.tv_sec % 86400 == 0) {
			xtime.tv_sec--;
			time_state = TIME_OOP;
			printk(KERN_NOTICE "Clock: inserting leap second 23:59:60 UTC\n");
		}
		break;

	case TIME_DEL:
		if ((xtime.tv_sec + 1) % 86400 == 0) {
			xtime.tv_sec++;
			time_state = TIME_WAIT;
			printk(KERN_NOTICE "Clock: deleting leap second 23:59:59 UTC\n");
		}
		break;

	case TIME_OOP:
		time_state = TIME_WAIT;
		break;

	case TIME_WAIT:
		if (!(time_status & (STA_INS | STA_DEL)))
			time_state = TIME_OK;
	}

	/*
	 * Compute the phase adjustment for the next second. In
	 * PLL mode, the offset is reduced by a fixed factor
	 * times the time constant. In FLL mode the offset is
	 * used directly. In either mode, the maximum phase
	 * adjustment for each second is clamped so as to spread
	 * the adjustment over not more than the number of
	 * seconds between updates.
	 */
	if (time_offset < 0) {
		ltemp = -time_offset;
		if (!(time_status & STA_FLL))
			ltemp >>= SHIFT_KG + time_constant;
		if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
			ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
		time_offset += ltemp;
		time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
	} else {
		ltemp = time_offset;
		if (!(time_status & STA_FLL))
			ltemp >>= SHIFT_KG + time_constant;
		if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
			ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
		time_offset -= ltemp;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
	}

	/*
	 * Compute the frequency estimate and additional phase
	 * adjustment due to frequency error for the next
	 * second. When the PPS signal is engaged, gnaw on the
	 * watchdog counter and update the frequency computed by
	 * the pll and the PPS signal.
	 */
	pps_valid++;
	if (pps_valid == PPS_VALID) {	/* PPS signal lost */
		pps_jitter = MAXTIME;
		pps_stabil = MAXFREQ;
		time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
				 STA_PPSWANDER | STA_PPSERROR);
	}
	ltemp = time_freq + pps_freq;
	if (ltemp < 0)
		time_adj -= -ltemp >>
		    (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
	else
		time_adj += ltemp >>
		    (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);

#if HZ == 100
	/* Compensate for (HZ==100) != (1 << SHIFT_HZ).
	 * Add 25% and 3.125% to get 128.125; => only 0.125% error (p. 14)
	 */
	if (time_adj < 0)
		time_adj -= (-time_adj >> 2) + (-time_adj >> 5);
	else
		time_adj += (time_adj >> 2) + (time_adj >> 5);
#endif
}
/* in the NTP reference this is called "hardclock()" */
static void update_wall_time_one_tick(void)
{
	if ( (time_adjust_step = time_adjust) != 0 ) {
	    /* We are doing an adjtime thing.
	     *
	     * Prepare time_adjust_step to be within bounds.
	     * Note that a positive time_adjust means we want the clock
	     * to run faster.
	     *
	     * Limit the amount of the step to be in the range
	     * -tickadj .. +tickadj
	     */
	     if (time_adjust > tickadj)
		time_adjust_step = tickadj;
	     else if (time_adjust < -tickadj)
		time_adjust_step = -tickadj;

	    /* Reduce by this step the amount of time left  */
	    time_adjust -= time_adjust_step;
	}
	xtime.tv_usec += tick + time_adjust_step;
	/*
	 * Advance the phase, once it gets to one microsecond, then
	 * advance the tick more.
	 */
	time_phase += time_adj;
	if (time_phase <= -FINEUSEC) {
		long ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		xtime.tv_usec -= ltemp;
	}
	else if (time_phase >= FINEUSEC) {
		long ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		xtime.tv_usec += ltemp;
	}
}
/*
 * Using a loop looks inefficient, but "ticks" is
 * usually just one (we shouldn't be losing ticks,
 * we're doing it this way mainly for interrupt
 * latency reasons, not because we think we'll
 * have lots of lost timer ticks)
 */
static void update_wall_time(unsigned long ticks)
{
	do {
		ticks--;
		update_wall_time_one_tick();
	} while (ticks);

	if (xtime.tv_usec >= 1000000) {
	    xtime.tv_usec -= 1000000;
	    xtime.tv_sec++;
	    second_overflow();
	}
}
static inline void do_process_times(struct task_struct *p,
	unsigned long user, unsigned long system)
{
	unsigned long psecs;

	psecs = (p->times.tms_utime += user);
	psecs += (p->times.tms_stime += system);
	if (psecs / HZ > p->rlim[RLIMIT_CPU].rlim_cur) {
		/* Send SIGXCPU every second.. */
		if (!(psecs % HZ))
			send_sig(SIGXCPU, p, 1);
		/* and SIGKILL when we go over max.. */
		if (psecs / HZ > p->rlim[RLIMIT_CPU].rlim_max)
			send_sig(SIGKILL, p, 1);
	}
}
static inline void do_it_virt(struct task_struct * p, unsigned long ticks)
{
	unsigned long it_virt = p->it_virt_value;

	if (it_virt) {
		if (it_virt <= ticks) {
			it_virt = ticks + p->it_virt_incr;
			send_sig(SIGVTALRM, p, 1);
		}
		p->it_virt_value = it_virt - ticks;
	}
}

static inline void do_it_prof(struct task_struct * p, unsigned long ticks)
{
	unsigned long it_prof = p->it_prof_value;

	if (it_prof) {
		if (it_prof <= ticks) {
			it_prof = ticks + p->it_prof_incr;
			send_sig(SIGPROF, p, 1);
		}
		p->it_prof_value = it_prof - ticks;
	}
}
void update_one_process(struct task_struct *p,
	unsigned long ticks, unsigned long user, unsigned long system, int cpu)
{
	p->per_cpu_utime[cpu] += user;
	p->per_cpu_stime[cpu] += system;
	do_process_times(p, user, system);
	do_it_virt(p, user);
	do_it_prof(p, ticks);
}
static void update_process_times(unsigned long ticks, unsigned long system)
{
/*
 * SMP does this on a per-CPU basis elsewhere
 */
#ifndef  __SMP__
	struct task_struct * p = current;
	unsigned long user = ticks - system;
	if (p->pid) {
		p->counter -= ticks;
		if (p->counter < 0) {
			p->counter = 0;
			p->need_resched = 1;
		}
		if (p->priority < DEF_PRIORITY)
			kstat.cpu_nice += user;
		else
			kstat.cpu_user += user;
		kstat.cpu_system += system;
	}
	update_one_process(p, ticks, user, system, 0);
#endif
}
volatile unsigned long lost_ticks = 0;
static unsigned long lost_ticks_system = 0;

/*
 * This spinlock protects us from races in SMP while playing with xtime. -arca
 */
rwlock_t xtime_lock = RW_LOCK_UNLOCKED;

static inline void update_times(void)
{
	unsigned long ticks;

	/*
	 * update_times() is run from the raw timer_bh handler so we
	 * just know that the irqs are locally enabled and so we don't
	 * need to save/restore the flags of the local CPU here. -arca
	 */
	write_lock_irq(&xtime_lock);

	ticks = lost_ticks;
	lost_ticks = 0;

	if (ticks) {
		unsigned long system;
		system = xchg(&lost_ticks_system, 0);

		calc_load(ticks);
		update_wall_time(ticks);
		write_unlock_irq(&xtime_lock);

		update_process_times(ticks, system);

	} else
		write_unlock_irq(&xtime_lock);
}
static void timer_bh(void)
{
	update_times();
	run_old_timers();
	run_timer_list();
}

void do_timer(struct pt_regs * regs)
{
	(*(unsigned long *)&jiffies)++;
	lost_ticks++;
	mark_bh(TIMER_BH);
	if (!user_mode(regs))
		lost_ticks_system++;
	if (tq_timer)
		mark_bh(TQUEUE_BH);
}
/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
asmlinkage unsigned int sys_alarm(unsigned int seconds)
{
	struct itimerval it_new, it_old;
	unsigned int oldalarm;

	it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
	it_new.it_value.tv_sec = seconds;
	it_new.it_value.tv_usec = 0;
	do_setitimer(ITIMER_REAL, &it_new, &it_old);
	oldalarm = it_old.it_value.tv_sec;
	/* ehhh.. We can't return 0 if we have an alarm pending..     */
	/* And we'd better return too much rather than too little anyway */
	if (it_old.it_value.tv_usec)
		oldalarm++;
	return oldalarm;
}
/*
 * The Alpha uses getxpid, getxuid, and getxgid instead.  Maybe this
 * should be moved into arch/i386 instead?
 */
asmlinkage int sys_getpid(void)
{
	/* This is SMP safe - current->pid doesn't change */
	return current->pid;
}

/*
 * This is not strictly SMP safe: p_opptr could change
 * from under us. However, rather than getting any lock
 * we can use an optimistic algorithm: get the parent
 * pid, and go back and check that the parent is still
 * the same. If it has changed (which is extremely unlikely
 * indeed), we just try again..
 *
 * NOTE! This depends on the fact that even if we _do_
 * get an old value of "parent", we can happily dereference
 * the pointer: we just can't necessarily trust the result
 * until we know that the parent pointer is valid.
 *
 * The "mb()" macro is a memory barrier - a synchronizing
 * event. It also makes sure that gcc doesn't optimize
 * away the necessary memory references.. The barrier doesn't
 * have to have all that strong semantics: on x86 we don't
 * really require a synchronizing instruction, for example.
 * The barrier is more important for code generation than
 * for any real memory ordering semantics (even if there is
 * a small window for a race, using the old pointer is
 * harmless for a while).
 */
asmlinkage int sys_getppid(void)
{
	int pid;
	struct task_struct * me = current;
	struct task_struct * parent;

	parent = me->p_opptr;
	for (;;) {
		pid = parent->pid;
#if __SMP__
{
		struct task_struct *old = parent;
		mb();
		parent = me->p_opptr;
		if (old != parent)
			continue;
}
#endif
		break;
	}
	return pid;
}
asmlinkage int sys_getuid(void)
{
	/* Only we change this so SMP safe */
	return current->uid;
}

asmlinkage int sys_geteuid(void)
{
	/* Only we change this so SMP safe */
	return current->euid;
}

asmlinkage int sys_getgid(void)
{
	/* Only we change this so SMP safe */
	return current->gid;
}

asmlinkage int sys_getegid(void)
{
	/* Only we change this so SMP safe */
	return current->egid;
}
/*
 * This has been replaced by sys_setpriority.  Maybe it should be
 * moved into the arch dependent tree for those ports that require
 * it for backward compatibility?
 */

asmlinkage int sys_nice(int increment)
{
	unsigned long newprio;
	int increase = 0;

	/*
	 *	Setpriority might change our priority at the same moment.
	 *	We don't have to worry. Conceptually one call occurs first
	 *	and we have a single winner.
	 */

	newprio = increment;
	if (increment < 0) {
		if (!capable(CAP_SYS_NICE))
			return -EPERM;
		newprio = -increment;
		increase = 1;
	}

	if (newprio > 40)
		newprio = 40;
	/*
	 * do a "normalization" of the priority (traditionally
	 * Unix nice values are -20 to 20; Linux doesn't really
	 * use that kind of thing, but uses the length of the
	 * timeslice instead (default 210 ms). The rounding is
	 * why we want to avoid negative values.
	 */
	newprio = (newprio * DEF_PRIORITY + 10) / 20;
	increment = newprio;
	if (increase)
		increment = -increment;
	/*
	 *	Current->priority can change between this point
	 *	and the assignment. We are assigning not doing add/subs
	 *	so that's ok. Conceptually a process might just instantaneously
	 *	read the value we stomp over. I don't think that is an issue
	 *	unless posix makes it one. If so we can loop on changes
	 *	to current->priority.
	 */
	newprio = current->priority - increment;
	if ((signed) newprio < 1)
		newprio = 1;
	if (newprio > DEF_PRIORITY*2)
		newprio = DEF_PRIORITY*2;
	current->priority = newprio;
	return 0;
}
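
/*
 * Illustrative sketch (editor's addition, compiled out): the
 * normalization above maps traditional nice steps onto the DEF_PRIORITY
 * timeslice scale (20 at HZ==100).  E.g. nice(10) gives a step of
 * (10*20 + 10)/20 == 10, so priority drops by 10; negative increments
 * need CAP_SYS_NICE and raise priority, clamped to 1..DEF_PRIORITY*2.
 * The helper name is invented.
 */
#if 0
static unsigned long example_nice_to_priority(int nice_val, unsigned long prio)
{
	unsigned long step = (nice_val < 0) ? -nice_val : nice_val;

	if (step > 40)
		step = 40;
	step = (step * DEF_PRIORITY + 10) / 20;	/* nice 10 -> step 10 */

	if (nice_val < 0)
		prio += step;		/* "nicer" to us: bigger timeslice */
	else
		prio -= step;
	if ((signed long) prio < 1)
		prio = 1;
	if (prio > DEF_PRIORITY*2)
		prio = DEF_PRIORITY*2;
	return prio;			/* e.g. nice 10 from 20 -> 10 */
}
#endif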
static inline struct task_struct *find_process_by_pid(pid_t pid)
{
	struct task_struct *tsk = current;

	if (pid)
		tsk = find_task_by_pid(pid);
	return tsk;
}
static int setscheduler(pid_t pid, int policy,
			struct sched_param *param)
{
	struct sched_param lp;
	struct task_struct *p;
	int retval;

	retval = -EINVAL;
	if (!param || pid < 0)
		goto out_nounlock;

	retval = -EFAULT;
	if (copy_from_user(&lp, param, sizeof(struct sched_param)))
		goto out_nounlock;

	/*
	 * We play safe to avoid deadlocks.
	 */
	spin_lock_irq(&runqueue_lock);
	read_lock(&tasklist_lock);

	p = find_process_by_pid(pid);

	retval = -ESRCH;
	if (!p)
		goto out_unlock;

	if (policy < 0)
		policy = p->policy;
	else {
		retval = -EINVAL;
		if (policy != SCHED_FIFO && policy != SCHED_RR &&
				policy != SCHED_OTHER)
			goto out_unlock;
	}

	/*
	 * Valid priorities for SCHED_FIFO and SCHED_RR are 1..99, valid
	 * priority for SCHED_OTHER is 0.
	 */
	retval = -EINVAL;
	if (lp.sched_priority < 0 || lp.sched_priority > 99)
		goto out_unlock;
	if ((policy == SCHED_OTHER) != (lp.sched_priority == 0))
		goto out_unlock;

	retval = -EPERM;
	if ((policy == SCHED_FIFO || policy == SCHED_RR) &&
	    !capable(CAP_SYS_NICE))
		goto out_unlock;
	if ((current->euid != p->euid) && (current->euid != p->uid) &&
	    !capable(CAP_SYS_NICE))
		goto out_unlock;

	retval = 0;
	p->policy = policy;
	p->rt_priority = lp.sched_priority;
	if (p->next_run)
		move_first_runqueue(p);

	current->need_resched = 1;

out_unlock:
	read_unlock(&tasklist_lock);
	spin_unlock_irq(&runqueue_lock);

out_nounlock:
	return retval;
}
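
/*
 * Illustrative sketch (editor's addition, compiled out): what a
 * userspace caller of the syscalls below looks like.  Per the checks
 * above, moving a task to SCHED_FIFO needs priority 1..99 and
 * CAP_SYS_NICE.  This is the standard libc wrapper usage, shown here
 * only for orientation.
 */
#if 0
#include <sched.h>

static int make_me_fifo(void)
{
	struct sched_param param;

	param.sched_priority = 50;	/* 1..99 for SCHED_FIFO */
	return sched_setscheduler(0, SCHED_FIFO, &param);	/* 0 == self */
}
#endif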
asmlinkage int sys_sched_setscheduler(pid_t pid, int policy,
				      struct sched_param *param)
{
	return setscheduler(pid, policy, param);
}

asmlinkage int sys_sched_setparam(pid_t pid, struct sched_param *param)
{
	return setscheduler(pid, -1, param);
}

asmlinkage int sys_sched_getscheduler(pid_t pid)
{
	struct task_struct *p;
	int retval;

	retval = -EINVAL;
	if (pid < 0)
		goto out_nounlock;

	read_lock(&tasklist_lock);

	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (!p)
		goto out_unlock;

	retval = p->policy;

out_unlock:
	read_unlock(&tasklist_lock);

out_nounlock:
	return retval;
}
asmlinkage int sys_sched_getparam(pid_t pid, struct sched_param *param)
{
	struct task_struct *p;
	struct sched_param lp;
	int retval;

	retval = -EINVAL;
	if (!param || pid < 0)
		goto out_nounlock;

	read_lock(&tasklist_lock);
	p = find_process_by_pid(pid);
	retval = -ESRCH;
	if (!p)
		goto out_unlock;
	lp.sched_priority = p->rt_priority;
	read_unlock(&tasklist_lock);

	/*
	 * This one might sleep, we cannot do it with a spinlock held ...
	 */
	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;

out_nounlock:
	return retval;

out_unlock:
	read_unlock(&tasklist_lock);
	return retval;
}
asmlinkage int sys_sched_yield(void)
{
	spin_lock_irq(&runqueue_lock);
	if (current->policy == SCHED_OTHER)
		current->policy |= SCHED_YIELD;
	current->need_resched = 1;
	move_last_runqueue(current);
	spin_unlock_irq(&runqueue_lock);
	return 0;
}
asmlinkage int sys_sched_get_priority_max(int policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = 99;
		break;
	case SCHED_OTHER:
		ret = 0;
		break;
	}
	return ret;
}

asmlinkage int sys_sched_get_priority_min(int policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = 1;
		break;
	case SCHED_OTHER:
		ret = 0;
	}
	return ret;
}

asmlinkage int sys_sched_rr_get_interval(pid_t pid, struct timespec *interval)
{
	struct timespec t;

	t.tv_sec = 0;
	t.tv_nsec = 150000;
	if (copy_to_user(interval, &t, sizeof(struct timespec)))
		return -EFAULT;
	return 0;
}
asmlinkage int sys_nanosleep(struct timespec *rqtp, struct timespec *rmtp)
{
	struct timespec t;
	unsigned long expire;

	if(copy_from_user(&t, rqtp, sizeof(struct timespec)))
		return -EFAULT;

	if (t.tv_nsec >= 1000000000L || t.tv_nsec < 0 || t.tv_sec < 0)
		return -EINVAL;

	if (t.tv_sec == 0 && t.tv_nsec <= 2000000L &&
	    current->policy != SCHED_OTHER)
	{
		/*
		 * Short delay requests up to 2 ms will be handled with
		 * high precision by a busy wait for all real-time processes.
		 *
		 * It's important on SMP not to do this holding locks.
		 */
		udelay((t.tv_nsec + 999) / 1000);
		return 0;
	}

	expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec);

	current->state = TASK_INTERRUPTIBLE;
	expire = schedule_timeout(expire);

	if (expire) {
		if (rmtp) {
			jiffies_to_timespec(expire, &t);
			if (copy_to_user(rmtp, &t, sizeof(struct timespec)))
				return -EFAULT;
		}
		return -EINTR;
	}
	return 0;
}
static void show_task(int nr,struct task_struct * p)
{
	unsigned long free = 0;
	int state;
	static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };

	printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
	state = p->state ? ffz(~p->state) + 1 : 0;
	if (((unsigned) state) < sizeof(stat_nam)/sizeof(char *))
		printk(stat_nam[state]);
	else
		printk(" ");
#if (BITS_PER_LONG == 32)
	if (p == current)
		printk(" current  ");
	else
		printk(" %08lX ", thread_saved_pc(&p->tss));
#else
	if (p == current)
		printk("   current task   ");
	else
		printk(" %016lx ", thread_saved_pc(&p->tss));
#endif
	{
		unsigned long * n = (unsigned long *) (p+1);
		while (!*n)
			n++;
		free = (unsigned long) n - (unsigned long)(p+1);
	}
	printk("%5lu %5d %6d ", free, p->pid, p->p_pptr->pid);
	if (p->p_cptr)
		printk("%5d ", p->p_cptr->pid);
	else
		printk("      ");
	if (p->p_ysptr)
		printk("%7d", p->p_ysptr->pid);
	else
		printk("       ");
	if (p->p_osptr)
		printk(" %5d\n", p->p_osptr->pid);
	else
		printk("\n");

	{
		struct signal_queue *q;
		char s[sizeof(sigset_t)*2+1], b[sizeof(sigset_t)*2+1];

		render_sigset_t(&p->signal, s);
		render_sigset_t(&p->blocked, b);
		printk("   sig: %d %s %s :", signal_pending(p), s, b);
		for (q = p->sigqueue; q ; q = q->next)
			printk(" %d", q->info.si_signo);
		printk("\n");
	}
}

char * render_sigset_t(sigset_t *set, char *buffer)
{
	int i = _NSIG, x;
	do {
		i -= 4, x = 0;
		if (sigismember(set, i+1)) x |= 1;
		if (sigismember(set, i+2)) x |= 2;
		if (sigismember(set, i+3)) x |= 4;
		if (sigismember(set, i+4)) x |= 8;
		*buffer++ = (x < 10 ? '0' : 'a' - 10) + x;
	} while (i >= 4);
	*buffer = 0;
	return buffer;
}
void show_state(void)
{
	struct task_struct *p;

#if (BITS_PER_LONG == 32)
	printk("  task             PC    stack   pid father child younger older\n");
#else
	printk("  task                 PC        stack   pid father child younger older\n");
#endif
	read_lock(&tasklist_lock);
	for_each_task(p)
		show_task((p->tarray_ptr - &task[0]),p);
	read_unlock(&tasklist_lock);
}
void __init init_idle(void)
{
	cycles_t t;
	struct schedule_data * sched_data;
	sched_data = &aligned_data[smp_processor_id()].schedule_data;

	t = get_cycles();
	sched_data->curr = current;
	sched_data->last_schedule = t;
}

void __init sched_init(void)
{
	/*
	 * We have to do a little magic to get the first
	 * process right in SMP mode.
	 */
	int cpu=hard_smp_processor_id();
	int nr = NR_TASKS;

	init_task.processor=cpu;

	/* Init task array free list and pidhash table. */
	while(--nr > 0)
		add_free_taskslot(&task[nr]);

	for(nr = 0; nr < PIDHASH_SZ; nr++)
		pidhash[nr] = NULL;

	init_bh(TIMER_BH, timer_bh);
	init_bh(TQUEUE_BH, tqueue_bh);
	init_bh(IMMEDIATE_BH, immediate_bh);
}