/*
 *  Kernel internal timers, kernel timekeeping, basic process system calls
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>
#ifdef CONFIG_TIME_INTERPOLATION
static void time_interpolator_update(long delta_nsec);
#else
#define time_interpolator_update(x)
#endif
u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);
/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
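
/*
 * Wheel geometry implied by the constants above (illustrative, with
 * CONFIG_BASE_SMALL == 0, i.e. TVR_BITS == 8 and TVN_BITS == 6): tv1 has
 * one list per jiffy for the next 2^8 = 256 jiffies; each higher level
 * has 2^6 = 64 slots, each covering the full span of the level below it,
 * so tv2 reaches out to 2^14 jiffies, tv3 to 2^20, tv4 to 2^26 and tv5
 * to the full 2^32 range.
 */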
typedef struct tvec_s {
	struct list_head vec[TVN_SIZE];
} tvec_t;

typedef struct tvec_root_s {
	struct list_head vec[TVR_SIZE];
} tvec_root_t;
struct tvec_t_base_s {
	spinlock_t lock;
	struct timer_list *running_timer;
	unsigned long timer_jiffies;
	tvec_root_t tv1;
	tvec_t tv2;
	tvec_t tv3;
	tvec_t tv4;
	tvec_t tv5;
} ____cacheline_aligned_in_smp;

typedef struct tvec_t_base_s tvec_base_t;
static DEFINE_PER_CPU(tvec_base_t *, tvec_bases);
tvec_base_t boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static inline void set_running_timer(tvec_base_t *base,
					struct timer_list *timer)
{
#ifdef CONFIG_SMP
	base->running_timer = timer;
#endif
}
static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct list_head *vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = base->tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = base->tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec = base->tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = base->tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/*
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		int i;
		/* If the timeout is larger than 0xffffffff on 64-bit
		 * architectures then we use the maximum timeout:
		 */
		if (idx > 0xffffffffUL) {
			idx = 0xffffffffUL;
			expires = idx + base->timer_jiffies;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
	}
	/*
	 * Timers are FIFO:
	 */
	list_add_tail(&timer->entry, vec);
}
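
/*
 * Worked example with made-up numbers: if base->timer_jiffies == 1000
 * and timer->expires == 1300, then idx == 300, which is >= TVR_SIZE
 * (256) but below 1 << (TVR_BITS + TVN_BITS) (16384), so the timer is
 * hashed into tv2.vec[(1300 >> 8) & 63] == tv2.vec[5]; it will cascade
 * into tv1 once timer_jiffies gets within 256 jiffies of it.
 */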
/**
 * init_timer - initialize a timer.
 * @timer: the timer to be initialized
 *
 * init_timer() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void fastcall init_timer(struct timer_list *timer)
{
	timer->entry.next = NULL;
	timer->base = per_cpu(tvec_bases, raw_smp_processor_id());
}
EXPORT_SYMBOL(init_timer);
static inline void detach_timer(struct timer_list *timer,
				int clear_pending)
{
	struct list_head *entry = &timer->entry;

	__list_del(entry->prev, entry->next);
	if (clear_pending)
		entry->next = NULL;
	entry->prev = LIST_POISON2;
}
/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static tvec_base_t *lock_timer_base(struct timer_list *timer,
					unsigned long *flags)
{
	tvec_base_t *base;

	for (;;) {
		base = timer->base;
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU */
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}
int __mod_timer(struct timer_list *timer, unsigned long expires)
{
	tvec_base_t *base, *new_base;
	unsigned long flags;
	int ret = 0;

	BUG_ON(!timer->function);

	base = lock_timer_base(timer, &flags);

	if (timer_pending(timer)) {
		detach_timer(timer, 0);
		ret = 1;
	}

	new_base = __get_cpu_var(tvec_bases);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * otherwise del_timer_sync() can't detect that the timer's
		 * handler has not yet finished. This also guarantees that
		 * the timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer->base = NULL;
			spin_unlock(&base->lock);
			base = new_base;
			spin_lock(&base->lock);
			timer->base = base;
		}
	}

	timer->expires = expires;
	internal_add_timer(base, timer);
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

EXPORT_SYMBOL(__mod_timer);
/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	tvec_base_t *base = per_cpu(tvec_bases, cpu);
	unsigned long flags;

	BUG_ON(timer_pending(timer) || !timer->function);
	spin_lock_irqsave(&base->lock, flags);
	timer->base = base;
	internal_add_timer(base, timer);
	spin_unlock_irqrestore(&base->lock, flags);
}
/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 *
 * mod_timer() is a more efficient way to update the expires field of an
 * active timer (if the timer is inactive it will be activated).
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	BUG_ON(!timer->function);

	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer->expires == expires && timer_pending(timer))
		return 1;

	return __mod_timer(timer, expires);
}

EXPORT_SYMBOL(mod_timer);
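
/*
 * A minimal usage sketch of the timer API above. This block is
 * illustrative only and compiled out; my_timer, my_handler and
 * my_start_example are made-up names, not part of this file.
 */
#if 0
static void my_handler(unsigned long data)
{
	/* runs in softirq context, must not sleep */
}

static struct timer_list my_timer;

static void my_start_example(void)
{
	init_timer(&my_timer);
	my_timer.function = my_handler;
	my_timer.data = 0;
	/* arm (or re-arm) the timer to fire one second from now */
	mod_timer(&my_timer, jiffies + HZ);
}
#endif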
/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	tvec_base_t *base;
	unsigned long flags;
	int ret = 0;

	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		if (timer_pending(timer)) {
			detach_timer(timer, 1);
			ret = 1;
		}
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}

EXPORT_SYMBOL(del_timer);
#ifdef CONFIG_SMP
/*
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 *
 * It must not be called from interrupt contexts.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	tvec_base_t *base;
	unsigned long flags;
	int ret = -1;

	base = lock_timer_base(timer, &flags);

	if (base->running_timer == timer)
		goto out;

	ret = 0;
	if (timer_pending(timer)) {
		detach_timer(timer, 1);
		ret = 1;
	}
out:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. The timer's handler must not call
 * add_timer_on(). Upon exit the timer is not queued and the handler is
 * not running on any CPU.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
	for (;;) {
		int ret = try_to_del_timer_sync(timer);
		if (ret >= 0)
			return ret;
	}
}

EXPORT_SYMBOL(del_timer_sync);
#endif
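
/*
 * Teardown sketch for the synchronization rules above (illustrative only,
 * compiled out; my_timer and my_stop_example are made-up names): the
 * caller first prevents the handler from re-arming the timer, then waits
 * out any handler that is still running.
 */
#if 0
static void my_stop_example(void)
{
	/* assumption: nothing can call mod_timer(&my_timer, ...) anymore */
	del_timer_sync(&my_timer);
	/* from here on the handler is guaranteed not to be running */
}
#endif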
static int cascade(tvec_base_t *base, tvec_t *tv, int index)
{
	/* cascade all the timers from tv up one level */
	struct list_head *head, *curr;

	head = tv->vec + index;
	curr = head->next;
	/*
	 * We are removing _all_ timers from the list, so we don't have to
	 * detach them individually, just clear the list afterwards.
	 */
	while (curr != head) {
		struct timer_list *tmp;

		tmp = list_entry(curr, struct timer_list, entry);
		BUG_ON(tmp->base != base);
		curr = curr->next;
		internal_add_timer(base, tmp);
	}
	INIT_LIST_HEAD(head);

	return index;
}
/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
#define INDEX(N) (base->timer_jiffies >> (TVR_BITS + N * TVN_BITS)) & TVN_MASK

static inline void __run_timers(tvec_base_t *base)
{
	struct timer_list *timer;

	spin_lock_irq(&base->lock);
	while (time_after_eq(jiffies, base->timer_jiffies)) {
		struct list_head work_list = LIST_HEAD_INIT(work_list);
		struct list_head *head = &work_list;
		int index = base->timer_jiffies & TVR_MASK;

		/*
		 * Cascade timers:
		 */
		if (!index &&
			(!cascade(base, &base->tv2, INDEX(0))) &&
				(!cascade(base, &base->tv3, INDEX(1))) &&
					!cascade(base, &base->tv4, INDEX(2)))
			cascade(base, &base->tv5, INDEX(3));
		++base->timer_jiffies;
		list_splice_init(base->tv1.vec + index, &work_list);
		while (!list_empty(head)) {
			void (*fn)(unsigned long);
			unsigned long data;

			timer = list_entry(head->next, struct timer_list, entry);
			fn = timer->function;
			data = timer->data;

			set_running_timer(base, timer);
			detach_timer(timer, 1);
			spin_unlock_irq(&base->lock);
			{
				int preempt_count = preempt_count();
				fn(data);
				if (preempt_count != preempt_count()) {
					printk(KERN_WARNING "huh, entered %p "
					       "with preempt_count %08x, exited"
					       " with %08x?\n",
					       fn, preempt_count,
					       preempt_count());
					BUG();
				}
			}
			spin_lock_irq(&base->lock);
		}
	}
	set_running_timer(base, NULL);
	spin_unlock_irq(&base->lock);
}
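
/*
 * Cascade arithmetic by example (assuming TVR_BITS == 8, TVN_BITS == 6):
 * index is 0 once every 256 jiffies; INDEX(0) then names the tv2 slot
 * whose timers now fall inside tv1's 256-jiffy window, so cascade()
 * rehashes them down. Since cascade() returns the slot it emptied, tv3
 * is only touched when tv2 just wrapped (slot 0), and so on up to tv5.
 */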
#ifdef CONFIG_NO_IDLE_HZ
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
unsigned long next_timer_interrupt(void)
{
	tvec_base_t *base;
	struct list_head *list;
	struct timer_list *nte;
	unsigned long expires;
	unsigned long hr_expires = MAX_JIFFY_OFFSET;
	ktime_t hr_delta;
	tvec_t *varray[4];
	int i, j;

	hr_delta = hrtimer_get_next_event();
	if (hr_delta.tv64 != KTIME_MAX) {
		struct timespec tsdelta;
		tsdelta = ktime_to_timespec(hr_delta);
		hr_expires = timespec_to_jiffies(&tsdelta);
		if (hr_expires < 3)
			return hr_expires + jiffies;
	}
	hr_expires += jiffies;

	base = __get_cpu_var(tvec_bases);
	spin_lock(&base->lock);
	expires = base->timer_jiffies + (LONG_MAX >> 1);
	list = NULL;

	/* Look for timer events in tv1. */
	j = base->timer_jiffies & TVR_MASK;
	do {
		list_for_each_entry(nte, base->tv1.vec + j, entry) {
			expires = nte->expires;
			if (j < (base->timer_jiffies & TVR_MASK))
				list = base->tv2.vec + (INDEX(0));
			goto found;
		}
		j = (j + 1) & TVR_MASK;
	} while (j != (base->timer_jiffies & TVR_MASK));

	/* Check tv2-tv5. */
	varray[0] = &base->tv2;
	varray[1] = &base->tv3;
	varray[2] = &base->tv4;
	varray[3] = &base->tv5;
	for (i = 0; i < 4; i++) {
		j = INDEX(i);
		do {
			if (list_empty(varray[i]->vec + j)) {
				j = (j + 1) & TVN_MASK;
				continue;
			}
			list_for_each_entry(nte, varray[i]->vec + j, entry)
				if (time_before(nte->expires, expires))
					expires = nte->expires;
			if (j < (INDEX(i)) && i < 3)
				list = varray[i + 1]->vec + (INDEX(i + 1));
			goto found;
		} while (j != (INDEX(i)));
	}
found:
	if (list) {
		/*
		 * The search wrapped. We need to look at the next list
		 * from next tv element that would cascade into tv element
		 * where we found the timer element.
		 */
		list_for_each_entry(nte, list, entry) {
			if (time_before(nte->expires, expires))
				expires = nte->expires;
		}
	}
	spin_unlock(&base->lock);

	if (time_before(hr_expires, expires))
		return hr_expires;

	return expires;
}
#endif
/******************************************************************/

/*
 * Timekeeping variables
 */
unsigned long tick_usec = TICK_USEC;		/* USER_HZ period (usec) */
unsigned long tick_nsec = TICK_NSEC;		/* ACTHZ period (nsec) */
/*
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub-jiffy times) to get to monotonic time. Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative;
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 */
struct timespec xtime __attribute__ ((aligned (16)));
struct timespec wall_to_monotonic __attribute__ ((aligned (16)));

EXPORT_SYMBOL(xtime);

/* Don't completely fail for HZ > 500.  */
int tickadj = 500/HZ ? : 1;		/* microsecs */
/*
 * phase-lock loop variables
 */
/* TIME_ERROR prevents overwriting the CMOS clock */
int time_state = TIME_OK;		/* clock synchronization status	*/
int time_status = STA_UNSYNC;		/* clock status bits		*/
long time_offset;			/* time adjustment (us)		*/
long time_constant = 2;			/* pll time constant		*/
long time_tolerance = MAXFREQ;		/* frequency tolerance (ppm)	*/
long time_precision = 1;		/* clock precision (us)		*/
long time_maxerror = NTP_PHASE_LIMIT;	/* maximum error (us)		*/
long time_esterror = NTP_PHASE_LIMIT;	/* estimated error (us)		*/
static long time_phase;			/* phase offset (scaled us)	*/
long time_freq = (((NSEC_PER_SEC + HZ/2) % HZ - HZ/2) << SHIFT_USEC) / NSEC_PER_USEC;
					/* frequency offset (scaled ppm)*/
static long time_adj;			/* tick adjust (scaled 1 / HZ)	*/
long time_reftime;			/* time at last adjustment (s)	*/
long time_adjust;
long time_next_adjust;
/*
 * this routine handles the overflow of the microsecond field
 *
 * The tricky bits of code to handle the accurate clock support
 * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
 * They were originally developed for SUN and DEC kernels.
 * All the kudos should go to Dave for this stuff.
 */
static void second_overflow(void)
{
	long ltemp;

	/* Bump the maxerror field */
	time_maxerror += time_tolerance >> SHIFT_USEC;
	if (time_maxerror > NTP_PHASE_LIMIT) {
		time_maxerror = NTP_PHASE_LIMIT;
		time_status |= STA_UNSYNC;
	}

	/*
	 * Leap second processing. If in leap-insert state at the end of the
	 * day, the system clock is set back one second; if in leap-delete
	 * state, the system clock is set ahead one second. The microtime()
	 * routine or external clock driver will ensure that reported time is
	 * always monotonic. The ugly divides should be replaced.
	 */
	switch (time_state) {
	case TIME_OK:
		if (time_status & STA_INS)
			time_state = TIME_INS;
		else if (time_status & STA_DEL)
			time_state = TIME_DEL;
		break;
	case TIME_INS:
		if (xtime.tv_sec % 86400 == 0) {
			xtime.tv_sec--;
			wall_to_monotonic.tv_sec++;
			/*
			 * The timer interpolator will make time change
			 * gradually instead of an immediate jump by one second
			 */
			time_interpolator_update(-NSEC_PER_SEC);
			time_state = TIME_OOP;
			printk(KERN_NOTICE "Clock: inserting leap second "
					"23:59:60 UTC\n");
		}
		break;
	case TIME_DEL:
		if ((xtime.tv_sec + 1) % 86400 == 0) {
			xtime.tv_sec++;
			wall_to_monotonic.tv_sec--;
			/*
			 * Use of time interpolator for a gradual change of
			 * time
			 */
			time_interpolator_update(NSEC_PER_SEC);
			time_state = TIME_WAIT;
			printk(KERN_NOTICE "Clock: deleting leap second "
					"23:59:59 UTC\n");
		}
		break;
	case TIME_OOP:
		time_state = TIME_WAIT;
		break;
	case TIME_WAIT:
		if (!(time_status & (STA_INS | STA_DEL)))
			time_state = TIME_OK;
	}

	/*
	 * Compute the phase adjustment for the next second. In PLL mode, the
	 * offset is reduced by a fixed factor times the time constant. In FLL
	 * mode the offset is used directly. In either mode, the maximum phase
	 * adjustment for each second is clamped so as to spread the adjustment
	 * over not more than the number of seconds between updates.
	 */
	ltemp = time_offset;
	if (!(time_status & STA_FLL))
		ltemp = shift_right(ltemp, SHIFT_KG + time_constant);
	ltemp = min(ltemp, (MAXPHASE / MINSEC) << SHIFT_UPDATE);
	ltemp = max(ltemp, -(MAXPHASE / MINSEC) << SHIFT_UPDATE);
	time_offset -= ltemp;
	time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);

	/*
	 * Compute the frequency estimate and additional phase adjustment due
	 * to frequency error for the next second.
	 */
	ltemp = time_freq;
	time_adj += shift_right(ltemp, (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE));

#if HZ == 100
	/*
	 * Compensate for (HZ==100) != (1 << SHIFT_HZ). Add 25% and 3.125% to
	 * get 128.125; => only 0.125% error (p. 14)
	 */
	time_adj += shift_right(time_adj, 2) + shift_right(time_adj, 5);
#endif
#if HZ == 250
	/*
	 * Compensate for (HZ==250) != (1 << SHIFT_HZ). Add 1.5625% and
	 * 0.78125% to get 255.85938; => only 0.05% error (p. 14)
	 */
	time_adj += shift_right(time_adj, 6) + shift_right(time_adj, 7);
#endif
#if HZ == 1000
	/*
	 * Compensate for (HZ==1000) != (1 << SHIFT_HZ). Add 1.5625% and
	 * 0.78125% to get 1023.4375; => only 0.05% error (p. 14)
	 */
	time_adj += shift_right(time_adj, 6) + shift_right(time_adj, 7);
#endif
}
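
/*
 * Worked example of the PLL step above (assuming SHIFT_KG == 6 as in
 * <linux/timex.h> and the default time_constant of 2): in PLL mode the
 * remaining offset is reduced by offset >> (6 + 2), i.e. roughly 1/256th
 * (~0.4%) of it per second, spreading a correction over many seconds.
 */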
/*
 * Returns how many microseconds we need to add to xtime this tick
 * in doing an adjustment requested with adjtime.
 */
static long adjtime_adjustment(void)
{
	long time_adjust_step;

	time_adjust_step = time_adjust;
	if (time_adjust_step) {
		/*
		 * We are doing an adjtime thing. Prepare time_adjust_step to
		 * be within bounds. Note that a positive time_adjust means we
		 * want the clock to run faster.
		 *
		 * Limit the amount of the step to be in the range
		 * -tickadj .. +tickadj
		 */
		time_adjust_step = min(time_adjust_step, (long)tickadj);
		time_adjust_step = max(time_adjust_step, (long)-tickadj);
	}
	return time_adjust_step;
}
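
/*
 * Worked example: with HZ == 1000, tickadj evaluates to 1 (500/HZ is 0,
 * so its "?:" default kicks in), and adjtime() slews at most 1 us per
 * tick -- about 1 ms of correction per second of real time.
 */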
/* in the NTP reference this is called "hardclock()" */
static void update_wall_time_one_tick(void)
{
	long time_adjust_step, delta_nsec;

	time_adjust_step = adjtime_adjustment();
	if (time_adjust_step)
		/* Reduce by this step the amount of time left */
		time_adjust -= time_adjust_step;
	delta_nsec = tick_nsec + time_adjust_step * 1000;
	/*
	 * Advance the phase, once it gets to one microsecond, then
	 * advance the tick more.
	 */
	time_phase += time_adj;
	if ((time_phase >= FINENSEC) || (time_phase <= -FINENSEC)) {
		long ltemp = shift_right(time_phase, (SHIFT_SCALE - 10));
		time_phase -= ltemp << (SHIFT_SCALE - 10);
		delta_nsec += ltemp;
	}
	xtime.tv_nsec += delta_nsec;
	time_interpolator_update(delta_nsec);

	/* Changes by adjtime() do not take effect till next tick. */
	if (time_next_adjust != 0) {
		time_adjust = time_next_adjust;
		time_next_adjust = 0;
	}
}
/*
 * Return how long ticks are at the moment, that is, how much time
 * update_wall_time_one_tick will add to xtime next time we call it
 * (assuming no calls to do_adjtimex in the meantime).
 * The return value is in fixed-point nanoseconds with SHIFT_SCALE-10
 * bits to the right of the binary point.
 * This function has no side-effects.
 */
u64 current_tick_length(void)
{
	long delta_nsec;

	delta_nsec = tick_nsec + adjtime_adjustment() * 1000;
	return ((u64) delta_nsec << (SHIFT_SCALE - 10)) + time_adj;
}
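
/*
 * Format example (assuming SHIFT_SCALE == 22, giving SHIFT_SCALE - 10 ==
 * 12 fractional bits): a nominal 1 ms tick, tick_nsec == 1000000, is
 * reported as 1000000 << 12, and time_adj perturbs only those low-order
 * fractional bits.
 */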
/*
 * Using a loop looks inefficient, but "ticks" is
 * usually just one (we shouldn't be losing ticks,
 * we're doing it this way mainly for interrupt
 * latency reasons, not because we think we'll
 * have lots of lost timer ticks).
 */
static void update_wall_time(unsigned long ticks)
{
	do {
		ticks--;
		update_wall_time_one_tick();
		if (xtime.tv_nsec >= 1000000000) {
			xtime.tv_nsec -= 1000000000;
			xtime.tv_sec++;
			second_overflow();
		}
	} while (ticks);
}
/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process.  user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;
	int cpu = smp_processor_id();

	/* Note: this timer irq context must be accounted for as well. */
	if (user_tick)
		account_user_time(p, jiffies_to_cputime(1));
	else
		account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
	run_local_timers();
	if (rcu_pending(cpu))
		rcu_check_callbacks(cpu, user_tick);
	scheduler_tick();
	run_posix_cpu_timers(p);
}
/*
 * Nr of active tasks - counted in fixed-point numbers
 */
static unsigned long count_active_tasks(void)
{
	return nr_active() * FIXED_1;
}

/*
 * Hmm.. Changed this, as the GNU make sources (load.c) seem to
 * imply that avenrun[] is the standard name for this kind of thing.
 * Nothing else seems to be standardized: the fractional size etc
 * all seem to differ on different machines.
 *
 * Requires xtime_lock to access.
 */
unsigned long avenrun[3];

EXPORT_SYMBOL(avenrun);
/*
 * calc_load - given tick count, update the avenrun load estimates.
 * This is called while holding a write_lock on xtime_lock.
 */
static inline void calc_load(unsigned long ticks)
{
	unsigned long active_tasks; /* fixed-point */
	static int count = LOAD_FREQ;

	count -= ticks;
	if (count < 0) {
		count += LOAD_FREQ;
		active_tasks = count_active_tasks();
		CALC_LOAD(avenrun[0], EXP_1, active_tasks);
		CALC_LOAD(avenrun[1], EXP_5, active_tasks);
		CALC_LOAD(avenrun[2], EXP_15, active_tasks);
	}
}
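
/*
 * Fixed-point example: FIXED_1 is 1 << FSHIFT (2048 with the usual
 * FSHIFT == 11), so an average of 2.5 runnable tasks is stored as 5120.
 * sys_sysinfo() below rescales these by SI_LOAD_SHIFT - FSHIFT for
 * userspace.
 */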
/* jiffies at the most recent update of wall time */
unsigned long wall_jiffies = INITIAL_JIFFIES;

/*
 * This read-write spinlock protects us from races in SMP while
 * playing with xtime and avenrun.
 */
#ifndef ARCH_HAVE_XTIME_LOCK
seqlock_t xtime_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;

EXPORT_SYMBOL(xtime_lock);
#endif
/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
	tvec_base_t *base = __get_cpu_var(tvec_bases);

	hrtimer_run_queues();
	if (time_after_eq(jiffies, base->timer_jiffies))
		__run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
	raise_softirq(TIMER_SOFTIRQ);
}
/*
 * Called by the timer interrupt. xtime_lock must already be taken
 * by the timer IRQ!
 */
static inline void update_times(void)
{
	unsigned long ticks;

	ticks = jiffies - wall_jiffies;
	if (ticks) {
		wall_jiffies += ticks;
		update_wall_time(ticks);
	}
	calc_load(ticks);
}
/*
 * The 64-bit jiffies value is not atomic - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * jiffies is defined in the linker script...
 */

void do_timer(struct pt_regs *regs)
{
	jiffies_64++;
	/* prevent loading jiffies before storing new jiffies_64 value. */
	barrier();
	update_times();
}
#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
asmlinkage unsigned long sys_alarm(unsigned int seconds)
{
	return alarm_setitimer(seconds);
}

#endif
#ifndef __alpha__

/*
 * The Alpha uses getxpid, getxuid, and getxgid instead.  Maybe this
 * should be moved into arch/i386 instead?
 */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
asmlinkage long sys_getpid(void)
{
	return current->tgid;
}
/*
 * Accessing ->group_leader->real_parent is not SMP-safe, it could
 * change from under us. However, rather than getting any lock
 * we can use an optimistic algorithm: get the parent
 * pid, and go back and check that the parent is still
 * the same. If it has changed (which is extremely unlikely
 * indeed), we just try again..
 *
 * NOTE! This depends on the fact that even if we _do_
 * get an old value of "parent", we can happily dereference
 * the pointer (it was and remains a dereferencable kernel pointer
 * no matter what): we just can't necessarily trust the result
 * until we know that the parent pointer is valid.
 *
 * NOTE2: ->group_leader never changes from under us.
 */
asmlinkage long sys_getppid(void)
{
	int pid;
	struct task_struct *me = current;
	struct task_struct *parent;

	parent = me->group_leader->real_parent;
	for (;;) {
		pid = parent->tgid;
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
{
		struct task_struct *old = parent;

		/*
		 * Make sure we read the pid before re-reading the
		 * parent pointer:
		 */
		smp_rmb();
		parent = me->group_leader->real_parent;
		if (old != parent)
			continue;
}
#endif
		break;
	}
	return pid;
}
asmlinkage long sys_getuid(void)
{
	/* Only we change this so SMP safe */
	return current->uid;
}

asmlinkage long sys_geteuid(void)
{
	/* Only we change this so SMP safe */
	return current->euid;
}

asmlinkage long sys_getgid(void)
{
	/* Only we change this so SMP safe */
	return current->gid;
}

asmlinkage long sys_getegid(void)
{
	/* Only we change this so SMP safe */
	return current->egid;
}

#endif
static void process_timeout(unsigned long __data)
{
	wake_up_process((task_t *)__data);
}
/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
fastcall signed long __sched schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout) {
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative values
		 * but I'd like to return a valid offset (>=0) to allow
		 * the caller to do everything it wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0) {
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx from %p\n", timeout,
				__builtin_return_address(0));
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	setup_timer(&timer, process_timeout, (unsigned long)current);
	__mod_timer(&timer, expire);
	schedule();
	del_singleshot_timer_sync(&timer);

	timeout = expire - jiffies;

 out:
	return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);
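
/*
 * Canonical usage sketch (illustrative only, compiled out;
 * my_wait_example is a made-up name): sleep interruptibly for up to one
 * second and see how much of the timeout was left over.
 */
#if 0
static signed long my_wait_example(void)
{
	set_current_state(TASK_INTERRUPTIBLE);
	/* returns 0 on full timeout, else the remaining jiffies */
	return schedule_timeout(HZ);
}
#endif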
/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);
/* Thread ID - the internal kernel "pid" */
asmlinkage long sys_gettid(void)
{
	return current->pid;
}
/*
 * sys_sysinfo - fill in sysinfo struct
 */
asmlinkage long sys_sysinfo(struct sysinfo __user *info)
{
	struct sysinfo val;
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	unsigned long seq;

	memset((char *)&val, 0, sizeof(struct sysinfo));

	do {
		struct timespec tp;
		seq = read_seqbegin(&xtime_lock);

		/*
		 * This is annoying.  The below is the same thing
		 * posix_get_clock_monotonic() does, but it wants to
		 * take the lock which we want to cover the loads stuff
		 * too.
		 */

		getnstimeofday(&tp);
		tp.tv_sec += wall_to_monotonic.tv_sec;
		tp.tv_nsec += wall_to_monotonic.tv_nsec;
		if (tp.tv_nsec - NSEC_PER_SEC >= 0) {
			tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC;
			tp.tv_sec++;
		}
		val.uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

		val.loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
		val.loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
		val.loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);

		val.procs = nr_threads;
	} while (read_seqretry(&xtime_lock, seq));

	si_meminfo(&val);
	si_swapinfo(&val);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than can be stored in a 32 bit unsigned long then
	 * we can be binary compatible with 2.2.x kernels.  If not,
	 * well, in that case 2.2.x was broken anyways...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */

	mem_total = val.totalram + val.totalswap;
	if (mem_total < val.totalram || mem_total < val.totalswap)
		goto out;
	bitcount = 0;
	mem_unit = val.mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * val.mem_unit and set it to 1.  This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */

	val.mem_unit = 1;
	val.totalram <<= bitcount;
	val.freeram <<= bitcount;
	val.sharedram <<= bitcount;
	val.bufferram <<= bitcount;
	val.totalswap <<= bitcount;
	val.freeswap <<= bitcount;
	val.totalhigh <<= bitcount;
	val.freehigh <<= bitcount;

 out:
	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}
static int __devinit init_timers_cpu(int cpu)
{
	int j;
	tvec_base_t *base;

	base = per_cpu(tvec_bases, cpu);
	if (!base) {
		static char boot_done;

		/*
		 * Cannot do allocation in init_timers as that runs before the
		 * allocator initializes (and would waste memory if there are
		 * more possible CPUs than will ever be installed/brought up).
		 */
		if (boot_done) {
			base = kmalloc_node(sizeof(*base), GFP_KERNEL,
						cpu_to_node(cpu));
			if (!base)
				return -ENOMEM;
			memset(base, 0, sizeof(*base));
		} else {
			base = &boot_tvec_bases;
			boot_done = 1;
		}
		per_cpu(tvec_bases, cpu) = base;
	}
	spin_lock_init(&base->lock);
	for (j = 0; j < TVN_SIZE; j++) {
		INIT_LIST_HEAD(base->tv5.vec + j);
		INIT_LIST_HEAD(base->tv4.vec + j);
		INIT_LIST_HEAD(base->tv3.vec + j);
		INIT_LIST_HEAD(base->tv2.vec + j);
	}
	for (j = 0; j < TVR_SIZE; j++)
		INIT_LIST_HEAD(base->tv1.vec + j);

	base->timer_jiffies = jiffies;
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head)
{
	struct timer_list *timer;

	while (!list_empty(head)) {
		timer = list_entry(head->next, struct timer_list, entry);
		detach_timer(timer, 0);
		timer->base = new_base;
		internal_add_timer(new_base, timer);
	}
}

static void __devinit migrate_timers(int cpu)
{
	tvec_base_t *old_base;
	tvec_base_t *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = per_cpu(tvec_bases, cpu);
	new_base = get_cpu_var(tvec_bases);

	local_irq_disable();
	spin_lock(&new_base->lock);
	spin_lock(&old_base->lock);

	BUG_ON(old_base->running_timer);

	for (i = 0; i < TVR_SIZE; i++)
		migrate_timer_list(new_base, old_base->tv1.vec + i);
	for (i = 0; i < TVN_SIZE; i++) {
		migrate_timer_list(new_base, old_base->tv2.vec + i);
		migrate_timer_list(new_base, old_base->tv3.vec + i);
		migrate_timer_list(new_base, old_base->tv4.vec + i);
		migrate_timer_list(new_base, old_base->tv5.vec + i);
	}

	spin_unlock(&old_base->lock);
	spin_unlock(&new_base->lock);
	local_irq_enable();
	put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */
static int __devinit timer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
		if (init_timers_cpu(cpu) < 0)
			return NOTIFY_BAD;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
		migrate_timers(cpu);
		break;
#endif
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __devinitdata timers_nb = {
	.notifier_call	= timer_cpu_notify,
};


void __init init_timers(void)
{
	timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
				(void *)(long)smp_processor_id());
	register_cpu_notifier(&timers_nb);
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL);
}
#ifdef CONFIG_TIME_INTERPOLATION

struct time_interpolator *time_interpolator __read_mostly;
static struct time_interpolator *time_interpolator_list __read_mostly;
static DEFINE_SPINLOCK(time_interpolator_lock);

static inline u64 time_interpolator_get_cycles(unsigned int src)
{
	unsigned long (*x)(void);

	switch (src) {
	case TIME_SOURCE_FUNCTION:
		x = time_interpolator->addr;
		return x();

	case TIME_SOURCE_MMIO64:
		return readq_relaxed((void __iomem *)time_interpolator->addr);

	case TIME_SOURCE_MMIO32:
		return readl_relaxed((void __iomem *)time_interpolator->addr);

	default: return get_cycles();
	}
}
static inline u64 time_interpolator_get_counter(int writelock)
{
	unsigned int src = time_interpolator->source;

	if (time_interpolator->jitter) {
		u64 lcycle;
		u64 now;

		do {
			lcycle = time_interpolator->last_cycle;
			now = time_interpolator_get_cycles(src);
			if (lcycle && time_after(lcycle, now))
				return lcycle;

			/* When holding the xtime write lock, there's no need
			 * to add the overhead of the cmpxchg.  Readers are
			 * forced to retry until the write lock is released.
			 */
			if (writelock) {
				time_interpolator->last_cycle = now;
				return now;
			}
			/* Keep track of the last timer value returned. The use of cmpxchg here
			 * will cause contention in an SMP environment.
			 */
		} while (unlikely(cmpxchg(&time_interpolator->last_cycle, lcycle, now) != lcycle));
		return now;
	} else
		return time_interpolator_get_cycles(src);
}
void time_interpolator_reset(void)
{
	time_interpolator->offset = 0;
	time_interpolator->last_counter = time_interpolator_get_counter(1);
}
#define GET_TI_NSECS(count,i) (((((count) - i->last_counter) & (i)->mask) * (i)->nsec_per_cyc) >> (i)->shift)

unsigned long time_interpolator_get_offset(void)
{
	/* If we do not have a time interpolator set up then just return zero */
	if (!time_interpolator)
		return 0;

	return time_interpolator->offset +
		GET_TI_NSECS(time_interpolator_get_counter(0), time_interpolator);
}
1419 #define INTERPOLATOR_MAX_SKIP 10*INTERPOLATOR_ADJUST
1421 static void time_interpolator_update(long delta_nsec
)
1424 unsigned long offset
;
1426 /* If there is no time interpolator set up then do nothing */
1427 if (!time_interpolator
)
1431 * The interpolator compensates for late ticks by accumulating the late
1432 * time in time_interpolator->offset. A tick earlier than expected will
1433 * lead to a reset of the offset and a corresponding jump of the clock
1434 * forward. Again this only works if the interpolator clock is running
1435 * slightly slower than the regular clock and the tuning logic insures
1439 counter
= time_interpolator_get_counter(1);
1440 offset
= time_interpolator
->offset
+
1441 GET_TI_NSECS(counter
, time_interpolator
);
1443 if (delta_nsec
< 0 || (unsigned long) delta_nsec
< offset
)
1444 time_interpolator
->offset
= offset
- delta_nsec
;
1446 time_interpolator
->skips
++;
1447 time_interpolator
->ns_skipped
+= delta_nsec
- offset
;
1448 time_interpolator
->offset
= 0;
1450 time_interpolator
->last_counter
= counter
;
1452 /* Tuning logic for time interpolator invoked every minute or so.
1453 * Decrease interpolator clock speed if no skips occurred and an offset is carried.
1454 * Increase interpolator clock speed if we skip too much time.
1456 if (jiffies
% INTERPOLATOR_ADJUST
== 0)
1458 if (time_interpolator
->skips
== 0 && time_interpolator
->offset
> TICK_NSEC
)
1459 time_interpolator
->nsec_per_cyc
--;
1460 if (time_interpolator
->ns_skipped
> INTERPOLATOR_MAX_SKIP
&& time_interpolator
->offset
== 0)
1461 time_interpolator
->nsec_per_cyc
++;
1462 time_interpolator
->skips
= 0;
1463 time_interpolator
->ns_skipped
= 0;
static inline int
is_better_time_interpolator(struct time_interpolator *new)
{
	if (!time_interpolator)
		return 1;
	return new->frequency > 2*time_interpolator->frequency ||
	    (unsigned long)new->drift < (unsigned long)time_interpolator->drift;
}
void
register_time_interpolator(struct time_interpolator *ti)
{
	unsigned long flags;

	/* Sanity check */
	if (ti->frequency == 0 || ti->mask == 0)
		BUG();

	ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency;
	spin_lock(&time_interpolator_lock);
	write_seqlock_irqsave(&xtime_lock, flags);
	if (is_better_time_interpolator(ti)) {
		time_interpolator = ti;
		time_interpolator_reset();
	}
	write_sequnlock_irqrestore(&xtime_lock, flags);

	ti->next = time_interpolator_list;
	time_interpolator_list = ti;
	spin_unlock(&time_interpolator_lock);
}
void
unregister_time_interpolator(struct time_interpolator *ti)
{
	struct time_interpolator *curr, **prev;
	unsigned long flags;

	spin_lock(&time_interpolator_lock);
	prev = &time_interpolator_list;
	for (curr = *prev; curr; curr = curr->next) {
		if (curr == ti) {
			*prev = curr->next;
			break;
		}
		prev = &curr->next;
	}

	write_seqlock_irqsave(&xtime_lock, flags);
	if (ti == time_interpolator) {
		/* we lost the best time-interpolator: */
		time_interpolator = NULL;
		/* find the next-best interpolator */
		for (curr = time_interpolator_list; curr; curr = curr->next)
			if (is_better_time_interpolator(curr))
				time_interpolator = curr;
		time_interpolator_reset();
	}
	write_sequnlock_irqrestore(&xtime_lock, flags);
	spin_unlock(&time_interpolator_lock);
}
#endif /* CONFIG_TIME_INTERPOLATION */
/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout)
		timeout = schedule_timeout_uninterruptible(timeout);
}

EXPORT_SYMBOL(msleep);
/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current))
		timeout = schedule_timeout_interruptible(timeout);
	return jiffies_to_msecs(timeout);
}

EXPORT_SYMBOL(msleep_interruptible);
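
/*
 * Usage sketch for the sleep helpers above (illustrative only, compiled
 * out; my_sleep_example is a made-up name):
 */
#if 0
static unsigned long my_sleep_example(void)
{
	msleep(20);				/* at least 20 ms, uninterruptible */
	return msleep_interruptible(100);	/* ms left if signalled early */
}
#endif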