/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers, basic process system calls
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);
/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
typedef struct tvec_s {
	struct list_head vec[TVN_SIZE];
} tvec_t;

typedef struct tvec_root_s {
	struct list_head vec[TVR_SIZE];
} tvec_root_t;

struct tvec_t_base_s {
	spinlock_t lock;
	struct timer_list *running_timer;
	unsigned long timer_jiffies;
	tvec_root_t tv1;
	tvec_t tv2;
	tvec_t tv3;
	tvec_t tv4;
	tvec_t tv5;
} ____cacheline_aligned;

typedef struct tvec_t_base_s tvec_base_t;

tvec_base_t boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = &boot_tvec_bases;
/*
 * Note that all tvec_bases are 2 byte aligned and the lower bit of
 * base in timer_list is guaranteed to be zero. Use the LSB for
 * the new flag to indicate whether the timer is deferrable.
 */
#define TBASE_DEFERRABLE_FLAG		(0x1)
/* Functions below help us manage 'deferrable' flag */
static inline unsigned int tbase_get_deferrable(tvec_base_t *base)
{
	return ((unsigned int)(unsigned long)base & TBASE_DEFERRABLE_FLAG);
}

static inline tvec_base_t *tbase_get_base(tvec_base_t *base)
{
	return ((tvec_base_t *)((unsigned long)base & ~TBASE_DEFERRABLE_FLAG));
}

static inline void timer_set_deferrable(struct timer_list *timer)
{
	timer->base = ((tvec_base_t *)((unsigned long)(timer->base) |
				       TBASE_DEFERRABLE_FLAG));
}

static inline void
timer_set_base(struct timer_list *timer, tvec_base_t *new_base)
{
	timer->base = (tvec_base_t *)((unsigned long)(new_base) |
				      tbase_get_deferrable(timer->base));
}
/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
	 * already did this.
	 * The skew is done by adding 3*cpunr, then round, then subtract this
	 * extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffie is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq off times etc etc) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as cutoff for this rounding as an extreme upper bound for this.
	 */
	if (rem < HZ/4) /* round down */
		j = j - rem;
	else /* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	if (j <= jiffies) /* rounding ate our timeout entirely; */
		return original;
	return j;
}
EXPORT_SYMBOL_GPL(__round_jiffies);
/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	/*
	 * In theory the following code can skip a jiffy in case jiffies
	 * increments right between the addition and the later subtraction.
	 * However since the entire point of this function is to use
	 * approximate timeouts, it's entirely ok to not handle that.
	 */
	return  __round_jiffies(j + jiffies, cpu) - jiffies;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);
/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return __round_jiffies(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies);
/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);
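
/*
 * Illustrative example, not part of the original file: arming a periodic
 * housekeeping timer with a rounded relative timeout (the timer name is
 * hypothetical). Many timers rounded this way expire on the same tick, so
 * an otherwise idle CPU wakes up once instead of many times.
 */
#if 0
static void example_rearm_housekeeping(struct timer_list *t)
{
	/* fire roughly once a minute, on a whole second */
	mod_timer(t, jiffies + round_jiffies_relative(60 * HZ));
}
#endif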
static inline void set_running_timer(tvec_base_t *base,
					struct timer_list *timer)
{
	base->running_timer = timer;
}
static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct list_head *vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = base->tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = base->tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec = base->tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = base->tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/*
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		int i;
		/* If the timeout is larger than 0xffffffff on 64-bit
		 * architectures then we use the maximum timeout:
		 */
		if (idx > 0xffffffffUL) {
			idx = 0xffffffffUL;
			expires = idx + base->timer_jiffies;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
	}
	/*
	 * Timers are FIFO:
	 */
	list_add_tail(&timer->entry, vec);
}
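
/*
 * Illustrative sketch, not part of the original file: which wheel level a
 * given delta lands in, mirroring the cascading if/else above. With
 * CONFIG_BASE_SMALL=0 (TVR_BITS=8, TVN_BITS=6) the boundaries are 256,
 * 16384, 1048576 and 67108864 jiffies.
 */
#if 0
static int example_wheel_level(unsigned long delta)
{
	if (delta < TVR_SIZE)
		return 1;	/* tv1: indexed directly by expiry */
	if (delta < 1UL << (TVR_BITS + TVN_BITS))
		return 2;	/* tv2 */
	if (delta < 1UL << (TVR_BITS + 2 * TVN_BITS))
		return 3;	/* tv3 */
	if (delta < 1UL << (TVR_BITS + 3 * TVN_BITS))
		return 4;	/* tv4 */
	return 5;		/* tv5, capped at 0xffffffff */
}
#endif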
#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
	unsigned int flag = 0;

	if (unlikely(tbase_get_deferrable(timer->base)))
		flag |= TIMER_STATS_FLAG_DEFERRABLE;

	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
				 timer->function, timer->start_comm, flag);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif
/**
 * init_timer - initialize a timer.
 * @timer: the timer to be initialized
 *
 * init_timer() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void fastcall init_timer(struct timer_list *timer)
{
	timer->entry.next = NULL;
	timer->base = __raw_get_cpu_var(tvec_bases);
#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
}
EXPORT_SYMBOL(init_timer);

void fastcall init_timer_deferrable(struct timer_list *timer)
{
	init_timer(timer);
	timer_set_deferrable(timer);
}
EXPORT_SYMBOL(init_timer_deferrable);
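
/*
 * Illustrative example, not part of the original file: a deferrable timer
 * for non-critical housekeeping (all example_* names are hypothetical).
 * A deferrable timer does not force an idle NO_HZ CPU to wake up; it runs
 * the next time the CPU wakes up for some other reason.
 */
#if 0
static struct timer_list example_cleanup_timer;

static void example_cleanup(unsigned long data)
{
	/* ... non-urgent work ... */
	mod_timer(&example_cleanup_timer, jiffies + 10 * HZ);
}

static void example_cleanup_start(void)
{
	init_timer_deferrable(&example_cleanup_timer);
	example_cleanup_timer.function = example_cleanup;
	example_cleanup_timer.data = 0;
	mod_timer(&example_cleanup_timer, jiffies + 10 * HZ);
}
#endif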
static inline void detach_timer(struct timer_list *timer,
				int clear_pending)
{
	struct list_head *entry = &timer->entry;

	__list_del(entry->prev, entry->next);
	if (clear_pending)
		entry->next = NULL;
	entry->prev = LIST_POISON2;
}
/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static tvec_base_t *lock_timer_base(struct timer_list *timer,
					unsigned long *flags)
	__acquires(timer->base->lock)
{
	tvec_base_t *base;

	for (;;) {
		tvec_base_t *prelock_base = timer->base;
		base = tbase_get_base(prelock_base);
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->lock, *flags);
			if (likely(prelock_base == timer->base))
				return base;
			/* The timer has migrated to another CPU */
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}
int __mod_timer(struct timer_list *timer, unsigned long expires)
{
	tvec_base_t *base, *new_base;
	unsigned long flags;
	int ret = 0;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(!timer->function);

	base = lock_timer_base(timer, &flags);

	if (timer_pending(timer)) {
		detach_timer(timer, 0);
		ret = 1;
	}

	new_base = __get_cpu_var(tvec_bases);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * otherwise del_timer_sync() can't detect that the timer's
		 * handler has not yet finished. This also guarantees that
		 * the timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer_set_base(timer, NULL);
			spin_unlock(&base->lock);
			base = new_base;
			spin_lock(&base->lock);
			timer_set_base(timer, base);
		}
	}

	timer->expires = expires;
	internal_add_timer(base, timer);
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

EXPORT_SYMBOL(__mod_timer);
/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	tvec_base_t *base = per_cpu(tvec_bases, cpu);
	unsigned long flags;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(timer_pending(timer) || !timer->function);
	spin_lock_irqsave(&base->lock, flags);
	timer_set_base(timer, base);
	internal_add_timer(base, timer);
	spin_unlock_irqrestore(&base->lock, flags);
}
/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	BUG_ON(!timer->function);

	timer_stats_timer_set_start_info(timer);
	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer->expires == expires && timer_pending(timer))
		return 1;

	return __mod_timer(timer, expires);
}

EXPORT_SYMBOL(mod_timer);
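
/*
 * Illustrative example, not part of the original file: a watchdog-style
 * timer kicked from a driver's I/O path (hypothetical names). Because
 * mod_timer() activates an inactive timer and re-arms a pending one, the
 * fast path needs no extra bookkeeping.
 */
#if 0
static struct timer_list example_watchdog;

static void example_io_completed(void)
{
	/* push the watchdog out to ~2 seconds from now */
	mod_timer(&example_watchdog, jiffies + 2 * HZ);
}
#endif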
/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	tvec_base_t *base;
	unsigned long flags;
	int ret = 0;

	timer_stats_timer_clear_start_info(timer);
	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		if (timer_pending(timer)) {
			detach_timer(timer, 1);
			ret = 1;
		}
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}

EXPORT_SYMBOL(del_timer);
/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: the timer to delete
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 *
 * It must not be called from interrupt contexts.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	tvec_base_t *base;
	unsigned long flags;
	int ret = -1;

	base = lock_timer_base(timer, &flags);

	if (base->running_timer == timer)
		goto out;

	ret = 0;
	if (timer_pending(timer)) {
		detach_timer(timer, 1);
		ret = 1;
	}
out:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

EXPORT_SYMBOL(try_to_del_timer_sync);
/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. The timer's handler must not call
 * add_timer_on(). Upon exit the timer is not queued and the handler is
 * not running on any CPU.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
	for (;;) {
		int ret = try_to_del_timer_sync(timer);
		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}

EXPORT_SYMBOL(del_timer_sync);
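
/*
 * Illustrative example, not part of the original file: tearing down a
 * device that owns a timer (struct example_dev and its members are
 * hypothetical). After del_timer_sync() returns, the handler is guaranteed
 * not to be running on any CPU, so the memory can be freed. The caller
 * must not hold a lock the handler itself takes, and must prevent the
 * timer from being re-armed.
 */
#if 0
static void example_remove(struct example_dev *dev)
{
	dev->shutting_down = 1;		/* stop the handler from re-arming */
	del_timer_sync(&dev->watchdog);
	kfree(dev);
}
#endif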
static int cascade(tvec_base_t *base, tvec_t *tv, int index)
{
	/* cascade all the timers from tv up one level */
	struct timer_list *timer, *tmp;
	struct list_head tv_list;

	list_replace_init(tv->vec + index, &tv_list);

	/*
	 * We are removing _all_ timers from the list, so we
	 * don't have to detach them individually.
	 */
	list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
		BUG_ON(tbase_get_base(timer->base) != base);
		internal_add_timer(base, timer);
	}

	return index;
}

#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(tvec_base_t *base)
{
	struct timer_list *timer;

	spin_lock_irq(&base->lock);
	while (time_after_eq(jiffies, base->timer_jiffies)) {
		struct list_head work_list;
		struct list_head *head = &work_list;
		int index = base->timer_jiffies & TVR_MASK;

		/*
		 * Cascade timers:
		 */
		if (!index &&
			(!cascade(base, &base->tv2, INDEX(0))) &&
				(!cascade(base, &base->tv3, INDEX(1))) &&
					!cascade(base, &base->tv4, INDEX(2)))
			cascade(base, &base->tv5, INDEX(3));
		++base->timer_jiffies;
		list_replace_init(base->tv1.vec + index, &work_list);
		while (!list_empty(head)) {
			void (*fn)(unsigned long);
			unsigned long data;

			timer = list_first_entry(head, struct timer_list, entry);
			fn = timer->function;
			data = timer->data;

			timer_stats_account_timer(timer);

			set_running_timer(base, timer);
			detach_timer(timer, 1);
			spin_unlock_irq(&base->lock);
			{
				int preempt_count = preempt_count();
				fn(data);
				if (preempt_count != preempt_count()) {
					printk(KERN_WARNING "huh, entered %p "
					       "with preempt_count %08x, exited"
					       " with %08x?\n",
					       fn, preempt_count,
					       preempt_count());
					BUG();
				}
			}
			spin_lock_irq(&base->lock);
		}
	}
	set_running_timer(base, NULL);
	spin_unlock_irq(&base->lock);
}
#if defined(CONFIG_NO_IDLE_HZ) || defined(CONFIG_NO_HZ)
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a cpu is idle.
 * This function needs to be called with interrupts disabled.
 */
static unsigned long __next_timer_interrupt(tvec_base_t *base)
{
	unsigned long timer_jiffies = base->timer_jiffies;
	unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
	int index, slot, array, found = 0;
	struct timer_list *nte;
	tvec_t *varray[4];

	/* Look for timer events in tv1. */
	index = slot = timer_jiffies & TVR_MASK;
	do {
		list_for_each_entry(nte, base->tv1.vec + slot, entry) {
			if (tbase_get_deferrable(nte->base))
				continue;

			found = 1;
			expires = nte->expires;
			/* Look at the cascade bucket(s)? */
			if (!index || slot < index)
				goto cascade;
			return expires;
		}
		slot = (slot + 1) & TVR_MASK;
	} while (slot != index);

cascade:
	/* Calculate the next cascade event */
	if (index)
		timer_jiffies += TVR_SIZE - index;
	timer_jiffies >>= TVR_BITS;

	/* Check tv2-tv5. */
	varray[0] = &base->tv2;
	varray[1] = &base->tv3;
	varray[2] = &base->tv4;
	varray[3] = &base->tv5;

	for (array = 0; array < 4; array++) {
		tvec_t *varp = varray[array];

		index = slot = timer_jiffies & TVN_MASK;
		do {
			list_for_each_entry(nte, varp->vec + slot, entry) {
				found = 1;
				if (time_before(nte->expires, expires))
					expires = nte->expires;
			}
			/*
			 * Do we still search for the first timer or are
			 * we looking up the cascade buckets ?
			 */
			if (found) {
				/* Look at the cascade bucket(s)? */
				if (!index || slot < index)
					break;
				return expires;
			}
			slot = (slot + 1) & TVN_MASK;
		} while (slot != index);

		if (index)
			timer_jiffies += TVN_SIZE - index;
		timer_jiffies >>= TVN_BITS;
	}
	return expires;
}
/*
 * Check, if the next hrtimer event is before the next timer wheel
 * event:
 */
static unsigned long cmp_next_hrtimer_event(unsigned long now,
					    unsigned long expires)
{
	ktime_t hr_delta = hrtimer_get_next_event();
	struct timespec tsdelta;
	unsigned long delta;

	if (hr_delta.tv64 == KTIME_MAX)
		return expires;

	/*
	 * Expired timer available, let it expire in the next tick
	 */
	if (hr_delta.tv64 <= 0)
		return now + 1;

	tsdelta = ktime_to_timespec(hr_delta);
	delta = timespec_to_jiffies(&tsdelta);

	/*
	 * Limit the delta to the max value, which is checked in
	 * tick_nohz_stop_sched_tick():
	 */
	if (delta > NEXT_TIMER_MAX_DELTA)
		delta = NEXT_TIMER_MAX_DELTA;

	/*
	 * Take rounding errors into account and make sure that it
	 * expires in the next tick. Otherwise we go into an endless
	 * ping pong due to tick_nohz_stop_sched_tick() retriggering
	 * the timer softirq.
	 */
	now += delta;
	if (time_before(now, expires))
		return now;
	return expires;
}
/**
 * get_next_timer_interrupt - return the jiffy of the next pending timer
 * @now: current time (in jiffies)
 */
unsigned long get_next_timer_interrupt(unsigned long now)
{
	tvec_base_t *base = __get_cpu_var(tvec_bases);
	unsigned long expires;

	spin_lock(&base->lock);
	expires = __next_timer_interrupt(base);
	spin_unlock(&base->lock);

	if (time_before_eq(expires, now))
		return now;

	return cmp_next_hrtimer_event(now, expires);
}

#ifdef CONFIG_NO_IDLE_HZ
unsigned long next_timer_interrupt(void)
{
	return get_next_timer_interrupt(jiffies);
}
#endif

#endif
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
void account_process_tick(struct task_struct *p, int user_tick)
{
	if (user_tick) {
		account_user_time(p, jiffies_to_cputime(1));
		account_user_time_scaled(p, jiffies_to_cputime(1));
	} else {
		account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
		account_system_time_scaled(p, jiffies_to_cputime(1));
	}
}
#endif
/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process.  user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;
	int cpu = smp_processor_id();

	/* Note: this timer irq context must be accounted for as well. */
	account_process_tick(p, user_tick);
	run_local_timers();
	if (rcu_pending(cpu))
		rcu_check_callbacks(cpu, user_tick);
	scheduler_tick();
	run_posix_cpu_timers(p);
}
/*
 * Nr of active tasks - counted in fixed-point numbers
 */
static unsigned long count_active_tasks(void)
{
	return nr_active() * FIXED_1;
}
/*
 * Hmm.. Changed this, as the GNU make sources (load.c) seem to
 * imply that avenrun[] is the standard name for this kind of thing.
 * Nothing else seems to be standardized: the fractional size etc
 * all seem to differ on different machines.
 *
 * Requires xtime_lock to access.
 */
unsigned long avenrun[3];

EXPORT_SYMBOL(avenrun);
/*
 * calc_load - given tick count, update the avenrun load estimates.
 * This is called while holding a write_lock on xtime_lock.
 */
static inline void calc_load(unsigned long ticks)
{
	unsigned long active_tasks; /* fixed-point */
	static int count = LOAD_FREQ;

	count -= ticks;
	if (unlikely(count < 0)) {
		active_tasks = count_active_tasks();
		do {
			CALC_LOAD(avenrun[0], EXP_1, active_tasks);
			CALC_LOAD(avenrun[1], EXP_5, active_tasks);
			CALC_LOAD(avenrun[2], EXP_15, active_tasks);
			count += LOAD_FREQ;
		} while (count < 0);
	}
}
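
/*
 * Illustrative example, not part of the original file: decoding the
 * fixed-point load averages. avenrun[] carries FSHIFT (11) fractional
 * bits, so a 1-minute load of 0.75 is stored as 0.75 * FIXED_1 = 1536.
 * This mirrors the LOAD_INT/LOAD_FRAC macros used when /proc/loadavg is
 * printed.
 */
#if 0
static void example_print_load(void)
{
	unsigned long load = avenrun[0];

	printk(KERN_INFO "load: %lu.%02lu\n",
	       load >> FSHIFT,
	       ((load & (FIXED_1 - 1)) * 100) >> FSHIFT);
}
#endif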
/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
	tvec_base_t *base = __get_cpu_var(tvec_bases);

	hrtimer_run_queues();

	if (time_after_eq(jiffies, base->timer_jiffies))
		__run_timers(base);
}
/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
	raise_softirq(TIMER_SOFTIRQ);
	softlockup_tick();
}
/*
 * Called by the timer interrupt. xtime_lock must already be taken
 * by the timer IRQ!
 */
static inline void update_times(unsigned long ticks)
{
	update_wall_time();
	calc_load(ticks);
}

/*
 * The 64-bit jiffies value is not atomic - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * jiffies is defined in the linker script...
 */

void do_timer(unsigned long ticks)
{
	jiffies_64 += ticks;
	update_times(ticks);
}
#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
asmlinkage unsigned long sys_alarm(unsigned int seconds)
{
	return alarm_setitimer(seconds);
}

#endif
/*
 * The Alpha uses getxpid, getxuid, and getxgid instead.  Maybe this
 * should be moved into arch/i386 instead?
 */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
asmlinkage long sys_getpid(void)
{
	return task_tgid_vnr(current);
}

/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
asmlinkage long sys_getppid(void)
{
	int pid;

	rcu_read_lock();
	pid = task_tgid_nr_ns(current->real_parent, current->nsproxy->pid_ns);
	rcu_read_unlock();

	return pid;
}

asmlinkage long sys_getuid(void)
{
	/* Only we change this so SMP safe */
	return current->uid;
}

asmlinkage long sys_geteuid(void)
{
	/* Only we change this so SMP safe */
	return current->euid;
}

asmlinkage long sys_getgid(void)
{
	/* Only we change this so SMP safe */
	return current->gid;
}

asmlinkage long sys_getegid(void)
{
	/* Only we change this so SMP safe */
	return current->egid;
}
static void process_timeout(unsigned long __data)
{
	wake_up_process((struct task_struct *)__data);
}
/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
fastcall signed long __sched schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative values
		 * but I'd like to return a valid offset (>=0) to allow
		 * the caller to do everything it wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0) {
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx\n", timeout);
			dump_stack();
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	setup_timer(&timer, process_timeout, (unsigned long)current);
	__mod_timer(&timer, expire);
	schedule();
	del_singleshot_timer_sync(&timer);

	timeout = expire - jiffies;

 out:
	return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);
/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);
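
/*
 * Illustrative example, not part of the original file: sleeping for half a
 * second, interruptibly. schedule_timeout() only sleeps if the task state
 * was set first; forgetting set_current_state() makes it return
 * immediately.
 */
#if 0
static signed long example_sleep_half_second(void)
{
	set_current_state(TASK_INTERRUPTIBLE);
	/* returns 0 on full sleep, or the jiffies left if a signal arrived */
	return schedule_timeout(HZ / 2);
}
#endif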
/* Thread ID - the internal kernel "pid" */
asmlinkage long sys_gettid(void)
{
	return task_pid_vnr(current);
}
/**
 * do_sysinfo - fill in sysinfo struct
 * @info: pointer to buffer to fill
 */
int do_sysinfo(struct sysinfo *info)
{
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	unsigned long seq;

	memset(info, 0, sizeof(struct sysinfo));

	do {
		struct timespec tp;
		seq = read_seqbegin(&xtime_lock);

		/*
		 * This is annoying.  The below is the same thing
		 * posix_get_clock_monotonic() does, but it wants to
		 * take the lock which we want to cover the loads stuff
		 * too.
		 */

		getnstimeofday(&tp);
		tp.tv_sec += wall_to_monotonic.tv_sec;
		tp.tv_nsec += wall_to_monotonic.tv_nsec;
		monotonic_to_bootbased(&tp);
		if (tp.tv_nsec - NSEC_PER_SEC >= 0) {
			tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC;
			tp.tv_sec++;
		}
		info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

		info->loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
		info->loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
		info->loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);

		info->procs = nr_threads;
	} while (read_seqretry(&xtime_lock, seq));

	si_meminfo(info);
	si_swapinfo(info);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than can be stored in a 32 bit unsigned long then
	 * we can be binary compatible with 2.2.x kernels.  If not,
	 * well, in that case 2.2.x was broken anyways...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */

	mem_total = info->totalram + info->totalswap;
	if (mem_total < info->totalram || mem_total < info->totalswap)
		goto out;
	bitcount = 0;
	mem_unit = info->mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * info->mem_unit and set it to 1.  This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */

	info->mem_unit = 1;
	info->totalram <<= bitcount;
	info->freeram <<= bitcount;
	info->sharedram <<= bitcount;
	info->bufferram <<= bitcount;
	info->totalswap <<= bitcount;
	info->freeswap <<= bitcount;
	info->totalhigh <<= bitcount;
	info->freehigh <<= bitcount;

out:
	return 0;
}
asmlinkage long sys_sysinfo(struct sysinfo __user *info)
{
	struct sysinfo val;

	do_sysinfo(&val);

	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}
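
/*
 * Illustrative userspace counterpart, not part of the original file:
 * reading the structure that sys_sysinfo() fills in. This is a normal user
 * program (not kernel code), shown only to document the ABI: loads[] is
 * fixed point with SI_LOAD_SHIFT (16) fractional bits, and memory sizes
 * are in units of mem_unit bytes.
 */
#if 0
#include <stdio.h>
#include <sys/sysinfo.h>

int main(void)
{
	struct sysinfo si;

	if (sysinfo(&si) != 0)
		return 1;
	printf("uptime %ld s, 1-min load %.2f, ram %llu bytes\n",
	       si.uptime,
	       si.loads[0] / 65536.0,	/* 1 << SI_LOAD_SHIFT */
	       (unsigned long long)si.totalram * si.mem_unit);
	return 0;
}
#endif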
/*
 * lockdep: we want to track each per-CPU base as a separate lock-class,
 * but timer-bases are kmalloc()-ed, so we need to attach separate
 * keys to them:
 */
static struct lock_class_key base_lock_keys[NR_CPUS];

static int __cpuinit init_timers_cpu(int cpu)
{
	int j;
	tvec_base_t *base;
	static char __cpuinitdata tvec_base_done[NR_CPUS];

	if (!tvec_base_done[cpu]) {
		static char boot_done;

		if (boot_done) {
			/*
			 * The APs use this path later in boot
			 */
			base = kmalloc_node(sizeof(*base),
						GFP_KERNEL | __GFP_ZERO,
						cpu_to_node(cpu));
			if (!base)
				return -ENOMEM;

			/* Make sure that tvec_base is 2 byte aligned */
			if (tbase_get_deferrable(base)) {
				WARN_ON(1);
				kfree(base);
				return -ENOMEM;
			}
			per_cpu(tvec_bases, cpu) = base;
		} else {
			/*
			 * This is for the boot CPU - we use compile-time
			 * static initialisation because per-cpu memory isn't
			 * ready yet and because the memory allocators are not
			 * initialised either.
			 */
			boot_done = 1;
			base = &boot_tvec_bases;
		}
		tvec_base_done[cpu] = 1;
	} else {
		base = per_cpu(tvec_bases, cpu);
	}

	spin_lock_init(&base->lock);
	lockdep_set_class(&base->lock, base_lock_keys + cpu);

	for (j = 0; j < TVN_SIZE; j++) {
		INIT_LIST_HEAD(base->tv5.vec + j);
		INIT_LIST_HEAD(base->tv4.vec + j);
		INIT_LIST_HEAD(base->tv3.vec + j);
		INIT_LIST_HEAD(base->tv2.vec + j);
	}
	for (j = 0; j < TVR_SIZE; j++)
		INIT_LIST_HEAD(base->tv1.vec + j);

	base->timer_jiffies = jiffies;
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head)
{
	struct timer_list *timer;

	while (!list_empty(head)) {
		timer = list_first_entry(head, struct timer_list, entry);
		detach_timer(timer, 0);
		timer_set_base(timer, new_base);
		internal_add_timer(new_base, timer);
	}
}

static void __devinit migrate_timers(int cpu)
{
	tvec_base_t *old_base;
	tvec_base_t *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = per_cpu(tvec_bases, cpu);
	new_base = get_cpu_var(tvec_bases);

	local_irq_disable();
	double_spin_lock(&new_base->lock, &old_base->lock,
			 smp_processor_id() < cpu);

	BUG_ON(old_base->running_timer);

	for (i = 0; i < TVR_SIZE; i++)
		migrate_timer_list(new_base, old_base->tv1.vec + i);
	for (i = 0; i < TVN_SIZE; i++) {
		migrate_timer_list(new_base, old_base->tv2.vec + i);
		migrate_timer_list(new_base, old_base->tv3.vec + i);
		migrate_timer_list(new_base, old_base->tv4.vec + i);
		migrate_timer_list(new_base, old_base->tv5.vec + i);
	}

	double_spin_unlock(&new_base->lock, &old_base->lock,
			   smp_processor_id() < cpu);
	local_irq_enable();
	put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */
static int __cpuinit timer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (init_timers_cpu(cpu) < 0)
			return NOTIFY_BAD;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		migrate_timers(cpu);
		break;
#endif
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata timers_nb = {
	.notifier_call	= timer_cpu_notify,
};
void __init init_timers(void)
{
	int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
				(void *)(long)smp_processor_id());

	BUG_ON(err == NOTIFY_BAD);
	register_cpu_notifier(&timers_nb);
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL);
}
/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout)
		timeout = schedule_timeout_uninterruptible(timeout);
}

EXPORT_SYMBOL(msleep);
/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current))
		timeout = schedule_timeout_interruptible(timeout);
	return jiffies_to_msecs(timeout);
}

EXPORT_SYMBOL(msleep_interruptible);
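
/*
 * Illustrative example, not part of the original file: choosing between the
 * two helpers. msleep() always sleeps for the full period, while
 * msleep_interruptible() returns early (reporting the milliseconds left)
 * when a signal arrives.
 */
#if 0
static int example_settle(void)
{
	msleep(20);				/* short hardware settle time */
	return msleep_interruptible(1000) ? -EINTR : 0;
}
#endif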