/*
 *  linux/kernel/hrtimer.c
 *
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 *  High-resolution kernel timers
 *
 *  In contrast to the low-resolution timeout API implemented in
 *  kernel/timer.c, hrtimers provide finer resolution and accuracy
 *  depending on system configuration and capabilities.
 *
 *  These timers are currently used for:
 *   - itimers
 *   - POSIX timers
 *   - nanosleep
 *   - precise in-kernel timing
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Credits:
 *	based on kernel/timer.c
 *
 *	Help, testing, suggestions, bugfixes, improvements were
 *	provided by:
 *
 *	George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
 *	et al.
 *
 *  For licensing details see kernel-base/COPYING
 */
#include <linux/cpu.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/seq_file.h>
#include <linux/err.h>

#include <asm/uaccess.h>

/**
 * ktime_get - get the monotonic time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get(void)
{
	struct timespec now;

	ktime_get_ts(&now);

	return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get);

/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get_real(void)
{
	struct timespec now;

	getnstimeofday(&now);

	return timespec_to_ktime(now);
}

EXPORT_SYMBOL_GPL(ktime_get_real);
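/*
 * Editor's note: a minimal illustrative sketch, not part of the original
 * file. It shows the practical difference between the two getters above:
 * the monotonic clock is unaffected by settimeofday()/clock steps, so it
 * is the right choice for measuring durations. The identifier
 * example_measure_delta() is hypothetical.
 */
#if 0
static ktime_t example_measure_delta(void)
{
	ktime_t t0, t1;

	t0 = ktime_get();	/* monotonic: safe for measuring durations */
	/* ... work to be timed ... */
	t1 = ktime_get();

	/* ktime_sub() yields the elapsed time as a ktime_t */
	return ktime_sub(t1, t0);
}
#endif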
/*
 * The timer bases:
 *
 * Note: If we want to add new timer bases, we have to skip the two
 * clock ids captured by the cpu-timers. We do this by holding empty
 * entries rather than doing math adjustment of the clock ids.
 * This ensures that we capture erroneous accesses to these clock ids
 * rather than moving them into the range of valid clock id's.
 */
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{

	.clock_base =
	{
		{
			.index = CLOCK_REALTIME,
			.get_time = &ktime_get_real,
			.resolution = KTIME_LOW_RES,
		},
		{
			.index = CLOCK_MONOTONIC,
			.get_time = &ktime_get,
			.resolution = KTIME_LOW_RES,
		},
	}
};

/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
	struct timespec tomono;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		getnstimeofday(ts);
		tomono = wall_to_monotonic;

	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
				ts->tv_nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);

/*
 * Get the coarse grained time at the softirq based on xtime and
 * wall_to_monotonic.
 */
static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
{
	ktime_t xtim, tomono;
	struct timespec xts, tom;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
#ifdef CONFIG_NO_HZ
		getnstimeofday(&xts);
#else
		xts = xtime;
#endif
		tom = wall_to_monotonic;
	} while (read_seqretry(&xtime_lock, seq));

	xtim = timespec_to_ktime(xts);
	tomono = timespec_to_ktime(tom);
	base->clock_base[CLOCK_REALTIME].softirq_time = xtim;
	base->clock_base[CLOCK_MONOTONIC].softirq_time =
		ktime_add(xtim, tomono);
}
/*
 * Helper function to check, whether the timer is running the callback
 * function
 */
static inline int hrtimer_callback_running(struct hrtimer *timer)
{
	return timer->state & HRTIMER_STATE_CALLBACK;
}

/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
 */
#ifdef CONFIG_SMP

/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on the lists/queues.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static
struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
					     unsigned long *flags)
{
	struct hrtimer_clock_base *base;

	for (;;) {
		base = timer->base;
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->cpu_base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU: */
			spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
		}
		cpu_relax();
	}
}

/*
 * Switch the timer base to the current CPU when possible.
 */
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
	struct hrtimer_clock_base *new_base;
	struct hrtimer_cpu_base *new_cpu_base;

	new_cpu_base = &__get_cpu_var(hrtimer_bases);
	new_base = &new_cpu_base->clock_base[base->index];

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * so we keep it on the same CPU. No hassle vs. reprogramming
		 * the event source in the high resolution case. The softirq
		 * code will take care of this when the timer function has
		 * completed. There is no conflict as we hold the lock until
		 * the timer is enqueued.
		 */
		if (unlikely(hrtimer_callback_running(timer)))
			return base;

		/* See the comment in lock_timer_base() */
		timer->base = NULL;
		spin_unlock(&base->cpu_base->lock);
		spin_lock(&new_base->cpu_base->lock);
		timer->base = new_base;
	}
	return new_base;
}

#else /* CONFIG_SMP */

static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	struct hrtimer_clock_base *base = timer->base;

	spin_lock_irqsave(&base->cpu_base->lock, *flags);

	return base;
}

# define switch_hrtimer_base(t, b)	(b)

#endif /* !CONFIG_SMP */
/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64
# ifndef CONFIG_KTIME_SCALAR
/**
 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
 * @kt:		addend
 * @nsec:	the scalar nsec value to add
 *
 * Returns the sum of kt and nsec in ktime_t format
 */
ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		tmp = ktime_set((long)nsec, rem);
	}

	return ktime_add(kt, tmp);
}

EXPORT_SYMBOL_GPL(ktime_add_ns);
# endif /* !CONFIG_KTIME_SCALAR */

/*
 * Divide a ktime value by a nanosecond value
 */
u64 ktime_divns(const ktime_t kt, s64 div)
{
	u64 dclc, inc, dns;
	int sft = 0;

	dclc = dns = ktime_to_ns(kt);
	inc = div;
	/* Make sure the divisor is less than 2^32: */
	while (div >> 32) {
		sft++;
		div >>= 1;
	}
	dclc >>= sft;
	do_div(dclc, (unsigned long) div);

	return dclc;
}
#endif /* BITS_PER_LONG < 64 */
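/*
 * Editor's note on ktime_divns() above: do_div() only accepts a 32-bit
 * divisor, so both the dividend and the divisor are shifted right by the
 * same number of bits until the divisor fits into 32 bits. Because both
 * are scaled by the same power of two, the quotient is preserved (up to
 * rounding). For example, dividing 2^40 ns by a 2^33 ns divisor shifts
 * both right by 2, giving (2^38)/(2^31) = 128, the same result as the
 * unshifted division.
 */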
/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer enabled ?
 */
static int hrtimer_hres_enabled __read_mostly = 1;

/*
 * Enable / Disable high resolution mode
 */
static int __init setup_hrtimer_hres(char *str)
{
	if (!strcmp(str, "off"))
		hrtimer_hres_enabled = 0;
	else if (!strcmp(str, "on"))
		hrtimer_hres_enabled = 1;
	else
		return 0;
	return 1;
}

__setup("highres=", setup_hrtimer_hres);
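/*
 * Editor's note: the __setup() hook above means high resolution mode can
 * be toggled from the kernel command line, e.g. booting with
 * "highres=off" keeps the system in low resolution mode even when
 * CONFIG_HIGH_RES_TIMERS is compiled in.
 */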
/**
 * hrtimer_is_hres_enabled - query, if the highres mode is enabled
 */
static inline int hrtimer_is_hres_enabled(void)
{
	return hrtimer_hres_enabled;
}

/*
 * Is the high resolution mode active ?
 */
static inline int hrtimer_hres_active(void)
{
	return __get_cpu_var(hrtimer_bases).hres_active;
}

/*
 * Reprogram the event source with checking both queues for the
 * next event
 * Called with interrupts disabled and base->lock held
 */
static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
{
	int i;
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	ktime_t expires;

	cpu_base->expires_next.tv64 = KTIME_MAX;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
		struct hrtimer *timer;

		if (!base->first)
			continue;
		timer = rb_entry(base->first, struct hrtimer, node);
		expires = ktime_sub(timer->expires, base->offset);
		if (expires.tv64 < cpu_base->expires_next.tv64)
			cpu_base->expires_next = expires;
	}

	if (cpu_base->expires_next.tv64 != KTIME_MAX)
		tick_program_event(cpu_base->expires_next, 1);
}
/*
 * Shared reprogramming for clock_realtime and clock_monotonic
 *
 * When a timer is enqueued and expires earlier than the already enqueued
 * timers, we have to check, whether it expires earlier than the timer for
 * which the clock event device was armed.
 *
 * Called with interrupts disabled and base->cpu_base.lock held
 */
static int hrtimer_reprogram(struct hrtimer *timer,
			     struct hrtimer_clock_base *base)
{
	ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next;
	ktime_t expires = ktime_sub(timer->expires, base->offset);
	int res;

	/*
	 * When the callback is running, we do not reprogram the clock event
	 * device. The timer callback is either running on a different CPU or
	 * the callback is executed in the hrtimer_interrupt context. The
	 * reprogramming is handled either by the softirq, which called the
	 * callback, or at the end of the hrtimer_interrupt.
	 */
	if (hrtimer_callback_running(timer))
		return 0;

	if (expires.tv64 >= expires_next->tv64)
		return 0;

	/*
	 * Clockevents returns -ETIME when the event is in the past.
	 */
	res = tick_program_event(expires, 0);
	if (!IS_ERR_VALUE(res))
		*expires_next = expires;
	return res;
}
/*
 * Retrigger next event is called after clock was set
 *
 * Called with interrupts disabled via on_each_cpu()
 */
static void retrigger_next_event(void *arg)
{
	struct hrtimer_cpu_base *base;
	struct timespec realtime_offset;
	unsigned long seq;

	if (!hrtimer_hres_active())
		return;

	do {
		seq = read_seqbegin(&xtime_lock);
		set_normalized_timespec(&realtime_offset,
					-wall_to_monotonic.tv_sec,
					-wall_to_monotonic.tv_nsec);
	} while (read_seqretry(&xtime_lock, seq));

	base = &__get_cpu_var(hrtimer_bases);

	/* Adjust CLOCK_REALTIME offset */
	spin_lock(&base->lock);
	base->clock_base[CLOCK_REALTIME].offset =
		timespec_to_ktime(realtime_offset);

	hrtimer_force_reprogram(base);
	spin_unlock(&base->lock);
}

/*
 * Clock realtime was set
 *
 * Change the offset of the realtime clock vs. the monotonic
 * clock.
 *
 * We might have to reprogram the high resolution timer interrupt. On
 * SMP we call the architecture specific code to retrigger _all_ high
 * resolution timer interrupts. On UP we just disable interrupts and
 * call the high resolution interrupt code.
 */
void clock_was_set(void)
{
	/* Retrigger the CPU local events everywhere */
	on_each_cpu(retrigger_next_event, NULL, 0, 1);
}

/*
 * During resume we might have to reprogram the high resolution timer
 * interrupt (on the local CPU):
 */
void hres_timers_resume(void)
{
	WARN_ON_ONCE(num_online_cpus() > 1);

	/* Retrigger the CPU local events: */
	retrigger_next_event(NULL);
}

/*
 * Check, whether the timer is on the callback pending list
 */
static inline int hrtimer_cb_pending(const struct hrtimer *timer)
{
	return timer->state & HRTIMER_STATE_PENDING;
}

/*
 * Remove a timer from the callback pending list
 */
static inline void hrtimer_remove_cb_pending(struct hrtimer *timer)
{
	list_del_init(&timer->cb_entry);
}

/*
 * Initialize the high resolution related parts of cpu_base
 */
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
{
	base->expires_next.tv64 = KTIME_MAX;
	base->hres_active = 0;
	INIT_LIST_HEAD(&base->cb_pending);
}

/*
 * Initialize the high resolution related parts of a hrtimer
 */
static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
{
	INIT_LIST_HEAD(&timer->cb_entry);
}
/*
 * When High resolution timers are active, try to reprogram. Note, that in case
 * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry
 * check happens. The timer gets enqueued into the rbtree. The reprogramming
 * and expiry check is done in the hrtimer_interrupt or in the softirq.
 */
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base)
{
	if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {

		/* Timer is expired, act upon the callback mode */
		switch(timer->cb_mode) {
		case HRTIMER_CB_IRQSAFE_NO_RESTART:
			/*
			 * We can call the callback from here. No restart
			 * happens, so no danger of recursion
			 */
			BUG_ON(timer->function(timer) != HRTIMER_NORESTART);
			return 1;
		case HRTIMER_CB_IRQSAFE_NO_SOFTIRQ:
			/*
			 * This is solely for the sched tick emulation with
			 * dynamic tick support to ensure that we do not
			 * restart the tick right on the edge and end up with
			 * the tick timer in the softirq ! The calling site
			 * takes care of this.
			 */
			return 1;
		case HRTIMER_CB_IRQSAFE:
		case HRTIMER_CB_SOFTIRQ:
			/*
			 * Move everything else into the softirq pending list !
			 */
			list_add_tail(&timer->cb_entry,
				      &base->cpu_base->cb_pending);
			timer->state = HRTIMER_STATE_PENDING;
			raise_softirq(HRTIMER_SOFTIRQ);
			return 1;
		default:
			BUG();
		}
	}
	return 0;
}

/*
 * Switch to high resolution mode
 */
static int hrtimer_switch_to_hres(void)
{
	struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
	unsigned long flags;

	if (base->hres_active)
		return 1;

	local_irq_save(flags);

	if (tick_init_highres()) {
		local_irq_restore(flags);
		return 0;
	}
	base->hres_active = 1;
	base->clock_base[CLOCK_REALTIME].resolution = KTIME_HIGH_RES;
	base->clock_base[CLOCK_MONOTONIC].resolution = KTIME_HIGH_RES;

	tick_setup_sched_timer();

	/* "Retrigger" the interrupt to get things going */
	retrigger_next_event(NULL);
	local_irq_restore(flags);
	printk(KERN_INFO "Switched to high resolution mode on CPU %d\n",
	       smp_processor_id());
	return 1;
}
#else

static inline int hrtimer_hres_active(void) { return 0; }
static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline int hrtimer_switch_to_hres(void) { return 0; }
static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { }
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base)
{
	return 0;
}
static inline int hrtimer_cb_pending(struct hrtimer *timer) { return 0; }
static inline void hrtimer_remove_cb_pending(struct hrtimer *timer) { }
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { }

#endif /* CONFIG_HIGH_RES_TIMERS */

#ifdef CONFIG_TIMER_STATS
void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}
#endif

/*
 * Counterpart to lock_hrtimer_base above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}
/**
 * hrtimer_forward - forward the timer expiry
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 */
u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
	u64 orun = 1;
	ktime_t delta;

	delta = ktime_sub(now, timer->expires);

	if (delta.tv64 < 0)
		return 0;

	if (interval.tv64 < timer->base->resolution.tv64)
		interval.tv64 = timer->base->resolution.tv64;

	if (unlikely(delta.tv64 >= interval.tv64)) {
		s64 incr = ktime_to_ns(interval);

		orun = ktime_divns(delta, incr);
		timer->expires = ktime_add_ns(timer->expires, incr * orun);
		if (timer->expires.tv64 > now.tv64)
			return orun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for exact:
		 */
		orun++;
	}
	timer->expires = ktime_add(timer->expires, interval);
	/*
	 * Make sure, that the result did not wrap with a very large
	 * interval.
	 */
	if (timer->expires.tv64 < 0)
		timer->expires = ktime_set(KTIME_SEC_MAX, 0);

	return orun;
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
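/*
 * Editor's note: a minimal sketch (not part of the original file) of the
 * common hrtimer_forward() usage pattern: a periodic timer whose callback
 * pushes its own expiry forward relative to the previous expiry, so the
 * period does not drift with callback latency. All example_* identifiers
 * are hypothetical.
 */
#if 0
static struct hrtimer example_timer;

static enum hrtimer_restart example_periodic_cb(struct hrtimer *timer)
{
	/*
	 * Push the expiry forward past "now" in 100ms steps; the return
	 * value (ignored here) counts how many whole periods were missed.
	 */
	hrtimer_forward(timer, timer->base->get_time(),
			ktime_set(0, 100 * NSEC_PER_MSEC));

	return HRTIMER_RESTART;	/* keep the timer queued */
}
#endif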
/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is O(log(n)). Must hold the base lock.
 */
static void enqueue_hrtimer(struct hrtimer *timer,
			    struct hrtimer_clock_base *base, int reprogram)
{
	struct rb_node **link = &base->active.rb_node;
	struct rb_node *parent = NULL;
	struct hrtimer *entry;
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct hrtimer, node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same expiry time stay together.
		 */
		if (timer->expires.tv64 < entry->expires.tv64) {
			link = &(*link)->rb_left;
		} else {
			link = &(*link)->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Insert the timer to the rbtree and check whether it
	 * replaces the first pending timer
	 */
	if (leftmost) {
		/*
		 * Reprogram the clock event device. When the timer is already
		 * expired hrtimer_enqueue_reprogram has either called the
		 * callback or added it to the pending list and raised the
		 * softirq.
		 *
		 * This is a NOP for !HIGHRES
		 */
		if (reprogram && hrtimer_enqueue_reprogram(timer, base))
			return;

		base->first = &timer->node;
	}

	rb_link_node(&timer->node, parent, link);
	rb_insert_color(&timer->node, &base->active);
	/*
	 * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
	 * state of a possibly running callback.
	 */
	timer->state |= HRTIMER_STATE_ENQUEUED;
}
/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 *
 * High resolution timer mode reprograms the clock event device when the
 * timer is the one which expires next. The caller can disable this by setting
 * reprogram to zero. This is useful, when the context does a reprogramming
 * anyway (e.g. timer interrupt)
 */
static void __remove_hrtimer(struct hrtimer *timer,
			     struct hrtimer_clock_base *base,
			     unsigned long newstate, int reprogram)
{
	/* High res. callback list. NOP for !HIGHRES */
	if (hrtimer_cb_pending(timer))
		hrtimer_remove_cb_pending(timer);
	else {
		/*
		 * Remove the timer from the rbtree and replace the
		 * first entry pointer if necessary.
		 */
		if (base->first == &timer->node) {
			base->first = rb_next(&timer->node);
			/* Reprogram the clock event device, if enabled */
			if (reprogram && hrtimer_hres_active())
				hrtimer_force_reprogram(base->cpu_base);
		}
		rb_erase(&timer->node, &base->active);
	}
	timer->state = newstate;
}
/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
	if (hrtimer_is_queued(timer)) {
		int reprogram;

		/*
		 * Remove the timer and force reprogramming when high
		 * resolution mode is active and the timer is on the current
		 * CPU. If we remove a timer on another CPU, reprogramming is
		 * skipped. The interrupt event on this CPU is fired and
		 * reprogramming happens in the interrupt handler. This is a
		 * rare case and less expensive than a smp call.
		 */
		timer_stats_hrtimer_clear_start_info(timer);
		reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
		__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE,
				 reprogram);
		return 1;
	}
	return 0;
}
/**
 * hrtimer_start - (re)start a relative timer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or relative (HRTIMER_MODE_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int
hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
{
	struct hrtimer_clock_base *base, *new_base;
	unsigned long flags;
	int ret;

	base = lock_hrtimer_base(timer, &flags);

	/* Remove an active timer from the queue: */
	ret = remove_hrtimer(timer, base);

	/* Switch the timer base, if necessary: */
	new_base = switch_hrtimer_base(timer, base);

	if (mode == HRTIMER_MODE_REL) {
		tim = ktime_add(tim, new_base->get_time());
		/*
		 * CONFIG_TIME_LOW_RES is a temporary way for architectures
		 * to signal that they simply return xtime in
		 * do_gettimeoffset(). In this case we want to round up by
		 * resolution when starting a relative timer, to avoid short
		 * timeouts. This will go away with the GTOD framework.
		 */
#ifdef CONFIG_TIME_LOW_RES
		tim = ktime_add(tim, base->resolution);
#endif
		/*
		 * Careful here: User space might have asked for a
		 * very long sleep, so the add above might result in a
		 * negative number, which enqueues the timer in front
		 * of the queue.
		 */
		if (tim.tv64 < 0)
			tim.tv64 = KTIME_MAX;
	}
	timer->expires = tim;

	timer_stats_hrtimer_set_start_info(timer);

	/*
	 * Only allow reprogramming if the new base is on this CPU.
	 * (it might still be on another CPU if the timer was pending)
	 */
	enqueue_hrtimer(timer, new_base,
			new_base->cpu_base == &__get_cpu_var(hrtimer_bases));

	unlock_hrtimer_base(timer, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_start);
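/*
 * Editor's note: a minimal sketch (not part of the original file) showing
 * the basic arm-a-one-shot pattern with the API above and hrtimer_init()
 * further below. The example_* identifiers are hypothetical.
 */
#if 0
static struct hrtimer example_oneshot;

static enum hrtimer_restart example_oneshot_cb(struct hrtimer *timer)
{
	/* Runs once, 500ms after example_arm_oneshot() was called. */
	return HRTIMER_NORESTART;
}

static void example_arm_oneshot(void)
{
	/* hrtimer_init() zeroes the timer, so set .function afterwards. */
	hrtimer_init(&example_oneshot, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	example_oneshot.function = example_oneshot_cb;
	hrtimer_start(&example_oneshot, ktime_set(0, 500 * NSEC_PER_MSEC),
		      HRTIMER_MODE_REL);
}
#endif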
/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer:	hrtimer to stop
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	int ret = -1;

	base = lock_hrtimer_base(timer, &flags);

	if (!hrtimer_callback_running(timer))
		ret = remove_hrtimer(timer, base);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);
/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 * @timer:	the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
	for (;;) {
		int ret = hrtimer_try_to_cancel(timer);

		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
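/*
 * Editor's note: hrtimer_cancel() spins until a concurrently running
 * callback has finished, so it must not be called from a context the
 * callback could be waiting on (e.g. from the callback itself, or while
 * holding a lock the callback takes). A typical use is teardown; a
 * sketch with the hypothetical example_timer from above:
 */
#if 0
static void example_teardown(void)
{
	/* After this returns the callback is guaranteed not to run. */
	hrtimer_cancel(&example_timer);
}
#endif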
/**
 * hrtimer_get_remaining - get remaining time for the timer
 * @timer:	the timer to read
 */
ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	ktime_t rem;

	base = lock_hrtimer_base(timer, &flags);
	rem = ktime_sub(timer->expires, base->get_time());
	unlock_hrtimer_base(timer, &flags);

	return rem;
}
EXPORT_SYMBOL_GPL(hrtimer_get_remaining);

#if defined(CONFIG_NO_IDLE_HZ) || defined(CONFIG_NO_HZ)
/**
 * hrtimer_get_next_event - get the time until next expiry event
 *
 * Returns the delta to the next expiry event or KTIME_MAX if no timer
 * is pending.
 */
ktime_t hrtimer_get_next_event(void)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cpu_base->lock, flags);

	if (!hrtimer_hres_active()) {
		for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
			struct hrtimer *timer;

			if (!base->first)
				continue;

			timer = rb_entry(base->first, struct hrtimer, node);
			delta.tv64 = timer->expires.tv64;
			delta = ktime_sub(delta, base->get_time());
			if (delta.tv64 < mindelta.tv64)
				mindelta.tv64 = delta.tv64;
		}
	}

	spin_unlock_irqrestore(&cpu_base->lock, flags);

	if (mindelta.tv64 < 0)
		mindelta.tv64 = 0;
	return mindelta;
}
#endif
/**
 * hrtimer_init - initialize a timer to the given clock
 * @timer:	the timer to be initialized
 * @clock_id:	the clock to be used
 * @mode:	timer mode abs/rel
 */
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
		  enum hrtimer_mode mode)
{
	struct hrtimer_cpu_base *cpu_base;

	memset(timer, 0, sizeof(struct hrtimer));

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);

	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
		clock_id = CLOCK_MONOTONIC;

	timer->base = &cpu_base->clock_base[clock_id];
	hrtimer_init_timer_hres(timer);

#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
}
EXPORT_SYMBOL_GPL(hrtimer_init);

/**
 * hrtimer_get_res - get the timer resolution for a clock
 * @which_clock: which clock to query
 * @tp:		 pointer to timespec variable to store the resolution
 *
 * Store the resolution of the clock selected by @which_clock in the
 * variable pointed to by @tp.
 */
int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
{
	struct hrtimer_cpu_base *cpu_base;

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);
	*tp = ktime_to_timespec(cpu_base->clock_base[which_clock].resolution);

	return 0;
}
EXPORT_SYMBOL_GPL(hrtimer_get_res);
#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base;
	ktime_t expires_next, now;
	int i, raise = 0;

	BUG_ON(!cpu_base->hres_active);
	cpu_base->nr_events++;
	dev->next_event.tv64 = KTIME_MAX;

 retry:
	now = ktime_get();

	expires_next.tv64 = KTIME_MAX;

	base = cpu_base->clock_base;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		ktime_t basenow;
		struct rb_node *node;

		spin_lock(&cpu_base->lock);

		basenow = ktime_add(now, base->offset);

		while ((node = base->first)) {
			struct hrtimer *timer;

			timer = rb_entry(node, struct hrtimer, node);

			if (basenow.tv64 < timer->expires.tv64) {
				ktime_t expires;

				expires = ktime_sub(timer->expires,
						    base->offset);
				if (expires.tv64 < expires_next.tv64)
					expires_next = expires;
				break;
			}

			/* Move softirq callbacks to the pending list */
			if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
				__remove_hrtimer(timer, base,
						 HRTIMER_STATE_PENDING, 0);
				list_add_tail(&timer->cb_entry,
					      &base->cpu_base->cb_pending);
				raise = 1;
				continue;
			}

			__remove_hrtimer(timer, base,
					 HRTIMER_STATE_CALLBACK, 0);
			timer_stats_account_hrtimer(timer);

			/*
			 * Note: We clear the CALLBACK bit after
			 * enqueue_hrtimer to avoid reprogramming of
			 * the event hardware. This happens at the end
			 * of this function anyway.
			 */
			if (timer->function(timer) != HRTIMER_NORESTART) {
				BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
				enqueue_hrtimer(timer, base, 0);
			}
			timer->state &= ~HRTIMER_STATE_CALLBACK;
		}
		spin_unlock(&cpu_base->lock);
		base++;
	}

	cpu_base->expires_next = expires_next;

	/* Reprogramming necessary ? */
	if (expires_next.tv64 != KTIME_MAX) {
		if (tick_program_event(expires_next, 0))
			goto retry;
	}

	/* Raise softirq ? */
	if (raise)
		raise_softirq(HRTIMER_SOFTIRQ);
}
static void run_hrtimer_softirq(struct softirq_action *h)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);

	spin_lock_irq(&cpu_base->lock);

	while (!list_empty(&cpu_base->cb_pending)) {
		enum hrtimer_restart (*fn)(struct hrtimer *);
		struct hrtimer *timer;
		int restart;

		timer = list_entry(cpu_base->cb_pending.next,
				   struct hrtimer, cb_entry);

		timer_stats_account_hrtimer(timer);

		fn = timer->function;
		__remove_hrtimer(timer, timer->base, HRTIMER_STATE_CALLBACK, 0);
		spin_unlock_irq(&cpu_base->lock);

		restart = fn(timer);

		spin_lock_irq(&cpu_base->lock);

		timer->state &= ~HRTIMER_STATE_CALLBACK;
		if (restart == HRTIMER_RESTART) {
			BUG_ON(hrtimer_active(timer));
			/*
			 * Enqueue the timer, allow reprogramming of the event
			 * device
			 */
			enqueue_hrtimer(timer, timer->base, 1);
		} else if (hrtimer_active(timer)) {
			/*
			 * If the timer was rearmed on another CPU, reprogram
			 * the event device.
			 */
			if (timer->base->first == &timer->node)
				hrtimer_reprogram(timer, timer->base);
		}
	}
	spin_unlock_irq(&cpu_base->lock);
}

#endif /* CONFIG_HIGH_RES_TIMERS */
/*
 * Expire the per base hrtimer-queue:
 */
static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base,
				     int index)
{
	struct rb_node *node;
	struct hrtimer_clock_base *base = &cpu_base->clock_base[index];

	if (!base->first)
		return;

	if (base->get_softirq_time)
		base->softirq_time = base->get_softirq_time();

	spin_lock_irq(&cpu_base->lock);

	while ((node = base->first)) {
		struct hrtimer *timer;
		enum hrtimer_restart (*fn)(struct hrtimer *);
		int restart;

		timer = rb_entry(node, struct hrtimer, node);
		if (base->softirq_time.tv64 <= timer->expires.tv64)
			break;

#ifdef CONFIG_HIGH_RES_TIMERS
		WARN_ON_ONCE(timer->cb_mode == HRTIMER_CB_IRQSAFE_NO_SOFTIRQ);
#endif
		timer_stats_account_hrtimer(timer);

		fn = timer->function;
		__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
		spin_unlock_irq(&cpu_base->lock);

		restart = fn(timer);

		spin_lock_irq(&cpu_base->lock);

		timer->state &= ~HRTIMER_STATE_CALLBACK;
		if (restart != HRTIMER_NORESTART) {
			BUG_ON(hrtimer_active(timer));
			enqueue_hrtimer(timer, base, 0);
		}
	}
	spin_unlock_irq(&cpu_base->lock);
}
/*
 * Called from timer softirq every jiffy, expire hrtimers:
 *
 * For HRT it's the fallback code to run the softirq in the timer
 * softirq context in case the hrtimer initialization failed or has
 * not been done yet.
 */
void hrtimer_run_queues(void)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	int i;

	if (hrtimer_hres_active())
		return;

	/*
	 * This _is_ ugly: We have to check in the softirq context,
	 * whether we can switch to highres and / or nohz mode. The
	 * clocksource switch happens in the timer interrupt with
	 * xtime_lock held. Notification from there only sets the
	 * check bit in the tick_oneshot code, otherwise we might
	 * deadlock vs. xtime_lock.
	 */
	if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
		if (hrtimer_switch_to_hres())
			return;

	hrtimer_get_softirq_time(cpu_base);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
		run_hrtimer_queue(cpu_base, i);
}
/*
 * Sleep related functions:
 */
static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
{
	struct hrtimer_sleeper *t =
		container_of(timer, struct hrtimer_sleeper, timer);
	struct task_struct *task = t->task;

	t->task = NULL;
	if (task)
		wake_up_process(task);

	return HRTIMER_NORESTART;
}

void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
	sl->timer.function = hrtimer_wakeup;
	sl->task = task;
#ifdef CONFIG_HIGH_RES_TIMERS
	sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_RESTART;
#endif
}

static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
	hrtimer_init_sleeper(t, current);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		hrtimer_start(&t->timer, t->timer.expires, mode);

		if (likely(t->task))
			schedule();

		hrtimer_cancel(&t->timer);
		mode = HRTIMER_MODE_ABS;

	} while (t->task && !signal_pending(current));

	return t->task == NULL;
}
long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
{
	struct hrtimer_sleeper t;
	struct timespec __user *rmtp;
	struct timespec tu;
	ktime_t time;

	restart->fn = do_no_restart_syscall;

	hrtimer_init(&t.timer, restart->arg0, HRTIMER_MODE_ABS);
	t.timer.expires.tv64 = ((u64)restart->arg3 << 32) | (u64) restart->arg2;

	if (do_nanosleep(&t, HRTIMER_MODE_ABS))
		return 0;

	rmtp = (struct timespec __user *) restart->arg1;
	if (rmtp) {
		time = ktime_sub(t.timer.expires, t.timer.base->get_time());
		if (time.tv64 <= 0)
			return 0;
		tu = ktime_to_timespec(time);
		if (copy_to_user(rmtp, &tu, sizeof(tu)))
			return -EFAULT;
	}

	restart->fn = hrtimer_nanosleep_restart;

	/* The other values in restart are already filled in */
	return -ERESTART_RESTARTBLOCK;
}

long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
		       const enum hrtimer_mode mode, const clockid_t clockid)
{
	struct restart_block *restart;
	struct hrtimer_sleeper t;
	struct timespec tu;
	ktime_t rem;

	hrtimer_init(&t.timer, clockid, mode);
	t.timer.expires = timespec_to_ktime(*rqtp);
	if (do_nanosleep(&t, mode))
		return 0;

	/* Absolute timers do not update the rmtp value and restart: */
	if (mode == HRTIMER_MODE_ABS)
		return -ERESTARTNOHAND;

	if (rmtp) {
		rem = ktime_sub(t.timer.expires, t.timer.base->get_time());
		if (rem.tv64 <= 0)
			return 0;
		tu = ktime_to_timespec(rem);
		if (copy_to_user(rmtp, &tu, sizeof(tu)))
			return -EFAULT;
	}

	restart = &current_thread_info()->restart_block;
	restart->fn = hrtimer_nanosleep_restart;
	restart->arg0 = (unsigned long) t.timer.base->index;
	restart->arg1 = (unsigned long) rmtp;
	restart->arg2 = t.timer.expires.tv64 & 0xFFFFFFFF;
	restart->arg3 = t.timer.expires.tv64 >> 32;

	return -ERESTART_RESTARTBLOCK;
}
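/*
 * Editor's note on the restart block above: expires.tv64 is a 64-bit
 * value, but the restart_block args are unsigned long, which may be only
 * 32 bits wide. The expiry is therefore split across arg2 (low 32 bits)
 * and arg3 (high 32 bits); hrtimer_nanosleep_restart() reassembles it
 * with ((u64)arg3 << 32) | arg2.
 */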
asmlinkage long
sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
{
	struct timespec tu;

	if (copy_from_user(&tu, rqtp, sizeof(tu)))
		return -EFAULT;

	if (!timespec_valid(&tu))
		return -EINVAL;

	return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}
/*
 * Functions related to boot-time initialization:
 */
static void __devinit init_hrtimers_cpu(int cpu)
{
	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
	int i;

	spin_lock_init(&cpu_base->lock);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
		cpu_base->clock_base[i].cpu_base = cpu_base;

	hrtimer_init_hres(cpu_base);
}

#ifdef CONFIG_HOTPLUG_CPU

static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
				 struct hrtimer_clock_base *new_base)
{
	struct hrtimer *timer;
	struct rb_node *node;

	while ((node = rb_first(&old_base->active))) {
		timer = rb_entry(node, struct hrtimer, node);
		BUG_ON(hrtimer_callback_running(timer));
		__remove_hrtimer(timer, old_base, HRTIMER_STATE_INACTIVE, 0);
		timer->base = new_base;
		/*
		 * Enqueue the timer. Allow reprogramming of the event device
		 */
		enqueue_hrtimer(timer, new_base, 1);
	}
}

static void migrate_hrtimers(int cpu)
{
	struct hrtimer_cpu_base *old_base, *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = &per_cpu(hrtimer_bases, cpu);
	new_base = &get_cpu_var(hrtimer_bases);

	tick_cancel_sched_timer(cpu);

	local_irq_disable();
	spin_lock(&new_base->lock);
	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		migrate_hrtimer_list(&old_base->clock_base[i],
				     &new_base->clock_base[i]);
	}

	spin_unlock(&old_base->lock);
	spin_unlock(&new_base->lock);
	local_irq_enable();
	put_cpu_var(hrtimer_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */
static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
					unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		init_hrtimers_cpu(cpu);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu);
		migrate_hrtimers(cpu);
		break;
#endif

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata hrtimers_nb = {
	.notifier_call = hrtimer_cpu_notify,
};

void __init hrtimers_init(void)
{
	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
			   (void *)(long)smp_processor_id());
	register_cpu_notifier(&hrtimers_nb);
#ifdef CONFIG_HIGH_RES_TIMERS
	open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq, NULL);
#endif
}