/*
 *  linux/kernel/hrtimer.c
 *
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 *  High-resolution kernel timers
 *
 *  In contrast to the low-resolution timeout API implemented in
 *  kernel/timer.c, hrtimers provide finer resolution and accuracy
 *  depending on system configuration and capabilities.
 *
 *  These timers are currently used for:
 *   - itimers
 *   - POSIX timers
 *   - nanosleep
 *   - precise in-kernel timing
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Credits:
 *      based on kernel/timer.c
 *
 *      Help, testing, suggestions, bugfixes, improvements were
 *      provided by:
 *
 *      George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
 *      et. al.
 *
 *  For licensing details see kernel-base/COPYING
 */
#include <linux/cpu.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/seq_file.h>
#include <linux/err.h>

#include <asm/uaccess.h>
/**
 * ktime_get - get the monotonic time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get(void)
{
        struct timespec now;

        ktime_get_ts(&now);

        return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get);
/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get_real(void)
{
        struct timespec now;

        getnstimeofday(&now);

        return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get_real);
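/*
 * Illustrative note: ktime_get() (CLOCK_MONOTONIC) advances steadily
 * and is the right choice for timeouts, while ktime_get_real()
 * (CLOCK_REALTIME) follows wall time and jumps when the clock is set.
 * The two differ exactly by the wall_to_monotonic offset maintained by
 * the timekeeping code.
 */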
/*
 * The timer bases:
 *
 * Note: If we want to add new timer bases, we have to skip the two
 * clock ids captured by the cpu-timers. We do this by holding empty
 * entries rather than doing math adjustment of the clock ids.
 * This ensures that we capture erroneous accesses to these clock ids
 * rather than moving them into the range of valid clock id's.
 */
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{

        .clock_base =
        {
                {
                        .index = CLOCK_REALTIME,
                        .get_time = &ktime_get_real,
                        .resolution = KTIME_LOW_RES,
                },
                {
                        .index = CLOCK_MONOTONIC,
                        .get_time = &ktime_get,
                        .resolution = KTIME_LOW_RES,
                },
        }
};
/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
        struct timespec tomono;
        unsigned long seq;

        do {
                seq = read_seqbegin(&xtime_lock);
                getnstimeofday(ts);
                tomono = wall_to_monotonic;

        } while (read_seqretry(&xtime_lock, seq));

        set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
                                ts->tv_nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);
/*
 * Get the coarse grained time at the softirq based on xtime and
 * wall_to_monotonic.
 */
static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
{
        ktime_t xtim, tomono;
        struct timespec xts, tom;
        unsigned long seq;

        do {
                seq = read_seqbegin(&xtime_lock);
#ifdef CONFIG_NO_HZ
                getnstimeofday(&xts);
#else
                xts = xtime;
#endif
                tom = wall_to_monotonic;
        } while (read_seqretry(&xtime_lock, seq));

        xtim = timespec_to_ktime(xts);
        tomono = timespec_to_ktime(tom);
        base->clock_base[CLOCK_REALTIME].softirq_time = xtim;
        base->clock_base[CLOCK_MONOTONIC].softirq_time =
                ktime_add(xtim, tomono);
}
/*
 * Helper function to check, whether the timer is running the callback
 * function
 */
static inline int hrtimer_callback_running(struct hrtimer *timer)
{
        return timer->state & HRTIMER_STATE_CALLBACK;
}
/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
 */
#ifdef CONFIG_SMP

/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on the lists/queues.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
                                                    unsigned long *flags)
{
        struct hrtimer_clock_base *base;

        for (;;) {
                base = timer->base;
                if (likely(base != NULL)) {
                        spin_lock_irqsave(&base->cpu_base->lock, *flags);
                        if (likely(base == timer->base))
                                return base;
                        /* The timer has migrated to another CPU: */
                        spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
                }
                cpu_relax();
        }
}
/*
 * Switch the timer base to the current CPU when possible.
 */
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
        struct hrtimer_clock_base *new_base;
        struct hrtimer_cpu_base *new_cpu_base;

        new_cpu_base = &__get_cpu_var(hrtimer_bases);
        new_base = &new_cpu_base->clock_base[base->index];

        if (base != new_base) {
                /*
                 * We are trying to schedule the timer on the local CPU.
                 * However we can't change timer's base while it is running,
                 * so we keep it on the same CPU. No hassle vs. reprogramming
                 * the event source in the high resolution case. The softirq
                 * code will take care of this when the timer function has
                 * completed. There is no conflict as we hold the lock until
                 * the timer is enqueued.
                 */
                if (unlikely(hrtimer_callback_running(timer)))
                        return base;

                /* See the comment in lock_timer_base() */
                timer->base = NULL;
                spin_unlock(&base->cpu_base->lock);
                spin_lock(&new_base->cpu_base->lock);
                timer->base = new_base;
        }
        return new_base;
}
#else /* CONFIG_SMP */

static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
        struct hrtimer_clock_base *base = timer->base;

        spin_lock_irqsave(&base->cpu_base->lock, *flags);

        return base;
}

# define switch_hrtimer_base(t, b)	(b)

#endif	/* !CONFIG_SMP */
/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64
# ifndef CONFIG_KTIME_SCALAR
/**
 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
 * @kt:		addend
 * @nsec:	the scalar nsec value to add
 *
 * Returns the sum of kt and nsec in ktime_t format
 */
ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
{
        ktime_t tmp;

        if (likely(nsec < NSEC_PER_SEC)) {
                tmp.tv64 = nsec;
        } else {
                unsigned long rem = do_div(nsec, NSEC_PER_SEC);

                tmp = ktime_set((long)nsec, rem);
        }

        return ktime_add(kt, tmp);
}
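/*
 * Worked example (illustrative): for nsec = 2500000000,
 * do_div(nsec, NSEC_PER_SEC) leaves nsec = 2 and returns
 * rem = 500000000, so tmp = ktime_set(2, 500000000) and the
 * result is kt advanced by 2.5 seconds.
 */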
# endif /* !CONFIG_KTIME_SCALAR */
/*
 * Divide a ktime value by a nanosecond value
 */
unsigned long ktime_divns(const ktime_t kt, s64 div)
{
        u64 dclc, dns;
        int sft = 0;

        dclc = dns = ktime_to_ns(kt);
        /* Make sure the divisor is less than 2^32: */
        while (div >> 32) {
                sft++;
                div >>= 1;
        }
        dclc >>= sft;
        do_div(dclc, (unsigned long) div);

        return (unsigned long) dclc;
}
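/*
 * Worked example (illustrative): do_div() requires a divisor that fits
 * in 32 bits. For div = 2^33 the loop shifts twice (sft = 2), leaving
 * div = 2^31; dclc is shifted right by the same amount, so the
 * quotient is preserved up to the truncated low bits.
 */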
#endif /* BITS_PER_LONG >= 64 */
/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer enabled ?
 */
static int hrtimer_hres_enabled __read_mostly = 1;

/*
 * Enable / Disable high resolution mode
 */
static int __init setup_hrtimer_hres(char *str)
{
        if (!strcmp(str, "off"))
                hrtimer_hres_enabled = 0;
        else if (!strcmp(str, "on"))
                hrtimer_hres_enabled = 1;
        else
                return 0;
        return 1;
}

__setup("highres=", setup_hrtimer_hres);

/**
 * hrtimer_high_res_enabled - query, if the highres mode is enabled
 */
static inline int hrtimer_is_hres_enabled(void)
{
        return hrtimer_hres_enabled;
}

/*
 * Is the high resolution mode active ?
 */
static inline int hrtimer_hres_active(void)
{
        return __get_cpu_var(hrtimer_bases).hres_active;
}
/*
 * Reprogram the event source with checking both queues for the
 * next event
 * Called with interrupts disabled and base->lock held
 */
static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
{
        int i;
        struct hrtimer_clock_base *base = cpu_base->clock_base;
        ktime_t expires;

        cpu_base->expires_next.tv64 = KTIME_MAX;

        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
                struct hrtimer *timer;

                if (!base->first)
                        continue;
                timer = rb_entry(base->first, struct hrtimer, node);
                expires = ktime_sub(timer->expires, base->offset);
                if (expires.tv64 < cpu_base->expires_next.tv64)
                        cpu_base->expires_next = expires;
        }

        if (cpu_base->expires_next.tv64 != KTIME_MAX)
                tick_program_event(cpu_base->expires_next, 1);
}
/*
 * Shared reprogramming for clock_realtime and clock_monotonic
 *
 * When a timer is enqueued and expires earlier than the already enqueued
 * timers, we have to check, whether it expires earlier than the timer for
 * which the clock event device was armed.
 *
 * Called with interrupts disabled and base->cpu_base.lock held
 */
static int hrtimer_reprogram(struct hrtimer *timer,
                             struct hrtimer_clock_base *base)
{
        ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next;
        ktime_t expires = ktime_sub(timer->expires, base->offset);
        int res;

        /*
         * When the callback is running, we do not reprogram the clock event
         * device. The timer callback is either running on a different CPU or
         * the callback is executed in the hrtimer_interrupt context. The
         * reprogramming is handled either by the softirq, which called the
         * callback or at the end of the hrtimer_interrupt.
         */
        if (hrtimer_callback_running(timer))
                return 0;

        if (expires.tv64 >= expires_next->tv64)
                return 0;

        /*
         * Clockevents returns -ETIME, when the event was in the past.
         */
        res = tick_program_event(expires, 0);
        if (!IS_ERR_VALUE(res))
                *expires_next = expires;
        return res;
}
/*
 * Retrigger next event is called after clock was set
 *
 * Called with interrupts disabled via on_each_cpu()
 */
static void retrigger_next_event(void *arg)
{
        struct hrtimer_cpu_base *base;
        struct timespec realtime_offset;
        unsigned long seq;

        if (!hrtimer_hres_active())
                return;

        do {
                seq = read_seqbegin(&xtime_lock);
                set_normalized_timespec(&realtime_offset,
                                        -wall_to_monotonic.tv_sec,
                                        -wall_to_monotonic.tv_nsec);
        } while (read_seqretry(&xtime_lock, seq));

        base = &__get_cpu_var(hrtimer_bases);

        /* Adjust CLOCK_REALTIME offset */
        spin_lock(&base->lock);
        base->clock_base[CLOCK_REALTIME].offset =
                timespec_to_ktime(realtime_offset);

        hrtimer_force_reprogram(base);
        spin_unlock(&base->lock);
}
/*
 * Clock realtime was set
 *
 * Change the offset of the realtime clock vs. the monotonic
 * clock.
 *
 * We might have to reprogram the high resolution timer interrupt. On
 * SMP we call the architecture specific code to retrigger _all_ high
 * resolution timer interrupts. On UP we just disable interrupts and
 * call the high resolution interrupt code.
 */
void clock_was_set(void)
{
        /* Retrigger the CPU local events everywhere */
        on_each_cpu(retrigger_next_event, NULL, 0, 1);
}
/*
 * During resume we might have to reprogram the high resolution timer
 * interrupt (on the local CPU):
 */
void hres_timers_resume(void)
{
        WARN_ON_ONCE(num_online_cpus() > 1);

        /* Retrigger the CPU local events: */
        retrigger_next_event(NULL);
}
/*
 * Check, whether the timer is on the callback pending list
 */
static inline int hrtimer_cb_pending(const struct hrtimer *timer)
{
        return timer->state & HRTIMER_STATE_PENDING;
}

/*
 * Remove a timer from the callback pending list
 */
static inline void hrtimer_remove_cb_pending(struct hrtimer *timer)
{
        list_del_init(&timer->cb_entry);
}

/*
 * Initialize the high resolution related parts of cpu_base
 */
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
{
        base->expires_next.tv64 = KTIME_MAX;
        base->hres_active = 0;
        INIT_LIST_HEAD(&base->cb_pending);
}

/*
 * Initialize the high resolution related parts of a hrtimer
 */
static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
{
        INIT_LIST_HEAD(&timer->cb_entry);
}
/*
 * When High resolution timers are active, try to reprogram. Note, that in case
 * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry
 * check happens. The timer gets enqueued into the rbtree. The reprogramming
 * and expiry check is done in the hrtimer_interrupt or in the softirq.
 */
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
                                            struct hrtimer_clock_base *base)
{
        if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {

                /* Timer is expired, act upon the callback mode */
                switch(timer->cb_mode) {
                case HRTIMER_CB_IRQSAFE_NO_RESTART:
                        /*
                         * We can call the callback from here. No restart
                         * happens, so no danger of recursion
                         */
                        BUG_ON(timer->function(timer) != HRTIMER_NORESTART);
                        return 1;
                case HRTIMER_CB_IRQSAFE_NO_SOFTIRQ:
                        /*
                         * This is solely for the sched tick emulation with
                         * dynamic tick support to ensure that we do not
                         * restart the tick right on the edge and end up with
                         * the tick timer in the softirq ! The calling site
                         * takes care of this.
                         */
                        return 1;
                case HRTIMER_CB_IRQSAFE:
                case HRTIMER_CB_SOFTIRQ:
                        /*
                         * Move everything else into the softirq pending list !
                         */
                        list_add_tail(&timer->cb_entry,
                                      &base->cpu_base->cb_pending);
                        timer->state = HRTIMER_STATE_PENDING;
                        raise_softirq(HRTIMER_SOFTIRQ);
                        return 1;
                default:
                        BUG();
                }
        }
        return 0;
}
/*
 * Switch to high resolution mode
 */
static int hrtimer_switch_to_hres(void)
{
        struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
        unsigned long flags;

        if (base->hres_active)
                return 1;

        local_irq_save(flags);

        if (tick_init_highres()) {
                local_irq_restore(flags);
                return 0;
        }
        base->hres_active = 1;
        base->clock_base[CLOCK_REALTIME].resolution = KTIME_HIGH_RES;
        base->clock_base[CLOCK_MONOTONIC].resolution = KTIME_HIGH_RES;

        tick_setup_sched_timer();

        /* "Retrigger" the interrupt to get things going */
        retrigger_next_event(NULL);
        local_irq_restore(flags);
        printk(KERN_INFO "Switched to high resolution mode on CPU %d\n",
               smp_processor_id());
        return 1;
}
#else

static inline int hrtimer_hres_active(void) { return 0; }
static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline int hrtimer_switch_to_hres(void) { return 0; }
static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { }
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
                                            struct hrtimer_clock_base *base)
{
        return 0;
}
static inline int hrtimer_cb_pending(struct hrtimer *timer) { return 0; }
static inline void hrtimer_remove_cb_pending(struct hrtimer *timer) { }
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { }

#endif /* CONFIG_HIGH_RES_TIMERS */
#ifdef CONFIG_TIMER_STATS
void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer, void *addr)
{
        if (timer->start_site)
                return;

        timer->start_site = addr;
        memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
        timer->start_pid = current->pid;
}
#endif
/*
 * Counterpart to lock_timer_base above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
        spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}
/**
 * hrtimer_forward - forward the timer expiry
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 */
unsigned long
hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
        unsigned long orun = 1;
        ktime_t delta;

        delta = ktime_sub(now, timer->expires);

        if (delta.tv64 < 0)
                return 0;

        if (interval.tv64 < timer->base->resolution.tv64)
                interval.tv64 = timer->base->resolution.tv64;

        if (unlikely(delta.tv64 >= interval.tv64)) {
                s64 incr = ktime_to_ns(interval);

                orun = ktime_divns(delta, incr);
                timer->expires = ktime_add_ns(timer->expires, incr * orun);
                if (timer->expires.tv64 > now.tv64)
                        return orun;
                /*
                 * This (and the ktime_add() below) is the
                 * correction for exact:
                 */
                orun++;
        }
        timer->expires = ktime_add(timer->expires, interval);
        /*
         * Make sure, that the result did not wrap with a very large
         * interval.
         */
        if (timer->expires.tv64 < 0)
                timer->expires = ktime_set(KTIME_SEC_MAX, 0);

        return orun;
}
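/*
 * Usage sketch (illustrative; my_timer_func and my_interval are
 * hypothetical): a periodic callback forwards its own expiry past
 * 'now' and asks to be restarted:
 *
 *	static enum hrtimer_restart my_timer_func(struct hrtimer *timer)
 *	{
 *		hrtimer_forward(timer, timer->base->get_time(), my_interval);
 *		return HRTIMER_RESTART;
 *	}
 */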
/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is O(log(n)). Must hold the base lock.
 */
static void enqueue_hrtimer(struct hrtimer *timer,
                            struct hrtimer_clock_base *base, int reprogram)
{
        struct rb_node **link = &base->active.rb_node;
        struct rb_node *parent = NULL;
        struct hrtimer *entry;

        /*
         * Find the right place in the rbtree:
         */
        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct hrtimer, node);
                /*
                 * We don't care about collisions. Nodes with
                 * the same expiry time stay together.
                 */
                if (timer->expires.tv64 < entry->expires.tv64)
                        link = &(*link)->rb_left;
                else
                        link = &(*link)->rb_right;
        }

        /*
         * Insert the timer to the rbtree and check whether it
         * replaces the first pending timer
         */
        if (!base->first || timer->expires.tv64 <
            rb_entry(base->first, struct hrtimer, node)->expires.tv64) {
                /*
                 * Reprogram the clock event device. When the timer is already
                 * expired hrtimer_enqueue_reprogram has either called the
                 * callback or added it to the pending list and raised the
                 * softirq.
                 *
                 * This is a NOP for !HIGHRES
                 */
                if (reprogram && hrtimer_enqueue_reprogram(timer, base))
                        return;

                base->first = &timer->node;
        }

        rb_link_node(&timer->node, parent, link);
        rb_insert_color(&timer->node, &base->active);
        /*
         * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
         * state of a possibly running callback.
         */
        timer->state |= HRTIMER_STATE_ENQUEUED;
}
/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 *
 * High resolution timer mode reprograms the clock event device when the
 * timer is the one which expires next. The caller can disable this by setting
 * reprogram to zero. This is useful, when the context does a reprogramming
 * anyway (e.g. timer interrupt)
 */
static void __remove_hrtimer(struct hrtimer *timer,
                             struct hrtimer_clock_base *base,
                             unsigned long newstate, int reprogram)
{
        /* High res. callback list. NOP for !HIGHRES */
        if (hrtimer_cb_pending(timer))
                hrtimer_remove_cb_pending(timer);
        else {
                /*
                 * Remove the timer from the rbtree and replace the
                 * first entry pointer if necessary.
                 */
                if (base->first == &timer->node) {
                        base->first = rb_next(&timer->node);
                        /* Reprogram the clock event device, if enabled */
                        if (reprogram && hrtimer_hres_active())
                                hrtimer_force_reprogram(base->cpu_base);
                }
                rb_erase(&timer->node, &base->active);
        }
        timer->state = newstate;
}
/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
        if (hrtimer_is_queued(timer)) {
                int reprogram;

                /*
                 * Remove the timer and force reprogramming when high
                 * resolution mode is active and the timer is on the current
                 * CPU. If we remove a timer on another CPU, reprogramming is
                 * skipped. The interrupt event on this CPU is fired and
                 * reprogramming happens in the interrupt handler. This is a
                 * rare case and less expensive than a smp call.
                 */
                timer_stats_hrtimer_clear_start_info(timer);
                reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
                __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE,
                                 reprogram);
                return 1;
        }
        return 0;
}
/**
 * hrtimer_start - (re)start a relative timer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or
 *		relative (HRTIMER_MODE_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int
hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
{
        struct hrtimer_clock_base *base, *new_base;
        unsigned long flags;
        int ret;

        base = lock_hrtimer_base(timer, &flags);

        /* Remove an active timer from the queue: */
        ret = remove_hrtimer(timer, base);

        /* Switch the timer base, if necessary: */
        new_base = switch_hrtimer_base(timer, base);

        if (mode == HRTIMER_MODE_REL) {
                tim = ktime_add(tim, new_base->get_time());
                /*
                 * CONFIG_TIME_LOW_RES is a temporary way for architectures
                 * to signal that they simply return xtime in
                 * do_gettimeoffset(). In this case we want to round up by
                 * resolution when starting a relative timer, to avoid short
                 * timeouts. This will go away with the GTOD framework.
                 */
#ifdef CONFIG_TIME_LOW_RES
                tim = ktime_add(tim, base->resolution);
#endif
        }
        timer->expires = tim;

        timer_stats_hrtimer_set_start_info(timer);

        /*
         * Only allow reprogramming if the new base is on this CPU.
         * (it might still be on another CPU if the timer was pending)
         */
        enqueue_hrtimer(timer, new_base,
                        new_base->cpu_base == &__get_cpu_var(hrtimer_bases));

        unlock_hrtimer_base(timer, &flags);

        return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_start);
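/*
 * Usage sketch (illustrative; my_timer and my_timer_func are
 * hypothetical): arm a one-shot timer 100ms from now on the
 * monotonic clock:
 *
 *	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	my_timer.function = my_timer_func;
 *	hrtimer_start(&my_timer, ktime_set(0, 100 * NSEC_PER_MSEC),
 *		      HRTIMER_MODE_REL);
 */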
/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer:	hrtimer to stop
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
        struct hrtimer_clock_base *base;
        unsigned long flags;
        int ret = -1;

        base = lock_hrtimer_base(timer, &flags);

        if (!hrtimer_callback_running(timer))
                ret = remove_hrtimer(timer, base);

        unlock_hrtimer_base(timer, &flags);

        return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);
/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 * @timer:	the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
        for (;;) {
                int ret = hrtimer_try_to_cancel(timer);

                if (ret >= 0)
                        return ret;
                cpu_relax();
        }
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
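/*
 * Note: hrtimer_cancel() spins until a concurrently executing callback
 * has finished, so it must not be called from the timer's own callback
 * path, and the caller must not hold locks which that callback takes.
 * hrtimer_try_to_cancel() is the non-blocking variant for such contexts.
 */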
/**
 * hrtimer_get_remaining - get remaining time for the timer
 * @timer:	the timer to read
 */
ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
        struct hrtimer_clock_base *base;
        unsigned long flags;
        ktime_t rem;

        base = lock_hrtimer_base(timer, &flags);
        rem = ktime_sub(timer->expires, base->get_time());
        unlock_hrtimer_base(timer, &flags);

        return rem;
}
EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
#if defined(CONFIG_NO_IDLE_HZ) || defined(CONFIG_NO_HZ)
/**
 * hrtimer_get_next_event - get the time until next expiry event
 *
 * Returns the delta to the next expiry event or KTIME_MAX if no timer
 * is pending.
 */
ktime_t hrtimer_get_next_event(void)
{
        struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
        struct hrtimer_clock_base *base = cpu_base->clock_base;
        ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
        unsigned long flags;
        int i;

        spin_lock_irqsave(&cpu_base->lock, flags);

        if (!hrtimer_hres_active()) {
                for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
                        struct hrtimer *timer;

                        if (!base->first)
                                continue;

                        timer = rb_entry(base->first, struct hrtimer, node);
                        delta.tv64 = timer->expires.tv64;
                        delta = ktime_sub(delta, base->get_time());
                        if (delta.tv64 < mindelta.tv64)
                                mindelta.tv64 = delta.tv64;
                }
        }

        spin_unlock_irqrestore(&cpu_base->lock, flags);

        if (mindelta.tv64 < 0)
                mindelta.tv64 = 0;

        return mindelta;
}
#endif
/**
 * hrtimer_init - initialize a timer to the given clock
 * @timer:	the timer to be initialized
 * @clock_id:	the clock to be used
 * @mode:	timer mode abs/rel
 */
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
                  enum hrtimer_mode mode)
{
        struct hrtimer_cpu_base *cpu_base;

        memset(timer, 0, sizeof(struct hrtimer));

        cpu_base = &__raw_get_cpu_var(hrtimer_bases);

        if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
                clock_id = CLOCK_MONOTONIC;

        timer->base = &cpu_base->clock_base[clock_id];
        hrtimer_init_timer_hres(timer);

#ifdef CONFIG_TIMER_STATS
        timer->start_site = NULL;
        timer->start_pid = -1;
        memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
}
EXPORT_SYMBOL_GPL(hrtimer_init);
/**
 * hrtimer_get_res - get the timer resolution for a clock
 * @which_clock: which clock to query
 * @tp:		 pointer to timespec variable to store the resolution
 *
 * Store the resolution of the clock selected by @which_clock in the
 * variable pointed to by @tp.
 */
int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
{
        struct hrtimer_cpu_base *cpu_base;

        cpu_base = &__raw_get_cpu_var(hrtimer_bases);
        *tp = ktime_to_timespec(cpu_base->clock_base[which_clock].resolution);

        return 0;
}
EXPORT_SYMBOL_GPL(hrtimer_get_res);
#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
        struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
        struct hrtimer_clock_base *base;
        ktime_t expires_next, now;
        int i, raise = 0;

        BUG_ON(!cpu_base->hres_active);
        cpu_base->nr_events++;
        dev->next_event.tv64 = KTIME_MAX;

 retry:
        now = ktime_get();

        expires_next.tv64 = KTIME_MAX;

        base = cpu_base->clock_base;

        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
                ktime_t basenow;
                struct rb_node *node;

                spin_lock(&cpu_base->lock);

                basenow = ktime_add(now, base->offset);

                while ((node = base->first)) {
                        struct hrtimer *timer;

                        timer = rb_entry(node, struct hrtimer, node);

                        if (basenow.tv64 < timer->expires.tv64) {
                                ktime_t expires;

                                expires = ktime_sub(timer->expires,
                                                    base->offset);
                                if (expires.tv64 < expires_next.tv64)
                                        expires_next = expires;
                                break;
                        }

                        /* Move softirq callbacks to the pending list */
                        if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
                                __remove_hrtimer(timer, base,
                                                 HRTIMER_STATE_PENDING, 0);
                                list_add_tail(&timer->cb_entry,
                                              &base->cpu_base->cb_pending);
                                raise = 1;
                                continue;
                        }

                        __remove_hrtimer(timer, base,
                                         HRTIMER_STATE_CALLBACK, 0);
                        timer_stats_account_hrtimer(timer);

                        /*
                         * Note: We clear the CALLBACK bit after
                         * enqueue_hrtimer to avoid reprogramming of
                         * the event hardware. This happens at the end
                         * of this function anyway.
                         */
                        if (timer->function(timer) != HRTIMER_NORESTART) {
                                BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
                                enqueue_hrtimer(timer, base, 0);
                        }
                        timer->state &= ~HRTIMER_STATE_CALLBACK;
                }
                spin_unlock(&cpu_base->lock);
                base++;
        }

        cpu_base->expires_next = expires_next;

        /* Reprogramming necessary ? */
        if (expires_next.tv64 != KTIME_MAX) {
                if (tick_program_event(expires_next, 0))
                        goto retry;
        }

        /* Raise softirq ? */
        if (raise)
                raise_softirq(HRTIMER_SOFTIRQ);
}
static void run_hrtimer_softirq(struct softirq_action *h)
{
        struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);

        spin_lock_irq(&cpu_base->lock);

        while (!list_empty(&cpu_base->cb_pending)) {
                enum hrtimer_restart (*fn)(struct hrtimer *);
                struct hrtimer *timer;
                int restart;

                timer = list_entry(cpu_base->cb_pending.next,
                                   struct hrtimer, cb_entry);

                timer_stats_account_hrtimer(timer);

                fn = timer->function;
                __remove_hrtimer(timer, timer->base, HRTIMER_STATE_CALLBACK, 0);
                spin_unlock_irq(&cpu_base->lock);

                restart = fn(timer);

                spin_lock_irq(&cpu_base->lock);

                timer->state &= ~HRTIMER_STATE_CALLBACK;
                if (restart == HRTIMER_RESTART) {
                        BUG_ON(hrtimer_active(timer));
                        /*
                         * Enqueue the timer, allow reprogramming of the event
                         * device
                         */
                        enqueue_hrtimer(timer, timer->base, 1);
                } else if (hrtimer_active(timer)) {
                        /*
                         * If the timer was rearmed on another CPU, reprogram
                         * the event device.
                         */
                        if (timer->base->first == &timer->node)
                                hrtimer_reprogram(timer, timer->base);
                }
        }
        spin_unlock_irq(&cpu_base->lock);
}

#endif	/* CONFIG_HIGH_RES_TIMERS */
/*
 * Expire the per base hrtimer-queue:
 */
static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base,
                                     int index)
{
        struct rb_node *node;
        struct hrtimer_clock_base *base = &cpu_base->clock_base[index];

        if (!base->first)
                return;

        if (base->get_softirq_time)
                base->softirq_time = base->get_softirq_time();

        spin_lock_irq(&cpu_base->lock);

        while ((node = base->first)) {
                struct hrtimer *timer;
                enum hrtimer_restart (*fn)(struct hrtimer *);
                int restart;

                timer = rb_entry(node, struct hrtimer, node);
                if (base->softirq_time.tv64 <= timer->expires.tv64)
                        break;

#ifdef CONFIG_HIGH_RES_TIMERS
                WARN_ON_ONCE(timer->cb_mode == HRTIMER_CB_IRQSAFE_NO_SOFTIRQ);
#endif
                timer_stats_account_hrtimer(timer);

                fn = timer->function;
                __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
                spin_unlock_irq(&cpu_base->lock);

                restart = fn(timer);

                spin_lock_irq(&cpu_base->lock);

                timer->state &= ~HRTIMER_STATE_CALLBACK;
                if (restart != HRTIMER_NORESTART) {
                        BUG_ON(hrtimer_active(timer));
                        enqueue_hrtimer(timer, base, 0);
                }
        }
        spin_unlock_irq(&cpu_base->lock);
}
/*
 * Called from timer softirq every jiffy, expire hrtimers:
 *
 * For HRT it's the fallback code to run the softirq in the timer
 * softirq context in case the hrtimer initialization failed or has
 * not been done yet.
 */
void hrtimer_run_queues(void)
{
        struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
        int i;

        if (hrtimer_hres_active())
                return;

        /*
         * This _is_ ugly: We have to check in the softirq context,
         * whether we can switch to highres and / or nohz mode. The
         * clocksource switch happens in the timer interrupt with
         * xtime_lock held. Notification from there only sets the
         * check bit in the tick_oneshot code, otherwise we might
         * deadlock vs. xtime_lock.
         */
        if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
                if (hrtimer_switch_to_hres())
                        return;

        hrtimer_get_softirq_time(cpu_base);

        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
                run_hrtimer_queue(cpu_base, i);
}
/*
 * Sleep related functions:
 */
static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
{
        struct hrtimer_sleeper *t =
                container_of(timer, struct hrtimer_sleeper, timer);
        struct task_struct *task = t->task;

        t->task = NULL;
        if (task)
                wake_up_process(task);

        return HRTIMER_NORESTART;
}
void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
        sl->timer.function = hrtimer_wakeup;
        sl->task = task;
#ifdef CONFIG_HIGH_RES_TIMERS
        sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_RESTART;
#endif
}
static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
        hrtimer_init_sleeper(t, current);

        do {
                set_current_state(TASK_INTERRUPTIBLE);
                hrtimer_start(&t->timer, t->timer.expires, mode);

                if (likely(t->task))
                        schedule();

                hrtimer_cancel(&t->timer);
                mode = HRTIMER_MODE_ABS;

        } while (t->task && !signal_pending(current));

        return t->task == NULL;
}
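/*
 * Note: do_nanosleep() returns 1 when the sleep completed (the wakeup
 * callback cleared t->task) and 0 when it was interrupted by a signal;
 * the callers below use this to decide between returning success and
 * setting up a restart block.
 */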
long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
{
        struct hrtimer_sleeper t;
        struct timespec __user *rmtp;
        struct timespec tu;
        ktime_t time;

        restart->fn = do_no_restart_syscall;

        hrtimer_init(&t.timer, restart->arg0, HRTIMER_MODE_ABS);
        t.timer.expires.tv64 = ((u64)restart->arg3 << 32) | (u64) restart->arg2;

        if (do_nanosleep(&t, HRTIMER_MODE_ABS))
                return 0;

        rmtp = (struct timespec __user *) restart->arg1;
        if (rmtp) {
                time = ktime_sub(t.timer.expires, t.timer.base->get_time());
                if (time.tv64 <= 0)
                        return 0;
                tu = ktime_to_timespec(time);
                if (copy_to_user(rmtp, &tu, sizeof(tu)))
                        return -EFAULT;
        }

        restart->fn = hrtimer_nanosleep_restart;

        /* The other values in restart are already filled in */
        return -ERESTART_RESTARTBLOCK;
}
long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
                       const enum hrtimer_mode mode, const clockid_t clockid)
{
        struct restart_block *restart;
        struct hrtimer_sleeper t;
        struct timespec tu;
        ktime_t rem;

        hrtimer_init(&t.timer, clockid, mode);
        t.timer.expires = timespec_to_ktime(*rqtp);
        if (do_nanosleep(&t, mode))
                return 0;

        /* Absolute timers do not update the rmtp value and restart: */
        if (mode == HRTIMER_MODE_ABS)
                return -ERESTARTNOHAND;

        if (rmtp) {
                rem = ktime_sub(t.timer.expires, t.timer.base->get_time());
                if (rem.tv64 <= 0)
                        return 0;
                tu = ktime_to_timespec(rem);
                if (copy_to_user(rmtp, &tu, sizeof(tu)))
                        return -EFAULT;
        }

        restart = &current_thread_info()->restart_block;
        restart->fn = hrtimer_nanosleep_restart;
        restart->arg0 = (unsigned long) t.timer.base->index;
        restart->arg1 = (unsigned long) rmtp;
        restart->arg2 = t.timer.expires.tv64 & 0xFFFFFFFF;
        restart->arg3 = t.timer.expires.tv64 >> 32;

        return -ERESTART_RESTARTBLOCK;
}
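/*
 * Worked example (illustrative): the 64 bit absolute expiry is split
 * across two unsigned long restart args so the restart block also works
 * on 32 bit. For expires.tv64 = 0x0000000500000001, arg2 holds
 * 0x00000001 and arg3 holds 0x00000005; hrtimer_nanosleep_restart()
 * reassembles them as ((u64)arg3 << 32) | arg2.
 */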
asmlinkage long
sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
{
        struct timespec tu;

        if (copy_from_user(&tu, rqtp, sizeof(tu)))
                return -EFAULT;

        if (!timespec_valid(&tu))
                return -EINVAL;

        return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}
/*
 * Functions related to boot-time initialization:
 */
static void __devinit init_hrtimers_cpu(int cpu)
{
        struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
        int i;

        spin_lock_init(&cpu_base->lock);
        lockdep_set_class(&cpu_base->lock, &cpu_base->lock_key);

        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
                cpu_base->clock_base[i].cpu_base = cpu_base;

        hrtimer_init_hres(cpu_base);
}
#ifdef CONFIG_HOTPLUG_CPU

static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
                                 struct hrtimer_clock_base *new_base)
{
        struct hrtimer *timer;
        struct rb_node *node;

        while ((node = rb_first(&old_base->active))) {
                timer = rb_entry(node, struct hrtimer, node);
                BUG_ON(hrtimer_callback_running(timer));
                __remove_hrtimer(timer, old_base, HRTIMER_STATE_INACTIVE, 0);
                timer->base = new_base;
                /*
                 * Enqueue the timer. Allow reprogramming of the event device
                 */
                enqueue_hrtimer(timer, new_base, 1);
        }
}
static void migrate_hrtimers(int cpu)
{
        struct hrtimer_cpu_base *old_base, *new_base;
        int i;

        BUG_ON(cpu_online(cpu));
        old_base = &per_cpu(hrtimer_bases, cpu);
        new_base = &get_cpu_var(hrtimer_bases);

        tick_cancel_sched_timer(cpu);

        local_irq_disable();
        double_spin_lock(&new_base->lock, &old_base->lock,
                         smp_processor_id() < cpu);

        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
                migrate_hrtimer_list(&old_base->clock_base[i],
                                     &new_base->clock_base[i]);
        }

        double_spin_unlock(&new_base->lock, &old_base->lock,
                           smp_processor_id() < cpu);
        local_irq_enable();
        put_cpu_var(hrtimer_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */
static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
                                        unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;

        switch (action) {

        case CPU_UP_PREPARE:
                init_hrtimers_cpu(cpu);
                break;

#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
                clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu);
                migrate_hrtimers(cpu);
                break;
#endif

        default:
                break;
        }

        return NOTIFY_OK;
}
static struct notifier_block __cpuinitdata hrtimers_nb = {
        .notifier_call = hrtimer_cpu_notify,
};
void __init hrtimers_init(void)
{
        hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
                           (void *)(long)smp_processor_id());
        register_cpu_notifier(&hrtimers_nb);
#ifdef CONFIG_HIGH_RES_TIMERS
        open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq, NULL);
#endif
}