/*
 * linux/kernel/time/tick-sched.c
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 * No idle tick implementation for low and high resolution timers
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 *
 * Distribute under GPLv2.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/tick.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"
/*
 * Per cpu nohz control structure
 */
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
/*
 * The time when the last jiffy update happened. Protected by xtime_lock.
 */
static ktime_t last_jiffies_update;
struct tick_sched *tick_get_tick_sched(int cpu)
{
	return &per_cpu(tick_cpu_sched, cpu);
}
/*
 * Must be called with interrupts disabled !
 */
static void tick_do_update_jiffies64(ktime_t now)
{
	unsigned long ticks = 0;
	ktime_t delta;

	/*
	 * Do a quick check without holding xtime_lock:
	 */
	delta = ktime_sub(now, last_jiffies_update);
	if (delta.tv64 < tick_period.tv64)
		return;

	/* Reevaluate with xtime_lock held */
	write_seqlock(&xtime_lock);

	delta = ktime_sub(now, last_jiffies_update);
	if (delta.tv64 >= tick_period.tv64) {

		delta = ktime_sub(delta, tick_period);
		last_jiffies_update = ktime_add(last_jiffies_update,
						tick_period);

		/* Slow path for long timeouts */
		if (unlikely(delta.tv64 >= tick_period.tv64)) {
			s64 incr = ktime_to_ns(tick_period);

			ticks = ktime_divns(delta, incr);

			last_jiffies_update = ktime_add_ns(last_jiffies_update,
							   incr * ticks);
		}
		do_timer(++ticks);
	}
	write_sequnlock(&xtime_lock);
}
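/*
 * Worked example for the slow path above, assuming HZ=1000
 * (tick_period = 1ms): if this CPU slept for 5.7ms, the first
 * ktime_sub()/ktime_add() pair consumes one period, ktime_divns(4.7ms, 1ms)
 * yields ticks = 4, and do_timer(++ticks) then advances jiffies by 5 in a
 * single call instead of five separate single-tick updates.
 */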
/*
 * Initialize and return the time of the last jiffies update.
 */
static ktime_t tick_init_jiffy_update(void)
{
	ktime_t period;

	write_seqlock(&xtime_lock);
	/* Did we start the jiffies update yet ? */
	if (last_jiffies_update.tv64 == 0)
		last_jiffies_update = tick_next_period;
	period = last_jiffies_update;
	write_sequnlock(&xtime_lock);
	return period;
}
/*
 * NOHZ - aka dynamic tick functionality
 */
#ifdef CONFIG_NO_HZ
/*
 * NO HZ enabled ?
 */
static int tick_nohz_enabled __read_mostly = 1;
/*
 * Enable / Disable tickless mode
 */
static int __init setup_tick_nohz(char *str)
{
	if (!strcmp(str, "off"))
		tick_nohz_enabled = 0;
	else if (!strcmp(str, "on"))
		tick_nohz_enabled = 1;
	else
		return 0;
	return 1;
}

__setup("nohz=", setup_tick_nohz);
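/*
 * Usage note: booting with "nohz=off" on the kernel command line keeps the
 * periodic tick running even while idle; the default here is "on".
 * Returning 1 above tells the __setup() machinery the option was consumed.
 */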
/**
 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
 *
 * Called from interrupt entry when the CPU was idle
 *
 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
 * must be updated. Otherwise an interrupt handler could use a stale jiffy
 * value. We do this unconditionally on any cpu, as we don't know whether the
 * cpu, which has the update task assigned, is in a long sleep.
 */
void tick_nohz_update_jiffies(void)
{
	int cpu = smp_processor_id();
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	unsigned long flags;
	ktime_t now;

	if (!ts->tick_stopped)
		return;

	cpu_clear(cpu, nohz_cpu_mask);
	now = ktime_get();
	ts->idle_waketime = now;

	local_irq_save(flags);
	tick_do_update_jiffies64(now);
	local_irq_restore(flags);

	touch_softlockup_watchdog();
}
void tick_nohz_stop_idle(int cpu)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

	if (ts->idle_active) {
		ktime_t now, delta;

		now = ktime_get();
		delta = ktime_sub(now, ts->idle_entrytime);
		ts->idle_lastupdate = now;
		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
		ts->idle_active = 0;

		sched_clock_idle_wakeup_event(0);
	}
}
static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
{
	ktime_t now, delta;

	now = ktime_get();
	if (ts->idle_active) {
		delta = ktime_sub(now, ts->idle_entrytime);
		ts->idle_lastupdate = now;
		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
	}
	ts->idle_entrytime = now;
	ts->idle_active = 1;
	sched_clock_idle_sleep_event();
	return now;
}
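/*
 * tick_nohz_start_idle() and tick_nohz_stop_idle() bracket each idle
 * period: entry stamps idle_entrytime, exit folds the elapsed time into
 * idle_sleeptime. get_cpu_idle_time_us() below simply reports that sum.
 */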
u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

	*last_update_time = ktime_to_us(ts->idle_lastupdate);
	return ktime_to_us(ts->idle_sleeptime);
}
/**
 * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
 *
 * When the next event is more than a tick into the future, stop the idle tick
 * Called either from the idle loop or from irq_exit() when an idle period was
 * just interrupted by an interrupt which did not cause a reschedule.
 */
void tick_nohz_stop_sched_tick(int inidle)
{
	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
	struct tick_sched *ts;
	ktime_t last_update, expires, now;
	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
	int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();
	ts = &per_cpu(tick_cpu_sched, cpu);
	now = tick_nohz_start_idle(ts);
	/*
	 * If this cpu is offline and it is the one which updates
	 * jiffies, then give up the assignment and let it be taken by
	 * the cpu which runs the tick timer next. If we don't drop
	 * this here the jiffies might be stale and do_timer() never
	 * invoked.
	 */
	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = -1;
	}

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		goto end;

	if (!inidle && !ts->inidle)
		goto end;

	ts->inidle = 1;

	if (need_resched())
		goto end;
	if (unlikely(local_softirq_pending())) {
		static int ratelimit;

		if (ratelimit < 10) {
			printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
			       local_softirq_pending());
			ratelimit++;
		}
		goto end;
	}

	ts->idle_calls++;

	/* Read jiffies and the time when jiffies were updated last */
	do {
		seq = read_seqbegin(&xtime_lock);
		last_update = last_jiffies_update;
		last_jiffies = jiffies;
	} while (read_seqretry(&xtime_lock, seq));
	/* Get the next timer wheel timer */
	next_jiffies = get_next_timer_interrupt(last_jiffies);
	delta_jiffies = next_jiffies - last_jiffies;

	if (rcu_needs_cpu(cpu))
		delta_jiffies = 1;

	/*
	 * Do not stop the tick, if we are only one off
	 * or if the cpu is required for rcu
	 */
	if (!ts->tick_stopped && delta_jiffies == 1)
		goto out;
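	/*
	 * Example: with jiffies at 1000 and the earliest timer wheel timer
	 * at jiffy 1250, delta_jiffies is 250 and the tick can be stopped
	 * for 250 periods. A delta of exactly one tick is not worth the
	 * cost of reprogramming the clock event device.
	 */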
	/* Schedule the tick, if we are at least one jiffie off */
	if ((long)delta_jiffies >= 1) {

		if (delta_jiffies > 1)
			cpu_set(cpu, nohz_cpu_mask);

		/*
		 * nohz_stop_sched_tick can be called several times before
		 * the nohz_restart_sched_tick is called. This happens when
		 * interrupts arrive which do not cause a reschedule. In the
		 * first call we save the current tick time, so we can restart
		 * the scheduler tick in nohz_restart_sched_tick.
		 */
		if (!ts->tick_stopped) {
			if (select_nohz_load_balancer(1)) {
				/*
				 * sched tick not stopped!
				 */
				cpu_clear(cpu, nohz_cpu_mask);
				goto out;
			}

			ts->idle_tick = ts->sched_timer.expires;
			ts->tick_stopped = 1;
			ts->idle_jiffies = last_jiffies;
			rcu_enter_nohz();
		}
		/*
		 * If this cpu is the one which updates jiffies, then
		 * give up the assignment and let it be taken by the
		 * cpu which runs the tick timer next, which might be
		 * this cpu as well. If we don't drop this here the
		 * jiffies might be stale and do_timer() never
		 * invoked.
		 */
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = -1;

		ts->idle_sleeps++;

		/*
		 * delta_jiffies >= NEXT_TIMER_MAX_DELTA signals that
		 * there is no timer pending or at least extremely far
		 * into the future (12 days for HZ=1000). In this case
		 * we simply stop the tick timer:
		 */
		if (unlikely(delta_jiffies >= NEXT_TIMER_MAX_DELTA)) {
			ts->idle_expires.tv64 = KTIME_MAX;
			if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
				hrtimer_cancel(&ts->sched_timer);
			goto out;
		}
		/*
		 * calculate the expiry time for the next timer wheel
		 * timer
		 */
		expires = ktime_add_ns(last_update, tick_period.tv64 *
				       delta_jiffies);
		ts->idle_expires = expires;

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start(&ts->sched_timer, expires,
				      HRTIMER_MODE_ABS);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				goto out;
		} else if (!tick_program_event(expires, 0))
			goto out;

		/*
		 * We are past the event already. So we crossed a
		 * jiffie boundary. Update jiffies and raise the
		 * softirq.
		 */
		tick_do_update_jiffies64(ktime_get());
		cpu_clear(cpu, nohz_cpu_mask);
	}
	raise_softirq_irqoff(TIMER_SOFTIRQ);
out:
	ts->next_jiffies = next_jiffies;
	ts->last_jiffies = last_jiffies;
	ts->sleep_length = ktime_sub(dev->next_event, now);
end:
	local_irq_restore(flags);
}
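/*
 * Note the two exit labels above: "out" still records the bookkeeping
 * (next/last jiffies and the sleep length for the idle governor), while
 * "end" bails out before that state is valid and only restores interrupts.
 */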
/**
 * tick_nohz_get_sleep_length - return the length of the current sleep
 *
 * Called from power state control code with interrupts disabled
 */
ktime_t tick_nohz_get_sleep_length(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	return ts->sleep_length;
}
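/*
 * The value returned here is consumed by cpuidle governors (e.g. the menu
 * governor) to pick a C-state whose exit latency fits the expected sleep.
 */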
/**
 * tick_nohz_restart_sched_tick - restart the idle tick from the idle task
 *
 * Restart the idle tick when the CPU is woken up from idle
 */
void tick_nohz_restart_sched_tick(void)
{
	int cpu = smp_processor_id();
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	unsigned long ticks;
	ktime_t now;

	local_irq_disable();
	tick_nohz_stop_idle(cpu);

	if (!ts->inidle || !ts->tick_stopped) {
		ts->inidle = 0;
		local_irq_enable();
		return;
	}

	ts->inidle = 0;

	rcu_exit_nohz();

	/* Update jiffies first */
	select_nohz_load_balancer(0);
	now = ktime_get();
	tick_do_update_jiffies64(now);
	cpu_clear(cpu, nohz_cpu_mask);
	/*
	 * We stopped the tick in idle. update_process_times() would miss the
	 * time we slept, as it does only a single tick of accounting.
	 * Enforce that this is accounted to idle !
	 */
	ticks = jiffies - ts->idle_jiffies;
	/*
	 * We might be one off. Do not randomly account a huge number of ticks!
	 */
	if (ticks && ticks < LONG_MAX) {
		add_preempt_count(HARDIRQ_OFFSET);
		account_system_time(current, HARDIRQ_OFFSET,
				    jiffies_to_cputime(ticks));
		sub_preempt_count(HARDIRQ_OFFSET);
	}

	touch_softlockup_watchdog();
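	/*
	 * The add/sub_preempt_count(HARDIRQ_OFFSET) pair above makes the
	 * accounting call appear to run from the tick interrupt, which is
	 * the context account_system_time() expects; since current is the
	 * idle task, the slept-through ticks are charged to idle (or iowait).
	 */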
	/*
	 * Cancel the scheduled timer and restore the tick
	 */
	ts->tick_stopped = 0;
	ts->idle_exittime = now;
	hrtimer_cancel(&ts->sched_timer);
	ts->sched_timer.expires = ts->idle_tick;

	while (1) {
		/* Forward the time to expire in the future */
		hrtimer_forward(&ts->sched_timer, now, tick_period);

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start(&ts->sched_timer,
				      ts->sched_timer.expires,
				      HRTIMER_MODE_ABS);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				break;
		} else {
			if (!tick_program_event(ts->sched_timer.expires, 0))
				break;
		}
		/* Update jiffies and reread time */
		tick_do_update_jiffies64(now);
		now = ktime_get();
	}
	local_irq_enable();
}
static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
{
	hrtimer_forward(&ts->sched_timer, now, tick_period);
	return tick_program_event(ts->sched_timer.expires, 0);
}
/*
 * The nohz low res interrupt handler
 */
static void tick_nohz_handler(struct clock_event_device *dev)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	struct pt_regs *regs = get_irq_regs();
	int cpu = smp_processor_id();
	ktime_t now = ktime_get();

	dev->next_event.tv64 = KTIME_MAX;
	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the cpu in charge went
	 * into a long sleep. If two cpus happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * xtime_lock.
	 */
	if (unlikely(tick_do_timer_cpu == -1))
		tick_do_timer_cpu = cpu;

	/* Check, if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);
	/*
	 * When we are idle and the tick is stopped, we have to touch
	 * the watchdog as we might not schedule for a really long
	 * time. This happens on complete idle SMP systems while
	 * waiting on the login prompt. We also increment the "start
	 * of idle" jiffy stamp so the idle accounting adjustment we
	 * do when we go busy again does not account too many ticks.
	 */
	if (ts->tick_stopped) {
		touch_softlockup_watchdog();
		ts->idle_jiffies++;
	}

	update_process_times(user_mode(regs));
	profile_tick(CPU_PROFILING);

	/* Do not restart, when we are in the idle loop */
	if (ts->tick_stopped)
		return;

	while (tick_nohz_reprogram(ts, now)) {
		now = ktime_get();
		tick_do_update_jiffies64(now);
	}
}
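/*
 * The reprogram loop above covers the case where the next tick already
 * expired while this one was being handled: tick_program_event() fails
 * for an expiry time in the past, so we advance jiffies and retry one
 * period later until programming the clock event device succeeds.
 */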
/**
 * tick_nohz_switch_to_nohz - switch to nohz mode
 */
static void tick_nohz_switch_to_nohz(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	ktime_t next;

	if (!tick_nohz_enabled)
		return;

	local_irq_disable();
	if (tick_switch_to_oneshot(tick_nohz_handler)) {
		local_irq_enable();
		return;
	}

	ts->nohz_mode = NOHZ_MODE_LOWRES;

	/*
	 * Recycle the hrtimer in ts, so we can share the
	 * hrtimer_forward with the highres code.
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	/* Get the next period */
	next = tick_init_jiffy_update();

	for (;;) {
		ts->sched_timer.expires = next;
		if (!tick_program_event(next, 0))
			break;
		next = ktime_add(next, tick_period);
	}
	local_irq_enable();

	printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n",
	       smp_processor_id());
}
#else

static inline void tick_nohz_switch_to_nohz(void) { }

#endif /* NO_HZ */
/*
 * High resolution timer specific code
 */
#ifdef CONFIG_HIGH_RES_TIMERS
/*
 * We rearm the timer until we get disabled by the idle code.
 * Called with interrupts disabled and timer->base->cpu_base->lock held.
 */
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
	struct tick_sched *ts =
		container_of(timer, struct tick_sched, sched_timer);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();
	int cpu = smp_processor_id();
#ifdef CONFIG_NO_HZ
	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the cpu in charge went
	 * into a long sleep. If two cpus happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * xtime_lock.
	 */
	if (unlikely(tick_do_timer_cpu == -1))
		tick_do_timer_cpu = cpu;
#endif

	/* Check, if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);
	/*
	 * Do not call, when we are not in irq context and have
	 * no valid regs pointer
	 */
	if (regs) {
		/*
		 * When we are idle and the tick is stopped, we have to touch
		 * the watchdog as we might not schedule for a really long
		 * time. This happens on complete idle SMP systems while
		 * waiting on the login prompt. We also increment the "start of
		 * idle" jiffy stamp so the idle accounting adjustment we do
		 * when we go busy again does not account too many ticks.
		 */
		if (ts->tick_stopped) {
			touch_softlockup_watchdog();
			ts->idle_jiffies++;
		}
		update_process_times(user_mode(regs));
		profile_tick(CPU_PROFILING);
	}

	/* Do not restart, when we are in the idle loop */
	if (ts->tick_stopped)
		return HRTIMER_NORESTART;

	hrtimer_forward(timer, now, tick_period);

	return HRTIMER_RESTART;
}
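/*
 * Returning HRTIMER_RESTART after hrtimer_forward() re-arms the timer one
 * tick_period ahead, so this callback emulates the periodic tick purely in
 * hrtimer context; HRTIMER_NORESTART leaves it cancelled while we are idle.
 */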
/**
 * tick_setup_sched_timer - setup the tick emulation timer
 */
void tick_setup_sched_timer(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	ktime_t now = ktime_get();
	u64 offset;

	/*
	 * Emulate tick processing via per-CPU hrtimers:
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	ts->sched_timer.function = tick_sched_timer;
	ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
	/* Get the next period (per cpu) */
	ts->sched_timer.expires = tick_init_jiffy_update();
	offset = ktime_to_ns(tick_period) >> 1;
	do_div(offset, num_possible_cpus());
	offset *= smp_processor_id();
	ts->sched_timer.expires = ktime_add_ns(ts->sched_timer.expires, offset);
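	/*
	 * Worked example of the stagger above, assuming HZ=250
	 * (tick_period = 4ms) and four possible CPUs: half a period (2ms)
	 * divided by 4 gives a 500us stride, so CPU0 ticks at +0us, CPU1
	 * at +500us, CPU2 at +1000us and CPU3 at +1500us, instead of all
	 * per-cpu tick timers expiring at the same instant.
	 */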
	for (;;) {
		hrtimer_forward(&ts->sched_timer, now, tick_period);
		hrtimer_start(&ts->sched_timer, ts->sched_timer.expires,
			      HRTIMER_MODE_ABS);
		/* Check, if the timer was already in the past */
		if (hrtimer_active(&ts->sched_timer))
			break;
		now = ktime_get();
	}

#ifdef CONFIG_NO_HZ
	if (tick_nohz_enabled)
		ts->nohz_mode = NOHZ_MODE_HIGHRES;
#endif
}
#endif /* HIGH_RES_TIMERS */
#if defined CONFIG_NO_HZ || defined CONFIG_HIGH_RES_TIMERS
void tick_cancel_sched_timer(int cpu)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

# ifdef CONFIG_HIGH_RES_TIMERS
	if (ts->sched_timer.base)
		hrtimer_cancel(&ts->sched_timer);
# endif

	ts->nohz_mode = NOHZ_MODE_INACTIVE;
}
#endif
/**
 * Async notification about clocksource changes
 */
void tick_clock_notify(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
}
/*
 * Async notification about clock event changes
 */
void tick_oneshot_notify(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	set_bit(0, &ts->check_clocks);
}
/**
 * Check, if a change happened, which makes oneshot possible.
 *
 * Called cyclically from the hrtimer softirq (driven by the timer
 * softirq). allow_nohz signals that we can switch into low-res nohz
 * mode, because high resolution timers are disabled (either at compile
 * time or at runtime).
 */
int tick_check_oneshot_change(int allow_nohz)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	if (!test_and_clear_bit(0, &ts->check_clocks))
		return 0;

	if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
		return 0;

	if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
		return 0;

	if (!allow_nohz)
		return 1;

	tick_nohz_switch_to_nohz();
	return 0;
}