/*
 * linux/kernel/time/tick-sched.c
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * No idle tick implementation for low and high resolution timers
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/tick.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"
/*
 * Per cpu nohz control structure
 */
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);

/*
 * The time, when the last jiffy update happened. Protected by xtime_lock.
 */
static ktime_t last_jiffies_update;
struct tick_sched *tick_get_tick_sched(int cpu)
{
        return &per_cpu(tick_cpu_sched, cpu);
}
/*
 * Must be called with interrupts disabled !
 */
static void tick_do_update_jiffies64(ktime_t now)
{
        unsigned long ticks = 0;
        ktime_t delta;

        /* Reevaluate with xtime_lock held */
        write_seqlock(&xtime_lock);

        delta = ktime_sub(now, last_jiffies_update);
        if (delta.tv64 >= tick_period.tv64) {

                delta = ktime_sub(delta, tick_period);
                last_jiffies_update = ktime_add(last_jiffies_update,
                                                tick_period);

                /* Slow path for long timeouts */
                if (unlikely(delta.tv64 >= tick_period.tv64)) {
                        s64 incr = ktime_to_ns(tick_period);

                        ticks = ktime_divns(delta, incr);

                        last_jiffies_update = ktime_add_ns(last_jiffies_update,
                                                           incr * ticks);
                }
                do_timer(++ticks);
        }
        write_sequnlock(&xtime_lock);
}
/*
 * Initialize and retrieve the jiffies update.
 */
static ktime_t tick_init_jiffy_update(void)
{
        ktime_t period;

        write_seqlock(&xtime_lock);
        /* Did we start the jiffies update yet ? */
        if (last_jiffies_update.tv64 == 0)
                last_jiffies_update = tick_next_period;
        period = last_jiffies_update;
        write_sequnlock(&xtime_lock);
        return period;
}
/*
 * NOHZ - aka dynamic tick functionality
 */
#ifdef CONFIG_NO_HZ

static int tick_nohz_enabled __read_mostly = 1;
/*
 * Enable / Disable tickless mode
 */
static int __init setup_tick_nohz(char *str)
{
        if (!strcmp(str, "off"))
                tick_nohz_enabled = 0;
        else if (!strcmp(str, "on"))
                tick_nohz_enabled = 1;
        else
                return 0;
        return 1;
}

__setup("nohz=", setup_tick_nohz);
/**
 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
 *
 * Called from interrupt entry when the CPU was idle
 *
 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
 * must be updated. Otherwise an interrupt handler could use a stale jiffy
 * value. We do this unconditionally on any cpu, as we don't know whether the
 * cpu, which has the update task assigned is in a long sleep.
 */
void tick_nohz_update_jiffies(void)
{
        int cpu = smp_processor_id();
        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
        unsigned long flags;
        ktime_t now;

        if (!ts->tick_stopped)
                return;

        cpu_clear(cpu, nohz_cpu_mask);
        now = ktime_get();

        local_irq_save(flags);
        tick_do_update_jiffies64(now);
        local_irq_restore(flags);
}
/**
 * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
 *
 * When the next event is more than a tick into the future, stop the idle tick
 * Called either from the idle loop or from irq_exit() when an idle period was
 * just interrupted by an interrupt which did not cause a reschedule.
 */
void tick_nohz_stop_sched_tick(void)
{
        unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
        struct tick_sched *ts;
        ktime_t last_update, expires, now, delta;
        struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
        int cpu;

        local_irq_save(flags);

        cpu = smp_processor_id();
        ts = &per_cpu(tick_cpu_sched, cpu);
        /*
         * If this cpu is offline and it is the one which updates
         * jiffies, then give up the assignment and let it be taken by
         * the cpu which runs the tick timer next. If we don't drop
         * this here the jiffies might be stale and do_timer() never
         * invoked.
         */
        if (unlikely(!cpu_online(cpu))) {
                if (cpu == tick_do_timer_cpu)
                        tick_do_timer_cpu = -1;
        }

        if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
                goto end;

        cpu = smp_processor_id();
        if (unlikely(local_softirq_pending())) {
                static int ratelimit;

                if (ratelimit < 10) {
                        printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
                               local_softirq_pending());
                        ratelimit++;
                }
        }
        now = ktime_get();
        /*
         * When called from irq_exit we need to account the idle sleep time
         * correctly.
         */
        if (ts->tick_stopped) {
                delta = ktime_sub(now, ts->idle_entrytime);
                ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
        }

        ts->idle_entrytime = now;
        /* Read jiffies and the time when jiffies were updated last */
        do {
                seq = read_seqbegin(&xtime_lock);
                last_update = last_jiffies_update;
                last_jiffies = jiffies;
        } while (read_seqretry(&xtime_lock, seq));

        /* Get the next timer wheel timer */
        next_jiffies = get_next_timer_interrupt(last_jiffies);
        delta_jiffies = next_jiffies - last_jiffies;
        if (rcu_needs_cpu(cpu))
                delta_jiffies = 1;
        /*
         * Do not stop the tick, if we are only one off
         * or if the cpu is required for rcu
         */
        if (!ts->tick_stopped && delta_jiffies == 1)
                goto out;
        /* Schedule the tick, if we are at least one jiffie off */
        if ((long)delta_jiffies >= 1) {

                if (delta_jiffies > 1)
                        cpu_set(cpu, nohz_cpu_mask);
                /*
                 * nohz_stop_sched_tick can be called several times before
                 * the nohz_restart_sched_tick is called. This happens when
                 * interrupts arrive which do not cause a reschedule. In the
                 * first call we save the current tick time, so we can restart
                 * the scheduler tick in nohz_restart_sched_tick.
                 */
                if (!ts->tick_stopped) {
                        if (select_nohz_load_balancer(1)) {
                                /*
                                 * sched tick not stopped!
                                 */
                                cpu_clear(cpu, nohz_cpu_mask);
                                goto out;
                        }

                        ts->idle_tick = ts->sched_timer.expires;
                        ts->tick_stopped = 1;
                        ts->idle_jiffies = last_jiffies;
                }
                /*
                 * If this cpu is the one which updates jiffies, then
                 * give up the assignment and let it be taken by the
                 * cpu which runs the tick timer next, which might be
                 * this cpu as well. If we don't drop this here the
                 * jiffies might be stale and do_timer() never
                 * invoked.
                 */
                if (cpu == tick_do_timer_cpu)
                        tick_do_timer_cpu = -1;
                /*
                 * delta_jiffies >= NEXT_TIMER_MAX_DELTA signals that
                 * there is no timer pending or at least extremely far
                 * into the future (12 days for HZ=1000). In this case
                 * we simply stop the tick timer:
                 */
                if (unlikely(delta_jiffies >= NEXT_TIMER_MAX_DELTA)) {
                        ts->idle_expires.tv64 = KTIME_MAX;
                        if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
                                hrtimer_cancel(&ts->sched_timer);
                        goto out;
                }
                /*
                 * calculate the expiry time for the next timer wheel
                 * timer
                 */
                expires = ktime_add_ns(last_update, tick_period.tv64 *
                                       delta_jiffies);
                ts->idle_expires = expires;
                if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
                        hrtimer_start(&ts->sched_timer, expires,
                                      HRTIMER_MODE_ABS);
                        /* Check, if the timer was already in the past */
                        if (hrtimer_active(&ts->sched_timer))
                                goto out;
                } else if (!tick_program_event(expires, 0))
                        goto out;
                /*
                 * We are past the event already. So we crossed a
                 * jiffie boundary. Update jiffies and raise the
                 * softirq.
                 */
                tick_do_update_jiffies64(ktime_get());
                cpu_clear(cpu, nohz_cpu_mask);
        }
        raise_softirq_irqoff(TIMER_SOFTIRQ);
out:
        ts->next_jiffies = next_jiffies;
        ts->last_jiffies = last_jiffies;
        ts->sleep_length = ktime_sub(dev->next_event, now);
end:
        local_irq_restore(flags);
}
/**
 * tick_nohz_get_sleep_length - return the length of the current sleep
 *
 * Called from power state control code with interrupts disabled
 */
ktime_t tick_nohz_get_sleep_length(void)
{
        struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

        return ts->sleep_length;
}

EXPORT_SYMBOL_GPL(tick_nohz_get_sleep_length);
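
/*
 * Illustrative sketch, not part of the original file: a power state
 * governor running with interrupts disabled could use the exported helper
 * above to decide whether a deep sleep state is worthwhile.
 * target_residency_ns and enter_deep_sleep_state() below are hypothetical
 * caller-side names, not defined here:
 *
 *      s64 sleep_ns = ktime_to_ns(tick_nohz_get_sleep_length());
 *
 *      if (sleep_ns > target_residency_ns)
 *              enter_deep_sleep_state();
 */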
/**
 * tick_nohz_restart_sched_tick - restart the idle tick from the idle task
 *
 * Restart the idle tick when the CPU is woken up from idle
 */
void tick_nohz_restart_sched_tick(void)
{
        int cpu = smp_processor_id();
        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
        unsigned long ticks;
        ktime_t now, delta;

        if (!ts->tick_stopped)
                return;
        /* Update jiffies first */
        now = ktime_get();

        local_irq_disable();
        select_nohz_load_balancer(0);
        tick_do_update_jiffies64(now);
        cpu_clear(cpu, nohz_cpu_mask);

        /* Account the idle time */
        delta = ktime_sub(now, ts->idle_entrytime);
        ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
        /*
         * We stopped the tick in idle. Update process times would miss the
         * time we slept as update_process_times does only a 1 tick
         * accounting. Enforce that this is accounted to idle !
         */
        ticks = jiffies - ts->idle_jiffies;
        /*
         * We might be one off. Do not randomly account a huge number of ticks!
         */
        if (ticks && ticks < LONG_MAX) {
                add_preempt_count(HARDIRQ_OFFSET);
                account_system_time(current, HARDIRQ_OFFSET,
                                    jiffies_to_cputime(ticks));
                sub_preempt_count(HARDIRQ_OFFSET);
        }
        /*
         * Cancel the scheduled timer and restore the tick
         */
        ts->tick_stopped = 0;
        hrtimer_cancel(&ts->sched_timer);
        ts->sched_timer.expires = ts->idle_tick;
        while (1) {
                /* Forward the time to expire in the future */
                hrtimer_forward(&ts->sched_timer, now, tick_period);

                if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
                        hrtimer_start(&ts->sched_timer,
                                      ts->sched_timer.expires,
                                      HRTIMER_MODE_ABS);
                        /* Check, if the timer was already in the past */
                        if (hrtimer_active(&ts->sched_timer))
                                break;
                } else {
                        if (!tick_program_event(ts->sched_timer.expires, 0))
                                break;
                }
                /* Update jiffies and reread time */
                tick_do_update_jiffies64(now);
                now = ktime_get();
        }
        local_irq_enable();
}
static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
{
        hrtimer_forward(&ts->sched_timer, now, tick_period);
        return tick_program_event(ts->sched_timer.expires, 0);
}
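
/*
 * Note: tick_program_event() returns nonzero when the requested expiry is
 * already in the past, so tick_nohz_handler() below keeps calling
 * tick_nohz_reprogram() in a loop, updating jiffies each time, until the
 * next event is successfully armed in the future.
 */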
/*
 * The nohz low res interrupt handler
 */
static void tick_nohz_handler(struct clock_event_device *dev)
{
        struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
        struct pt_regs *regs = get_irq_regs();
        int cpu = smp_processor_id();
        ktime_t now = ktime_get();

        dev->next_event.tv64 = KTIME_MAX;
        /*
         * Check if the do_timer duty was dropped. We don't care about
         * concurrency: This happens only when the cpu in charge went
         * into a long sleep. If two cpus happen to assign themselves to
         * this duty, then the jiffies update is still serialized by
         * xtime_lock.
         */
        if (unlikely(tick_do_timer_cpu == -1))
                tick_do_timer_cpu = cpu;

        /* Check, if the jiffies need an update */
        if (tick_do_timer_cpu == cpu)
                tick_do_update_jiffies64(now);
        /*
         * When we are idle and the tick is stopped, we have to touch
         * the watchdog as we might not schedule for a really long
         * time. This happens on complete idle SMP systems while
         * waiting on the login prompt. We also increment the "start
         * of idle" jiffy stamp so the idle accounting adjustment we
         * do when we go busy again does not account too many ticks.
         */
        if (ts->tick_stopped) {
                touch_softlockup_watchdog();
                ts->idle_jiffies++;
        }

        update_process_times(user_mode(regs));
        profile_tick(CPU_PROFILING);

        /* Do not restart, when we are in the idle loop */
        if (ts->tick_stopped)
                return;

        while (tick_nohz_reprogram(ts, now)) {
                now = ktime_get();
                tick_do_update_jiffies64(now);
        }
}
/**
 * tick_nohz_switch_to_nohz - switch to nohz mode
 */
static void tick_nohz_switch_to_nohz(void)
{
        struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
        ktime_t next;

        if (!tick_nohz_enabled)
                return;

        local_irq_disable();
        if (tick_switch_to_oneshot(tick_nohz_handler)) {
                local_irq_enable();
                return;
        }

        ts->nohz_mode = NOHZ_MODE_LOWRES;

        /*
         * Recycle the hrtimer in ts, so we can share the
         * hrtimer_forward with the highres code.
         */
        hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        /* Get the next period */
        next = tick_init_jiffy_update();

        for (;;) {
                ts->sched_timer.expires = next;
                if (!tick_program_event(next, 0))
                        break;
                next = ktime_add(next, tick_period);
        }
        local_irq_enable();

        printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n",
               smp_processor_id());
}
#else

static inline void tick_nohz_switch_to_nohz(void) { }

#endif /* CONFIG_NO_HZ */
/*
 * High resolution timer specific code
 */
#ifdef CONFIG_HIGH_RES_TIMERS
/*
 * We rearm the timer until we get disabled by the idle code
 * Called with interrupts disabled and timer->base->cpu_base->lock held.
 */
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
        struct tick_sched *ts =
                container_of(timer, struct tick_sched, sched_timer);
        struct hrtimer_cpu_base *base = timer->base->cpu_base;
        struct pt_regs *regs = get_irq_regs();
        ktime_t now = ktime_get();
        int cpu = smp_processor_id();
        /*
         * Check if the do_timer duty was dropped. We don't care about
         * concurrency: This happens only when the cpu in charge went
         * into a long sleep. If two cpus happen to assign themselves to
         * this duty, then the jiffies update is still serialized by
         * xtime_lock.
         */
        if (unlikely(tick_do_timer_cpu == -1))
                tick_do_timer_cpu = cpu;

        /* Check, if the jiffies need an update */
        if (tick_do_timer_cpu == cpu)
                tick_do_update_jiffies64(now);
        /*
         * Do not call, when we are not in irq context and have
         * no valid regs pointer
         */
        if (regs) {
                /*
                 * When we are idle and the tick is stopped, we have to touch
                 * the watchdog as we might not schedule for a really long
                 * time. This happens on complete idle SMP systems while
                 * waiting on the login prompt. We also increment the "start of
                 * idle" jiffy stamp so the idle accounting adjustment we do
                 * when we go busy again does not account too many ticks.
                 */
                if (ts->tick_stopped) {
                        touch_softlockup_watchdog();
                        ts->idle_jiffies++;
                }
                /*
                 * update_process_times() might take tasklist_lock, hence
                 * drop the base lock. sched-tick hrtimers are per-CPU and
                 * never accessible by userspace APIs, so this is safe to do.
                 */
                spin_unlock(&base->lock);
                update_process_times(user_mode(regs));
                profile_tick(CPU_PROFILING);
                spin_lock(&base->lock);
        }
        /* Do not restart, when we are in the idle loop */
        if (ts->tick_stopped)
                return HRTIMER_NORESTART;

        hrtimer_forward(timer, now, tick_period);
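
        /*
         * Returning HRTIMER_RESTART below makes the hrtimer core requeue
         * this timer at the expiry just computed by hrtimer_forward(), so
         * the emulated tick keeps running.
         */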
        return HRTIMER_RESTART;
}
/**
 * tick_setup_sched_timer - setup the tick emulation timer
 */
void tick_setup_sched_timer(void)
{
        struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
        ktime_t now = ktime_get();
        u64 offset;

        /*
         * Emulate tick processing via per-CPU hrtimers:
         */
        hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        ts->sched_timer.function = tick_sched_timer;
        ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
        /* Get the next period (per cpu) */
        ts->sched_timer.expires = tick_init_jiffy_update();
        offset = ktime_to_ns(tick_period) >> 1;
        do_div(offset, NR_CPUS);
        offset *= smp_processor_id();
        ts->sched_timer.expires = ktime_add_ns(ts->sched_timer.expires, offset);
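
        /*
         * The expiry is now staggered per CPU: each CPU is shifted by up to
         * half a tick period, spread evenly across NR_CPUS, so the per-CPU
         * tick timers do not all expire at the same instant.
         */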
        for (;;) {
                hrtimer_forward(&ts->sched_timer, now, tick_period);
                hrtimer_start(&ts->sched_timer, ts->sched_timer.expires,
                              HRTIMER_MODE_ABS);
                /* Check, if the timer was already in the past */
                if (hrtimer_active(&ts->sched_timer))
                        break;
                now = ktime_get();
        }

#ifdef CONFIG_NO_HZ
        if (tick_nohz_enabled)
                ts->nohz_mode = NOHZ_MODE_HIGHRES;
#endif
}
void tick_cancel_sched_timer(int cpu)
{
        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

        if (ts->sched_timer.base)
                hrtimer_cancel(&ts->sched_timer);

        ts->tick_stopped = 0;
        ts->nohz_mode = NOHZ_MODE_INACTIVE;
}
#endif /* HIGH_RES_TIMERS */
/**
 * Async notification about clocksource changes
 */
void tick_clock_notify(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
}
/*
 * Async notification about clock event changes
 */
void tick_oneshot_notify(void)
{
        struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

        set_bit(0, &ts->check_clocks);
}
/**
 * Check, if a change happened, which makes oneshot possible.
 *
 * Called cyclically from the hrtimer softirq (driven by the timer
 * softirq). allow_nohz signals that we can switch into low-res nohz
 * mode, because high resolution timers are disabled (either at compile
 * time or at runtime).
 */
int tick_check_oneshot_change(int allow_nohz)
{
        struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

        if (!test_and_clear_bit(0, &ts->check_clocks))
                return 0;

        if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
                return 0;

        if (!timekeeping_is_continuous() || !tick_is_oneshot_available())
                return 0;

        if (!allow_nohz)
                return 1;

        tick_nohz_switch_to_nohz();
        return 0;
}