/*
 * linux/kernel/time/tick-sched.c
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 * No idle tick implementation for low and high resolution timers
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/tick.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"
/*
 * Per cpu nohz control structure
 */
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
/*
 * The time, when the last jiffy update happened. Protected by xtime_lock.
 */
static ktime_t last_jiffies_update;
struct tick_sched *tick_get_tick_sched(int cpu)
{
	return &per_cpu(tick_cpu_sched, cpu);
}
/*
 * Must be called with interrupts disabled!
 */
static void tick_do_update_jiffies64(ktime_t now)
{
	unsigned long ticks = 0;
	ktime_t delta;

	/* Reevaluate with xtime_lock held */
	write_seqlock(&xtime_lock);

	delta = ktime_sub(now, last_jiffies_update);
	if (delta.tv64 >= tick_period.tv64) {

		delta = ktime_sub(delta, tick_period);
		last_jiffies_update = ktime_add(last_jiffies_update,
						tick_period);

		/* Slow path for long timeouts */
		if (unlikely(delta.tv64 >= tick_period.tv64)) {
			s64 incr = ktime_to_ns(tick_period);

			ticks = ktime_divns(delta, incr);

			last_jiffies_update = ktime_add_ns(last_jiffies_update,
							   incr * ticks);
		}
		do_timer(++ticks);
	}
	write_sequnlock(&xtime_lock);
}
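/*
 * Example of the catch-up arithmetic above: with HZ=1000 the tick_period
 * is 1 ms. If 5.3 ms passed since last_jiffies_update, the fast path
 * consumes one period, the slow path computes ticks = ktime_divns(4.3 ms,
 * 1 ms) = 4 further periods, and do_timer(++ticks) advances jiffies by 5
 * in one go, leaving last_jiffies_update 0.3 ms behind 'now'.
 */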
/*
 * Initialize and return the time of the last jiffies update.
 */
static ktime_t tick_init_jiffy_update(void)
{
	ktime_t period;

	write_seqlock(&xtime_lock);
	/* Did we start the jiffies update yet ? */
	if (last_jiffies_update.tv64 == 0)
		last_jiffies_update = tick_next_period;
	period = last_jiffies_update;
	write_sequnlock(&xtime_lock);
	return period;
}
/*
 * NOHZ - aka dynamic tick functionality
 */
#ifdef CONFIG_NO_HZ
/*
 * NO HZ enabled ?
 */
static int tick_nohz_enabled __read_mostly = 1;
/*
 * Enable / Disable tickless mode
 */
static int __init setup_tick_nohz(char *str)
{
	if (!strcmp(str, "off"))
		tick_nohz_enabled = 0;
	else if (!strcmp(str, "on"))
		tick_nohz_enabled = 1;
	else
		return 0;
	return 1;
}

__setup("nohz=", setup_tick_nohz);
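/*
 * Example: booting with "nohz=off" on the kernel command line keeps the
 * periodic tick even when CONFIG_NO_HZ is enabled; "nohz=on" (the default)
 * allows the idle tick to be stopped.
 */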
/**
 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
 *
 * Called from interrupt entry when the CPU was idle
 *
 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
 * must be updated. Otherwise an interrupt handler could use a stale jiffy
 * value. We do this unconditionally on any cpu, as we don't know whether the
 * cpu which has the update task assigned is in a long sleep.
 */
void tick_nohz_update_jiffies(void)
{
	int cpu = smp_processor_id();
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	unsigned long flags;
	ktime_t now;

	if (!ts->tick_stopped)
		return;

	cpu_clear(cpu, nohz_cpu_mask);
	now = ktime_get();

	local_irq_save(flags);
	tick_do_update_jiffies64(now);
	local_irq_restore(flags);
}
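/*
 * Example of why the update matters: an interrupt handler that arms a
 * timer wheel timer with mod_timer(timer, jiffies + 2) while jiffies is
 * many ticks stale would see that timer expire almost immediately once
 * jiffies is caught up, so jiffies is refreshed before handlers run.
 */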
/**
 * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
 *
 * When the next event is more than a tick into the future, stop the idle tick.
 * Called either from the idle loop or from irq_exit() when an idle period was
 * just interrupted by an interrupt which did not cause a reschedule.
 */
void tick_nohz_stop_sched_tick(void)
{
	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
	struct tick_sched *ts;
	ktime_t last_update, expires, now, delta;
	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
	int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();
	ts = &per_cpu(tick_cpu_sched, cpu);

	/*
	 * If this cpu is offline and it is the one which updates
	 * jiffies, then give up the assignment and let it be taken by
	 * the cpu which runs the tick timer next. If we don't drop
	 * this here the jiffies might be stale and do_timer() never
	 * be invoked.
	 */
	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = -1;
	}

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		goto end;

	cpu = smp_processor_id();
	if (unlikely(local_softirq_pending())) {
		static int ratelimit;

		if (ratelimit < 10) {
			printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
			       local_softirq_pending());
			ratelimit++;
		}
	}

	now = ktime_get();
	/*
	 * When called from irq_exit we need to account the idle sleep time
	 * correctly.
	 */
	if (ts->tick_stopped) {
		delta = ktime_sub(now, ts->idle_entrytime);
		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
	}

	ts->idle_entrytime = now;

	/* Read jiffies and the time when jiffies were updated last */
	do {
		seq = read_seqbegin(&xtime_lock);
		last_update = last_jiffies_update;
		last_jiffies = jiffies;
	} while (read_seqretry(&xtime_lock, seq));

	/* Get the next timer wheel timer */
	next_jiffies = get_next_timer_interrupt(last_jiffies);
	delta_jiffies = next_jiffies - last_jiffies;

	if (rcu_needs_cpu(cpu))
		delta_jiffies = 1;
	/*
	 * Do not stop the tick, if we are only one off
	 * or if the cpu is required for rcu
	 */
	if (!ts->tick_stopped && delta_jiffies == 1)
		goto out;

	/* Schedule the tick, if we are at least one jiffie off */
	if ((long)delta_jiffies >= 1) {

		if (delta_jiffies > 1)
			cpu_set(cpu, nohz_cpu_mask);
		/*
		 * nohz_stop_sched_tick can be called several times before
		 * the nohz_restart_sched_tick is called. This happens when
		 * interrupts arrive which do not cause a reschedule. In the
		 * first call we save the current tick time, so we can restart
		 * the scheduler tick in nohz_restart_sched_tick.
		 */
		if (!ts->tick_stopped) {
			if (select_nohz_load_balancer(1)) {
				/*
				 * sched tick not stopped!
				 */
				cpu_clear(cpu, nohz_cpu_mask);
				goto out;
			}

			ts->idle_tick = ts->sched_timer.expires;
			ts->tick_stopped = 1;
			ts->idle_jiffies = last_jiffies;
		}

		/*
		 * If this cpu is the one which updates jiffies, then
		 * give up the assignment and let it be taken by the
		 * cpu which runs the tick timer next, which might be
		 * this cpu as well. If we don't drop this here the
		 * jiffies might be stale and do_timer() never
		 * be invoked.
		 */
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = -1;

		/*
		 * delta_jiffies >= NEXT_TIMER_MAX_DELTA signals that
		 * there is no timer pending or at least extremely far
		 * into the future (12 days for HZ=1000). In this case
		 * we simply stop the tick timer:
		 */
		if (unlikely(delta_jiffies >= NEXT_TIMER_MAX_DELTA)) {
			ts->idle_expires.tv64 = KTIME_MAX;
			if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
				hrtimer_cancel(&ts->sched_timer);
			goto out;
		}

		/*
		 * calculate the expiry time for the next timer wheel
		 * timer
		 */
		expires = ktime_add_ns(last_update, tick_period.tv64 *
				       delta_jiffies);
		ts->idle_expires = expires;

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start(&ts->sched_timer, expires,
				      HRTIMER_MODE_ABS);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				goto out;
		} else if (!tick_program_event(expires, 0))
			goto out;
		/*
		 * We are past the event already. So we crossed a
		 * jiffie boundary. Update jiffies and raise the
		 * softirq.
		 */
		tick_do_update_jiffies64(ktime_get());
		cpu_clear(cpu, nohz_cpu_mask);
	}
	raise_softirq_irqoff(TIMER_SOFTIRQ);
out:
	ts->next_jiffies = next_jiffies;
	ts->last_jiffies = last_jiffies;
	ts->sleep_length = ktime_sub(dev->next_event, now);
end:
	local_irq_restore(flags);
}
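/*
 * Worked example for the reprogramming above: with HZ=1000 (tick_period =
 * 1 ms), last_jiffies = 10000 and the next timer wheel timer due at
 * next_jiffies = 10250, delta_jiffies is 250 and the next event is set to
 * last_update + 250 * tick_period, i.e. a quarter of a second away. The
 * NEXT_TIMER_MAX_DELTA case (roughly 12 days at HZ=1000, as the comment
 * notes) means no timer is pending at all, so the tick is simply left off.
 */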
/**
 * tick_nohz_get_sleep_length - return the length of the current sleep
 *
 * Called from power state control code with interrupts disabled
 */
ktime_t tick_nohz_get_sleep_length(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	return ts->sleep_length;
}
/**
 * tick_nohz_restart_sched_tick - restart the idle tick from the idle task
 *
 * Restart the idle tick when the CPU is woken up from idle
 */
void tick_nohz_restart_sched_tick(void)
{
	int cpu = smp_processor_id();
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	unsigned long ticks;
	ktime_t now, delta;

	if (!ts->tick_stopped)
		return;

	/* Update jiffies first */
	local_irq_disable();
	now = ktime_get();

	select_nohz_load_balancer(0);
	tick_do_update_jiffies64(now);
	cpu_clear(cpu, nohz_cpu_mask);

	/* Account the idle time */
	delta = ktime_sub(now, ts->idle_entrytime);
	ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);

	/*
	 * We stopped the tick in idle. Update process times would miss the
	 * time we slept as update_process_times does only a 1 tick
	 * accounting. Enforce that this is accounted to idle !
	 */
	ticks = jiffies - ts->idle_jiffies;
	/*
	 * We might be one off. Do not randomly account a huge number of ticks!
	 */
	if (ticks && ticks < LONG_MAX) {
		add_preempt_count(HARDIRQ_OFFSET);
		account_system_time(current, HARDIRQ_OFFSET,
				    jiffies_to_cputime(ticks));
		sub_preempt_count(HARDIRQ_OFFSET);
	}

	/*
	 * Cancel the scheduled timer and restore the tick
	 */
	ts->tick_stopped = 0;
	hrtimer_cancel(&ts->sched_timer);
	ts->sched_timer.expires = ts->idle_tick;

	while (1) {
		/* Forward the time to expire in the future */
		hrtimer_forward(&ts->sched_timer, now, tick_period);

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start(&ts->sched_timer,
				      ts->sched_timer.expires,
				      HRTIMER_MODE_ABS);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				break;
		} else {
			if (!tick_program_event(ts->sched_timer.expires, 0))
				break;
		}
		/* Update jiffies and reread time */
		tick_do_update_jiffies64(now);
		now = ktime_get();
	}
	local_irq_enable();
}
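/*
 * Example of the accounting above: if the tick was stopped at
 * idle_jiffies = 20000 and the CPU wakes with jiffies = 20500, the 500
 * ticks slept are charged in one batch through account_system_time()
 * under HARDIRQ_OFFSET, so the whole sleep ends up in the idle statistics
 * instead of being lost (update_process_times() only ever accounts a
 * single tick at a time).
 */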
static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
{
	hrtimer_forward(&ts->sched_timer, now, tick_period);
	return tick_program_event(ts->sched_timer.expires, 0);
}
/*
 * The nohz low res interrupt handler
 */
static void tick_nohz_handler(struct clock_event_device *dev)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	struct pt_regs *regs = get_irq_regs();
	int cpu = smp_processor_id();
	ktime_t now = ktime_get();

	dev->next_event.tv64 = KTIME_MAX;

	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the cpu in charge went
	 * into a long sleep. If two cpus happen to assign themself to
	 * this duty, then the jiffies update is still serialized by
	 * xtime_lock.
	 */
	if (unlikely(tick_do_timer_cpu == -1))
		tick_do_timer_cpu = cpu;

	/* Check, if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	/*
	 * When we are idle and the tick is stopped, we have to touch
	 * the watchdog as we might not schedule for a really long
	 * time. This happens on complete idle SMP systems while
	 * waiting on the login prompt. We also increment the "start
	 * of idle" jiffy stamp so the idle accounting adjustment we
	 * do when we go busy again does not account too many ticks.
	 */
	if (ts->tick_stopped) {
		touch_softlockup_watchdog();
		ts->idle_jiffies++;
	}

	update_process_times(user_mode(regs));
	profile_tick(CPU_PROFILING);

	/* Do not restart, when we are in the idle loop */
	if (ts->tick_stopped)
		return;

	while (tick_nohz_reprogram(ts, now)) {
		now = ktime_get();
		tick_do_update_jiffies64(now);
	}
}
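/*
 * Note on the reprogramming loop above: tick_nohz_reprogram() forwards the
 * emulated tick by whole tick_periods past 'now' and programs the clock
 * event device. If the handler ran so late that the new expiry is already
 * in the past, programming fails, so time is reread, jiffies are updated
 * and the loop retries with a later expiry.
 */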
/**
 * tick_nohz_switch_to_nohz - switch to nohz mode
 */
static void tick_nohz_switch_to_nohz(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	ktime_t next;

	if (!tick_nohz_enabled)
		return;

	local_irq_disable();
	if (tick_switch_to_oneshot(tick_nohz_handler)) {
		local_irq_enable();
		return;
	}

	ts->nohz_mode = NOHZ_MODE_LOWRES;

	/*
	 * Recycle the hrtimer in ts, so we can share the
	 * hrtimer_forward with the highres code.
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	/* Get the next period */
	next = tick_init_jiffy_update();

	for (;;) {
		ts->sched_timer.expires = next;
		if (!tick_program_event(next, 0))
			break;
		next = ktime_add(next, tick_period);
	}
	local_irq_enable();

	printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n",
	       smp_processor_id());
}
#else

static inline void tick_nohz_switch_to_nohz(void) { }

#endif /* NO_HZ */
/*
 * High resolution timer specific code
 */
#ifdef CONFIG_HIGH_RES_TIMERS
/*
 * We rearm the timer until we get disabled by the idle code.
 * Called with interrupts disabled and timer->base->cpu_base->lock held.
 */
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
	struct tick_sched *ts =
		container_of(timer, struct tick_sched, sched_timer);
	struct hrtimer_cpu_base *base = timer->base->cpu_base;
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();
	int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ
	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the cpu in charge went
	 * into a long sleep. If two cpus happen to assign themself to
	 * this duty, then the jiffies update is still serialized by
	 * xtime_lock.
	 */
	if (unlikely(tick_do_timer_cpu == -1))
		tick_do_timer_cpu = cpu;
#endif

	/* Check, if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	/*
	 * Do not call, when we are not in irq context and have
	 * no valid regs pointer
	 */
	if (regs) {
		/*
		 * When we are idle and the tick is stopped, we have to touch
		 * the watchdog as we might not schedule for a really long
		 * time. This happens on complete idle SMP systems while
		 * waiting on the login prompt. We also increment the "start of
		 * idle" jiffy stamp so the idle accounting adjustment we do
		 * when we go busy again does not account too many ticks.
		 */
		if (ts->tick_stopped) {
			touch_softlockup_watchdog();
			ts->idle_jiffies++;
		}
		/*
		 * update_process_times() might take tasklist_lock, hence
		 * drop the base lock. sched-tick hrtimers are per-CPU and
		 * never accessible by userspace APIs, so this is safe to do.
		 */
		spin_unlock(&base->lock);
		update_process_times(user_mode(regs));
		profile_tick(CPU_PROFILING);
		spin_lock(&base->lock);
	}

	/* Do not restart, when we are in the idle loop */
	if (ts->tick_stopped)
		return HRTIMER_NORESTART;

	hrtimer_forward(timer, now, tick_period);

	return HRTIMER_RESTART;
}
/**
 * tick_setup_sched_timer - setup the tick emulation timer
 */
void tick_setup_sched_timer(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	ktime_t now = ktime_get();
	u64 offset;

	/*
	 * Emulate tick processing via per-CPU hrtimers:
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	ts->sched_timer.function = tick_sched_timer;
	ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;

	/* Get the next period (per cpu) */
	ts->sched_timer.expires = tick_init_jiffy_update();
	offset = ktime_to_ns(tick_period) >> 1;
	do_div(offset, NR_CPUS);
	offset *= smp_processor_id();
	ts->sched_timer.expires = ktime_add_ns(ts->sched_timer.expires, offset);

	for (;;) {
		hrtimer_forward(&ts->sched_timer, now, tick_period);
		hrtimer_start(&ts->sched_timer, ts->sched_timer.expires,
			      HRTIMER_MODE_ABS);
		/* Check, if the timer was already in the past */
		if (hrtimer_active(&ts->sched_timer))
			break;
		now = ktime_get();
	}

#ifdef CONFIG_NO_HZ
	if (tick_nohz_enabled)
		ts->nohz_mode = NOHZ_MODE_HIGHRES;
#endif
}
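/*
 * Example of the per-cpu stagger above: with HZ=250 (tick_period = 4 ms)
 * and NR_CPUS = 8, the base offset is 2 ms / 8 = 250 us, so CPU0 expires
 * on the jiffy boundary, CPU1 250 us later, CPU2 500 us later, and so on.
 * Spreading the emulated ticks keeps all CPUs from taking their tick
 * interrupt at the same instant.
 */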
void tick_cancel_sched_timer(int cpu)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

	if (ts->sched_timer.base)
		hrtimer_cancel(&ts->sched_timer);
	ts->tick_stopped = 0;
	ts->nohz_mode = NOHZ_MODE_INACTIVE;
}
#endif /* HIGH_RES_TIMERS */
/**
 * Async notification about clocksource changes
 */
void tick_clock_notify(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
}
/*
 * Async notification about clock event changes
 */
void tick_oneshot_notify(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	set_bit(0, &ts->check_clocks);
}
/**
 * Check, if a change happened, which makes oneshot possible.
 *
 * Called cyclically from the hrtimer softirq (driven by the timer softirq).
 * allow_nohz signals that we can switch into low-res nohz mode, because
 * high resolution timers are disabled (either compile or runtime).
 */
int tick_check_oneshot_change(int allow_nohz)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	if (!test_and_clear_bit(0, &ts->check_clocks))
		return 0;

	if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
		return 0;

	if (!timekeeping_is_continuous() || !tick_is_oneshot_available())
		return 0;

	if (!allow_nohz)
		return 1;

	tick_nohz_switch_to_nohz();
	return 0;
}