/* linux/include/linux/tick.h
 *
 * This file contains the structure definitions for tick related functions
 *
 */
#ifndef _LINUX_TICK_H
#define _LINUX_TICK_H

#include <linux/clockchips.h>
#include <linux/irqflags.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/context_tracking_state.h>
#include <linux/cpumask.h>
#include <linux/sched.h>
#ifdef CONFIG_GENERIC_CLOCKEVENTS
enum tick_device_mode {
	TICKDEV_MODE_PERIODIC,
	TICKDEV_MODE_ONESHOT,
};

struct tick_device {
	struct clock_event_device *evtdev;
	enum tick_device_mode mode;
};

enum tick_nohz_mode {
	NOHZ_MODE_INACTIVE,
	NOHZ_MODE_LOWRES,
	NOHZ_MODE_HIGHRES,
};
/**
 * struct tick_sched - sched tick emulation and no idle tick control/stats
 * @sched_timer:	hrtimer to schedule the periodic tick in high
 *			resolution mode
 * @last_tick:		Store the last tick expiry time when the tick
 *			timer is modified for nohz sleeps. This is necessary
 *			to resume the tick timer operation in the timeline
 *			when the CPU returns from nohz sleep.
 * @tick_stopped:	Indicator that the idle tick has been stopped
 * @idle_jiffies:	jiffies at the entry to idle for idle time accounting
 * @idle_calls:		Total number of idle calls
 * @idle_sleeps:	Number of idle calls, where the sched tick was stopped
 * @idle_entrytime:	Time when the idle call was entered
 * @idle_waketime:	Time when the idle was interrupted
 * @idle_exittime:	Time when the idle state was left
 * @idle_sleeptime:	Sum of the time slept in idle with sched tick stopped
 * @iowait_sleeptime:	Sum of the time slept in idle with sched tick stopped, with IO outstanding
 * @sleep_length:	Duration of the current idle sleep
 * @do_timer_last:	CPU was the last one doing do_timer before going idle
 */
struct tick_sched {
	struct hrtimer			sched_timer;
	unsigned long			check_clocks;
	enum tick_nohz_mode		nohz_mode;
	ktime_t				last_tick;
	int				inidle;
	int				tick_stopped;
	unsigned long			idle_jiffies;
	unsigned long			idle_calls;
	unsigned long			idle_sleeps;
	int				idle_active;
	ktime_t				idle_entrytime;
	ktime_t				idle_waketime;
	ktime_t				idle_exittime;
	ktime_t				idle_sleeptime;
	ktime_t				iowait_sleeptime;
	ktime_t				sleep_length;
	unsigned long			last_jiffies;
	unsigned long			next_jiffies;
	ktime_t				idle_expires;
	int				do_timer_last;
};
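/*
 * Each CPU carries one instance of this state in the per-CPU variable
 * tick_cpu_sched, declared below under CONFIG_NO_HZ_COMMON. A minimal
 * sketch (illustration only, not a stable interface) of how core code
 * reaches the local instance:
 *
 *	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 *
 *	if (ts->tick_stopped)
 *		...		the idle tick is stopped on this CPU
 *
 * Outside of the timer code itself, prefer the accessors below
 * (tick_nohz_tick_stopped() and friends) over touching the struct
 * directly.
 */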
extern void __init tick_init(void);
extern int tick_is_oneshot_available(void);
extern struct tick_device *tick_get_device(int cpu);

extern void tick_freeze(void);
extern void tick_unfreeze(void);
# ifdef CONFIG_HIGH_RES_TIMERS
extern int tick_init_highres(void);
extern int tick_program_event(ktime_t expires, int force);
extern void tick_setup_sched_timer(void);
# endif

# if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS
extern void tick_cancel_sched_timer(int cpu);
# else
static inline void tick_cancel_sched_timer(int cpu) { }
# endif
# ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
extern struct tick_device *tick_get_broadcast_device(void);
extern struct cpumask *tick_get_broadcast_mask(void);

# ifdef CONFIG_TICK_ONESHOT
extern struct cpumask *tick_get_broadcast_oneshot_mask(void);
# endif

# endif /* BROADCAST */
# ifdef CONFIG_TICK_ONESHOT
extern void tick_clock_notify(void);
extern int tick_check_oneshot_change(int allow_nohz);
extern struct tick_sched *tick_get_tick_sched(int cpu);
extern void tick_irq_enter(void);
extern int tick_oneshot_mode_active(void);
# ifndef arch_needs_cpu
# define arch_needs_cpu() (0)
# endif
# else
static inline void tick_clock_notify(void) { }
static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
static inline void tick_irq_enter(void) { }
static inline int tick_oneshot_mode_active(void) { return 0; }
# endif
#else /* CONFIG_GENERIC_CLOCKEVENTS */
static inline void tick_init(void) { }
static inline void tick_freeze(void) { }
static inline void tick_unfreeze(void) { }
static inline void tick_cancel_sched_timer(int cpu) { }
static inline void tick_clock_notify(void) { }
static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
static inline void tick_irq_enter(void) { }
static inline int tick_oneshot_mode_active(void) { return 0; }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
# ifdef CONFIG_NO_HZ_COMMON
DECLARE_PER_CPU(struct tick_sched, tick_cpu_sched);

static inline int tick_nohz_tick_stopped(void)
{
	return __this_cpu_read(tick_cpu_sched.tick_stopped);
}

extern void tick_nohz_idle_enter(void);
extern void tick_nohz_idle_exit(void);
extern void tick_nohz_irq_exit(void);
extern ktime_t tick_nohz_get_sleep_length(void);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
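/*
 * A minimal sketch of how the idle loop is expected to drive this API
 * (simplified; the real idle path also handles RCU, polling and
 * architecture hooks):
 *
 *	tick_nohz_idle_enter();
 *	while (!need_resched())
 *		arch_cpu_idle();	// or a cpuidle governor decision
 *	tick_nohz_idle_exit();
 *
 * get_cpu_idle_time_us() / get_cpu_iowait_time_us() report the
 * accumulated idle and iowait sleep time of a CPU in microseconds,
 * e.g. for cpufreq governors and /proc/stat style accounting.
 */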
# else /* !CONFIG_NO_HZ_COMMON */
static inline int tick_nohz_tick_stopped(void)
{
	return 0;
}

static inline void tick_nohz_idle_enter(void) { }
static inline void tick_nohz_idle_exit(void) { }

static inline ktime_t tick_nohz_get_sleep_length(void)
{
	ktime_t len = { .tv64 = NSEC_PER_SEC/HZ };

	return len;
}
static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
# endif /* !CONFIG_NO_HZ_COMMON */
#ifdef CONFIG_NO_HZ_FULL
extern bool tick_nohz_full_running;
extern cpumask_var_t tick_nohz_full_mask;
extern cpumask_var_t housekeeping_mask;

static inline bool tick_nohz_full_enabled(void)
{
	if (!context_tracking_is_enabled())
		return false;

	return tick_nohz_full_running;
}

static inline bool tick_nohz_full_cpu(int cpu)
{
	if (!tick_nohz_full_enabled())
		return false;

	return cpumask_test_cpu(cpu, tick_nohz_full_mask);
}

extern void __tick_nohz_full_check(void);
extern void tick_nohz_full_kick(void);
extern void tick_nohz_full_kick_cpu(int cpu);
extern void tick_nohz_full_kick_all(void);
extern void __tick_nohz_task_switch(struct task_struct *tsk);
#else
static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; }
static inline void __tick_nohz_full_check(void) { }
static inline void tick_nohz_full_kick_cpu(int cpu) { }
static inline void tick_nohz_full_kick(void) { }
static inline void tick_nohz_full_kick_all(void) { }
static inline void __tick_nohz_task_switch(struct task_struct *tsk) { }
#endif
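/*
 * A minimal usage sketch: code that queues tick-dependent work for a
 * remote CPU can check for a nohz_full CPU and kick it so the target
 * re-evaluates its tick. Illustration only, assuming the caller already
 * knows the target cpu:
 *
 *	if (tick_nohz_full_cpu(cpu))
 *		tick_nohz_full_kick_cpu(cpu);
 *
 * With CONFIG_NO_HZ_FULL disabled both calls degrade to no-ops, so such
 * checks can stay unconditional in callers.
 */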
static inline bool is_housekeeping_cpu(int cpu)
{
#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_enabled())
		return cpumask_test_cpu(cpu, housekeeping_mask);
#endif
	return true;
}

static inline void housekeeping_affine(struct task_struct *t)
{
#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_enabled())
		set_cpus_allowed_ptr(t, housekeeping_mask);
#endif
}
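/*
 * A minimal sketch of keeping a kernel thread off nohz_full CPUs by
 * binding it to the housekeeping set; my_kthread_fn is a hypothetical
 * thread function used only for illustration:
 *
 *	static int my_kthread_fn(void *unused)
 *	{
 *		housekeeping_affine(current);
 *		while (!kthread_should_stop()) {
 *			...
 *		}
 *		return 0;
 *	}
 *
 * Without CONFIG_NO_HZ_FULL, or when no nohz_full= CPUs were set up,
 * housekeeping_affine() is a no-op and the thread keeps its default
 * affinity; is_housekeeping_cpu() likewise reports every CPU as a
 * housekeeping CPU in that case.
 */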
static inline void tick_nohz_full_check(void)
{
	if (tick_nohz_full_enabled())
		__tick_nohz_full_check();
}

static inline void tick_nohz_task_switch(struct task_struct *tsk)
{
	if (tick_nohz_full_enabled())
		__tick_nohz_task_switch(tsk);
}

#endif