release/src-rt-6.x/linux/linux-2.6/arch/x86_64/kernel/tsc.c
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/clocksource.h>
#include <linux/time.h>
#include <linux/acpi.h>
#include <linux/cpufreq.h>

#include <asm/timex.h>

static int notsc __initdata = 0;

unsigned int cpu_khz;		/* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);
unsigned int tsc_khz;
EXPORT_SYMBOL(tsc_khz);
static unsigned int cyc2ns_scale __read_mostly;

void set_cyc2ns_scale(unsigned long khz)
{
	cyc2ns_scale = (NSEC_PER_MSEC << NS_SCALE) / khz;
}

static unsigned long long cycles_2_ns(unsigned long long cyc)
{
	return (cyc * cyc2ns_scale) >> NS_SCALE;
}
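
/*
 * Worked example (illustrative numbers, not from this source): on a
 * 2 GHz part, tsc_khz == 2000000, and assuming NS_SCALE == 10 (the
 * value in <asm/timex.h> of this era):
 *
 *	cyc2ns_scale = (1000000 << 10) / 2000000 = 512
 *	cycles_2_ns(cyc) = (cyc * 512) >> 10 = cyc / 2
 *
 * i.e. each cycle counts as 0.5 ns, which matches a 2 GHz clock. The
 * fixed-point scale keeps the conversion down to one multiply and one
 * shift on the sched_clock() fast path.
 */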
unsigned long long sched_clock(void)
{
	unsigned long a = 0;

	/* Could do CPU core sync here. Opteron can execute rdtsc speculatively,
	 * which means it is not completely exact and may not be monotonic
	 * between CPUs. But the errors should be too small to matter for
	 * scheduling purposes.
	 */

	rdtscll(a);
	return cycles_2_ns(a);
}
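
/*
 * Sketch of the fast path, with the 2 GHz example above: rdtscll()
 * reads the 64-bit TSC with a single unserialized RDTSC, and
 * cycles_2_ns() converts it to nanoseconds, so sched_clock() costs a
 * handful of cycles and never takes a lock.
 */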
static int tsc_unstable;

static inline int check_tsc_unstable(void)
{
	return tsc_unstable;
}
#ifdef CONFIG_CPU_FREQ

/* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
 * changes.
 *
 * RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
 * not that important because current Opteron setups do not support
 * scaling on SMP anyway.
 *
 * Should fix up last_tsc too. Currently gettimeofday in the
 * first tick after the change will be slightly wrong.
 */

#include <linux/workqueue.h>

static unsigned int cpufreq_delayed_issched = 0;
static unsigned int cpufreq_init = 0;
static struct work_struct cpufreq_delayed_get_work;
static void handle_cpufreq_delayed_get(struct work_struct *v)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		cpufreq_get(cpu);
	}
	cpufreq_delayed_issched = 0;
}

static unsigned int ref_freq = 0;
static unsigned long loops_per_jiffy_ref = 0;

static unsigned long tsc_khz_ref = 0;
static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				 void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned long *lpj, dummy;

	if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC))
		return 0;

	lpj = &dummy;
	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
#ifdef CONFIG_SMP
		lpj = &cpu_data[freq->cpu].loops_per_jiffy;
#else
		lpj = &boot_cpu_data.loops_per_jiffy;
#endif

	if (!ref_freq) {
		ref_freq = freq->old;
		loops_per_jiffy_ref = *lpj;
		tsc_khz_ref = tsc_khz;
	}
	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE)) {
		*lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			mark_tsc_unstable("cpufreq changes");
	}

	set_cyc2ns_scale(tsc_khz_ref);

	return 0;
}
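
/*
 * Illustrative numbers (not from this source): cpufreq_scale() rescales
 * a reference value by new_freq/ref_freq. With ref_freq == 2000000 kHz
 * (2 GHz) recorded at the first transition and a change down to
 * freq->new == 1000000 kHz (1 GHz):
 *
 *	tsc_khz = cpufreq_scale(2000000, 2000000, 1000000) = 1000000
 *
 * and loops_per_jiffy is halved the same way, so udelay() calibration
 * follows the frequency change.
 */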
static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call = time_cpufreq_notifier
};
static int __init cpufreq_tsc(void)
{
	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get);
	if (!cpufreq_register_notifier(&time_cpufreq_notifier_block,
				       CPUFREQ_TRANSITION_NOTIFIER))
		cpufreq_init = 1;
	return 0;
}

core_initcall(cpufreq_tsc);

#endif
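
/*
 * The block above is compiled only with CONFIG_CPU_FREQ.
 * cpufreq_register_notifier() returns 0 on success, so cpufreq_init is
 * set only once the transition notifier is actually installed, and
 * core_initcall() runs cpufreq_tsc() early in boot, ahead of the
 * device initcalls that could start changing frequencies.
 */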
/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
__cpuinit int unsynchronized_tsc(void)
{
	if (tsc_unstable)
		return 1;

#ifdef CONFIG_SMP
	if (apic_is_clustered_box())
		return 1;
#endif
	/* Most Intel systems have synchronized TSCs except for
	   multi node systems */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
#ifdef CONFIG_ACPI
		/* But the TSC doesn't tick in C3, so don't use it there */
		if (acpi_gbl_FADT.header.length > 0 &&
		    acpi_gbl_FADT.C3latency < 1000)
			return 1;
#endif
		return 0;
	}

	/* Assume multi socket systems are not synchronized */
	return num_present_cpus() > 1;
}
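
/*
 * Summary of the heuristic above: a TSC already marked unstable or a
 * clustered-APIC box is never trusted; Intel parts are trusted unless
 * ACPI reports a low-latency C3 state (the TSC stops ticking in C3);
 * everything else is trusted only when a single CPU is present.
 */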
int __init notsc_setup(char *s)
{
	notsc = 1;
	return 1;
}

__setup("notsc", notsc_setup);
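
/*
 * Usage: booting with "notsc" on the kernel command line sets notsc = 1,
 * which makes init_tsc_clocksource() below skip registering the TSC
 * clocksource entirely; timekeeping then falls back to another clock
 * source such as the HPET or the ACPI PM timer.
 */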
/* clock source code: */
static cycle_t read_tsc(void)
{
	cycle_t ret = (cycle_t)get_cycles_sync();
	return ret;
}

static cycle_t __vsyscall_fn vread_tsc(void)
{
	cycle_t ret = (cycle_t)get_cycles_sync();
	return ret;
}
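
/*
 * Unlike the raw rdtscll() in sched_clock(), get_cycles_sync() reads
 * the TSC behind a serializing instruction (CPUID, or RDTSCP where
 * available), so the timestamp cannot be hoisted by speculative
 * execution; timekeeping needs that ordering, the scheduler does not.
 * vread_tsc() is the same read placed in the vsyscall page so that
 * userspace gettimeofday() can use it without entering the kernel.
 */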
static struct clocksource clocksource_tsc = {
	.name			= "tsc",
	.rating			= 300,
	.read			= read_tsc,
	.mask			= CLOCKSOURCE_MASK(64),
	.shift			= 22,
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
				  CLOCK_SOURCE_MUST_VERIFY,
	.vread			= vread_tsc,
};
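
/*
 * Illustrative numbers (not from this source): the timekeeping core
 * converts with ns = (cycles * mult) >> shift. With .shift == 22 and
 * tsc_khz == 2000000, clocksource_khz2mult() yields
 *
 *	mult = (1000000 << 22) / 2000000 = 2097152
 *
 * so one cycle maps to 2097152 >> 22 = 0.5 ns, again matching 2 GHz.
 * The rating of 300 places the TSC above the HPET (250) and the ACPI
 * PM timer (200) of this era, and MUST_VERIFY makes the clocksource
 * watchdog cross-check the TSC before it is fully trusted.
 */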
void mark_tsc_unstable(char *reason)
{
	if (!tsc_unstable) {
		tsc_unstable = 1;
		printk("Marking TSC unstable due to %s\n", reason);
		/* Change only the rating when not registered */
		if (clocksource_tsc.mult)
			clocksource_change_rating(&clocksource_tsc, 0);
		else
			clocksource_tsc.rating = 0;
	}
}
EXPORT_SYMBOL_GPL(mark_tsc_unstable);
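
/*
 * A nonzero .mult doubles as the "already registered" marker here:
 * init_tsc_clocksource() below fills in .mult just before registering,
 * so a registered clocksource goes through the locked
 * clocksource_change_rating() path, while an unregistered one can have
 * .rating poked directly. The cpufreq notifier above is one caller.
 */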
void __init init_tsc_clocksource(void)
{
	if (!notsc) {
		clocksource_tsc.mult = clocksource_khz2mult(tsc_khz,
							    clocksource_tsc.shift);
		if (check_tsc_unstable())
			clocksource_tsc.rating = 0;

		clocksource_register(&clocksource_tsc);
	}
}