/*  KVM paravirtual clock driver. A clocksource implementation

    Copyright (C) 2008 Glauber de Oliveira Costa, Red Hat Inc.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
*/

#include <linux/clocksource.h>
#include <linux/kvm_para.h>
#include <asm/pvclock.h>
#include <asm/msr.h>
#include <asm/apic.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/memblock.h>

#include <asm/x86_init.h>
#include <asm/reboot.h>
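
/*
 * kvmclock is enabled by default; "no-kvmclock" on the command line turns
 * it off. The MSR numbers default to the legacy interface and are switched
 * to the *_NEW variants at init time if the hypervisor advertises
 * KVM_FEATURE_CLOCKSOURCE2.
 */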
static int kvmclock = 1;
static int msr_kvm_system_time = MSR_KVM_SYSTEM_TIME;
static int msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK;

static int parse_no_kvmclock(char *arg)
{
	kvmclock = 0;
	return 0;
}
early_param("no-kvmclock", parse_no_kvmclock);

/* The hypervisor will put information about time periodically here */
static struct pvclock_vsyscall_time_info *hv_clock;
static struct pvclock_wall_clock wall_clock;

/*
 * The wallclock is the time of day when we booted. Some time may have
 * elapsed since the hypervisor wrote the data, so we try to account for
 * that with system time.
 */
static unsigned long kvm_get_wallclock(void)
{
	struct pvclock_vcpu_time_info *vcpu_time;
	struct timespec ts;
	int low, high;
	int cpu;

	low = (int)__pa_symbol(&wall_clock);
	high = ((u64)__pa_symbol(&wall_clock) >> 32);

	native_write_msr(msr_kvm_wall_clock, low, high);

	preempt_disable();
	cpu = smp_processor_id();

	vcpu_time = &hv_clock[cpu].pvti;
	pvclock_read_wallclock(&wall_clock, vcpu_time, &ts);

	preempt_enable();

	return ts.tv_sec;
}
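
/* The guest cannot set the host's wall clock, so this always fails. */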
static int kvm_set_wallclock(unsigned long now)
{
	return -1;
}
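
/*
 * Read the current time from this cpu's pvclock area. This also serves as
 * sched_clock, which may be called from the tracing code, so preemption is
 * disabled with the notrace variants to avoid recursing into the tracer.
 */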
static cycle_t kvm_clock_read(void)
{
	struct pvclock_vcpu_time_info *src;
	cycle_t ret;
	int cpu;

	preempt_disable_notrace();
	cpu = smp_processor_id();
	src = &hv_clock[cpu].pvti;
	ret = pvclock_clocksource_read(src);
	preempt_enable_notrace();
	return ret;
}

static cycle_t kvm_clock_get_cycles(struct clocksource *cs)
{
	return kvm_clock_read();
}

/*
 * If we don't preset lpj, the guest may calibrate it under heavy load -
 * thus getting a lower lpj - and then execute the delays without load.
 * This is wrong, because no delay loop can finish beforehand.
 * Any heuristic is subject to fail, because ultimately a large pool of
 * guests can be running and disturb each other. So we preset lpj here.
 */
static unsigned long kvm_get_tsc_khz(void)
{
	struct pvclock_vcpu_time_info *src;
	int cpu;
	unsigned long tsc_khz;

	preempt_disable();
	cpu = smp_processor_id();
	src = &hv_clock[cpu].pvti;
	tsc_khz = pvclock_tsc_khz(src);
	preempt_enable();
	return tsc_khz;
}
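
/*
 * Preset loops_per_jiffy from the hypervisor-provided TSC frequency:
 * lpj = tsc_khz * 1000 / HZ.
 */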
static void kvm_get_preset_lpj(void)
{
	unsigned long khz;
	u64 lpj;

	khz = kvm_get_tsc_khz();

	lpj = ((u64)khz * 1000);
	do_div(lpj, HZ);
	preset_lpj = lpj;
}
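
/*
 * The host sets PVCLOCK_GUEST_STOPPED after the guest has been paused.
 * Report and clear it, so callers such as the soft-lockup watchdog can
 * avoid false lockup reports after a pause.
 */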
bool kvm_check_and_clear_guest_paused(void)
{
	bool ret = false;
	struct pvclock_vcpu_time_info *src;
	int cpu = smp_processor_id();

	if (!hv_clock)
		return ret;

	src = &hv_clock[cpu].pvti;
	if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) {
		src->flags &= ~PVCLOCK_GUEST_STOPPED;
		ret = true;
	}

	return ret;
}
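
/*
 * Rating 400 ranks kvm-clock above the raw TSC clocksource (rating 300),
 * so it is preferred whenever it is available.
 */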
static struct clocksource kvm_clock = {
	.name = "kvm-clock",
	.read = kvm_clock_get_cycles,
	.rating = 400,
	.mask = CLOCKSOURCE_MASK(64),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
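
/*
 * Tell the host where this cpu's pvclock data lives. The low bit of the
 * physical address doubles as the enable bit of the system time MSR.
 */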
int kvm_register_clock(char *txt)
{
	int cpu = smp_processor_id();
	int low, high, ret;
	struct pvclock_vcpu_time_info *src = &hv_clock[cpu].pvti;

	low = (int)slow_virt_to_phys(src) | 1;
	high = ((u64)slow_virt_to_phys(src) >> 32);
	ret = native_write_msr_safe(msr_kvm_system_time, low, high);
	printk(KERN_INFO "kvm-clock: cpu %d, msr %x:%x, %s\n",
	       cpu, high, low, txt);

	return ret;
}
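
/*
 * Nothing to save across suspend: the clock is simply re-registered with
 * the host on resume.
 */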
static void kvm_save_sched_clock_state(void)
{
}

static void kvm_restore_sched_clock_state(void)
{
	kvm_register_clock("primary cpu clock, resume");
}

#ifdef CONFIG_X86_LOCAL_APIC
static void __cpuinit kvm_setup_secondary_clock(void)
{
	/*
	 * Now that the first cpu already had this clocksource initialized,
	 * we shouldn't fail.
	 */
	WARN_ON(kvm_register_clock("secondary cpu clock"));
}
#endif

/*
 * After the clock is registered, the host will keep writing to the
 * registered memory location. If the guest happens to shut down, this
 * memory won't be valid. In cases like kexec, in which you install a new
 * kernel, this means a random memory location would keep being written.
 * So before any kind of shutdown from our side, we unregister the clock
 * by writing anything that does not have the 'enable' bit set in the MSR.
 */
#ifdef CONFIG_KEXEC
static void kvm_crash_shutdown(struct pt_regs *regs)
{
	native_write_msr(msr_kvm_system_time, 0, 0);
	kvm_disable_steal_time();
	native_machine_crash_shutdown(regs);
}
#endif

static void kvm_shutdown(void)
{
	native_write_msr(msr_kvm_system_time, 0, 0);
	kvm_disable_steal_time();
	native_machine_shutdown();
}
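
/*
 * Probe for kvmclock, allocate the per-cpu pvclock pages, register the
 * boot cpu's clock with the host, and hook up the paravirt time ops.
 */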
void __init kvmclock_init(void)
{
	unsigned long mem;
	int size;

	size = PAGE_ALIGN(sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS);

	if (!kvm_para_available())
		return;

	if (kvmclock && kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE2)) {
		msr_kvm_system_time = MSR_KVM_SYSTEM_TIME_NEW;
		msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK_NEW;
	} else if (!(kvmclock && kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)))
		return;

	printk(KERN_INFO "kvm-clock: Using msrs %x and %x\n",
		msr_kvm_system_time, msr_kvm_wall_clock);

	mem = memblock_alloc(size, PAGE_SIZE);
	if (!mem)
		return;
	hv_clock = __va(mem);

	if (kvm_register_clock("boot clock")) {
		hv_clock = NULL;
		memblock_free(mem, size);
		return;
	}
	pv_time_ops.sched_clock = kvm_clock_read;
	x86_platform.calibrate_tsc = kvm_get_tsc_khz;
	x86_platform.get_wallclock = kvm_get_wallclock;
	x86_platform.set_wallclock = kvm_set_wallclock;
#ifdef CONFIG_X86_LOCAL_APIC
	x86_cpuinit.early_percpu_clock_init =
		kvm_setup_secondary_clock;
#endif
	x86_platform.save_sched_clock_state = kvm_save_sched_clock_state;
	x86_platform.restore_sched_clock_state = kvm_restore_sched_clock_state;
	machine_ops.shutdown = kvm_shutdown;
#ifdef CONFIG_KEXEC
	machine_ops.crash_shutdown = kvm_crash_shutdown;
#endif
	kvm_get_preset_lpj();
	clocksource_register_hz(&kvm_clock, NSEC_PER_SEC);
	pv_info.paravirt_enabled = 1;
	pv_info.name = "KVM";

	if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
		pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
}
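
/*
 * On 64-bit, expose the pvclock pages to userspace so the vDSO can read
 * kvm-clock directly. This is only done when the hypervisor reports a
 * stable TSC (PVCLOCK_TSC_STABLE_BIT).
 */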
int __init kvm_setup_vsyscall_timeinfo(void)
{
#ifdef CONFIG_X86_64
	int cpu;
	int ret;
	u8 flags;
	struct pvclock_vcpu_time_info *vcpu_time;
	unsigned int size;

	size = PAGE_ALIGN(sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS);

	preempt_disable();
	cpu = smp_processor_id();

	vcpu_time = &hv_clock[cpu].pvti;
	flags = pvclock_read_flags(vcpu_time);

	if (!(flags & PVCLOCK_TSC_STABLE_BIT)) {
		preempt_enable();
		return 1;
	}

	if ((ret = pvclock_init_vsyscall(hv_clock, size))) {
		preempt_enable();
		return ret;
	}

	preempt_enable();

	kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
#endif
	return 0;
}