x86: remove duplicated vsyscall nsec update
[linux-2.6/x86.git] / arch/x86/kernel/vsyscall_64.c
blob 8a67e282cb5e69837a9fbba529428de8f78201e0
/*
 *  Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright 2003 Andi Kleen, SuSE Labs.
 *
 *  Thanks to hpa@transmeta.com for some useful hint.
 *  Special thanks to Ingo Molnar for his early experience with
 *  a different vsyscall implementation for Linux/IA32 and for the name.
 *
 *  vsyscall 1 is located at -10Mbyte, vsyscall 2 is located
 *  at virtual address -10Mbyte+1024bytes etc... There are at max 4
 *  vsyscalls. One vsyscall can reserve more than 1 slot to avoid
 *  jumping out of line if necessary. We cannot add more with this
 *  mechanism because older kernels won't return -ENOSYS.
 *  If we want more than four we need a vDSO.
 *
 *  Note: the concept clashes with user mode linux. If you use UML and
 *  want per guest time just set the kernel.vsyscall64 sysctl to 0.
 */
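/*
 * Illustrative sketch (not part of the original file): user space can call
 * these entry points directly at their fixed addresses.  The base address
 * (-10 Mbyte = 0xffffffffff600000) and the 1024-byte slot stride follow the
 * layout described above; glibc normally hides this behind gettimeofday()
 * and time().
 */
#if 0	/* example only, never compiled as part of this file */
#include <sys/time.h>
#include <time.h>

#define VSYSCALL_BASE   0xffffffffff600000UL
#define VSYSCALL_SLOT   1024UL

typedef int (*vgtod_fn)(struct timeval *tv, struct timezone *tz);
typedef time_t (*vtime_fn)(time_t *t);

static inline int example_vgettimeofday(struct timeval *tv)
{
        return ((vgtod_fn)VSYSCALL_BASE)(tv, NULL);                /* slot 0 */
}

static inline time_t example_vtime(void)
{
        return ((vtime_fn)(VSYSCALL_BASE + VSYSCALL_SLOT))(NULL);  /* slot 1 */
}
#endif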
#include <linux/time.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/seqlock.h>
#include <linux/jiffies.h>
#include <linux/sysctl.h>
#include <linux/clocksource.h>
#include <linux/getcpu.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/notifier.h>

#include <asm/vsyscall.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <asm/fixmap.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <asm/segment.h>
#include <asm/desc.h>
#include <asm/topology.h>
#include <asm/vgtod.h>

#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
#define __syscall_clobber "r11","rcx","memory"
#define __pa_vsymbol(x)                                         \
        ({unsigned long v;                                      \
        extern char __vsyscall_0;                               \
        asm("" : "=r" (v) : "0" (x));                           \
        ((v - VSYSCALL_FIRST_PAGE) + __pa_symbol(&__vsyscall_0)); })
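/*
 * __pa_vsymbol() maps a symbol that is linked into the vsyscall page back to
 * its physical address: the symbol's link-time address is rebased from
 * VSYSCALL_FIRST_PAGE onto the physical location of __vsyscall_0.  The empty
 * asm() merely launders the address through a register so gcc does not try
 * to fold the arithmetic at compile time.
 */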
/*
 * vsyscall_gtod_data contains data that is:
 * - read-only from vsyscalls
 * - written by timer interrupt or sysctl (/proc/sys/kernel/vsyscall64)
 * Try to keep this structure as small as possible to avoid cache line ping pongs
 */
int __vgetcpu_mode __section_vgetcpu_mode;

struct vsyscall_gtod_data __vsyscall_gtod_data __section_vsyscall_gtod_data =
{
        .lock = SEQLOCK_UNLOCKED,
        .sysctl_enabled = 1,
};
void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
{
        unsigned long flags;

        write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
        /* copy vsyscall data */
        vsyscall_gtod_data.clock.vread = clock->vread;
        vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
        vsyscall_gtod_data.clock.mask = clock->mask;
        vsyscall_gtod_data.clock.mult = clock->mult;
        vsyscall_gtod_data.clock.shift = clock->shift;
        vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
        vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
        vsyscall_gtod_data.sys_tz = sys_tz;
        vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic;
        write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}
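/*
 * update_vsyscall() is the writer side for the time fields of
 * vsyscall_gtod_data.  Publishing the snapshot under the seqlock (with
 * interrupts off) lets the lock-free readers in do_vgettimeofday() below
 * detect a concurrent update and retry.
 */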
/* RED-PEN may want to re-add seq locking, but then the variable should be
 * write-once.
 */
static __always_inline void do_get_tz(struct timezone * tz)
{
        *tz = __vsyscall_gtod_data.sys_tz;
}

static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
{
        int ret;
        asm volatile("vsysc2: syscall"
                : "=a" (ret)
                : "0" (__NR_gettimeofday),"D" (tv),"S" (tz)
                : __syscall_clobber );
        return ret;
}

static __always_inline long time_syscall(long *t)
{
        long secs;
        asm volatile("vsysc1: syscall"
                : "=a" (secs)
                : "0" (__NR_time),"D" (t) : __syscall_clobber);
        return secs;
}
static __always_inline void do_vgettimeofday(struct timeval * tv)
{
        cycle_t now, base, mask, cycle_delta;
        unsigned seq;
        unsigned long mult, shift, nsec;
        cycle_t (*vread)(void);
        do {
                seq = read_seqbegin(&__vsyscall_gtod_data.lock);

                vread = __vsyscall_gtod_data.clock.vread;
                if (unlikely(!__vsyscall_gtod_data.sysctl_enabled || !vread)) {
                        gettimeofday(tv,NULL);
                        return;
                }
                now = vread();
                base = __vsyscall_gtod_data.clock.cycle_last;
                mask = __vsyscall_gtod_data.clock.mask;
                mult = __vsyscall_gtod_data.clock.mult;
                shift = __vsyscall_gtod_data.clock.shift;

                tv->tv_sec = __vsyscall_gtod_data.wall_time_sec;
                nsec = __vsyscall_gtod_data.wall_time_nsec;
        } while (read_seqretry(&__vsyscall_gtod_data.lock, seq));

        /* calculate interval: */
        cycle_delta = (now - base) & mask;
        /* convert to nsecs: */
        nsec += (cycle_delta * mult) >> shift;

        while (nsec >= NSEC_PER_SEC) {
                tv->tv_sec += 1;
                nsec -= NSEC_PER_SEC;
        }
        tv->tv_usec = nsec / NSEC_PER_USEC;
}
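/*
 * Worked example for the mult/shift conversion above, with illustrative
 * clocksource numbers: a 1 GHz counter might register shift = 22 and
 * mult = (NSEC_PER_SEC << 22) / 10^9 = 1 << 22 = 4194304.  A delta of
 * 2,000,000 cycles then converts to (2000000 * 4194304) >> 22 =
 * 2,000,000 ns, i.e. 2 ms added on top of wall_time_nsec.
 */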
int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
{
        if (tv)
                do_vgettimeofday(tv);
        if (tz)
                do_get_tz(tz);
        return 0;
}

/* This will break when the xtime seconds get inaccurate, but that is
 * unlikely */
time_t __vsyscall(1) vtime(time_t *t)
{
        struct timeval tv;
        time_t result;
        if (unlikely(!__vsyscall_gtod_data.sysctl_enabled))
                return time_syscall(t);

        vgettimeofday(&tv, 0);
        result = tv.tv_sec;
        if (t)
                *t = result;
        return result;
}
/* Fast way to get the current CPU and node.
   This helps to do per-node and per-CPU caches in user space.
   The result is not guaranteed without CPU affinity, but usually
   works out because the scheduler tries to keep a thread on the same
   CPU.

   tcache must point to a two-element long array.
   All arguments can be NULL. */
long __vsyscall(2)
vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
{
        unsigned int dummy, p;
        unsigned long j = 0;

        /* Fast cache - only recompute the value once per jiffy and avoid
           the relatively costly rdtscp/cpuid otherwise.
           This works because the scheduler usually keeps the process
           on the same CPU and this syscall doesn't guarantee its
           results anyway.
           We do this here because otherwise user space would do it on
           its own in a likely inferior way (no access to jiffies).
           If you don't like it pass NULL. */
        if (tcache && tcache->blob[0] == (j = __jiffies)) {
                p = tcache->blob[1];
        } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
                /* Load per CPU data from RDTSCP */
                rdtscp(dummy, dummy, p);
        } else {
                /* Load per CPU data from GDT */
                asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
        }
        if (tcache) {
                tcache->blob[0] = j;
                tcache->blob[1] = p;
        }
        if (cpu)
                *cpu = p & 0xfff;
        if (node)
                *node = p >> 12;
        return 0;
}
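/*
 * Illustrative sketch (not part of the original file): calling vgetcpu from
 * user space through fixed vsyscall slot 2, with a caller-provided cache as
 * described above.  Addresses follow the -10 Mbyte + nr*1024 layout.
 */
#if 0	/* example only, never compiled as part of this file */
#include <linux/getcpu.h>

typedef long (*vgetcpu_fn)(unsigned *cpu, unsigned *node,
                           struct getcpu_cache *tcache);

static unsigned example_current_cpu(void)
{
        static struct getcpu_cache cache;       /* per-thread in real code */
        unsigned cpu = 0, node = 0;

        ((vgetcpu_fn)(0xffffffffff600000UL + 2 * 1024UL))(&cpu, &node, &cache);
        return cpu;
}
#endif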
long __vsyscall(3) venosys_1(void)
{
        return -ENOSYS;
}

#ifdef CONFIG_SYSCTL

#define SYSCALL 0x050f
#define NOP2 0x9090
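/*
 * 0x0f 0x05 is the two-byte "syscall" instruction; stored as a little-endian
 * u16 it reads 0x050f.  0x90 is a one-byte NOP, so writing 0x9090 replaces
 * the instruction with two NOPs.
 */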
/*
 * NOP out syscall in vsyscall page when not needed.
 */
static int vsyscall_sysctl_change(ctl_table *ctl, int write, struct file * filp,
                        void __user *buffer, size_t *lenp, loff_t *ppos)
{
        extern u16 vsysc1, vsysc2;
        u16 __iomem *map1;
        u16 __iomem *map2;
        int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
        if (!write)
                return ret;
        /* gcc has some trouble with __va(__pa()), so just do it this
           way. */
        map1 = ioremap(__pa_vsymbol(&vsysc1), 2);
        if (!map1)
                return -ENOMEM;
        map2 = ioremap(__pa_vsymbol(&vsysc2), 2);
        if (!map2) {
                ret = -ENOMEM;
                goto out;
        }
        if (!vsyscall_gtod_data.sysctl_enabled) {
                writew(SYSCALL, map1);
                writew(SYSCALL, map2);
        } else {
                writew(NOP2, map1);
                writew(NOP2, map2);
        }
        iounmap(map2);
out:
        iounmap(map1);
        return ret;
}
static int vsyscall_sysctl_nostrat(ctl_table *t, int __user *name, int nlen,
                                void __user *oldval, size_t __user *oldlenp,
                                void __user *newval, size_t newlen)
{
        return -ENOSYS;
}

static ctl_table kernel_table2[] = {
        { .ctl_name = 99, .procname = "vsyscall64",
          .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
          .mode = 0644,
          .strategy = vsyscall_sysctl_nostrat,
          .proc_handler = vsyscall_sysctl_change },
        {}
};
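/*
 * The "vsyscall64" entry above, hooked under the "kernel" root table below,
 * shows up as /proc/sys/kernel/vsyscall64; writes are funnelled through
 * vsyscall_sysctl_change(), which patches the vsyscall page accordingly.
 */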
static ctl_table kernel_root_table2[] = {
        { .ctl_name = CTL_KERN, .procname = "kernel", .mode = 0555,
          .child = kernel_table2 },
        {}
};

#endif
/* Assume __initcall executes before all user space. Hopefully kmod
   doesn't violate that. We'll find out if it does. */
static void __cpuinit vsyscall_set_cpu(int cpu)
{
        unsigned long *d;
        unsigned long node = 0;
#ifdef CONFIG_NUMA
        node = cpu_to_node(cpu);
#endif
        if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP))
                write_rdtscp_aux((node << 12) | cpu);

        /* Store cpu number in limit so that it can be loaded quickly
           in user space in vgetcpu.
           12 bits for the CPU and 8 bits for the node. */
        d = (unsigned long *)(cpu_gdt(cpu) + GDT_ENTRY_PER_CPU);
        *d = 0x0f40000000000ULL;
        *d |= cpu;
        *d |= (node & 0xf) << 12;
        *d |= (node >> 4) << 48;
}
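/*
 * Worked example of the encoding above: cpu 5 on node 1 gives a segment
 * limit of (1 << 12) | 5 = 0x1005.  The "lsl" in vgetcpu() reads that limit
 * back, so cpu = 0x1005 & 0xfff = 5 and node = 0x1005 >> 12 = 1.
 */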
static void __cpuinit cpu_vsyscall_init(void *arg)
{
        /* preemption should be already off */
        vsyscall_set_cpu(raw_smp_processor_id());
}

static int __cpuinit
cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
{
        long cpu = (long)arg;
        if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
                smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1);
        return NOTIFY_DONE;
}

static void __init map_vsyscall(void)
{
        extern char __vsyscall_0;
        unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);

        /* Note that VSYSCALL_MAPPED_PAGES must agree with the code below. */
        __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
}
static int __init vsyscall_init(void)
{
        BUG_ON(((unsigned long) &vgettimeofday !=
                        VSYSCALL_ADDR(__NR_vgettimeofday)));
        BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
        BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
        BUG_ON((unsigned long) &vgetcpu != VSYSCALL_ADDR(__NR_vgetcpu));
        map_vsyscall();
#ifdef CONFIG_SYSCTL
        register_sysctl_table(kernel_root_table2);
#endif
        on_each_cpu(cpu_vsyscall_init, NULL, 0, 1);
        hotcpu_notifier(cpu_vsyscall_notifier, 0);
        return 0;
}

__initcall(vsyscall_init);