/*
 *  linux/arch/x86_64/kernel/vsyscall.c
 *
 *  Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright 2003 Andi Kleen, SuSE Labs.
 *
 *  Thanks to hpa@transmeta.com for some useful hints.
 *  Special thanks to Ingo Molnar for his early experience with
 *  a different vsyscall implementation for Linux/IA32 and for the name.
 *
 *  vsyscall 1 is located at -10Mbyte, vsyscall 2 is located
 *  at virtual address -10Mbyte+1024bytes etc... There are at max 4
 *  vsyscalls. One vsyscall can reserve more than 1 slot to avoid
 *  jumping out of line if necessary. We cannot add more with this
 *  mechanism because older kernels won't return -ENOSYS.
 *  If we want more than four we need a vDSO.
 *
 *  Note: the concept clashes with user mode linux. If you use UML and
 *  want per guest time just set the kernel.vsyscall64 sysctl to 0.
 */
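
/*
 * Illustrative user-space sketch of the layout described above, assuming
 * the -10 MB base and 1024-byte slot spacing (the user-side names here
 * are hypothetical and only mirror what VSYSCALL_ADDR() computes on the
 * kernel side):
 *
 *	#define VSYSCALL_START	0xffffffffff600000UL
 *	#define SLOT		1024
 *
 *	typedef int (*vgtod_t)(struct timeval *, struct timezone *);
 *	typedef time_t (*vtime_t)(time_t *);
 *
 *	vgtod_t vgtod = (vgtod_t)(VSYSCALL_START + 0 * SLOT);
 *	vtime_t vtm   = (vtime_t)(VSYSCALL_START + 1 * SLOT);
 *
 *	struct timeval tv;
 *	vgtod(&tv, NULL);
 *	time_t now = vtm(NULL);
 *
 * Slot 0 is vgettimeofday() and slot 1 is vtime(), matching the
 * __vsyscall(0)/__vsyscall(1) attributes below.
 */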

#include <linux/time.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/seqlock.h>
#include <linux/jiffies.h>
#include <linux/sysctl.h>

#include <asm/vsyscall.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/fixmap.h>
#include <asm/errno.h>
#include <asm/io.h>

#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))

int __sysctl_vsyscall __section_sysctl_vsyscall = 1;
seqlock_t __xtime_lock __section_xtime_lock = SEQLOCK_UNLOCKED;

#include <asm/unistd.h>

static __always_inline void timeval_normalize(struct timeval * tv)
{
	time_t __sec;

	__sec = tv->tv_usec / 1000000;
	if (__sec) {
		tv->tv_usec %= 1000000;
		tv->tv_sec += __sec;
	}
}

static __always_inline void do_vgettimeofday(struct timeval * tv)
{
	long sequence, t;
	unsigned long sec, usec;

	do {
		sequence = read_seqbegin(&__xtime_lock);

		sec = __xtime.tv_sec;
		usec = (__xtime.tv_nsec / 1000) +
			(__jiffies - __wall_jiffies) * (1000000 / HZ);

		if (__vxtime.mode != VXTIME_HPET) {
			t = get_cycles_sync();
			if (t < __vxtime.last_tsc)
				t = __vxtime.last_tsc;
			usec += ((t - __vxtime.last_tsc) *
				 __vxtime.tsc_quot) >> 32;
			/* See comment in x86_64 do_gettimeofday. */
		} else {
			usec += ((readl((void *)fix_to_virt(VSYSCALL_HPET) + 0xf0) -
				  __vxtime.last) * __vxtime.quot) >> 32;
		}
	} while (read_seqretry(&__xtime_lock, sequence));

	tv->tv_sec = sec + usec / 1000000;
	tv->tv_usec = usec % 1000000;
}
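
/*
 * Scaling note: __vxtime.tsc_quot and __vxtime.quot (set up in
 * arch/x86_64/kernel/time.c) act as 32.32 fixed-point "microseconds per
 * counter unit" factors, so
 *
 *	usec += (delta * quot) >> 32;
 *
 * converts a raw TSC or HPET delta since the last tick into microseconds
 * without a division on this fast path.  The 0xf0 offset read above is
 * the HPET main counter register.
 */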

/* RED-PEN may want to readd seq locking, but then the variable should be write-once. */
static __always_inline void do_get_tz(struct timezone * tz)
{
	*tz = __sys_tz;
}

static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
{
	int ret;
	asm volatile("vsysc2: syscall"
		: "=a" (ret)
		: "0" (__NR_gettimeofday),"D" (tv),"S" (tz) : __syscall_clobber );
	return ret;
}

static __always_inline long time_syscall(long *t)
{
	long secs;
	asm volatile("vsysc1: syscall"
		: "=a" (secs)
		: "0" (__NR_time),"D" (t) : __syscall_clobber);
	return secs;
}
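
/*
 * gettimeofday() and time_syscall() above are the real-syscall fallback
 * paths taken when the vsyscall64 sysctl is off.  The vsysc1/vsysc2 asm
 * labels mark the two-byte "syscall" instructions so that
 * vsyscall_sysctl_change() below can patch them between "syscall" and
 * NOPs as the sysctl flips.
 */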

int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
{
	if (unlikely(!__sysctl_vsyscall))
		return gettimeofday(tv,tz);
	if (tv)
		do_vgettimeofday(tv);
	if (tz)
		do_get_tz(tz);
	return 0;
}

/* This will break when the xtime seconds get inaccurate, but that is
 * unlikely */
time_t __vsyscall(1) vtime(time_t *t)
{
	if (unlikely(!__sysctl_vsyscall))
		return time_syscall(t);
	else if (t)
		*t = __xtime.tv_sec;
	return __xtime.tv_sec;
}

long __vsyscall(2) venosys_0(void)
{
	return -ENOSYS;
}

long __vsyscall(3) venosys_1(void)
{
	return -ENOSYS;
}

#ifdef CONFIG_SYSCTL

#define SYSCALL 0x050f
#define NOP2	0x9090
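
/*
 * As little-endian u16 stores, SYSCALL lays down the bytes 0x0f 0x05
 * (the two-byte "syscall" opcode) and NOP2 lays down two 0x90 NOPs over
 * the vsysc1/vsysc2 patch sites.
 */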

/*
 * NOP out syscall in vsyscall page when not needed.
 */
static int vsyscall_sysctl_change(ctl_table *ctl, int write, struct file * filp,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	extern u16 vsysc1, vsysc2;
	u16 *map1, *map2;
	int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
	if (!write)
		return ret;
	/* gcc has some trouble with __va(__pa()), so just do it this
	   way. */
	map1 = ioremap(__pa_symbol(&vsysc1), 2);
	if (!map1)
		return -ENOMEM;
	map2 = ioremap(__pa_symbol(&vsysc2), 2);
	if (!map2) {
		ret = -ENOMEM;
		goto out;
	}
	if (!sysctl_vsyscall) {
		*map1 = SYSCALL;
		*map2 = SYSCALL;
	} else {
		*map1 = NOP2;
		*map2 = NOP2;
	}
	iounmap(map2);
out:
	iounmap(map1);
	return ret;
}

static int vsyscall_sysctl_nostrat(ctl_table *t, int __user *name, int nlen,
				void __user *oldval, size_t __user *oldlenp,
				void __user *newval, size_t newlen,
				void **context)
{
	return -ENOSYS;
}

static ctl_table kernel_table2[] = {
	{ .ctl_name = 99, .procname = "vsyscall64",
	  .data = &sysctl_vsyscall, .maxlen = sizeof(int), .mode = 0644,
	  .strategy = vsyscall_sysctl_nostrat,
	  .proc_handler = vsyscall_sysctl_change },
	{ 0, }
};

static ctl_table kernel_root_table2[] = {
	{ .ctl_name = CTL_KERN, .procname = "kernel", .mode = 0555,
	  .child = kernel_table2 },
	{ 0 },
};
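
/*
 * Example, matching the UML note at the top of the file:
 *
 *	sysctl -w kernel.vsyscall64=0
 *
 * makes vgettimeofday()/vtime() take the real-syscall path, while
 * kernel.vsyscall64=1 re-enables the in-page fast path.
 */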

#endif

static void __init map_vsyscall(void)
{
	extern char __vsyscall_0;
	unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);

	__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
}

static int __init vsyscall_init(void)
{
	BUG_ON(((unsigned long) &vgettimeofday !=
			VSYSCALL_ADDR(__NR_vgettimeofday)));
	BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
	BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
	map_vsyscall();
#ifdef CONFIG_SYSCTL
	register_sysctl_table(kernel_root_table2, 0);
#endif
	return 0;
}

__initcall(vsyscall_init);