/*
 *  linux/arch/x86_64/kernel/vsyscall.c
 *
 *  Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright 2003 Andi Kleen, SuSE Labs.
 *
 *  Thanks to hpa@transmeta.com for some useful hints.
 *  Special thanks to Ingo Molnar for his early experience with
 *  a different vsyscall implementation for Linux/IA32 and for the name.
 *
 *  vsyscall 1 is located at -10Mbyte, vsyscall 2 is located
 *  at virtual address -10Mbyte+1024bytes etc... There are at most 4
 *  vsyscalls. One vsyscall can reserve more than 1 slot to avoid
 *  jumping out of line if necessary. We cannot add more with this
 *  mechanism because older kernels won't return -ENOSYS.
 *  If we want more than four we need a vDSO.
 *
 *  Note: the concept clashes with user mode linux. If you use UML and
 *  want per guest time just set the kernel.vsyscall64 sysctl to 0.
 */
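/*
 * Example (user space, not part of this file): because the slots sit at
 * fixed addresses, a process can call one directly through a function
 * pointer. A minimal sketch, assuming the traditional -10MB base
 * (0xffffffffff600000) and the 1024-byte slot spacing described above:
 *
 *	typedef int (*vgtod_fn)(struct timeval *, struct timezone *);
 *	vgtod_fn vgtod = (vgtod_fn)0xffffffffff600000;	(slot 0)
 *	struct timeval tv;
 *	vgtod(&tv, NULL);
 */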
#include <linux/time.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/seqlock.h>
#include <linux/jiffies.h>
#include <linux/sysctl.h>
#include <linux/clocksource.h>
#include <linux/getcpu.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/notifier.h>
#include <asm/vsyscall.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <asm/fixmap.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <asm/segment.h>
#include <asm/desc.h>
#include <asm/topology.h>
#include <asm/vgtod.h>
#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
#define __syscall_clobber "r11","rcx","memory"
#define __pa_vsymbol(x)			\
	({unsigned long v;		\
	extern char __vsyscall_0;	\
	  asm("" : "=r" (v) : "0" (x)); \
	  ((v - VSYSCALL_FIRST_PAGE) + __pa_symbol(&__vsyscall_0)); })
/*
 * vsyscall_gtod_data contains data that is:
 * - read-only from vsyscalls
 * - written by timer interrupt or sysctl (/proc/sys/kernel/vsyscall64)
 * Try to keep this structure as small as possible to avoid cache line
 * ping-pongs.
 */
int __vgetcpu_mode __section_vgetcpu_mode;

struct vsyscall_gtod_data __vsyscall_gtod_data __section_vsyscall_gtod_data =
{
	.lock = SEQLOCK_UNLOCKED,
	.sysctl_enabled = 1,
};
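/*
 * The structure above is published to user space read-only; consistency
 * comes purely from the seqlock. The writer (update_vsyscall() below)
 * bumps the sequence with interrupts off, and the vsyscall readers retry
 * if the sequence changed mid-read, so no reader can see a torn update.
 */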
void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
{
	unsigned long flags;

	write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
	/* copy vsyscall data */
	vsyscall_gtod_data.clock.vread = clock->vread;
	vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
	vsyscall_gtod_data.clock.mask = clock->mask;
	vsyscall_gtod_data.clock.mult = clock->mult;
	vsyscall_gtod_data.clock.shift = clock->shift;
	vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
	vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
	vsyscall_gtod_data.sys_tz = sys_tz;
	vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic;
	write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}
/* RED-PEN may want to re-add seq locking, but then the variable should be
 * write-once.
 */
static __always_inline void do_get_tz(struct timezone * tz)
{
	*tz = __vsyscall_gtod_data.sys_tz;
}
static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
{
	int ret;
	asm volatile("vsysc2: syscall"
		: "=a" (ret)
		: "0" (__NR_gettimeofday),"D" (tv),"S" (tz)
		: __syscall_clobber);
	return ret;
}
static __always_inline long time_syscall(long *t)
{
	long secs;
	asm volatile("vsysc1: syscall"
		: "=a" (secs)
		: "0" (__NR_time),"D" (t) : __syscall_clobber);
	return secs;
}
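/*
 * The vsysc1/vsysc2 labels in the two fallbacks above mark the 16-bit
 * "syscall" instruction words as patch sites; vsyscall_sysctl_change()
 * below rewrites them in place when kernel.vsyscall64 is toggled.
 */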
static __always_inline void do_vgettimeofday(struct timeval * tv)
{
	cycle_t now, base, mask, cycle_delta;
	unsigned seq;
	unsigned long mult, shift, nsec;
	cycle_t (*vread)(void);
	do {
		seq = read_seqbegin(&__vsyscall_gtod_data.lock);

		vread = __vsyscall_gtod_data.clock.vread;
		if (unlikely(!__vsyscall_gtod_data.sysctl_enabled || !vread)) {
			gettimeofday(tv,NULL);
			return;
		}
		now = vread();
		base = __vsyscall_gtod_data.clock.cycle_last;
		mask = __vsyscall_gtod_data.clock.mask;
		mult = __vsyscall_gtod_data.clock.mult;
		shift = __vsyscall_gtod_data.clock.shift;

		tv->tv_sec = __vsyscall_gtod_data.wall_time_sec;
		nsec = __vsyscall_gtod_data.wall_time_nsec;
	} while (read_seqretry(&__vsyscall_gtod_data.lock, seq));

	/* calculate interval: */
	cycle_delta = (now - base) & mask;
	/* convert to nsecs: */
	nsec += (cycle_delta * mult) >> shift;

	while (nsec >= NSEC_PER_SEC) {
		tv->tv_sec += 1;
		nsec -= NSEC_PER_SEC;
	}
	tv->tv_usec = nsec / NSEC_PER_USEC;
}
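/*
 * The conversion above is the standard clocksource scaling,
 * ns = (cycles * mult) >> shift. Illustrative example (hypothetical
 * values, not from this file): a 1 GHz counter with mult = 0x400000 and
 * shift = 22 maps one cycle to exactly (1 * 0x400000) >> 22 = 1 ns.
 */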
int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
{
	if (tv)
		do_vgettimeofday(tv);
	if (tz)
		do_get_tz(tz);
	return 0;
}
/* This will break when the xtime seconds get inaccurate, but that is
 * unlikely */
time_t __vsyscall(1) vtime(time_t *t)
{
	struct timeval tv;
	time_t result;
	if (unlikely(!__vsyscall_gtod_data.sysctl_enabled))
		return time_syscall(t);

	vgettimeofday(&tv, 0);
	result = tv.tv_sec;
	if (t)
		*t = result;
	return result;
}
/* Fast way to get current CPU and node.
   This helps to do per node and per CPU caches in user space.
   The result is not guaranteed without CPU affinity, but usually
   works out because the scheduler tries to keep a thread on the same
   CPU.

   tcache must point to a two element sized long array.
   All arguments can be NULL. */
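/*
 * Example (user space, hypothetical caller): keeping one cache blob per
 * thread lets repeated calls within the same jiffy skip rdtscp/lsl:
 *
 *	struct getcpu_cache cache = { 0 };
 *	unsigned cpu, node;
 *	vgetcpu(&cpu, &node, &cache);
 */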
long __vsyscall(2)
vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
{
	unsigned int dummy, p;
	unsigned long j = 0;

	/* Fast cache - only recompute value once per jiffies and avoid
	   relatively costly rdtscp/cpuid otherwise.
	   This works because the scheduler usually keeps the process
	   on the same CPU and this syscall doesn't guarantee its
	   results anyway.
	   We do this here because otherwise user space would do it on
	   its own in a likely inferior way (no access to jiffies).
	   If you don't like it pass NULL. */
	if (tcache && tcache->blob[0] == (j = __jiffies)) {
		p = tcache->blob[1];
	} else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
		/* Load per CPU data from RDTSCP */
		rdtscp(dummy, dummy, p);
	} else {
		/* Load per CPU data from GDT */
		asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
	}
	if (tcache) {
		tcache->blob[0] = j;
		tcache->blob[1] = p;
	}
	if (cpu)
		*cpu = p & 0xfff;
	if (node)
		*node = p >> 12;
	return 0;
}
long __vsyscall(3) venosys_1(void)
{
	return -ENOSYS;
}
#ifdef CONFIG_SYSCTL

#define SYSCALL	0x050f
#define NOP2	0x9090
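/*
 * These are the little-endian 16-bit instruction encodings poked into
 * the patch sites: 0x050f is the byte pair 0f 05 ("syscall") and 0x9090
 * is two one-byte nops.
 */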
/*
 * NOP out syscall in vsyscall page when not needed.
 */
static int vsyscall_sysctl_change(ctl_table *ctl, int write, struct file * filp,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	extern u16 vsysc1, vsysc2;
	u16 __iomem *map1;
	u16 __iomem *map2;
	int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
	if (!write)
		return ret;
	/* gcc has some trouble with __va(__pa()), so just do it this
	   way. */
	map1 = ioremap(__pa_vsymbol(&vsysc1), 2);
	if (!map1)
		return -ENOMEM;
	map2 = ioremap(__pa_vsymbol(&vsysc2), 2);
	if (!map2) {
		ret = -ENOMEM;
		goto out;
	}
	if (!vsyscall_gtod_data.sysctl_enabled) {
		writew(SYSCALL, map1);
		writew(SYSCALL, map2);
	} else {
		writew(NOP2, map1);
		writew(NOP2, map2);
	}
	iounmap(map2);
out:
	iounmap(map1);
	return ret;
}
static int vsyscall_sysctl_nostrat(ctl_table *t, int __user *name, int nlen,
				void __user *oldval, size_t __user *oldlenp,
				void __user *newval, size_t newlen)
{
	return -ENOSYS;
}
static ctl_table kernel_table2[] = {
	{ .ctl_name = 99, .procname = "vsyscall64",
	  .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
	  .mode = 0644,
	  .strategy = vsyscall_sysctl_nostrat,
	  .proc_handler = vsyscall_sysctl_change },
	{}
};
static ctl_table kernel_root_table2[] = {
	{ .ctl_name = CTL_KERN, .procname = "kernel", .mode = 0555,
	  .child = kernel_table2 },
	{}
};

#endif
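/*
 * Example (shell): the tables above register the knob mentioned in the
 * header comment; writing it triggers vsyscall_sysctl_change():
 *
 *	echo 0 > /proc/sys/kernel/vsyscall64
 */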
/* Assume __initcall executes before all user space. Hopefully kmod
   doesn't violate that. We'll find out if it does. */
static void __cpuinit vsyscall_set_cpu(int cpu)
{
	unsigned long *d;
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node[cpu];
#endif
	if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/* Store cpu number in limit so that it can be loaded quickly
	   in user space in vgetcpu.
	   12 bits for the CPU and 8 bits for the node. */
	d = (unsigned long *)(cpu_gdt(cpu) + GDT_ENTRY_PER_CPU);
	*d = 0x0f40000000000ULL;
	*d |= cpu;
	*d |= (node & 0xf) << 12;
	*d |= (node >> 4) << 48;
}
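/*
 * Worked example (hypothetical values): for cpu 5 on node 1 the segment
 * limit becomes 5 | (1 << 12) = 0x1005; the "lsl" in vgetcpu() reads it
 * back and splits it as cpu = p & 0xfff, node = p >> 12. RDTSCP-capable
 * CPUs get the same encoding from the TSC auxiliary MSR instead.
 */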
static void __cpuinit cpu_vsyscall_init(void *arg)
{
	/* preemption should be already off */
	vsyscall_set_cpu(raw_smp_processor_id());
}
static int __cpuinit
cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
{
	long cpu = (long)arg;
	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
		smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1);
	return NOTIFY_DONE;
}
static void __init map_vsyscall(void)
{
	extern char __vsyscall_0;
	unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);

	/* Note that VSYSCALL_MAPPED_PAGES must agree with the code below. */
	__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
}
static int __init vsyscall_init(void)
{
	BUG_ON(((unsigned long) &vgettimeofday !=
			VSYSCALL_ADDR(__NR_vgettimeofday)));
	BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
	BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
	BUG_ON((unsigned long) &vgetcpu != VSYSCALL_ADDR(__NR_vgetcpu));
	map_vsyscall();
#ifdef CONFIG_SYSCTL
	register_sysctl_table(kernel_root_table2);
#endif
	on_each_cpu(cpu_vsyscall_init, NULL, 0, 1);
	hotcpu_notifier(cpu_vsyscall_notifier, 0);
	return 0;
}
__initcall(vsyscall_init);