/* Copyright (C) 1992-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef _LINUX_POWERPC_SYSDEP_H
#define _LINUX_POWERPC_SYSDEP_H 1

#include <sysdeps/unix/powerpc/sysdep.h>
#include <tls.h>

/* Some system calls got renamed over time, but retained the same semantics.
   Handle them here so they can be caught by both C and assembler stubs in
   glibc.  */

#ifdef __NR_pread64
# ifdef __NR_pread
#  error "__NR_pread and __NR_pread64 both defined???"
# endif
# define __NR_pread __NR_pread64
#endif

#ifdef __NR_pwrite64
# ifdef __NR_pwrite
#  error "__NR_pwrite and __NR_pwrite64 both defined???"
# endif
# define __NR_pwrite __NR_pwrite64
#endif

/* On Linux we can use the system call table from the kernel header
   /usr/include/asm/unistd.h.  But those symbols do not follow the SYS_*
   naming convention, so we have to redefine the `SYS_ify' macro here.  */
#undef SYS_ify
#define SYS_ify(syscall_name) __NR_##syscall_name
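
/* Illustrative note (not part of the original header): SYS_ify simply
   pastes the name onto the __NR_ prefix, so the renames above are picked
   up transparently:

     SYS_ify (write)  =>  __NR_write
     SYS_ify (pread)  =>  __NR_pread, i.e. __NR_pread64 on this port.  */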

#ifndef __ASSEMBLER__

# include <errno.h>

/* Define a macro which expands inline into the wrapper code for a VDSO
   call.  This use is for internal calls that do not need to handle errors
   normally.  It will never touch errno.
   On powerpc a system call basically clobbers the same registers as a
   function call, with the exception of LR (which is needed for the
   "sc; bnslr+" sequence) and CR (where only CR0.SO is clobbered to signal
   an error return status).  */
# define INTERNAL_VSYSCALL_CALL_TYPE(funcptr, err, nr, type, args...) \
  ({ \
    register void *r0 __asm__ ("r0"); \
    register long int r3 __asm__ ("r3"); \
    register long int r4 __asm__ ("r4"); \
    register long int r5 __asm__ ("r5"); \
    register long int r6 __asm__ ("r6"); \
    register long int r7 __asm__ ("r7"); \
    register long int r8 __asm__ ("r8"); \
    register long int r9 __asm__ ("r9"); \
    register long int r10 __asm__ ("r10"); \
    register long int r11 __asm__ ("r11"); \
    register long int r12 __asm__ ("r12"); \
    register type rval __asm__ ("r3"); \
    LOADARGS_##nr (funcptr, args); \
    __asm__ __volatile__ \
      ("mtctr %0\n\t" \
       "bctrl\n\t" \
       "mfcr %0" \
       : "+r" (r0), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7), \
         "+r" (r8), "+r" (r9), "+r" (r10), "+r" (r11), "+r" (r12) \
       : : "cr0", "ctr", "lr", "memory"); \
    err = (long int) r0; \
    __asm__ __volatile__ ("" : "=r" (rval) : "r" (r3), "r" (r4)); \
    rval; \
  })

# define INTERNAL_VSYSCALL_CALL(funcptr, err, nr, args...) \
  INTERNAL_VSYSCALL_CALL_TYPE(funcptr, err, nr, long int, args)
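
/* Usage sketch (illustrative; vdso_func stands for a function pointer
   obtained from the vDSO lookup code, already demangled):

     long int err;
     struct timespec ts;
     long int ret = INTERNAL_VSYSCALL_CALL (vdso_func, err, 2,
                                            CLOCK_MONOTONIC, &ts);

   ERR receives the CR contents saved by "mfcr", so CR0.SO (mask 1 << 28)
   distinguishes success from failure.  */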

# undef INLINE_SYSCALL
# define INLINE_SYSCALL(name, nr, args...) \
  ({ \
    INTERNAL_SYSCALL_DECL (sc_err); \
    long int sc_ret = INTERNAL_SYSCALL (name, sc_err, nr, args); \
    if (INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err)) \
      { \
        __set_errno (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err)); \
        sc_ret = -1L; \
      } \
    sc_ret; \
  })
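
/* Usage sketch (illustrative): INLINE_SYSCALL follows the usual C library
   convention, returning -1 and setting errno on failure:

     long int ret = INLINE_SYSCALL (close, 1, fd);  */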

/* Define a macro which expands inline into the wrapper code for a system
   call.  This use is for internal calls that do not need to handle errors
   normally.  It will never touch errno.
   On powerpc a system call basically clobbers the same registers as a
   function call, with the exception of LR (which is needed for the
   "sc; bnslr+" sequence) and CR (where only CR0.SO is clobbered to signal
   an error return status).  */

# undef INTERNAL_SYSCALL_DECL
# define INTERNAL_SYSCALL_DECL(err) long int err __attribute__ ((unused))

# undef INTERNAL_SYSCALL
# define INTERNAL_SYSCALL_NCS(name, err, nr, args...) \
  ({ \
    register long int r0 __asm__ ("r0"); \
    register long int r3 __asm__ ("r3"); \
    register long int r4 __asm__ ("r4"); \
    register long int r5 __asm__ ("r5"); \
    register long int r6 __asm__ ("r6"); \
    register long int r7 __asm__ ("r7"); \
    register long int r8 __asm__ ("r8"); \
    register long int r9 __asm__ ("r9"); \
    register long int r10 __asm__ ("r10"); \
    register long int r11 __asm__ ("r11"); \
    register long int r12 __asm__ ("r12"); \
    LOADARGS_##nr (name, args); \
    ABORT_TRANSACTION; \
    __asm__ __volatile__ \
      ("sc\n\t" \
       "mfcr %0" \
       : "=&r" (r0), \
         "=&r" (r3), "=&r" (r4), "=&r" (r5), "=&r" (r6), "=&r" (r7), \
         "=&r" (r8), "=&r" (r9), "=&r" (r10), "=&r" (r11), "=&r" (r12) \
       : ASM_INPUT_##nr \
       : "cr0", "ctr", "memory"); \
    err = r0; \
    (int) r3; \
  })
# define INTERNAL_SYSCALL(name, err, nr, args...) \
  INTERNAL_SYSCALL_NCS (__NR_##name, err, nr, ##args)

# undef INTERNAL_SYSCALL_ERROR_P
# define INTERNAL_SYSCALL_ERROR_P(val, err) \
  ((void) (val), __builtin_expect ((err) & (1 << 28), 0))

# undef INTERNAL_SYSCALL_ERRNO
# define INTERNAL_SYSCALL_ERRNO(val, err) (val)
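
/* Usage sketch (illustrative): the INTERNAL_* family never touches errno;
   the caller inspects the saved CR instead:

     INTERNAL_SYSCALL_DECL (err);
     long int ret = INTERNAL_SYSCALL (kill, err, 2, pid, sig);
     if (INTERNAL_SYSCALL_ERROR_P (ret, err))
       return INTERNAL_SYSCALL_ERRNO (ret, err);

   On failure the kernel leaves the positive errno code in r3 and sets
   CR0.SO, which is exactly the bit ERROR_P tests.  */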

# define INTERNAL_VSYSCALL_NO_SYSCALL_FALLBACK(name, err, type, nr, args...) \
  ({ \
    type sc_ret = ENOSYS; \
 \
    __typeof (__vdso_##name) vdsop = __vdso_##name; \
    PTR_DEMANGLE (vdsop); \
    if (vdsop != NULL) \
      sc_ret = \
        INTERNAL_VSYSCALL_CALL_TYPE (vdsop, err, nr, type, ##args); \
    else \
      err = 1 << 28; \
    sc_ret; \
  })

/* List of system calls which are supported as vsyscalls.  */
# define HAVE_CLOCK_GETRES_VSYSCALL 1
# define HAVE_CLOCK_GETTIME_VSYSCALL 1
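
/* Usage sketch (illustrative): with the vDSO symbol resolved at startup,
   clock_gettime can be serviced without entering the kernel:

     INTERNAL_SYSCALL_DECL (err);
     int ret = INTERNAL_VSYSCALL_NO_SYSCALL_FALLBACK (clock_gettime, err,
                                                      int, 2, clock_id, tp);

   If __vdso_clock_gettime is unavailable, ERR is set to 1 << 28 so that
   INTERNAL_SYSCALL_ERROR_P reports failure and the result is ENOSYS.  */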

# define LOADARGS_0(name, dummy) \
    r0 = name
# define LOADARGS_1(name, __arg1) \
    long int arg1 = (long int) (__arg1); \
    LOADARGS_0(name, 0); \
    extern void __illegally_sized_syscall_arg1 (void); \
    if (__builtin_classify_type (__arg1) != 5 && sizeof (__arg1) > 4) \
      __illegally_sized_syscall_arg1 (); \
    r3 = arg1
# define LOADARGS_2(name, __arg1, __arg2) \
    long int arg2 = (long int) (__arg2); \
    LOADARGS_1(name, __arg1); \
    extern void __illegally_sized_syscall_arg2 (void); \
    if (__builtin_classify_type (__arg2) != 5 && sizeof (__arg2) > 4) \
      __illegally_sized_syscall_arg2 (); \
    r4 = arg2
# define LOADARGS_3(name, __arg1, __arg2, __arg3) \
    long int arg3 = (long int) (__arg3); \
    LOADARGS_2(name, __arg1, __arg2); \
    extern void __illegally_sized_syscall_arg3 (void); \
    if (__builtin_classify_type (__arg3) != 5 && sizeof (__arg3) > 4) \
      __illegally_sized_syscall_arg3 (); \
    r5 = arg3
# define LOADARGS_4(name, __arg1, __arg2, __arg3, __arg4) \
    long int arg4 = (long int) (__arg4); \
    LOADARGS_3(name, __arg1, __arg2, __arg3); \
    extern void __illegally_sized_syscall_arg4 (void); \
    if (__builtin_classify_type (__arg4) != 5 && sizeof (__arg4) > 4) \
      __illegally_sized_syscall_arg4 (); \
    r6 = arg4
# define LOADARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5) \
    long int arg5 = (long int) (__arg5); \
    LOADARGS_4(name, __arg1, __arg2, __arg3, __arg4); \
    extern void __illegally_sized_syscall_arg5 (void); \
    if (__builtin_classify_type (__arg5) != 5 && sizeof (__arg5) > 4) \
      __illegally_sized_syscall_arg5 (); \
    r7 = arg5
# define LOADARGS_6(name, __arg1, __arg2, __arg3, __arg4, __arg5, __arg6) \
    long int arg6 = (long int) (__arg6); \
    LOADARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5); \
    extern void __illegally_sized_syscall_arg6 (void); \
    if (__builtin_classify_type (__arg6) != 5 && sizeof (__arg6) > 4) \
      __illegally_sized_syscall_arg6 (); \
    r8 = arg6
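
/* Added commentary: __builtin_classify_type yields 5 for pointer types,
   which are always register-sized on powerpc32.  For any other argument
   wider than 4 bytes, the call to __illegally_sized_syscall_argN (declared
   but never defined) survives constant folding and produces a link-time
   error, so a 64-bit value cannot silently be truncated into a single
   syscall register.  */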

# define ASM_INPUT_0 "0" (r0)
# define ASM_INPUT_1 ASM_INPUT_0, "1" (r3)
# define ASM_INPUT_2 ASM_INPUT_1, "2" (r4)
# define ASM_INPUT_3 ASM_INPUT_2, "3" (r5)
# define ASM_INPUT_4 ASM_INPUT_3, "4" (r6)
# define ASM_INPUT_5 ASM_INPUT_4, "5" (r7)
# define ASM_INPUT_6 ASM_INPUT_5, "6" (r8)
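
/* Added commentary: the "0" ... "6" matching constraints above tie each
   asm input to the output operand with the same number, so GCC places the
   syscall number and arguments in r0 and r3-r8 before the "sc"
   instruction executes, while the outputs stay earlyclobber ("=&r").  */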

#endif /* __ASSEMBLER__ */

/* Pointer mangling support.  */
#if IS_IN (rtld)
/* We cannot use the thread descriptor because in ld.so setjmp is used
   before the descriptor is initialized.  */
#else
# ifdef __ASSEMBLER__
#  define PTR_MANGLE(reg, tmpreg) \
	lwz	tmpreg,POINTER_GUARD(r2); \
	xor	reg,tmpreg,reg
#  define PTR_MANGLE2(reg, tmpreg) \
	xor	reg,tmpreg,reg
#  define PTR_MANGLE3(destreg, reg, tmpreg) \
	lwz	tmpreg,POINTER_GUARD(r2); \
	xor	destreg,tmpreg,reg
#  define PTR_DEMANGLE(reg, tmpreg) PTR_MANGLE (reg, tmpreg)
#  define PTR_DEMANGLE2(reg, tmpreg) PTR_MANGLE2 (reg, tmpreg)
#  define PTR_DEMANGLE3(destreg, reg, tmpreg) PTR_MANGLE3 (destreg, reg, tmpreg)
# else
#  define PTR_MANGLE(var) \
  (var) = (__typeof (var)) ((uintptr_t) (var) ^ THREAD_GET_POINTER_GUARD ())
#  define PTR_DEMANGLE(var) PTR_MANGLE (var)
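
/* Illustrative example (added; some_handler is a hypothetical function):
   mangling XORs a stored pointer with the per-thread guard value, so a
   leaked jmp_buf or callback slot does not reveal a usable code address:

     void (*fp) (void) = some_handler;
     PTR_MANGLE (fp);      obfuscate before storing
     PTR_DEMANGLE (fp);    XOR with the guard again restores the pointer
     fp ();  */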

# endif
#endif

#endif /* linux/powerpc/powerpc32/sysdep.h */