/* Source: glibc.git — sysdeps/unix/sysv/linux/tile/sysdep.h
   (blob 074b91676472741c2e5f6242263a02e2a162de9c).
   Copyright dates maintained with scripts/update-copyrights.  */
/* Copyright (C) 2011-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Chris Metcalf <cmetcalf@tilera.com>, 2011.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <http://www.gnu.org/licenses/>.  */
19 #include <asm/unistd.h>
20 #include <sysdeps/tile/sysdep.h>
21 #include <sysdeps/unix/sysv/linux/generic/sysdep.h>
22 #include <sys/syscall.h>
24 #undef SYS_ify
25 #define SYS_ify(syscall_name) __NR_##syscall_name
28 #ifdef __ASSEMBLER__
30 /* The actual implementation of doing a syscall. */
31 #define DO_CALL(syscall_name, args) \
32 moveli TREG_SYSCALL_NR_NAME, SYS_ify(syscall_name); \
33 swint1
35 /* TILE Linux returns the result in r0 (or a negative errno).
36 The kernel "owns" the code to decide if a given value is an error,
37 and puts errno in r1 if so, or otherwise zero. */
38 #define PSEUDO(name, syscall_name, args) \
39 ENTRY (name); \
40 DO_CALL(syscall_name, args); \
41 BNEZ r1, 0f
43 #define ret jrp lr
45 #ifndef PIC
46 /* For static code, on error jump to __syscall_error directly. */
47 # define SYSCALL_ERROR_NAME __syscall_error
48 #elif IS_IN (libc) || IS_IN (libpthread)
49 /* Use the internal name for libc/libpthread shared objects. */
50 # define SYSCALL_ERROR_NAME __GI___syscall_error
51 #else
52 /* Otherwise, on error do a full PLT jump. */
53 # define SYSCALL_ERROR_NAME plt(__syscall_error)
54 #endif
56 #define PSEUDO_END(name) \
57 0: \
58 j SYSCALL_ERROR_NAME; \
59 END (name)
61 #define PSEUDO_NOERRNO(name, syscall_name, args) \
62 ENTRY (name); \
63 DO_CALL(syscall_name, args)
65 #define ret_NOERRNO jrp lr
67 #define PSEUDO_END_NOERRNO(name) \
68 END (name)
70 /* Convenience wrappers. */
71 #define SYSCALL__(name, args) PSEUDO (__##name, name, args)
72 #define SYSCALL(name, args) PSEUDO (name, name, args)
74 #else /* not __ASSEMBLER__ */
#include <errno.h>

/* Define a macro which expands inline into the wrapper code for a system
   call: do the syscall, and on failure store the out-of-band error in
   errno and collapse the result to -1.  */
# undef INLINE_SYSCALL
# define INLINE_SYSCALL(name, nr, args...) \
  ({ \
    INTERNAL_SYSCALL_DECL (err); \
    unsigned long val = INTERNAL_SYSCALL (name, err, nr, args); \
    if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (val, err), 0)) \
      { \
	__set_errno (INTERNAL_SYSCALL_ERRNO (val, err)); \
	val = -1; \
      } \
    (long) val; })

/* Dispatch on the argument count NR to the matching internal_syscallN.  */
#undef INTERNAL_SYSCALL
#define INTERNAL_SYSCALL(name, err, nr, args...) \
  internal_syscall##nr (SYS_ify (name), err, args)

/* Same, but NUMBER is an arbitrary (non-constant) syscall number.  */
#undef INTERNAL_SYSCALL_NCS
#define INTERNAL_SYSCALL_NCS(number, err, nr, args...) \
  internal_syscall##nr (number, err, args)

/* The kernel reports errors out-of-band in r1, captured into ERR;
   the syscall return value itself carries no error information.  */
#undef INTERNAL_SYSCALL_DECL
#define INTERNAL_SYSCALL_DECL(err) int err

#undef INTERNAL_SYSCALL_ERROR_P
#define INTERNAL_SYSCALL_ERROR_P(val, err) ({ (void) (val); (err) != 0; })

#undef INTERNAL_SYSCALL_ERRNO
#define INTERNAL_SYSCALL_ERRNO(val, err)   ({ (void) (val); (err); })
/* Issue a 0-argument syscall: number in r10 via "R10", result in r0,
   error in r1.  Caller-save registers the kernel may trash are modeled
   as dummy outputs plus the clobber list.  (Scrape had dropped the
   closing "})" line; restored.)  */
#define internal_syscall0(num, err, dummy...) \
  ({ \
    long _sys_result, __SYSCALL_CLOBBER_DECLS; \
    __asm__ __volatile__ ( \
      "swint1" \
      : "=R00" (_sys_result), "=R01" (err), __SYSCALL_CLOBBER_OUTPUTS \
      : "R10" (num) \
      : __SYSCALL_CLOBBERS); \
    _sys_result; \
  })
/* 1-argument syscall: arg0 passed in r0.  (Restored missing "})".)  */
#define internal_syscall1(num, err, arg0) \
  ({ \
    long _sys_result, __SYSCALL_CLOBBER_DECLS; \
    __asm__ __volatile__ ( \
      "swint1" \
      : "=R00" (_sys_result), "=R01" (err), __SYSCALL_CLOBBER_OUTPUTS \
      : "R10" (num), "R00" (arg0) \
      : __SYSCALL_CLOBBERS); \
    _sys_result; \
  })
/* 2-argument syscall: args in r0-r1.  (Restored missing "})".)  */
#define internal_syscall2(num, err, arg0, arg1) \
  ({ \
    long _sys_result, __SYSCALL_CLOBBER_DECLS; \
    __asm__ __volatile__ ( \
      "swint1" \
      : "=R00" (_sys_result), "=R01" (err), __SYSCALL_CLOBBER_OUTPUTS \
      : "R10" (num), "R00" (arg0), "R01" (arg1) \
      : __SYSCALL_CLOBBERS); \
    _sys_result; \
  })
/* 3-argument syscall: args in r0-r2.  (Restored missing "})".)  */
#define internal_syscall3(num, err, arg0, arg1, arg2) \
  ({ \
    long _sys_result, __SYSCALL_CLOBBER_DECLS; \
    __asm__ __volatile__ ( \
      "swint1" \
      : "=R00" (_sys_result), "=R01" (err), __SYSCALL_CLOBBER_OUTPUTS \
      : "R10" (num), "R00" (arg0), "R01" (arg1), "R02" (arg2) \
      : __SYSCALL_CLOBBERS); \
    _sys_result; \
  })
/* 4-argument syscall: args in r0-r3.  (Restored missing "})".)  */
#define internal_syscall4(num, err, arg0, arg1, arg2, arg3) \
  ({ \
    long _sys_result, __SYSCALL_CLOBBER_DECLS; \
    __asm__ __volatile__ ( \
      "swint1" \
      : "=R00" (_sys_result), "=R01" (err), __SYSCALL_CLOBBER_OUTPUTS \
      : "R10" (num), "R00" (arg0), "R01" (arg1), "R02" (arg2), \
	"R03" (arg3) \
      : __SYSCALL_CLOBBERS); \
    _sys_result; \
  })
/* 5-argument syscall: args in r0-r4.  (Restored missing "})".)  */
#define internal_syscall5(num, err, arg0, arg1, arg2, arg3, arg4) \
  ({ \
    long _sys_result, __SYSCALL_CLOBBER_DECLS; \
    __asm__ __volatile__ ( \
      "swint1" \
      : "=R00" (_sys_result), "=R01" (err), __SYSCALL_CLOBBER_OUTPUTS \
      : "R10" (num), "R00" (arg0), "R01" (arg1), "R02" (arg2), \
	"R03" (arg3), "R04" (arg4) \
      : __SYSCALL_CLOBBERS); \
    _sys_result; \
  })
/* 6-argument syscall: args in r0-r5.  (Restored missing "})".)  */
#define internal_syscall6(num, err, arg0, arg1, arg2, arg3, arg4, arg5) \
  ({ \
    long _sys_result, __SYSCALL_CLOBBER_DECLS; \
    __asm__ __volatile__ ( \
      "swint1" \
      : "=R00" (_sys_result), "=R01" (err), __SYSCALL_CLOBBER_OUTPUTS \
      : "R10" (num), "R00" (arg0), "R01" (arg1), "R02" (arg2), \
	"R03" (arg3), "R04" (arg4), "R05" (arg5) \
      : __SYSCALL_CLOBBERS); \
    _sys_result; \
  })
/* Caller-save registers the kernel may trash, beyond those modeled as
   outputs (r0-r5 and r10 are covered by __SYSCALL_CLOBBER_OUTPUTS).  */
#undef __SYSCALL_CLOBBERS
#define __SYSCALL_CLOBBERS \
  "r6",  "r7", \
  "r8",  "r9",  "r11", "r12", "r13", "r14", "r15", \
  "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", \
  "r24", "r25", "r26", "r27", "r28", "r29", "memory"
/* gcc doesn't seem to allow an input operand to be clobbered, so we
   fake it with dummy outputs.  DECLS declares the dummies; OUTPUTS
   binds them to the registers (r2-r5, r10) as asm outputs.  */
#define __SYSCALL_CLOBBER_DECLS \
  _clobber_r2, _clobber_r3, _clobber_r4, _clobber_r5, _clobber_r10

#define __SYSCALL_CLOBBER_OUTPUTS \
  "=R02" (_clobber_r2), "=R03" (_clobber_r3), "=R04" (_clobber_r4), \
  "=R05" (_clobber_r5), "=R10" (_clobber_r10)
205 /* This version is for kernels that implement system calls that
206 behave like function calls as far as register saving.
207 It falls back to the syscall in the case that the vDSO doesn't
208 exist or fails for ENOSYS */
209 # ifdef SHARED
210 # define INLINE_VSYSCALL(name, nr, args...) \
211 ({ \
212 __label__ out; \
213 __label__ iserr; \
214 INTERNAL_SYSCALL_DECL (sc_err); \
215 long int sc_ret; \
217 __typeof (__vdso_##name) vdsop = __vdso_##name; \
218 if (vdsop != NULL) \
220 sc_ret = vdsop (args); \
221 if (!INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err)) \
222 goto out; \
223 if (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err) != ENOSYS) \
224 goto iserr; \
227 sc_ret = INTERNAL_SYSCALL (name, sc_err, nr, ##args); \
228 if (INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err)) \
230 iserr: \
231 __set_errno (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err)); \
232 sc_ret = -1L; \
234 out: \
235 sc_ret; \
237 # define INTERNAL_VSYSCALL(name, err, nr, args...) \
238 ({ \
239 __label__ out; \
240 long int v_ret; \
242 __typeof (__vdso_##name) vdsop = __vdso_##name; \
243 if (vdsop != NULL) \
245 v_ret = vdsop (args); \
246 if (!INTERNAL_SYSCALL_ERROR_P (v_ret, err) \
247 || INTERNAL_SYSCALL_ERRNO (v_ret, err) != ENOSYS) \
248 goto out; \
250 v_ret = INTERNAL_SYSCALL (name, err, nr, ##args); \
251 out: \
252 v_ret; \
255 # else
256 # define INLINE_VSYSCALL(name, nr, args...) \
257 INLINE_SYSCALL (name, nr, ##args)
258 # define INTERNAL_VSYSCALL(name, err, nr, args...) \
259 INTERNAL_SYSCALL (name, err, nr, ##args)
260 # endif
261 #endif /* not __ASSEMBLER__ */
263 /* List of system calls which are supported as vsyscalls. */
264 #define HAVE_CLOCK_GETTIME_VSYSCALL 1
266 /* Pointer mangling support. */
267 #if IS_IN (rtld)
268 /* We cannot use the thread descriptor because in ld.so we use setjmp
269 earlier than the descriptor is initialized. */
270 #else
271 # ifdef __ASSEMBLER__
272 # define PTR_MANGLE(reg, tmpreg) \
273 ADDLI_PTR tmpreg, pt, POINTER_GUARD; \
274 LD tmpreg, tmpreg; \
275 xor reg, tmpreg, reg
276 # define PTR_DEMANGLE(reg, tmpreg) PTR_MANGLE (reg, tmpreg)
277 # else
278 # define PTR_MANGLE(var) \
279 (var) = (__typeof (var)) ((uintptr_t) (var) ^ THREAD_GET_POINTER_GUARD ())
280 # define PTR_DEMANGLE(var) PTR_MANGLE (var)
281 # endif
282 #endif