/* Copyright (C) 2011-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Chris Metcalf <cmetcalf@tilera.com>, 2011.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <http://www.gnu.org/licenses/>.  */
19 #include <asm/unistd.h>
20 #include <sysdeps/tile/sysdep.h>
21 #include <sysdeps/unix/sysv/linux/generic/sysdep.h>
22 #include <sys/syscall.h>
/* Turn a bare syscall name into the kernel's __NR_<name> constant.  */
#define SYS_ify(nm) __NR_##nm
/* The actual implementation of doing a syscall: load the syscall
   number into the dedicated syscall-number register.
   NOTE(review): the trap instruction that actually enters the kernel
   is not visible in this view of the macro -- confirm against the
   original file before editing.  */
#define DO_CALL(syscall_name, args) \
  moveli TREG_SYSCALL_NR_NAME, SYS_ify(syscall_name); \
/* TILE Linux returns the result in r0 (or a negative errno).
   The kernel "owns" the code to decide if a given value is an error,
   and puts errno in r1 if so, or otherwise zero.  */
/* NOTE(review): this view of PSEUDO is missing lines (presumably the
   ENTRY prologue and the error-check epilogue) -- confirm against the
   original file before editing.  */
#define PSEUDO(name, syscall_name, args) \
  DO_CALL(syscall_name, args); \
/* Choose the error-reporting routine name according to how this
   object is linked.  NOTE(review): the opening "#ifndef PIC", the
   "#else" before the PLT case, and the closing "#endif" are not
   visible in this view of the file.  */
/* For static code, on error jump to __syscall_error directly.  */
# define SYSCALL_ERROR_NAME __syscall_error
#elif IS_IN (libc) || IS_IN (libpthread)
/* Use the internal name for libc/libpthread shared objects.  */
# define SYSCALL_ERROR_NAME __GI___syscall_error
/* Otherwise, on error do a full PLT jump.  */
# define SYSCALL_ERROR_NAME plt(__syscall_error)
/* Close a PSEUDO stub: the error path jumps to the per-linkage error
   routine chosen above.  NOTE(review): the local error label and the
   END(name) directive are not visible in this view.  */
#define PSEUDO_END(name) \
  j SYSCALL_ERROR_NAME; \
/* No-errno variant: stub for syscalls that never fail, so no error
   check or errno handling is emitted.  NOTE(review): the ENTRY
   prologue line is not visible in this view.  */
#define PSEUDO_NOERRNO(name, syscall_name, args) \
  DO_CALL(syscall_name, args)

/* Return from a no-errno stub: plain jump through the link register.  */
#define ret_NOERRNO jrp lr

/* NOTE(review): the continuation of this macro (presumably END(name))
   is not visible in this view.  */
#define PSEUDO_END_NOERRNO(name) \
/* Convenience wrappers.  */
#define SYSCALL__(name, args) PSEUDO (__##name, name, args)
#define SYSCALL(name, args) PSEUDO (name, name, args)
#else /* not __ASSEMBLER__ */

/* Define a macro which expands inline into the wrapper code for a
   system call: perform the syscall and, on failure, set errno from the
   kernel-reported error value.  NOTE(review): the enclosing statement
   expression ("({ ... })") and the final result expression of this
   macro are not visible in this view -- confirm against the original.  */
# undef INLINE_SYSCALL
# define INLINE_SYSCALL(name, nr, args...) \
  INTERNAL_SYSCALL_DECL (err); \
  unsigned long val = INTERNAL_SYSCALL (name, err, nr, args); \
  if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (val, err), 0)) \
    __set_errno (INTERNAL_SYSCALL_ERRNO (val, err)); \
/* Invoke a syscall with NR arguments: the count is token-pasted to
   select the matching internal_syscall<nr> helper below, and the name
   is converted to its __NR_* number via SYS_ify.  */
#undef INTERNAL_SYSCALL
#define INTERNAL_SYSCALL(name, err, nr, args...) \
  internal_syscall##nr (SYS_ify (name), err, args)

/* Non-constant-syscall variant: NUMBER is an already-computed syscall
   number rather than a bare name.  */
#undef INTERNAL_SYSCALL_NCS
#define INTERNAL_SYSCALL_NCS(number, err, nr, args...) \
  internal_syscall##nr (number, err, args)

/* Declare the error flag the kernel returns in r1 (zero on success).  */
#undef INTERNAL_SYSCALL_DECL
#define INTERNAL_SYSCALL_DECL(err) int err
/* The kernel reports failure through the separate error flag ERR, not
   through the result VAL, so VAL is evaluated only to be discarded.  */
#undef INTERNAL_SYSCALL_ERROR_P
#define INTERNAL_SYSCALL_ERROR_P(val, err) ((void) (val), (err) != 0)
/* The errno value is exactly the kernel-provided error flag ERR; the
   result VAL carries no error information and is discarded.  */
#undef INTERNAL_SYSCALL_ERRNO
#define INTERNAL_SYSCALL_ERRNO(val, err) ((void) (val), (err))
/* internal_syscall0..6: issue a syscall with 0..6 arguments.  Per the
   constraints below, the syscall number is bound to r10 and arguments
   to r0..r5; the result comes back in r0 and the error flag in r1.
   NOTE(review): each of these macros is missing lines in this view
   (presumably the opening "({", the asm template string containing the
   trap instruction, and the closing "_sys_result; })") -- confirm
   against the original file before editing.  */
#define internal_syscall0(num, err, dummy...) \
  long _sys_result, __SYSCALL_CLOBBER_DECLS; \
  __asm__ __volatile__ ( \
  : "=R00" (_sys_result), "=R01" (err), __SYSCALL_CLOBBER_OUTPUTS \
  : __SYSCALL_CLOBBERS); \

#define internal_syscall1(num, err, arg0) \
  long _sys_result, __SYSCALL_CLOBBER_DECLS; \
  __asm__ __volatile__ ( \
  : "=R00" (_sys_result), "=R01" (err), __SYSCALL_CLOBBER_OUTPUTS \
  : "R10" (num), "R00" (arg0) \
  : __SYSCALL_CLOBBERS); \

#define internal_syscall2(num, err, arg0, arg1) \
  long _sys_result, __SYSCALL_CLOBBER_DECLS; \
  __asm__ __volatile__ ( \
  : "=R00" (_sys_result), "=R01" (err), __SYSCALL_CLOBBER_OUTPUTS \
  : "R10" (num), "R00" (arg0), "R01" (arg1) \
  : __SYSCALL_CLOBBERS); \

#define internal_syscall3(num, err, arg0, arg1, arg2) \
  long _sys_result, __SYSCALL_CLOBBER_DECLS; \
  __asm__ __volatile__ ( \
  : "=R00" (_sys_result), "=R01" (err), __SYSCALL_CLOBBER_OUTPUTS \
  : "R10" (num), "R00" (arg0), "R01" (arg1), "R02" (arg2) \
  : __SYSCALL_CLOBBERS); \

/* NOTE(review): the input line binding arg3 to r3 is not visible in
   this view of internal_syscall4.  */
#define internal_syscall4(num, err, arg0, arg1, arg2, arg3) \
  long _sys_result, __SYSCALL_CLOBBER_DECLS; \
  __asm__ __volatile__ ( \
  : "=R00" (_sys_result), "=R01" (err), __SYSCALL_CLOBBER_OUTPUTS \
  : "R10" (num), "R00" (arg0), "R01" (arg1), "R02" (arg2), \
  : __SYSCALL_CLOBBERS); \

#define internal_syscall5(num, err, arg0, arg1, arg2, arg3, arg4) \
  long _sys_result, __SYSCALL_CLOBBER_DECLS; \
  __asm__ __volatile__ ( \
  : "=R00" (_sys_result), "=R01" (err), __SYSCALL_CLOBBER_OUTPUTS \
  : "R10" (num), "R00" (arg0), "R01" (arg1), "R02" (arg2), \
    "R03" (arg3), "R04" (arg4) \
  : __SYSCALL_CLOBBERS); \

#define internal_syscall6(num, err, arg0, arg1, arg2, arg3, arg4, arg5) \
  long _sys_result, __SYSCALL_CLOBBER_DECLS; \
  __asm__ __volatile__ ( \
  : "=R00" (_sys_result), "=R01" (err), __SYSCALL_CLOBBER_OUTPUTS \
  : "R10" (num), "R00" (arg0), "R01" (arg1), "R02" (arg2), \
    "R03" (arg3), "R04" (arg4), "R05" (arg5) \
  : __SYSCALL_CLOBBERS); \
/* Registers the kernel may clobber across a syscall (plus memory).
   NOTE(review): the first continuation line of this list -- presumably
   the registers between r5 and r8 not already handled as dummy
   outputs -- is not visible in this view.  */
#undef __SYSCALL_CLOBBERS
#define __SYSCALL_CLOBBERS \
  "r8", "r9", "r11", "r12", "r13", "r14", "r15", \
  "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", \
  "r24", "r25", "r26", "r27", "r28", "r29", "memory"
/* gcc doesn't seem to allow an input operand to be clobbered, so we
   fake it with dummy outputs.  */
/* Dummy locals that receive the clobbered argument registers; used in
   the declaration line of each internal_syscall<n> above.  */
#define __SYSCALL_CLOBBER_DECLS \
  _clobber_r2, _clobber_r3, _clobber_r4, _clobber_r5, _clobber_r10

/* Matching asm output constraints tying each dummy to its register.  */
#define __SYSCALL_CLOBBER_OUTPUTS \
  "=R02" (_clobber_r2), "=R03" (_clobber_r3), "=R04" (_clobber_r4), \
  "=R05" (_clobber_r5), "=R10" (_clobber_r10)
/* This version is for kernels that implement system calls that
   behave like function calls as far as register saving.
   It falls back to the syscall in the case that the vDSO doesn't
   exist or fails for ENOSYS.  */
/* NOTE(review): several lines of this macro are not visible in this
   view (presumably the enclosing "({", the sc_ret declaration, the
   null check on the vDSO function pointer, the exit label, and the
   closing "})") -- confirm against the original before editing.  */
# define INLINE_VSYSCALL(name, nr, args...) \
  INTERNAL_SYSCALL_DECL (sc_err); \
  __typeof (__vdso_##name) vdsop = __vdso_##name; \
  sc_ret = vdsop (args); \
  if (!INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err)) \
  if (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err) != ENOSYS) \
  sc_ret = INTERNAL_SYSCALL (name, sc_err, nr, ##args); \
  if (INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err)) \
  __set_errno (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err)); \
/* Internal (no-errno-setting) variant: try the vDSO entry point first
   and fall back to the real syscall only when it is absent or fails
   with ENOSYS.  NOTE(review): missing lines in this view (presumably
   the enclosing "({", the v_ret declaration, the vDSO pointer null
   check, and the closing "})") -- confirm against the original.  */
# define INTERNAL_VSYSCALL(name, err, nr, args...) \
  __typeof (__vdso_##name) vdsop = __vdso_##name; \
  v_ret = vdsop (args); \
  if (!INTERNAL_SYSCALL_ERROR_P (v_ret, err) \
      || INTERNAL_SYSCALL_ERRNO (v_ret, err) != ENOSYS) \
  v_ret = INTERNAL_SYSCALL (name, err, nr, ##args); \
/* Without vDSO support, the vsyscall wrappers forward straight to the
   ordinary syscall macros.  */
# define INLINE_VSYSCALL(name, nr, args...) \
  INLINE_SYSCALL (name, nr, ##args)
# define INTERNAL_VSYSCALL(name, err, nr, args...) \
  INTERNAL_SYSCALL (name, err, nr, ##args)
261 #endif /* not __ASSEMBLER__ */
/* List of system calls which are supported as vsyscalls.  */
#define HAVE_CLOCK_GETTIME_VSYSCALL 1
/* Pointer mangling support.  */
/* We cannot use the thread descriptor because in ld.so we use setjmp
   earlier than the descriptor is initialized.  */
/* NOTE(review): lines are missing from this view -- presumably the
   surrounding #if guard, the xor instruction completing the assembler
   PTR_MANGLE, and the "#else" separating the assembler and C variants.
   Confirm against the original file before editing.  The C variant
   xors the pointer with the per-process guard value; since xor is its
   own inverse, PTR_DEMANGLE is simply PTR_MANGLE again.  */
# ifdef __ASSEMBLER__
# define PTR_MANGLE(reg, tmpreg) \
  ADDLI_PTR tmpreg, pt, POINTER_GUARD; \
# define PTR_DEMANGLE(reg, tmpreg) PTR_MANGLE (reg, tmpreg)
# define PTR_MANGLE(var) \
  (var) = (__typeof (var)) ((uintptr_t) (var) ^ THREAD_GET_POINTER_GUARD ())
# define PTR_DEMANGLE(var) PTR_MANGLE (var)