Update copyright dates with scripts/update-copyrights.
[glibc.git] / sysdeps / unix / sysv / linux / mips / mips64 / n32 / sysdep.h
blob ac663bc1f23c6b941174b03730f0c89ff7001025
/* Copyright (C) 2000-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <http://www.gnu.org/licenses/>.  */
18 #ifndef _LINUX_MIPS_SYSDEP_H
19 #define _LINUX_MIPS_SYSDEP_H 1
21 /* There is some commonality. */
22 #include <sysdeps/unix/mips/mips64/n32/sysdep.h>
24 #include <tls.h>
26 /* In order to get __set_errno() definition in INLINE_SYSCALL. */
27 #ifndef __ASSEMBLER__
28 #include <errno.h>
29 #endif
/* For Linux we can use the system call table in the header file
	/usr/include/asm/unistd.h
   of the kernel.  But these symbols do not follow the SYS_* syntax
   so we have to redefine the `SYS_ify' macro here.
   SYS_ify(name) pastes NAME onto the __NR_ prefix used by the kernel
   headers, e.g. SYS_ify (exit) -> __NR_exit.  */
#undef SYS_ify
#define SYS_ify(syscall_name)	__NR_##syscall_name
38 #ifdef __ASSEMBLER__
/* We don't want the label for the error handler to be visible in the symbol
   table when we define it here, so use a local numeric label (the `99:'
   defined by the assembly syscall wrappers, referenced backwards).  */
# define SYSCALL_ERROR_LABEL 99b
44 #else /* ! __ASSEMBLER__ */
/* Convert X to a long long, without losing any bits if it is one
   already or warning if it is a 32-bit pointer.  The (X) - (X)
   difference yields an integer type of the right width (ptrdiff_t for
   pointers), so the conversion to long long never truncates.  */
#define ARGIFY(X) ((long long) (__typeof__ ((X) - (X))) (X))
/* Define a macro which expands into the inline wrapper code for a system
   call: invoke INTERNAL_SYSCALL, and on error set errno from the
   kernel-returned error value and yield -1; otherwise yield the raw
   syscall result.  */
#undef INLINE_SYSCALL
#define INLINE_SYSCALL(name, nr, args...)				\
  ({ INTERNAL_SYSCALL_DECL (err);					\
     long result_var = INTERNAL_SYSCALL (name, err, nr, args);		\
     if (INTERNAL_SYSCALL_ERROR_P (result_var, err))			\
       {								\
	 __set_errno (INTERNAL_SYSCALL_ERRNO (result_var, err));	\
	 result_var = -1L;						\
       }								\
     result_var; })
/* Declare the error-flag variable filled in by INTERNAL_SYSCALL (the
   kernel returns it in $a3).  */
#undef INTERNAL_SYSCALL_DECL
#define INTERNAL_SYSCALL_DECL(err) long err __attribute__ ((unused))

/* A nonzero ERR means VAL holds a (positive) kernel error code.  */
#undef INTERNAL_SYSCALL_ERROR_P
#define INTERNAL_SYSCALL_ERROR_P(val, err)   ((void) (val), (long) (err))

/* On MIPS the error case returns the errno value directly in VAL.  */
#undef INTERNAL_SYSCALL_ERRNO
#define INTERNAL_SYSCALL_ERRNO(val, err)     ((void) (err), val)
/* Note that the original Linux syscall restart convention required the
   instruction immediately preceding SYSCALL to initialize $v0 with the
   syscall number.  Then if a restart triggered, $v0 would have been
   clobbered by the syscall interrupted, and needed to be reinitialized.
   The kernel would decrement the PC by 4 before switching back to the
   user mode so that $v0 had been reloaded before SYSCALL was executed
   again.  This implied the place $v0 was loaded from must have been
   preserved across a syscall, e.g. an immediate, static register, stack
   slot, etc.

   The convention was relaxed in Linux with a change applied to the kernel
   GIT repository as commit 96187fb0bc30cd7919759d371d810e928048249d, that
   first appeared in the 2.6.36 release.  Since then the kernel has had
   code that reloads $v0 upon syscall restart and resumes right at the
   SYSCALL instruction, so no special arrangement is needed anymore.

   For backwards compatibility with existing kernel binaries we support
   the old convention by choosing the instruction preceding SYSCALL
   carefully.  This also means we have to force a 32-bit encoding of the
   microMIPS MOVE instruction if one is used.  */

#ifdef __mips_micromips
# define MOVE32 "move32"
#else
# define MOVE32 "move"
#endif
/* Issue the system call NAME with NR arguments.  The syscall number is
   a compile-time constant, loaded into $v0 with an LI immediate; the
   "#name" stringification leaves the syscall name as an assembly
   comment for debugging.  */
#undef INTERNAL_SYSCALL
#define INTERNAL_SYSCALL(name, err, nr, args...)			\
	internal_syscall##nr ("li\t%0, %2\t\t\t# " #name "\n\t",	\
			      "IK" (SYS_ify (name)),			\
			      0, err, args)
/* Issue a system call whose NUMBER is only known at run time.  The
   number is staged in the call-saved register $s0 (via __s0) and copied
   into $v0 with MOVE32 so the pre-2.6.36 syscall restart convention
   described above keeps working.  */
#undef INTERNAL_SYSCALL_NCS
#define INTERNAL_SYSCALL_NCS(number, err, nr, args...)			\
	internal_syscall##nr (MOVE32 "\t%0, %2\n\t",			\
			      "r" (__s0),				\
			      number, err, args)
/* Inline expansion of a 0-argument syscall.  V0_INIT is the single
   instruction that loads $v0 with the syscall number, taken either from
   INPUT (an immediate constraint) or from NUMBER staged in $s0.  The
   result comes back in $v0 and the kernel error flag in $a3, which is
   copied to ERR.  Note the scrape of this file had dropped the inner
   compound block and the closing `})' of the statement expression; they
   are restored here.  */
#define internal_syscall0(v0_init, input, number, err, dummy...)	\
({									\
	long _sys_result;						\
									\
	{								\
	register long long __s0 asm ("$16") __attribute__ ((unused))	\
	  = (number);							\
	register long long __v0 asm ("$2");				\
	register long long __a3 asm ("$7");				\
	__asm__ volatile (						\
	".set\tnoreorder\n\t"						\
	v0_init								\
	"syscall\n\t"							\
	".set\treorder"							\
	: "=r" (__v0), "=r" (__a3)					\
	: input								\
	: __SYSCALL_CLOBBERS);						\
	err = __a3;							\
	_sys_result = __v0;						\
	}								\
									\
	_sys_result;							\
})
/* Inline expansion of a 1-argument syscall: ARG1 goes in $a0; see
   internal_syscall0 for the V0_INIT/INPUT/NUMBER/ERR protocol.  The
   inner compound block and closing `})' lost in the scrape are
   restored.  */
#define internal_syscall1(v0_init, input, number, err, arg1)		\
({									\
	long _sys_result;						\
									\
	{								\
	register long long __s0 asm ("$16") __attribute__ ((unused))	\
	  = (number);							\
	register long long __v0 asm ("$2");				\
	register long long __a0 asm ("$4") = ARGIFY (arg1);		\
	register long long __a3 asm ("$7");				\
	__asm__ volatile (						\
	".set\tnoreorder\n\t"						\
	v0_init								\
	"syscall\n\t"							\
	".set\treorder"							\
	: "=r" (__v0), "=r" (__a3)					\
	: input, "r" (__a0)						\
	: __SYSCALL_CLOBBERS);						\
	err = __a3;							\
	_sys_result = __v0;						\
	}								\
									\
	_sys_result;							\
})
/* Inline expansion of a 2-argument syscall: ARG1/ARG2 in $a0/$a1; see
   internal_syscall0 for the common protocol.  Restores the inner
   compound block and closing `})' dropped by the scrape.  */
#define internal_syscall2(v0_init, input, number, err, arg1, arg2)	\
({									\
	long _sys_result;						\
									\
	{								\
	register long long __s0 asm ("$16") __attribute__ ((unused))	\
	  = (number);							\
	register long long __v0 asm ("$2");				\
	register long long __a0 asm ("$4") = ARGIFY (arg1);		\
	register long long __a1 asm ("$5") = ARGIFY (arg2);		\
	register long long __a3 asm ("$7");				\
	__asm__ volatile (						\
	".set\tnoreorder\n\t"						\
	v0_init								\
	"syscall\n\t"							\
	".set\treorder"							\
	: "=r" (__v0), "=r" (__a3)					\
	: input, "r" (__a0), "r" (__a1)					\
	: __SYSCALL_CLOBBERS);						\
	err = __a3;							\
	_sys_result = __v0;						\
	}								\
									\
	_sys_result;							\
})
/* Inline expansion of a 3-argument syscall: ARG1..ARG3 in $a0..$a2; see
   internal_syscall0 for the common protocol.  Restores the inner
   compound block and closing `})' dropped by the scrape.  */
#define internal_syscall3(v0_init, input, number, err,			\
			  arg1, arg2, arg3)				\
({									\
	long _sys_result;						\
									\
	{								\
	register long long __s0 asm ("$16") __attribute__ ((unused))	\
	  = (number);							\
	register long long __v0 asm ("$2");				\
	register long long __a0 asm ("$4") = ARGIFY (arg1);		\
	register long long __a1 asm ("$5") = ARGIFY (arg2);		\
	register long long __a2 asm ("$6") = ARGIFY (arg3);		\
	register long long __a3 asm ("$7");				\
	__asm__ volatile (						\
	".set\tnoreorder\n\t"						\
	v0_init								\
	"syscall\n\t"							\
	".set\treorder"							\
	: "=r" (__v0), "=r" (__a3)					\
	: input, "r" (__a0), "r" (__a1), "r" (__a2)			\
	: __SYSCALL_CLOBBERS);						\
	err = __a3;							\
	_sys_result = __v0;						\
	}								\
									\
	_sys_result;							\
})
/* Inline expansion of a 4-argument syscall: ARG4 occupies $a3, which
   doubles as the kernel error-flag output, hence the "+r" read-write
   constraint.  Restores the inner compound block and closing `})'
   dropped by the scrape.  */
#define internal_syscall4(v0_init, input, number, err,			\
			  arg1, arg2, arg3, arg4)			\
({									\
	long _sys_result;						\
									\
	{								\
	register long long __s0 asm ("$16") __attribute__ ((unused))	\
	  = (number);							\
	register long long __v0 asm ("$2");				\
	register long long __a0 asm ("$4") = ARGIFY (arg1);		\
	register long long __a1 asm ("$5") = ARGIFY (arg2);		\
	register long long __a2 asm ("$6") = ARGIFY (arg3);		\
	register long long __a3 asm ("$7") = ARGIFY (arg4);		\
	__asm__ volatile (						\
	".set\tnoreorder\n\t"						\
	v0_init								\
	"syscall\n\t"							\
	".set\treorder"							\
	: "=r" (__v0), "+r" (__a3)					\
	: input, "r" (__a0), "r" (__a1), "r" (__a2)			\
	: __SYSCALL_CLOBBERS);						\
	err = __a3;							\
	_sys_result = __v0;						\
	}								\
									\
	_sys_result;							\
})
/* Inline expansion of a 5-argument syscall: ARG5 in $a4 ($8) per the
   n32 calling convention; $a3 is read-write as in internal_syscall4.
   Restores the inner compound block and closing `})' dropped by the
   scrape.  */
#define internal_syscall5(v0_init, input, number, err,			\
			  arg1, arg2, arg3, arg4, arg5)			\
({									\
	long _sys_result;						\
									\
	{								\
	register long long __s0 asm ("$16") __attribute__ ((unused))	\
	  = (number);							\
	register long long __v0 asm ("$2");				\
	register long long __a0 asm ("$4") = ARGIFY (arg1);		\
	register long long __a1 asm ("$5") = ARGIFY (arg2);		\
	register long long __a2 asm ("$6") = ARGIFY (arg3);		\
	register long long __a3 asm ("$7") = ARGIFY (arg4);		\
	register long long __a4 asm ("$8") = ARGIFY (arg5);		\
	__asm__ volatile (						\
	".set\tnoreorder\n\t"						\
	v0_init								\
	"syscall\n\t"							\
	".set\treorder"							\
	: "=r" (__v0), "+r" (__a3)					\
	: input, "r" (__a0), "r" (__a1), "r" (__a2), "r" (__a4)		\
	: __SYSCALL_CLOBBERS);						\
	err = __a3;							\
	_sys_result = __v0;						\
	}								\
									\
	_sys_result;							\
})
/* Inline expansion of a 6-argument syscall: ARG5/ARG6 in $a4/$a5
   ($8/$9) per the n32 calling convention; $a3 is read-write as in
   internal_syscall4.  Restores the inner compound block and closing
   `})' dropped by the scrape.  */
#define internal_syscall6(v0_init, input, number, err,			\
			  arg1, arg2, arg3, arg4, arg5, arg6)		\
({									\
	long _sys_result;						\
									\
	{								\
	register long long __s0 asm ("$16") __attribute__ ((unused))	\
	  = (number);							\
	register long long __v0 asm ("$2");				\
	register long long __a0 asm ("$4") = ARGIFY (arg1);		\
	register long long __a1 asm ("$5") = ARGIFY (arg2);		\
	register long long __a2 asm ("$6") = ARGIFY (arg3);		\
	register long long __a3 asm ("$7") = ARGIFY (arg4);		\
	register long long __a4 asm ("$8") = ARGIFY (arg5);		\
	register long long __a5 asm ("$9") = ARGIFY (arg6);		\
	__asm__ volatile (						\
	".set\tnoreorder\n\t"						\
	v0_init								\
	"syscall\n\t"							\
	".set\treorder"							\
	: "=r" (__v0), "+r" (__a3)					\
	: input, "r" (__a0), "r" (__a1), "r" (__a2), "r" (__a4),	\
	  "r" (__a5)							\
	: __SYSCALL_CLOBBERS);						\
	err = __a3;							\
	_sys_result = __v0;						\
	}								\
									\
	_sys_result;							\
})
/* Registers the kernel may clobber across a syscall: the caller-saved
   GPRs not used for arguments or results, the HI/LO multiply unit
   registers, and memory.  */
#define __SYSCALL_CLOBBERS "$1", "$3", "$10", "$11", "$12", "$13",	\
	"$14", "$15", "$24", "$25", "hi", "lo", "memory"
297 #endif /* __ASSEMBLER__ */
/* Pointer mangling is not yet supported for MIPS, so these are no-ops
   that merely evaluate their argument (keeping callers warning-free).  */
#define PTR_MANGLE(var) (void) (var)
#define PTR_DEMANGLE(var) (void) (var)
303 #endif /* linux/mips/sysdep.h */