/* Source: glibc sysdeps/unix/sysv/linux/mips/mips32/sysdep.h
   (blob e5025bad5bda6ab7059597aa9026f5896fa47be2; last change:
   "Update copyright dates with scripts/update-copyrights.").  */
/* Copyright (C) 2000-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <http://www.gnu.org/licenses/>.  */
18 #ifndef _LINUX_MIPS_MIPS32_SYSDEP_H
19 #define _LINUX_MIPS_MIPS32_SYSDEP_H 1
21 /* There is some commonality. */
22 #include <sysdeps/unix/mips/mips32/sysdep.h>
24 #include <tls.h>
26 /* In order to get __set_errno() definition in INLINE_SYSCALL. */
27 #ifndef __ASSEMBLER__
28 #include <errno.h>
29 #endif
/* For Linux we can use the system call table in the header file
   /usr/include/asm/unistd.h
   of the kernel.  But these symbols do not follow the SYS_* syntax
   so we have to redefine the `SYS_ify' macro here.  */
#undef SYS_ify
#define SYS_ify(syscall_name)	__NR_##syscall_name
38 #ifdef __ASSEMBLER__
/* We don't want the label for the error handler to be visible in the symbol
   table when we define it here.  Use a numeric backward reference (99b)
   instead of a named label.  */
#ifdef __PIC__
# define SYSCALL_ERROR_LABEL 99b
#endif
46 #else /* ! __ASSEMBLER__ */
/* Define a macro which expands into the inline wrapper code for a system
   call: invoke the syscall, and on failure store the kernel's error code
   into errno and yield -1, matching the POSIX C library convention.  */
#undef INLINE_SYSCALL
#define INLINE_SYSCALL(name, nr, args...)				\
  ({ INTERNAL_SYSCALL_DECL (err);					\
     long result_var = INTERNAL_SYSCALL (name, err, nr, args);		\
     if (INTERNAL_SYSCALL_ERROR_P (result_var, err))			\
       {								\
	 __set_errno (INTERNAL_SYSCALL_ERRNO (result_var, err));	\
	 result_var = -1L;						\
       }								\
     result_var; })
/* Declare the variable that carries the kernel's error flag (the value
   the syscall returns in $a3) out of INTERNAL_SYSCALL.  */
#undef INTERNAL_SYSCALL_DECL
#define INTERNAL_SYSCALL_DECL(err) long err __attribute__ ((unused))

/* A syscall failed iff ERR (the $a3 flag) is nonzero; VAL is unused.  */
#undef INTERNAL_SYSCALL_ERROR_P
#define INTERNAL_SYSCALL_ERROR_P(val, err)   ((void) (val), (long) (err))

/* On failure the result value VAL already holds the errno code.  */
#undef INTERNAL_SYSCALL_ERRNO
#define INTERNAL_SYSCALL_ERRNO(val, err)     ((void) (err), val)
/* Note that the original Linux syscall restart convention required the
   instruction immediately preceding SYSCALL to initialize $v0 with the
   syscall number.  Then if a restart triggered, $v0 would have been
   clobbered by the syscall interrupted, and needed to be reinitialized.
   The kernel would decrement the PC by 4 before switching back to the
   user mode so that $v0 had been reloaded before SYSCALL was executed
   again.  This implied the place $v0 was loaded from must have been
   preserved across a syscall, e.g. an immediate, static register, stack
   slot, etc.

   The convention was relaxed in Linux with a change applied to the kernel
   GIT repository as commit 96187fb0bc30cd7919759d371d810e928048249d, that
   first appeared in the 2.6.36 release.  Since then the kernel has had
   code that reloads $v0 upon syscall restart and resumes right at the
   SYSCALL instruction, so no special arrangement is needed anymore.

   For backwards compatibility with existing kernel binaries we support
   the old convention by choosing the instruction preceding SYSCALL
   carefully.  This also means we have to force a 32-bit encoding of the
   microMIPS MOVE instruction if one is used.  */

#ifdef __mips_micromips
# define MOVE32 "move32"
#else
# define MOVE32 "move"
#endif
#undef INTERNAL_SYSCALL
#undef INTERNAL_SYSCALL_NCS

#ifdef __mips16
/* There's no MIPS16 syscall instruction, so we go through out-of-line
   standard MIPS wrappers.  These do use inline snippets below though,
   through INTERNAL_SYSCALL_MIPS16.  Spilling the syscall number to
   memory gives the best code in that case, avoiding the need to save
   and restore a static register.  */

# include <mips16-syscall.h>

/* Map a symbolic syscall name onto the number-based entry point.  */
# define INTERNAL_SYSCALL(name, err, nr, args...)	\
	INTERNAL_SYSCALL_NCS (SYS_ify (name), err, nr, args)

/* Call the out-of-line __mips16_syscallNR wrapper; it returns the
   result value and the error flag combined, unpacked here via
   union __mips16_syscall_return into its v0 and v1 halves.  */
# define INTERNAL_SYSCALL_NCS(number, err, nr, args...)	\
({							\
	union __mips16_syscall_return ret;		\
	ret.val = __mips16_syscall##nr (args, number);	\
	err = ret.reg.v1;				\
	ret.reg.v0;					\
})

/* Used by the out-of-line wrappers: reload the syscall number into $v0
   from a memory slot, which is preserved across a syscall restart.  */
# define INTERNAL_SYSCALL_MIPS16(number, err, nr, args...)	\
	internal_syscall##nr ("lw\t%0, %2\n\t",			\
			      "R" (number),			\
			      0, err, args)

#else /* !__mips16 */
/* Constant syscall number: load it as an immediate, which is trivially
   preserved across a syscall restart (old kernel convention).  */
# define INTERNAL_SYSCALL(name, err, nr, args...)			\
	internal_syscall##nr ("li\t%0, %2\t\t\t# " #name "\n\t",	\
			      "IK" (SYS_ify (name)),			\
			      0, err, args)

/* Non-constant syscall number: keep it in the call-saved $s0 (bound to
   __s0 by internal_syscallN) and copy it to $v0 right before SYSCALL.  */
# define INTERNAL_SYSCALL_NCS(number, err, nr, args...)	\
	internal_syscall##nr (MOVE32 "\t%0, %2\n\t",	\
			      "r" (__s0),		\
			      number, err, args)

#endif /* !__mips16 */
/* Issue a syscall with no arguments.  V0_INIT is the asm fragment that
   loads the syscall number into $v0, INPUT the matching input constraint,
   NUMBER the value bound to $s0 for the indirect (NCS) case, and ERR
   receives the kernel error flag from $a3.  */
#define internal_syscall0(v0_init, input, number, err, dummy...)	\
({									\
	long _sys_result;						\
									\
	{								\
	/* $s0 feeds the MOVE32 in the NCS case; being call-saved it	\
	   is preserved across a syscall restart.  */			\
	register long __s0 asm ("$16") __attribute__ ((unused))		\
	  = (number);							\
	register long __v0 asm ("$2");					\
	register long __a3 asm ("$7");					\
	__asm__ volatile (						\
	".set\tnoreorder\n\t"						\
	v0_init								\
	"syscall\n\t"							\
	".set\treorder"							\
	: "=r" (__v0), "=r" (__a3)					\
	: input								\
	: __SYSCALL_CLOBBERS);						\
	err = __a3;							\
	_sys_result = __v0;						\
	}								\
									\
	_sys_result;							\
})
/* Issue a syscall with one argument, passed in $a0.  */
#define internal_syscall1(v0_init, input, number, err, arg1)		\
({									\
	long _sys_result;						\
									\
	{								\
	register long __s0 asm ("$16") __attribute__ ((unused))		\
	  = (number);							\
	register long __v0 asm ("$2");					\
	register long __a0 asm ("$4") = (long) (arg1);			\
	register long __a3 asm ("$7");					\
	__asm__ volatile (						\
	".set\tnoreorder\n\t"						\
	v0_init								\
	"syscall\n\t"							\
	".set\treorder"							\
	: "=r" (__v0), "=r" (__a3)					\
	: input, "r" (__a0)						\
	: __SYSCALL_CLOBBERS);						\
	err = __a3;							\
	_sys_result = __v0;						\
	}								\
									\
	_sys_result;							\
})
/* Issue a syscall with two arguments, passed in $a0 and $a1.  */
#define internal_syscall2(v0_init, input, number, err, arg1, arg2)	\
({									\
	long _sys_result;						\
									\
	{								\
	register long __s0 asm ("$16") __attribute__ ((unused))		\
	  = (number);							\
	register long __v0 asm ("$2");					\
	register long __a0 asm ("$4") = (long) (arg1);			\
	register long __a1 asm ("$5") = (long) (arg2);			\
	register long __a3 asm ("$7");					\
	__asm__ volatile (						\
	".set\tnoreorder\n\t"						\
	v0_init								\
	"syscall\n\t"							\
	".set\treorder"							\
	: "=r" (__v0), "=r" (__a3)					\
	: input, "r" (__a0), "r" (__a1)					\
	: __SYSCALL_CLOBBERS);						\
	err = __a3;							\
	_sys_result = __v0;						\
	}								\
									\
	_sys_result;							\
})
/* Issue a syscall with three arguments, passed in $a0-$a2.  */
#define internal_syscall3(v0_init, input, number, err,			\
			  arg1, arg2, arg3)				\
({									\
	long _sys_result;						\
									\
	{								\
	register long __s0 asm ("$16") __attribute__ ((unused))		\
	  = (number);							\
	register long __v0 asm ("$2");					\
	register long __a0 asm ("$4") = (long) (arg1);			\
	register long __a1 asm ("$5") = (long) (arg2);			\
	register long __a2 asm ("$6") = (long) (arg3);			\
	register long __a3 asm ("$7");					\
	__asm__ volatile (						\
	".set\tnoreorder\n\t"						\
	v0_init								\
	"syscall\n\t"							\
	".set\treorder"							\
	: "=r" (__v0), "=r" (__a3)					\
	: input, "r" (__a0), "r" (__a1), "r" (__a2)			\
	: __SYSCALL_CLOBBERS);						\
	err = __a3;							\
	_sys_result = __v0;						\
	}								\
									\
	_sys_result;							\
})
/* Issue a syscall with four arguments, passed in $a0-$a3.  $a3 is both
   an input (ARG4) and an output (the error flag), hence "+r".  */
#define internal_syscall4(v0_init, input, number, err,			\
			  arg1, arg2, arg3, arg4)			\
({									\
	long _sys_result;						\
									\
	{								\
	register long __s0 asm ("$16") __attribute__ ((unused))		\
	  = (number);							\
	register long __v0 asm ("$2");					\
	register long __a0 asm ("$4") = (long) (arg1);			\
	register long __a1 asm ("$5") = (long) (arg2);			\
	register long __a2 asm ("$6") = (long) (arg3);			\
	register long __a3 asm ("$7") = (long) (arg4);			\
	__asm__ volatile (						\
	".set\tnoreorder\n\t"						\
	v0_init								\
	"syscall\n\t"							\
	".set\treorder"							\
	: "=r" (__v0), "+r" (__a3)					\
	: input, "r" (__a0), "r" (__a1), "r" (__a2)			\
	: __SYSCALL_CLOBBERS);						\
	err = __a3;							\
	_sys_result = __v0;						\
	}								\
									\
	_sys_result;							\
})
/* We need to use a frame pointer for the functions in which we
   adjust $sp around the syscall, or debug information and unwind
   information will be $sp relative and thus wrong during the syscall.  As
   of GCC 4.7, a live alloca result in the frame is sufficient to force
   the compiler to set one up.  */
#define FORCE_FRAME_POINTER \
  void *volatile __fp_force __attribute__ ((unused)) = alloca (4)
/* Issue a syscall with five arguments: $a0-$a3 plus ARG5 on the stack.
   $sp is dropped by 32 around SYSCALL and ARG5 (%6) stored at 16($sp);
   FORCE_FRAME_POINTER keeps debug/unwind info valid meanwhile.  */
#define internal_syscall5(v0_init, input, number, err,			\
			  arg1, arg2, arg3, arg4, arg5)			\
({									\
	long _sys_result;						\
									\
	FORCE_FRAME_POINTER;						\
	{								\
	register long __s0 asm ("$16") __attribute__ ((unused))		\
	  = (number);							\
	register long __v0 asm ("$2");					\
	register long __a0 asm ("$4") = (long) (arg1);			\
	register long __a1 asm ("$5") = (long) (arg2);			\
	register long __a2 asm ("$6") = (long) (arg3);			\
	register long __a3 asm ("$7") = (long) (arg4);			\
	__asm__ volatile (						\
	".set\tnoreorder\n\t"						\
	"subu\t$29, 32\n\t"						\
	"sw\t%6, 16($29)\n\t"						\
	v0_init								\
	"syscall\n\t"							\
	"addiu\t$29, 32\n\t"						\
	".set\treorder"							\
	: "=r" (__v0), "+r" (__a3)					\
	: input, "r" (__a0), "r" (__a1), "r" (__a2),			\
	  "r" ((long) (arg5))						\
	: __SYSCALL_CLOBBERS);						\
	err = __a3;							\
	_sys_result = __v0;						\
	}								\
									\
	_sys_result;							\
})
/* Issue a syscall with six arguments: $a0-$a3 plus ARG5/ARG6 stored at
   16($sp)/20($sp) inside a temporary 32-byte stack adjustment.  */
#define internal_syscall6(v0_init, input, number, err,			\
			  arg1, arg2, arg3, arg4, arg5, arg6)		\
({									\
	long _sys_result;						\
									\
	FORCE_FRAME_POINTER;						\
	{								\
	register long __s0 asm ("$16") __attribute__ ((unused))		\
	  = (number);							\
	register long __v0 asm ("$2");					\
	register long __a0 asm ("$4") = (long) (arg1);			\
	register long __a1 asm ("$5") = (long) (arg2);			\
	register long __a2 asm ("$6") = (long) (arg3);			\
	register long __a3 asm ("$7") = (long) (arg4);			\
	__asm__ volatile (						\
	".set\tnoreorder\n\t"						\
	"subu\t$29, 32\n\t"						\
	"sw\t%6, 16($29)\n\t"						\
	"sw\t%7, 20($29)\n\t"						\
	v0_init								\
	"syscall\n\t"							\
	"addiu\t$29, 32\n\t"						\
	".set\treorder"							\
	: "=r" (__v0), "+r" (__a3)					\
	: input, "r" (__a0), "r" (__a1), "r" (__a2),			\
	  "r" ((long) (arg5)), "r" ((long) (arg6))			\
	: __SYSCALL_CLOBBERS);						\
	err = __a3;							\
	_sys_result = __v0;						\
	}								\
									\
	_sys_result;							\
})
/* Issue a syscall with seven arguments: $a0-$a3 plus ARG5/ARG6/ARG7
   stored at 16($sp)/20($sp)/24($sp) inside a temporary 32-byte stack
   adjustment.  */
#define internal_syscall7(v0_init, input, number, err,			\
			  arg1, arg2, arg3, arg4, arg5, arg6, arg7)	\
({									\
	long _sys_result;						\
									\
	FORCE_FRAME_POINTER;						\
	{								\
	register long __s0 asm ("$16") __attribute__ ((unused))		\
	  = (number);							\
	register long __v0 asm ("$2");					\
	register long __a0 asm ("$4") = (long) (arg1);			\
	register long __a1 asm ("$5") = (long) (arg2);			\
	register long __a2 asm ("$6") = (long) (arg3);			\
	register long __a3 asm ("$7") = (long) (arg4);			\
	__asm__ volatile (						\
	".set\tnoreorder\n\t"						\
	"subu\t$29, 32\n\t"						\
	"sw\t%6, 16($29)\n\t"						\
	"sw\t%7, 20($29)\n\t"						\
	"sw\t%8, 24($29)\n\t"						\
	v0_init								\
	"syscall\n\t"							\
	"addiu\t$29, 32\n\t"						\
	".set\treorder"							\
	: "=r" (__v0), "+r" (__a3)					\
	: input, "r" (__a0), "r" (__a1), "r" (__a2),			\
	  "r" ((long) (arg5)), "r" ((long) (arg6)), "r" ((long) (arg7))	\
	: __SYSCALL_CLOBBERS);						\
	err = __a3;							\
	_sys_result = __v0;						\
	}									\
									\
	_sys_result;							\
})
/* Registers the internal_syscallN asm statements must declare as
   clobbered around SYSCALL, beyond their explicit operands: the
   remaining temporaries, $at, $t9, HI/LO and "memory".  */
#define __SYSCALL_CLOBBERS "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", \
	"$14", "$15", "$24", "$25", "hi", "lo", "memory"
373 #endif /* __ASSEMBLER__ */
/* Pointer mangling is not yet supported for MIPS; define the hooks as
   no-ops that merely evaluate their argument.  */
#define PTR_MANGLE(var) (void) (var)
#define PTR_DEMANGLE(var) (void) (var)
379 #endif /* linux/mips/mips32/sysdep.h */