/* Copyright (C) 1992-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Brendan Kehoe (brendan@zen.org).

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <http://www.gnu.org/licenses/>.  */

#include <sysdeps/unix/sysdep.h>

#ifdef __ASSEMBLER__

#ifdef __linux__
# include <alpha/regdef.h>
#else
# include <regdef.h>
#endif

#ifdef IS_IN_rtld
# include <dl-sysdep.h>  /* Defines RTLD_PRIVATE_ERRNO.  */
#endif

#define __LABEL(x)  x##:

#define LEAF(name, framesize) \
  .globl name; \
  .align 4; \
  .ent name, 0; \
  __LABEL(name) \
  .frame sp, framesize, ra

#define ENTRY(name) \
  .globl name; \
  .align 4; \
  .ent name, 0; \
  __LABEL(name) \
  .frame sp, 0, ra

/* Mark the end of function SYM.  */
#undef END
#define END(sym)  .end sym

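/* A minimal usage sketch (the function name is invented for
   illustration): a trivial leaf routine in an assembler source file
   could be written roughly as

	ENTRY(__example_identity)
		.prologue 0
		mov	a0, v0
		ret
	END(__example_identity)

   ENTRY emits the symbol, alignment, .ent and .frame directives, and
   END closes the matching .ent.  */
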
#ifdef PROF
# define PSEUDO_PROF \
	.set noat; \
	lda AT, _mcount; \
	jsr AT, (AT), _mcount; \
	.set at
#else
# define PSEUDO_PROF
#endif

#ifdef PROF
# define PSEUDO_PROLOGUE \
	.frame sp, 0, ra; \
	ldgp gp, 0(pv); \
	PSEUDO_PROF; \
	.prologue 1
#elif defined PIC
# define PSEUDO_PROLOGUE \
	.frame sp, 0, ra; \
	.prologue 0
#else
# define PSEUDO_PROLOGUE \
	.frame sp, 0, ra; \
	ldgp gp, 0(pv); \
	.prologue 1
#endif /* PROF */

#ifdef PROF
# define USEPV_PROF std
#else
# define USEPV_PROF no
#endif

#if RTLD_PRIVATE_ERRNO
# define SYSCALL_ERROR_LABEL $syscall_error
# define SYSCALL_ERROR_HANDLER \
$syscall_error: \
	stl v0, rtld_errno(gp) !gprel; \
	lda v0, -1; \
	ret
# define SYSCALL_ERROR_FALLTHRU
#elif defined(PIC)
# define SYSCALL_ERROR_LABEL __syscall_error !samegp
# define SYSCALL_ERROR_HANDLER
# define SYSCALL_ERROR_FALLTHRU br SYSCALL_ERROR_LABEL
#else
# define SYSCALL_ERROR_LABEL $syscall_error
# define SYSCALL_ERROR_HANDLER \
$syscall_error: \
	jmp $31, __syscall_error
# define SYSCALL_ERROR_FALLTHRU
#endif /* RTLD_PRIVATE_ERRNO */

/* Overridden by specific syscalls.  */
#undef PSEUDO_PREPARE_ARGS
#define PSEUDO_PREPARE_ARGS  /* Nothing.  */

#define PSEUDO(name, syscall_name, args) \
  .globl name; \
  .align 4; \
  .ent name, 0; \
  __LABEL(name) \
  PSEUDO_PROLOGUE; \
  PSEUDO_PREPARE_ARGS \
  lda v0, SYS_ify(syscall_name); \
  call_pal PAL_callsys; \
  bne a3, SYSCALL_ERROR_LABEL

#undef PSEUDO_END
#define PSEUDO_END(sym) \
  SYSCALL_ERROR_HANDLER; \
  END(sym)

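/* A minimal usage sketch (the stub name is invented for illustration):
   a three-argument system call stub would normally be built from these
   macros roughly as

	PSEUDO (__example_read, read, 3)
		ret
	PSEUDO_END (__example_read)

   PSEUDO emits the prologue, loads the syscall number into v0, traps
   via call_pal, and branches to SYSCALL_ERROR_LABEL when the kernel
   flags an error in a3; the instructions following the macro form the
   successful-return path, and PSEUDO_END emits the error handler (if
   any) and closes the function.  */
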
#define PSEUDO_NOERRNO(name, syscall_name, args) \
  .globl name; \
  .align 4; \
  .ent name, 0; \
  __LABEL(name) \
  PSEUDO_PROLOGUE; \
  PSEUDO_PREPARE_ARGS \
  lda v0, SYS_ify(syscall_name); \
  call_pal PAL_callsys;

#undef PSEUDO_END_NOERRNO
#define PSEUDO_END_NOERRNO(sym)  END(sym)

#define ret_NOERRNO ret

#define PSEUDO_ERRVAL(name, syscall_name, args) \
  .globl name; \
  .align 4; \
  .ent name, 0; \
  __LABEL(name) \
  PSEUDO_PROLOGUE; \
  PSEUDO_PREPARE_ARGS \
  lda v0, SYS_ify(syscall_name); \
  call_pal PAL_callsys;

#undef PSEUDO_END_ERRVAL
#define PSEUDO_END_ERRVAL(sym)  END(sym)

#define ret_ERRVAL ret

#define r0  v0
#define r1  a4

#define MOVE(x,y)  mov x,y

#else /* !ASSEMBLER */

/* In order to get the __set_errno() definition used in INLINE_SYSCALL.  */
#include <errno.h>

/* ??? Linux needs to be able to override INLINE_SYSCALL for one
   particular special case.  Make this easy.  */

#undef INLINE_SYSCALL
#define INLINE_SYSCALL(name, nr, args...) \
	INLINE_SYSCALL1(name, nr, args)

#define INLINE_SYSCALL1(name, nr, args...) \
({ \
	long _sc_ret, _sc_err; \
	inline_syscall##nr(__NR_##name, args); \
	if (__builtin_expect (_sc_err, 0)) \
	  { \
	    __set_errno (_sc_ret); \
	    _sc_ret = -1L; \
	  } \
	_sc_ret; \
})

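/* A minimal usage sketch (the wrapper below is hypothetical; the real
   wrappers live elsewhere in glibc):

	int
	__example_close (int fd)
	{
	  return INLINE_SYSCALL (close, 1, fd);
	}

   On success this returns the kernel's result; on failure it sets
   errno from the value the kernel returned in v0 and yields -1.  */
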
#define INTERNAL_SYSCALL(name, err_out, nr, args...) \
	INTERNAL_SYSCALL1(name, err_out, nr, args)

#define INTERNAL_SYSCALL1(name, err_out, nr, args...) \
	INTERNAL_SYSCALL_NCS(__NR_##name, err_out, nr, args)

#define INTERNAL_SYSCALL_NCS(name, err_out, nr, args...) \
({ \
	long _sc_ret, _sc_err; \
	inline_syscall##nr(name, args); \
	err_out = _sc_err; \
	_sc_ret; \
})

#define INTERNAL_SYSCALL_DECL(err) \
	long int err __attribute__((unused))

/* The normal Alpha calling convention sign-extends 32-bit quantities
   no matter what the "real" sign of the 32-bit type.  We want to
   preserve that when filling in values for the kernel.  */
#define syscall_promote(arg) \
	(sizeof(arg) == 4 ? (long)(int)(long)(arg) : (long)(arg))

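/* For example, if ARG is a 32-bit `int' holding -1, syscall_promote
   yields the sign-extended 64-bit value -1L; a `long' or pointer
   argument passes through unchanged.  */
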
/* Make sure to "use" the variable that we're not returning,
   in order to suppress unused variable warnings.  */
#define INTERNAL_SYSCALL_ERROR_P(val, err)  ((void)val, err)
#define INTERNAL_SYSCALL_ERRNO(val, err)    ((void)err, val)

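/* A minimal usage sketch of the INTERNAL_* interface (the surrounding
   code and FD are hypothetical):

	INTERNAL_SYSCALL_DECL (err);
	long ret = INTERNAL_SYSCALL (close, err, 1, fd);
	if (INTERNAL_SYSCALL_ERROR_P (ret, err))
	  return -INTERNAL_SYSCALL_ERRNO (ret, err);

   Unlike INLINE_SYSCALL, this form never touches errno; the caller
   inspects the error flag (the a3 value captured in ERR) itself.  */
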
#define inline_syscall_clobbers \
	"$1", "$2", "$3", "$4", "$5", "$6", "$7", "$8", \
	"$22", "$23", "$24", "$25", "$27", "$28", "memory"

/* It is moderately important optimization-wise to limit the lifetime
   of the hard-register variables as much as possible.  Thus we copy
   in/out as close to the asm as possible.  */

#define inline_syscall0(name, args...) \
{ \
	register long _sc_19 __asm__("$19"); \
	register long _sc_0 = name; \
	__asm__ __volatile__ \
	  ("callsys # %0 %1 <= %2" \
	   : "+v"(_sc_0), "=r"(_sc_19) \
	   : : inline_syscall_clobbers, \
	       "$16", "$17", "$18", "$20", "$21"); \
	_sc_ret = _sc_0, _sc_err = _sc_19; \
}

#define inline_syscall1(name, arg1) \
{ \
	register long _tmp_16 = syscall_promote (arg1); \
	register long _sc_0 = name; \
	register long _sc_16 __asm__("$16") = _tmp_16; \
	register long _sc_19 __asm__("$19"); \
	__asm__ __volatile__ \
	  ("callsys # %0 %1 <= %2 %3" \
	   : "+v"(_sc_0), "=r"(_sc_19), "+r"(_sc_16) \
	   : : inline_syscall_clobbers, \
	       "$17", "$18", "$20", "$21"); \
	_sc_ret = _sc_0, _sc_err = _sc_19; \
}

#define inline_syscall2(name, arg1, arg2) \
{ \
	register long _tmp_16 = syscall_promote (arg1); \
	register long _tmp_17 = syscall_promote (arg2); \
	register long _sc_0 = name; \
	register long _sc_16 __asm__("$16") = _tmp_16; \
	register long _sc_17 __asm__("$17") = _tmp_17; \
	register long _sc_19 __asm__("$19"); \
	__asm__ __volatile__ \
	  ("callsys # %0 %1 <= %2 %3 %4" \
	   : "+v"(_sc_0), "=r"(_sc_19), \
	     "+r"(_sc_16), "+r"(_sc_17) \
	   : : inline_syscall_clobbers, \
	       "$18", "$20", "$21"); \
	_sc_ret = _sc_0, _sc_err = _sc_19; \
}

#define inline_syscall3(name, arg1, arg2, arg3) \
{ \
	register long _tmp_16 = syscall_promote (arg1); \
	register long _tmp_17 = syscall_promote (arg2); \
	register long _tmp_18 = syscall_promote (arg3); \
	register long _sc_0 = name; \
	register long _sc_16 __asm__("$16") = _tmp_16; \
	register long _sc_17 __asm__("$17") = _tmp_17; \
	register long _sc_18 __asm__("$18") = _tmp_18; \
	register long _sc_19 __asm__("$19"); \
	__asm__ __volatile__ \
	  ("callsys # %0 %1 <= %2 %3 %4 %5" \
	   : "+v"(_sc_0), "=r"(_sc_19), "+r"(_sc_16), \
	     "+r"(_sc_17), "+r"(_sc_18) \
	   : : inline_syscall_clobbers, "$20", "$21"); \
	_sc_ret = _sc_0, _sc_err = _sc_19; \
}

#define inline_syscall4(name, arg1, arg2, arg3, arg4) \
{ \
	register long _tmp_16 = syscall_promote (arg1); \
	register long _tmp_17 = syscall_promote (arg2); \
	register long _tmp_18 = syscall_promote (arg3); \
	register long _tmp_19 = syscall_promote (arg4); \
	register long _sc_0 = name; \
	register long _sc_16 __asm__("$16") = _tmp_16; \
	register long _sc_17 __asm__("$17") = _tmp_17; \
	register long _sc_18 __asm__("$18") = _tmp_18; \
	register long _sc_19 __asm__("$19") = _tmp_19; \
	__asm__ __volatile__ \
	  ("callsys # %0 %1 <= %2 %3 %4 %5 %6" \
	   : "+v"(_sc_0), "+r"(_sc_19), "+r"(_sc_16), \
	     "+r"(_sc_17), "+r"(_sc_18) \
	   : : inline_syscall_clobbers, "$20", "$21"); \
	_sc_ret = _sc_0, _sc_err = _sc_19; \
}

#define inline_syscall5(name, arg1, arg2, arg3, arg4, arg5) \
{ \
	register long _tmp_16 = syscall_promote (arg1); \
	register long _tmp_17 = syscall_promote (arg2); \
	register long _tmp_18 = syscall_promote (arg3); \
	register long _tmp_19 = syscall_promote (arg4); \
	register long _tmp_20 = syscall_promote (arg5); \
	register long _sc_0 = name; \
	register long _sc_16 __asm__("$16") = _tmp_16; \
	register long _sc_17 __asm__("$17") = _tmp_17; \
	register long _sc_18 __asm__("$18") = _tmp_18; \
	register long _sc_19 __asm__("$19") = _tmp_19; \
	register long _sc_20 __asm__("$20") = _tmp_20; \
	__asm__ __volatile__ \
	  ("callsys # %0 %1 <= %2 %3 %4 %5 %6 %7" \
	   : "+v"(_sc_0), "+r"(_sc_19), "+r"(_sc_16), \
	     "+r"(_sc_17), "+r"(_sc_18), "+r"(_sc_20) \
	   : : inline_syscall_clobbers, "$21"); \
	_sc_ret = _sc_0, _sc_err = _sc_19; \
}

#define inline_syscall6(name, arg1, arg2, arg3, arg4, arg5, arg6) \
{ \
	register long _tmp_16 = syscall_promote (arg1); \
	register long _tmp_17 = syscall_promote (arg2); \
	register long _tmp_18 = syscall_promote (arg3); \
	register long _tmp_19 = syscall_promote (arg4); \
	register long _tmp_20 = syscall_promote (arg5); \
	register long _tmp_21 = syscall_promote (arg6); \
	register long _sc_0 = name; \
	register long _sc_16 __asm__("$16") = _tmp_16; \
	register long _sc_17 __asm__("$17") = _tmp_17; \
	register long _sc_18 __asm__("$18") = _tmp_18; \
	register long _sc_19 __asm__("$19") = _tmp_19; \
	register long _sc_20 __asm__("$20") = _tmp_20; \
	register long _sc_21 __asm__("$21") = _tmp_21; \
	__asm__ __volatile__ \
	  ("callsys # %0 %1 <= %2 %3 %4 %5 %6 %7 %8" \
	   : "+v"(_sc_0), "+r"(_sc_19), "+r"(_sc_16), \
	     "+r"(_sc_17), "+r"(_sc_18), "+r"(_sc_20), \
	     "+r"(_sc_21) \
	   : : inline_syscall_clobbers); \
	_sc_ret = _sc_0, _sc_err = _sc_19; \
}

#endif /* ASSEMBLER */

/* Pointer mangling support.  Note that tls access is slow enough that
   we don't deoptimize things by placing the pointer check value there.  */

#ifdef __ASSEMBLER__
# if defined NOT_IN_libc && defined IS_IN_rtld
#  define PTR_MANGLE(dst, src, tmp) \
	ldah tmp, __pointer_chk_guard_local($29) !gprelhigh; \
	ldq tmp, __pointer_chk_guard_local(tmp) !gprellow; \
	xor src, tmp, dst
#  define PTR_MANGLE2(dst, src, tmp) \
	xor src, tmp, dst
# elif defined SHARED
#  define PTR_MANGLE(dst, src, tmp) \
	ldq tmp, __pointer_chk_guard; \
	xor src, tmp, dst
# else
#  define PTR_MANGLE(dst, src, tmp) \
	ldq tmp, __pointer_chk_guard_local; \
	xor src, tmp, dst
# endif
# define PTR_MANGLE2(dst, src, tmp) \
	xor src, tmp, dst
# define PTR_DEMANGLE(dst, tmp)   PTR_MANGLE(dst, dst, tmp)
# define PTR_DEMANGLE2(dst, tmp)  PTR_MANGLE2(dst, dst, tmp)
#else
# include <stdint.h>
# if (defined NOT_IN_libc && defined IS_IN_rtld) \
     || (!defined SHARED && (!defined NOT_IN_libc || defined IS_IN_libpthread))
extern uintptr_t __pointer_chk_guard_local attribute_relro attribute_hidden;
#  define PTR_MANGLE(var) \
	(var) = (__typeof (var)) ((uintptr_t) (var) ^ __pointer_chk_guard_local)
# else
extern const uintptr_t __pointer_chk_guard attribute_relro;
#  define PTR_MANGLE(var) \
	(var) = (__typeof (var)) ((uintptr_t) (var) ^ __pointer_chk_guard)
# endif
# define PTR_DEMANGLE(var)  PTR_MANGLE(var)
#endif /* ASSEMBLER */

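/* A minimal C-level usage sketch (example_handler, fp and sig are
   invented names):

	void (*fp) (int) = example_handler;
	PTR_MANGLE (fp);
	...
	PTR_DEMANGLE (fp);
	fp (sig);

   Only the mangled value is ever stored; because XOR with the guard is
   its own inverse, PTR_DEMANGLE is simply PTR_MANGLE applied a second
   time, as defined above.  */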