[glibc.git] / sysdeps / unix / alpha / sysdep.h
/* Copyright (C) 1992, 1995, 1996, 2000, 2003, 2004, 2006, 2010, 2012
   Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Brendan Kehoe (brendan@zen.org).

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <http://www.gnu.org/licenses/>.  */
#include <sysdeps/unix/sysdep.h>

#ifdef __ASSEMBLER__

#ifdef __linux__
# include <alpha/regdef.h>
#else
# include <regdef.h>
#endif

#ifdef IS_IN_rtld
# include <dl-sysdep.h>  /* Defines RTLD_PRIVATE_ERRNO.  */
#endif

#ifdef __STDC__
#define __LABEL(x) x##:
#else
#define __LABEL(x) x/**/:
#endif
#define LEAF(name, framesize) \
  .globl name; \
  .align 4; \
  .ent name, 0; \
  __LABEL(name) \
  .frame sp, framesize, ra

#define ENTRY(name) \
  .globl name; \
  .align 4; \
  .ent name, 0; \
  __LABEL(name) \
  .frame sp, 0, ra

/* Mark the end of function SYM.  */
#undef END
#define END(sym) .end sym
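
/* For example, a hand-written leaf routine would typically be bracketed
   with these macros (the name __foo is only a placeholder):

	ENTRY(__foo)
	...instructions...
	ret
	END(__foo)

   ENTRY supplies the .globl, alignment and .ent/.frame directives plus
   the label itself; END closes the unit with the matching .end.  */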
#ifdef PROF
# define PSEUDO_PROLOGUE \
  .frame sp, 0, ra; \
  ldgp gp,0(pv); \
  .set noat; \
  lda AT,_mcount; \
  jsr AT,(AT),_mcount; \
  .set at; \
  .prologue 1
#elif defined PIC
# define PSEUDO_PROLOGUE \
  .frame sp, 0, ra; \
  .prologue 0
#else
# define PSEUDO_PROLOGUE \
  .frame sp, 0, ra; \
  ldgp gp,0(pv); \
  .prologue 1
#endif /* PROF */
#if RTLD_PRIVATE_ERRNO
# define SYSCALL_ERROR_LABEL $syscall_error
# define SYSCALL_ERROR_HANDLER \
  stl v0, rtld_errno(gp) !gprel; \
  lda v0, -1; \
  ret
#elif defined(PIC)
# define SYSCALL_ERROR_LABEL __syscall_error !samegp
# define SYSCALL_ERROR_HANDLER
#else
# define SYSCALL_ERROR_LABEL $syscall_error
# define SYSCALL_ERROR_HANDLER \
  jmp $31, __syscall_error
#endif /* RTLD_PRIVATE_ERRNO */

/* Overridden by specific syscalls.  */
#undef PSEUDO_PREPARE_ARGS
#define PSEUDO_PREPARE_ARGS  /* Nothing.  */
#define PSEUDO(name, syscall_name, args) \
  .globl name; \
  .align 4; \
  .ent name,0; \
  __LABEL(name) \
  PSEUDO_PROLOGUE; \
  PSEUDO_PREPARE_ARGS \
  lda v0, SYS_ify(syscall_name); \
  call_pal PAL_callsys; \
  bne a3, SYSCALL_ERROR_LABEL

#undef PSEUDO_END
#if defined(PIC) && !RTLD_PRIVATE_ERRNO
# define PSEUDO_END(sym) END(sym)
#else
# define PSEUDO_END(sym) \
$syscall_error: \
  SYSCALL_ERROR_HANDLER; \
  END(sym)
#endif
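
/* For example, an individual syscall stub built from these macros looks
   roughly like this (the names __foo and foo are only placeholders):

	PSEUDO (__foo, foo, 2)
	ret
	PSEUDO_END (__foo)

   PSEUDO emits the prologue, loads the syscall number into v0, traps
   into the kernel via call_pal, and branches to SYSCALL_ERROR_LABEL
   when a3 comes back nonzero; PSEUDO_END supplies the local error path
   in the configurations that need one.  */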
#define PSEUDO_NOERRNO(name, syscall_name, args) \
  .globl name; \
  .align 4; \
  .ent name,0; \
  __LABEL(name) \
  PSEUDO_PROLOGUE; \
  PSEUDO_PREPARE_ARGS \
  lda v0, SYS_ify(syscall_name); \
  call_pal PAL_callsys;

#undef PSEUDO_END_NOERRNO
#define PSEUDO_END_NOERRNO(sym) END(sym)

#define ret_NOERRNO ret

#define PSEUDO_ERRVAL(name, syscall_name, args) \
  .globl name; \
  .align 4; \
  .ent name,0; \
  __LABEL(name) \
  PSEUDO_PROLOGUE; \
  PSEUDO_PREPARE_ARGS \
  lda v0, SYS_ify(syscall_name); \
  call_pal PAL_callsys;

#undef PSEUDO_END_ERRVAL
#define PSEUDO_END_ERRVAL(sym) END(sym)

#define ret_ERRVAL ret

#define r0 v0
#define r1 a4

#define MOVE(x,y) mov x,y

#else /* !ASSEMBLER */
/* ??? Linux needs to be able to override INLINE_SYSCALL for one
   particular special case.  Make this easy.  */

#undef INLINE_SYSCALL
#define INLINE_SYSCALL(name, nr, args...) \
  INLINE_SYSCALL1(name, nr, args)
#define INLINE_SYSCALL1(name, nr, args...) \
({ \
  long _sc_ret, _sc_err; \
  inline_syscall##nr(__NR_##name, args); \
  if (__builtin_expect (_sc_err, 0)) \
    { \
      __set_errno (_sc_ret); \
      _sc_ret = -1L; \
    } \
  _sc_ret; \
})
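
/* For example, a C wrapper for a two-argument syscall might read
   (placeholder arguments):

	return INLINE_SYSCALL (rename, 2, old, new);

   On failure the kernel returns the error code in v0 and flags it in
   a3, so the macro stores the code into errno and evaluates to -1.  */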
#define INTERNAL_SYSCALL(name, err_out, nr, args...) \
  INTERNAL_SYSCALL1(name, err_out, nr, args)

#define INTERNAL_SYSCALL1(name, err_out, nr, args...) \
  INTERNAL_SYSCALL_NCS(__NR_##name, err_out, nr, args)
#define INTERNAL_SYSCALL_NCS(name, err_out, nr, args...) \
({ \
  long _sc_ret, _sc_err; \
  inline_syscall##nr(name, args); \
  err_out = _sc_err; \
  _sc_ret; \
})
#define INTERNAL_SYSCALL_DECL(err) \
  long int err __attribute__((unused))
/* The normal Alpha calling convention sign-extends 32-bit quantities
   no matter what the "real" sign of the 32-bit type.  We want to
   preserve that when filling in values for the kernel.  */
#define syscall_promote(arg) \
  (sizeof(arg) == 4 ? (long)(int)(long)(arg) : (long)(arg))
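
/* For example, with 'unsigned int u = 0x80000000', syscall_promote (u)
   yields the sign-extended long 0xffffffff80000000, the same bit
   pattern the normal calling convention would have produced, whereas a
   plain (long) cast would have zero-extended the value.  */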
/* Make sure to "use" the variable that we're not returning, in order
   to suppress unused variable warnings.  */
#define INTERNAL_SYSCALL_ERROR_P(val, err)  ((void)val, err)
#define INTERNAL_SYSCALL_ERRNO(val, err)    ((void)err, val)
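
/* A typical calling sequence (sketch with placeholder names) is:

	INTERNAL_SYSCALL_DECL (err);
	long ret = INTERNAL_SYSCALL (getpid, err, 0);
	if (INTERNAL_SYSCALL_ERROR_P (ret, err))
	  ... handle INTERNAL_SYSCALL_ERRNO (ret, err) ...

   Unlike INLINE_SYSCALL, this leaves errno untouched and lets the
   caller examine the raw v0 result and the a3 error flag directly.  */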
#define inline_syscall_clobbers \
  "$1", "$2", "$3", "$4", "$5", "$6", "$7", "$8", \
  "$22", "$23", "$24", "$25", "$27", "$28", "memory"

/* It is moderately important optimization-wise to limit the lifetime
   of the hard-register variables as much as possible.  Thus we copy
   in/out as close to the asm as possible.  */
#define inline_syscall0(name, args...) \
{ \
  register long _sc_19 __asm__("$19"); \
  register long _sc_0 = name; \
  __asm__ __volatile__ \
    ("callsys # %0 %1 <= %2" \
     : "+v"(_sc_0), "=r"(_sc_19) \
     : : inline_syscall_clobbers, \
         "$16", "$17", "$18", "$20", "$21"); \
  _sc_ret = _sc_0, _sc_err = _sc_19; \
}
#define inline_syscall1(name,arg1) \
{ \
  register long _tmp_16 = syscall_promote (arg1); \
  register long _sc_0 = name; \
  register long _sc_16 __asm__("$16") = _tmp_16; \
  register long _sc_19 __asm__("$19"); \
  __asm__ __volatile__ \
    ("callsys # %0 %1 <= %2 %3" \
     : "+v"(_sc_0), "=r"(_sc_19), "+r"(_sc_16) \
     : : inline_syscall_clobbers, \
         "$17", "$18", "$20", "$21"); \
  _sc_ret = _sc_0, _sc_err = _sc_19; \
}
#define inline_syscall2(name,arg1,arg2) \
{ \
  register long _tmp_16 = syscall_promote (arg1); \
  register long _tmp_17 = syscall_promote (arg2); \
  register long _sc_0 = name; \
  register long _sc_16 __asm__("$16") = _tmp_16; \
  register long _sc_17 __asm__("$17") = _tmp_17; \
  register long _sc_19 __asm__("$19"); \
  __asm__ __volatile__ \
    ("callsys # %0 %1 <= %2 %3 %4" \
     : "+v"(_sc_0), "=r"(_sc_19), \
       "+r"(_sc_16), "+r"(_sc_17) \
     : : inline_syscall_clobbers, \
         "$18", "$20", "$21"); \
  _sc_ret = _sc_0, _sc_err = _sc_19; \
}
#define inline_syscall3(name,arg1,arg2,arg3) \
{ \
  register long _tmp_16 = syscall_promote (arg1); \
  register long _tmp_17 = syscall_promote (arg2); \
  register long _tmp_18 = syscall_promote (arg3); \
  register long _sc_0 = name; \
  register long _sc_16 __asm__("$16") = _tmp_16; \
  register long _sc_17 __asm__("$17") = _tmp_17; \
  register long _sc_18 __asm__("$18") = _tmp_18; \
  register long _sc_19 __asm__("$19"); \
  __asm__ __volatile__ \
    ("callsys # %0 %1 <= %2 %3 %4 %5" \
     : "+v"(_sc_0), "=r"(_sc_19), "+r"(_sc_16), \
       "+r"(_sc_17), "+r"(_sc_18) \
     : : inline_syscall_clobbers, "$20", "$21"); \
  _sc_ret = _sc_0, _sc_err = _sc_19; \
}
#define inline_syscall4(name,arg1,arg2,arg3,arg4) \
{ \
  register long _tmp_16 = syscall_promote (arg1); \
  register long _tmp_17 = syscall_promote (arg2); \
  register long _tmp_18 = syscall_promote (arg3); \
  register long _tmp_19 = syscall_promote (arg4); \
  register long _sc_0 = name; \
  register long _sc_16 __asm__("$16") = _tmp_16; \
  register long _sc_17 __asm__("$17") = _tmp_17; \
  register long _sc_18 __asm__("$18") = _tmp_18; \
  register long _sc_19 __asm__("$19") = _tmp_19; \
  __asm__ __volatile__ \
    ("callsys # %0 %1 <= %2 %3 %4 %5 %6" \
     : "+v"(_sc_0), "+r"(_sc_19), "+r"(_sc_16), \
       "+r"(_sc_17), "+r"(_sc_18) \
     : : inline_syscall_clobbers, "$20", "$21"); \
  _sc_ret = _sc_0, _sc_err = _sc_19; \
}
#define inline_syscall5(name,arg1,arg2,arg3,arg4,arg5) \
{ \
  register long _tmp_16 = syscall_promote (arg1); \
  register long _tmp_17 = syscall_promote (arg2); \
  register long _tmp_18 = syscall_promote (arg3); \
  register long _tmp_19 = syscall_promote (arg4); \
  register long _tmp_20 = syscall_promote (arg5); \
  register long _sc_0 = name; \
  register long _sc_16 __asm__("$16") = _tmp_16; \
  register long _sc_17 __asm__("$17") = _tmp_17; \
  register long _sc_18 __asm__("$18") = _tmp_18; \
  register long _sc_19 __asm__("$19") = _tmp_19; \
  register long _sc_20 __asm__("$20") = _tmp_20; \
  __asm__ __volatile__ \
    ("callsys # %0 %1 <= %2 %3 %4 %5 %6 %7" \
     : "+v"(_sc_0), "+r"(_sc_19), "+r"(_sc_16), \
       "+r"(_sc_17), "+r"(_sc_18), "+r"(_sc_20) \
     : : inline_syscall_clobbers, "$21"); \
  _sc_ret = _sc_0, _sc_err = _sc_19; \
}
#define inline_syscall6(name,arg1,arg2,arg3,arg4,arg5,arg6) \
{ \
  register long _tmp_16 = syscall_promote (arg1); \
  register long _tmp_17 = syscall_promote (arg2); \
  register long _tmp_18 = syscall_promote (arg3); \
  register long _tmp_19 = syscall_promote (arg4); \
  register long _tmp_20 = syscall_promote (arg5); \
  register long _tmp_21 = syscall_promote (arg6); \
  register long _sc_0 = name; \
  register long _sc_16 __asm__("$16") = _tmp_16; \
  register long _sc_17 __asm__("$17") = _tmp_17; \
  register long _sc_18 __asm__("$18") = _tmp_18; \
  register long _sc_19 __asm__("$19") = _tmp_19; \
  register long _sc_20 __asm__("$20") = _tmp_20; \
  register long _sc_21 __asm__("$21") = _tmp_21; \
  __asm__ __volatile__ \
    ("callsys # %0 %1 <= %2 %3 %4 %5 %6 %7 %8" \
     : "+v"(_sc_0), "+r"(_sc_19), "+r"(_sc_16), \
       "+r"(_sc_17), "+r"(_sc_18), "+r"(_sc_20), \
       "+r"(_sc_21) \
     : : inline_syscall_clobbers); \
  _sc_ret = _sc_0, _sc_err = _sc_19; \
}
/* Pointer mangling support.  Note that tls access is slow enough that
   we don't deoptimize things by placing the pointer check value there.  */

#include <stdint.h>

#if defined NOT_IN_libc && defined IS_IN_rtld
# ifdef __ASSEMBLER__
#  define PTR_MANGLE(dst, src, tmp) \
  ldah tmp, __pointer_chk_guard_local($29) !gprelhigh; \
  ldq tmp, __pointer_chk_guard_local(tmp) !gprellow; \
  xor src, tmp, dst
#  define PTR_MANGLE2(dst, src, tmp) \
  xor src, tmp, dst
#  define PTR_DEMANGLE(dst, tmp)  PTR_MANGLE(dst, dst, tmp)
#  define PTR_DEMANGLE2(dst, tmp) PTR_MANGLE2(dst, dst, tmp)
# else
extern uintptr_t __pointer_chk_guard_local attribute_relro attribute_hidden;
#  define PTR_MANGLE(var) \
  (var) = (__typeof (var)) ((uintptr_t) (var) ^ __pointer_chk_guard_local)
#  define PTR_DEMANGLE(var)  PTR_MANGLE(var)
# endif
#elif defined PIC
# ifdef __ASSEMBLER__
#  define PTR_MANGLE(dst, src, tmp) \
  ldq tmp, __pointer_chk_guard; \
  xor src, tmp, dst
#  define PTR_MANGLE2(dst, src, tmp) \
  xor src, tmp, dst
#  define PTR_DEMANGLE(dst, tmp)  PTR_MANGLE(dst, dst, tmp)
#  define PTR_DEMANGLE2(dst, tmp) PTR_MANGLE2(dst, dst, tmp)
# else
extern const uintptr_t __pointer_chk_guard attribute_relro;
#  define PTR_MANGLE(var) \
  (var) = (__typeof(var)) ((uintptr_t) (var) ^ __pointer_chk_guard)
#  define PTR_DEMANGLE(var)  PTR_MANGLE(var)
# endif
#else
/* There exists generic C code that assumes that PTR_MANGLE is always
   defined.  When generating code for the static libc, we don't have
   __pointer_chk_guard defined.  Nor is there any place that would
   initialize it if it were defined, so there's little point in doing
   anything more than nothing.  */
# ifndef __ASSEMBLER__
#  define PTR_MANGLE(var)
#  define PTR_DEMANGLE(var)
# endif
#endif
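
/* Since mangling is a plain XOR with the guard value, PTR_MANGLE
   followed by PTR_DEMANGLE on the same variable is the identity; a
   sketch with a placeholder function pointer:

	void (*fn) (void) = some_handler;
	PTR_MANGLE (fn);	the obfuscated value can now be stored
	PTR_DEMANGLE (fn);	the usable pointer is recovered

   A stored, mangled pointer is of no direct use to an attacker who does
   not also know the per-process guard value.  */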
#endif /* ASSEMBLER */