/* Copyright (C) 1992-2016 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

/* Alan Modra <amodra@bigpond.net.au> rewrote the INLINE_SYSCALL macro */

#ifndef _LINUX_POWERPC_SYSDEP_H
#define _LINUX_POWERPC_SYSDEP_H 1

#include <sysdeps/unix/sysv/linux/sysdep.h>
#include <sysdeps/unix/powerpc/sysdep.h>
#include <tls.h>

/* Define __set_errno() for INLINE_SYSCALL macro below.  */
#ifndef __ASSEMBLER__
#include <errno.h>
#endif

/* Some system calls got renamed over time, but retained the same semantics.
   Handle them here so they can be caught by both C and assembler stubs in
   glibc.  */

#ifdef __NR_pwrite64
# ifdef __NR_pwrite
#  error "__NR_pwrite and __NR_pwrite64 both defined???"
# endif
# define __NR_pwrite __NR_pwrite64
#endif

/* For Linux we can use the system call table in the header file
	/usr/include/asm/unistd.h
   of the kernel.  But these symbols do not follow the SYS_* syntax
   so we have to redefine the `SYS_ify' macro here.  */
#undef SYS_ify
#define SYS_ify(syscall_name)	__NR_##syscall_name

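/* Illustrative sketch (not part of the original header): SYS_ify maps a bare
   syscall name to its __NR_* number, so a hypothetical caller can write

     long int nr = SYS_ify (pwrite);

   which expands to __NR_pwrite; on this port the block above may have
   aliased that to __NR_pwrite64.  The resulting number is what
   INTERNAL_SYSCALL_NCS and the assembler stubs below expect.  */
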
#ifdef __ASSEMBLER__

/* This seems to always be the case on PPC.  */
# define ALIGNARG(log2) log2
# define ASM_SIZE_DIRECTIVE(name) .size name,.-name

#endif /* __ASSEMBLER__ */

/* This version is for internal uses when there is no desire
   to set errno.  */
#define INTERNAL_VSYSCALL_NO_SYSCALL_FALLBACK(name, err, type, nr, args...) \
  ({ \
    type sc_ret = ENOSYS; \
 \
    __typeof (__vdso_##name) vdsop = __vdso_##name; \
    PTR_DEMANGLE (vdsop); \
    if (vdsop != NULL) \
      sc_ret = \
	INTERNAL_VSYSCALL_CALL_TYPE (vdsop, err, type, nr, ##args); \
    else \
      err = 1 << 28; \
    sc_ret; \
  })

/* List of system calls which are supported as vsyscalls.  */
#define HAVE_CLOCK_GETRES_VSYSCALL	1
#define HAVE_CLOCK_GETTIME_VSYSCALL	1
#define HAVE_GETCPU_VSYSCALL		1

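/* Illustrative sketch (an assumption about a typical caller, not part of this
   header): with HAVE_CLOCK_GETTIME_VSYSCALL defined, a wrapper can try the
   vDSO entry point first and issue the real system call only when the vDSO
   symbol is absent, e.g.

     INTERNAL_SYSCALL_DECL (err);
     int r = INTERNAL_VSYSCALL_NO_SYSCALL_FALLBACK (clock_gettime, err, int,
						     2, clock_id, tp);
     if (INTERNAL_SYSCALL_ERROR_P (r, err))
       r = INTERNAL_SYSCALL (clock_gettime, err, 2, clock_id, tp);

   Here clock_id and tp are hypothetical local arguments.  When
   __vdso_clock_gettime is NULL the macro sets ERR to 1 << 28 (the CR0.SO
   convention) and returns ENOSYS, so the same error check covers the
   "no vDSO" case.  */
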
/* Define a macro which expands inline into the wrapper code for a system
   call.  This use is for internal calls that do not need to handle errors
   normally.  It will never touch errno.  This returns just what the kernel
   gave back in the non-error (CR0.SO cleared) case; otherwise (CR0.SO set)
   the negated return value used inside the kernel has been reverted, so the
   positive errno value is returned.  */

#define INTERNAL_VSYSCALL_CALL_TYPE(funcptr, err, type, nr, args...) \
  ({ \
    register void *r0 __asm__ ("r0"); \
    register long int r3 __asm__ ("r3"); \
    register long int r4 __asm__ ("r4"); \
    register long int r5 __asm__ ("r5"); \
    register long int r6 __asm__ ("r6"); \
    register long int r7 __asm__ ("r7"); \
    register long int r8 __asm__ ("r8"); \
    register type rval __asm__ ("r3"); \
    LOADARGS_##nr (funcptr, args); \
    __asm__ __volatile__ \
      ("mtctr %0\n\t" \
       "bctrl\n\t" \
       "mfcr %0\n\t" \
       "0:" \
       : "+r" (r0), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), \
	 "+r" (r7), "+r" (r8) \
       : : "r9", "r10", "r11", "r12", "cr0", "ctr", "lr", "memory"); \
    err = (long int) r0; \
    __asm__ __volatile__ ("" : "=r" (rval) : "r" (r3)); \
    rval; \
  })

#define INTERNAL_VSYSCALL_CALL(funcptr, err, nr, args...) \
  INTERNAL_VSYSCALL_CALL_TYPE(funcptr, err, long int, nr, args)

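/* Note on the error convention used above (explanatory comment added here,
   not in the original header): "mfcr %0" copies the whole 32-bit condition
   register into ERR, and CR0.SO is bit 0x10000000, i.e. 1 << 28.  That is
   why INTERNAL_SYSCALL_ERROR_P below masks ERR with (1 << 28), and why
   INTERNAL_VSYSCALL_NO_SYSCALL_FALLBACK fakes an error by setting
   err = 1 << 28 directly.  */
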
#undef INLINE_SYSCALL

/* This version is for kernels that implement system calls that
   behave like function calls as far as register saving.  */
#define INLINE_SYSCALL(name, nr, args...) \
  ({ \
    INTERNAL_SYSCALL_DECL (sc_err); \
    long int sc_ret = INTERNAL_SYSCALL (name, sc_err, nr, args); \
    if (INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err)) \
      { \
	__set_errno (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err)); \
	sc_ret = -1L; \
      } \
    sc_ret; \
  })

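/* Illustrative sketch (an assumption, not part of this header): a C syscall
   wrapper typically reduces to a single INLINE_SYSCALL use, e.g. a
   hypothetical pwrite wrapper

     ssize_t
     __libc_pwrite (int fd, const void *buf, size_t count, off_t offset)
     {
       return INLINE_SYSCALL (pwrite, 4, fd, buf, count, offset);
     }

   On success the kernel's return value is passed through; on failure errno
   is set from the CR0.SO-flagged value and -1 is returned.  */
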
/* Define a macro which expands inline into the wrapper code for a system
   call.  This use is for internal calls that do not need to handle errors
   normally.  It will never touch errno.  This returns just what the kernel
   gave back in the non-error (CR0.SO cleared) case; otherwise (CR0.SO set)
   the negated return value used inside the kernel has been reverted, so the
   positive errno value is returned.  */

#undef INTERNAL_SYSCALL
#define INTERNAL_SYSCALL_NCS(name, err, nr, args...) \
  ({ \
    register long int r0 __asm__ ("r0"); \
    register long int r3 __asm__ ("r3"); \
    register long int r4 __asm__ ("r4"); \
    register long int r5 __asm__ ("r5"); \
    register long int r6 __asm__ ("r6"); \
    register long int r7 __asm__ ("r7"); \
    register long int r8 __asm__ ("r8"); \
    LOADARGS_##nr (name, ##args); \
    ABORT_TRANSACTION; \
    __asm__ __volatile__ \
      ("sc\n\t" \
       "mfcr %0\n\t" \
       "0:" \
       : "=&r" (r0), \
	 "=&r" (r3), "=&r" (r4), "=&r" (r5), \
	 "=&r" (r6), "=&r" (r7), "=&r" (r8) \
       : ASM_INPUT_##nr \
       : "r9", "r10", "r11", "r12", \
	 "cr0", "ctr", "memory"); \
    err = r0; \
    r3; \
  })

#define INTERNAL_SYSCALL(name, err, nr, args...) \
  INTERNAL_SYSCALL_NCS (__NR_##name, err, nr, args)

#undef INTERNAL_SYSCALL_DECL
#define INTERNAL_SYSCALL_DECL(err) long int err __attribute__ ((unused))

#undef INTERNAL_SYSCALL_ERROR_P
#define INTERNAL_SYSCALL_ERROR_P(val, err) \
  ((void) (val), __builtin_expect ((err) & (1 << 28), 0))

#undef INTERNAL_SYSCALL_ERRNO
#define INTERNAL_SYSCALL_ERRNO(val, err) (val)

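/* Illustrative sketch (an assumption, not part of this header): internal
   callers that must not clobber errno use the INTERNAL_* family directly,
   e.g. with a hypothetical local struct timeval tv

     INTERNAL_SYSCALL_DECL (err);
     long int ret = INTERNAL_SYSCALL (gettimeofday, err, 2, &tv, NULL);
     if (INTERNAL_SYSCALL_ERROR_P (ret, err))
       return -INTERNAL_SYSCALL_ERRNO (ret, err);

   Because the kernel already hands back the positive errno value in r3 when
   CR0.SO is set, INTERNAL_SYSCALL_ERRNO is simply (val); negating it gives
   the -errno convention many internal callers use.  */
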
#define LOADARGS_0(name, dummy) \
	r0 = name
#define LOADARGS_1(name, __arg1) \
	long int arg1 = (long int) (__arg1); \
	LOADARGS_0(name, 0); \
	extern void __illegally_sized_syscall_arg1 (void); \
	if (__builtin_classify_type (__arg1) != 5 && sizeof (__arg1) > 8) \
	  __illegally_sized_syscall_arg1 (); \
	r3 = arg1
#define LOADARGS_2(name, __arg1, __arg2) \
	long int arg2 = (long int) (__arg2); \
	LOADARGS_1(name, __arg1); \
	extern void __illegally_sized_syscall_arg2 (void); \
	if (__builtin_classify_type (__arg2) != 5 && sizeof (__arg2) > 8) \
	  __illegally_sized_syscall_arg2 (); \
	r4 = arg2
#define LOADARGS_3(name, __arg1, __arg2, __arg3) \
	long int arg3 = (long int) (__arg3); \
	LOADARGS_2(name, __arg1, __arg2); \
	extern void __illegally_sized_syscall_arg3 (void); \
	if (__builtin_classify_type (__arg3) != 5 && sizeof (__arg3) > 8) \
	  __illegally_sized_syscall_arg3 (); \
	r5 = arg3
#define LOADARGS_4(name, __arg1, __arg2, __arg3, __arg4) \
	long int arg4 = (long int) (__arg4); \
	LOADARGS_3(name, __arg1, __arg2, __arg3); \
	extern void __illegally_sized_syscall_arg4 (void); \
	if (__builtin_classify_type (__arg4) != 5 && sizeof (__arg4) > 8) \
	  __illegally_sized_syscall_arg4 (); \
	r6 = arg4
#define LOADARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5) \
	long int arg5 = (long int) (__arg5); \
	LOADARGS_4(name, __arg1, __arg2, __arg3, __arg4); \
	extern void __illegally_sized_syscall_arg5 (void); \
	if (__builtin_classify_type (__arg5) != 5 && sizeof (__arg5) > 8) \
	  __illegally_sized_syscall_arg5 (); \
	r7 = arg5
#define LOADARGS_6(name, __arg1, __arg2, __arg3, __arg4, __arg5, __arg6) \
	long int arg6 = (long int) (__arg6); \
	LOADARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5); \
	extern void __illegally_sized_syscall_arg6 (void); \
	if (__builtin_classify_type (__arg6) != 5 && sizeof (__arg6) > 8) \
	  __illegally_sized_syscall_arg6 (); \
	r8 = arg6

#define ASM_INPUT_0 "0" (r0)
#define ASM_INPUT_1 ASM_INPUT_0, "1" (r3)
#define ASM_INPUT_2 ASM_INPUT_1, "2" (r4)
#define ASM_INPUT_3 ASM_INPUT_2, "3" (r5)
#define ASM_INPUT_4 ASM_INPUT_3, "4" (r6)
#define ASM_INPUT_5 ASM_INPUT_4, "5" (r7)
#define ASM_INPUT_6 ASM_INPUT_5, "6" (r8)

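/* Illustrative sketch (an assumption, not part of this header): for a
   two-argument call such as INTERNAL_SYSCALL (kill, err, 2, pid, sig),
   LOADARGS_2 expands roughly to

     long int arg2 = (long int) (sig);
     long int arg1 = (long int) (pid);
     r0 = __NR_kill;
     r3 = arg1;
     r4 = arg2;

   plus the size checks, and ASM_INPUT_2 ties r0, r3 and r4 to the "sc" asm
   as inputs, so the compiler keeps the arguments live in the right
   registers across the system call.  */
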
/* Pointer mangling support.  */
#if IS_IN (rtld)
/* We cannot use the thread descriptor because in ld.so we use setjmp
   earlier than the descriptor is initialized.  */
#else
# ifdef __ASSEMBLER__
#  define PTR_MANGLE(reg, tmpreg) \
	ld	tmpreg,POINTER_GUARD(r13); \
	xor	reg,tmpreg,reg
#  define PTR_MANGLE2(reg, tmpreg) \
	xor	reg,tmpreg,reg
#  define PTR_MANGLE3(destreg, reg, tmpreg) \
	ld	tmpreg,POINTER_GUARD(r13); \
	xor	destreg,tmpreg,reg
#  define PTR_DEMANGLE(reg, tmpreg) PTR_MANGLE (reg, tmpreg)
#  define PTR_DEMANGLE2(reg, tmpreg) PTR_MANGLE2 (reg, tmpreg)
#  define PTR_DEMANGLE3(destreg, reg, tmpreg) PTR_MANGLE3 (destreg, reg, tmpreg)
# else
#  define PTR_MANGLE(var) \
  (var) = (__typeof (var)) ((uintptr_t) (var) ^ THREAD_GET_POINTER_GUARD ())
#  define PTR_DEMANGLE(var)	PTR_MANGLE (var)
# endif
#endif

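/* Illustrative sketch (an assumption, not part of this header): mangling is
   a plain XOR with the per-thread pointer guard, so demangling is the same
   operation.  A hypothetical saved function pointer would round-trip as

     void (*fp) (void) = &some_callback;
     PTR_MANGLE (fp);
     ...
     PTR_DEMANGLE (fp);
     fp ();

   PTR_MANGLE stores the XOR-obfuscated value, and PTR_DEMANGLE, being the
   same XOR, restores the original pointer.  This is also how
   INTERNAL_VSYSCALL_NO_SYSCALL_FALLBACK above recovers the __vdso_* function
   pointers before calling them.  */
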
#endif /* linux/powerpc/powerpc64/sysdep.h */