Add comments for GCC 5 requirement

[glibc.git] / sysdeps/unix/sysv/linux/i386/sysdep.h
blob 76db7b8ab956982cfbb91209ac3892913c20f5a0
/* Copyright (C) 1992-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper, <drepper@gnu.org>, August 1995.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#ifndef _LINUX_I386_SYSDEP_H
#define _LINUX_I386_SYSDEP_H 1

/* There is some commonality.  */
#include <sysdeps/unix/sysv/linux/sysdep.h>
#include <sysdeps/unix/i386/sysdep.h>
/* Defines RTLD_PRIVATE_ERRNO and USE_DL_SYSINFO.  */
#include <dl-sysdep.h>
#include <tls.h>

/* For Linux we can use the system call table in the header file
	/usr/include/asm/unistd.h
   of the kernel.  But these symbols do not follow the SYS_* syntax
   so we have to redefine the `SYS_ify' macro here.  */
#undef SYS_ify
#define SYS_ify(syscall_name)	__NR_##syscall_name

#if defined USE_DL_SYSINFO \
    && (IS_IN (libc) || IS_IN (libpthread))
# define I386_USE_SYSENTER	1
#else
# undef I386_USE_SYSENTER
#endif
#ifdef __ASSEMBLER__

/* Linux uses a negative return value to indicate syscall errors,
   unlike most Unices, which use the condition codes' carry flag.

   Since version 2.1 the return value of a system call might be
   negative even if the call succeeded.  E.g., the `lseek' system call
   might return a large offset.  Therefore we can no longer test
   for < 0; instead we test for a real error by making sure the value
   in %eax is a real error number.  Linus said he will make sure that
   no syscall returns a value in -1 .. -4095 as a valid result so we
   can safely test with -4095.  */
/* We don't want the label for the error handler to be global when we
   define it here.  */
#define SYSCALL_ERROR_LABEL __syscall_error

#undef PSEUDO
#define PSEUDO(name, syscall_name, args) \
  .text; \
  ENTRY (name) \
    DO_CALL (syscall_name, args); \
    cmpl $-4095, %eax; \
    jae SYSCALL_ERROR_LABEL

#undef PSEUDO_END
#define PSEUDO_END(name) \
  SYSCALL_ERROR_HANDLER \
  END (name)
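
/* For illustration only (not part of the original header): a rough
   sketch of what PSEUDO expands to for a hypothetical two-argument
   syscall stub, using the DO_CALL/PUSHARGS/DOARGS/POPARGS macros
   defined further below:

     PSEUDO (__foo, foo, 2)
       .text
       ENTRY (__foo)
	 movl %ebx, %edx           save %ebx (PIC register) in %edx
	 movl 8(%esp), %ecx        argument 2
	 movl 4(%esp), %ebx        argument 1
	 movl $SYS_ify (foo), %eax
	 ENTER_KERNEL
	 movl %edx, %ebx           restore %ebx
	 cmpl $-4095, %eax         -1 .. -4095 means error
	 jae SYSCALL_ERROR_LABEL  */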
#undef PSEUDO_NOERRNO
#define PSEUDO_NOERRNO(name, syscall_name, args) \
  .text; \
  ENTRY (name) \
    DO_CALL (syscall_name, args)

#undef PSEUDO_END_NOERRNO
#define PSEUDO_END_NOERRNO(name) \
  END (name)

#define ret_NOERRNO ret

/* The function has to return the error code.  */
#undef PSEUDO_ERRVAL
#define PSEUDO_ERRVAL(name, syscall_name, args) \
  .text; \
  ENTRY (name) \
    DO_CALL (syscall_name, args); \
    negl %eax

#undef PSEUDO_END_ERRVAL
#define PSEUDO_END_ERRVAL(name) \
  END (name)

#define ret_ERRVAL ret

#define SYSCALL_ERROR_HANDLER	/* Nothing here; code in sysdep.c is used.  */

/* The original calling convention for system calls on Linux/i386 is
   to use int $0x80.  */
#ifdef I386_USE_SYSENTER
# ifdef SHARED
#  define ENTER_KERNEL call *%gs:SYSINFO_OFFSET
# else
#  define ENTER_KERNEL call *_dl_sysinfo
# endif
#else
# define ENTER_KERNEL int $0x80
#endif
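
/* Added note (not in the original header): when the kernel provides a
   vDSO entry point (AT_SYSINFO), its address is stored in _dl_sysinfo
   and in the sysinfo field of the TCB (reached above via
   %gs:SYSINFO_OFFSET), so ENTER_KERNEL goes through the fast
   sysenter-capable path; otherwise the legacy int $0x80 trap is used.  */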
/* Linux takes system call arguments in registers:

	syscall number	%eax	     call-clobbered
	arg 1		%ebx	     call-saved
	arg 2		%ecx	     call-clobbered
	arg 3		%edx	     call-clobbered
	arg 4		%esi	     call-saved
	arg 5		%edi	     call-saved
	arg 6		%ebp	     call-saved

   The stack layout upon entering the function is:

	24(%esp)	Arg# 6
	20(%esp)	Arg# 5
	16(%esp)	Arg# 4
	12(%esp)	Arg# 3
	 8(%esp)	Arg# 2
	 4(%esp)	Arg# 1
	  (%esp)	Return address

   (Of course a function with say 3 arguments does not have entries for
   arguments 4, 5, and 6.)

   The following code tries hard to be optimal.  A general assumption
   (which is true according to the data books I have) is that

	2 * xchg is more expensive than pushl + movl + popl.

   Besides this, a neat trick is used: the Linux calling conventions
   say that, among the registers used for parameters, %ecx and %edx
   need not be saved, so we may clobber these registers even when they
   are not used for parameter passing.

   As a result one can see below that we save the content of the %ebx
   register in the %edx register when we have fewer than 3 arguments
   (2 * movl is less expensive than pushl + popl).

   Second, unlike for the other registers, we don't save the content of
   %ecx and %edx when we have more than 1 and 2 arguments respectively.

   The code below might look a bit long but we have to take care of
   pipelined processors (i586).  Here the `pushl' and `popl'
   instructions are marked as NP (not pairable), with the exception of
   two consecutive such instructions.  This gives no penalty on
   other processors though.  */
#undef DO_CALL
#define DO_CALL(syscall_name, args) \
    PUSHARGS_##args \
    DOARGS_##args \
    movl $SYS_ify (syscall_name), %eax; \
    ENTER_KERNEL \
    POPARGS_##args

#define PUSHARGS_0	/* No arguments to push.  */
#define DOARGS_0	/* No arguments to frob.  */
#define POPARGS_0	/* No arguments to pop.  */
#define _PUSHARGS_0	/* No arguments to push.  */
#define _DOARGS_0(n)	/* No arguments to frob.  */
#define _POPARGS_0	/* No arguments to pop.  */

#define PUSHARGS_1	movl %ebx, %edx; L(SAVEBX1): PUSHARGS_0
#define DOARGS_1	_DOARGS_1 (4)
#define POPARGS_1	POPARGS_0; movl %edx, %ebx; L(RESTBX1):
#define _PUSHARGS_1	pushl %ebx; cfi_adjust_cfa_offset (4); \
			cfi_rel_offset (ebx, 0); L(PUSHBX1): _PUSHARGS_0
#define _DOARGS_1(n)	movl n(%esp), %ebx; _DOARGS_0(n-4)
#define _POPARGS_1	_POPARGS_0; popl %ebx; cfi_adjust_cfa_offset (-4); \
			cfi_restore (ebx); L(POPBX1):

#define PUSHARGS_2	PUSHARGS_1
#define DOARGS_2	_DOARGS_2 (8)
#define POPARGS_2	POPARGS_1
#define _PUSHARGS_2	_PUSHARGS_1
#define _DOARGS_2(n)	movl n(%esp), %ecx; _DOARGS_1 (n-4)
#define _POPARGS_2	_POPARGS_1

#define PUSHARGS_3	_PUSHARGS_2
#define DOARGS_3	_DOARGS_3 (16)
#define POPARGS_3	_POPARGS_3
#define _PUSHARGS_3	_PUSHARGS_2
#define _DOARGS_3(n)	movl n(%esp), %edx; _DOARGS_2 (n-4)
#define _POPARGS_3	_POPARGS_2

#define PUSHARGS_4	_PUSHARGS_4
#define DOARGS_4	_DOARGS_4 (24)
#define POPARGS_4	_POPARGS_4
#define _PUSHARGS_4	pushl %esi; cfi_adjust_cfa_offset (4); \
			cfi_rel_offset (esi, 0); L(PUSHSI1): _PUSHARGS_3
#define _DOARGS_4(n)	movl n(%esp), %esi; _DOARGS_3 (n-4)
#define _POPARGS_4	_POPARGS_3; popl %esi; cfi_adjust_cfa_offset (-4); \
			cfi_restore (esi); L(POPSI1):

#define PUSHARGS_5	_PUSHARGS_5
#define DOARGS_5	_DOARGS_5 (32)
#define POPARGS_5	_POPARGS_5
#define _PUSHARGS_5	pushl %edi; cfi_adjust_cfa_offset (4); \
			cfi_rel_offset (edi, 0); L(PUSHDI1): _PUSHARGS_4
#define _DOARGS_5(n)	movl n(%esp), %edi; _DOARGS_4 (n-4)
#define _POPARGS_5	_POPARGS_4; popl %edi; cfi_adjust_cfa_offset (-4); \
			cfi_restore (edi); L(POPDI1):

#define PUSHARGS_6	_PUSHARGS_6
#define DOARGS_6	_DOARGS_6 (40)
#define POPARGS_6	_POPARGS_6
#define _PUSHARGS_6	pushl %ebp; cfi_adjust_cfa_offset (4); \
			cfi_rel_offset (ebp, 0); L(PUSHBP1): _PUSHARGS_5
#define _DOARGS_6(n)	movl n(%esp), %ebp; _DOARGS_5 (n-4)
#define _POPARGS_6	_POPARGS_5; popl %ebp; cfi_adjust_cfa_offset (-4); \
			cfi_restore (ebp); L(POPBP1):
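
/* For illustration only (not in the original header): a rough sketch
   of how the macros above combine for a hypothetical three-argument
   call, DO_CALL (foo, 3):

     pushl %ebx                   _PUSHARGS_1 via PUSHARGS_3 (plus CFI notes)
     movl 16(%esp), %edx          argument 3 (offset shifted by the push)
     movl 12(%esp), %ecx          argument 2
     movl 8(%esp), %ebx           argument 1
     movl $SYS_ify (foo), %eax
     ENTER_KERNEL
     popl %ebx                    _POPARGS_1 via POPARGS_3

   The DOARGS_N offsets (4, 8, 16, 24, 32, 40) already account for the
   extra words pushed by the corresponding PUSHARGS_N.  */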
#else	/* !__ASSEMBLER__ */

extern int __syscall_error (int)
  attribute_hidden __attribute__ ((__regparm__ (1)));

/* Since GCC 5 and above can properly spill %ebx with PIC when needed,
   we can inline syscalls with 6 arguments if GCC 5 or above is used
   to compile glibc.  */

#if !__GNUC_PREREQ (5,0)
/* We need some help from the assembler to generate optimal code.  We
   define some macros here which will be used later.  */
asm (".L__X'%ebx = 1\n\t"
     ".L__X'%ecx = 2\n\t"
     ".L__X'%edx = 2\n\t"
     ".L__X'%eax = 3\n\t"
     ".L__X'%esi = 3\n\t"
     ".L__X'%edi = 3\n\t"
     ".L__X'%ebp = 3\n\t"
     ".L__X'%esp = 3\n\t"
     ".macro bpushl name reg\n\t"
     ".if 1 - \\name\n\t"
     ".if 2 - \\name\n\t"
     "error\n\t"
     ".else\n\t"
     "xchgl \\reg, %ebx\n\t"
     ".endif\n\t"
     ".endif\n\t"
     ".endm\n\t"
     ".macro bpopl name reg\n\t"
     ".if 1 - \\name\n\t"
     ".if 2 - \\name\n\t"
     "error\n\t"
     ".else\n\t"
     "xchgl \\reg, %ebx\n\t"
     ".endif\n\t"
     ".endif\n\t"
     ".endm\n\t");
/* Six-argument syscalls use an out-of-line helper, because an inline
   asm using all registers apart from %esp cannot work reliably and
   the assembler does not support describing an asm that saves and
   restores %ebp itself as a separate stack frame.  This structure
   stores the arguments not passed in registers; %edi is passed with a
   pointer to this structure.  */
struct libc_do_syscall_args
{
  int ebx, edi, ebp;
};
#endif
/* Define a macro which expands inline into the wrapper code for a system
   call.  */
#undef INLINE_SYSCALL
#if IS_IN (libc)
# define INLINE_SYSCALL(name, nr, args...) \
  ({ \
    unsigned int resultvar = INTERNAL_SYSCALL (name, , nr, args); \
    __glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (resultvar, )) \
    ? __syscall_error (-INTERNAL_SYSCALL_ERRNO (resultvar, )) \
    : (int) resultvar; })
#else
# define INLINE_SYSCALL(name, nr, args...) \
  ({ \
    unsigned int resultvar = INTERNAL_SYSCALL (name, , nr, args); \
    if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (resultvar, ))) \
      { \
	__set_errno (INTERNAL_SYSCALL_ERRNO (resultvar, )); \
	resultvar = 0xffffffff; \
      } \
    (int) resultvar; })
#endif
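
/* For illustration only (hypothetical use, not in the original header):

     int ret = INLINE_SYSCALL (close, 1, fd);

   On success this yields the kernel's return value; on failure the
   libc variant hands the negated error to __syscall_error, and the
   non-libc variant sets errno itself, so in both cases the caller just
   sees -1 with errno filled in.  */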
/* Set error number and return -1.  Return the internal function,
   __syscall_error, which sets errno from the negative error number
   and returns -1, to avoid PIC.  */
#undef INLINE_SYSCALL_ERROR_RETURN_VALUE
#define INLINE_SYSCALL_ERROR_RETURN_VALUE(resultvar) \
  __syscall_error (-(resultvar))

/* List of system calls which are supported as vsyscalls.  */
# define HAVE_CLOCK_GETTIME_VSYSCALL	1
# define HAVE_GETTIMEOFDAY_VSYSCALL	1
/* Define a macro which expands inline into the wrapper code for a system
   call.  This use is for internal calls that do not need to handle errors
   normally.  It will never touch errno.  This returns just what the kernel
   gave back.

   The _NCS variant allows non-constant syscall numbers but it is not
   possible to use more than four parameters.  */
#undef INTERNAL_SYSCALL
#define INTERNAL_SYSCALL_MAIN_0(name, err, args...) \
  INTERNAL_SYSCALL_MAIN_INLINE(name, err, 0, args)
#define INTERNAL_SYSCALL_MAIN_1(name, err, args...) \
  INTERNAL_SYSCALL_MAIN_INLINE(name, err, 1, args)
#define INTERNAL_SYSCALL_MAIN_2(name, err, args...) \
  INTERNAL_SYSCALL_MAIN_INLINE(name, err, 2, args)
#define INTERNAL_SYSCALL_MAIN_3(name, err, args...) \
  INTERNAL_SYSCALL_MAIN_INLINE(name, err, 3, args)
#define INTERNAL_SYSCALL_MAIN_4(name, err, args...) \
  INTERNAL_SYSCALL_MAIN_INLINE(name, err, 4, args)
#define INTERNAL_SYSCALL_MAIN_5(name, err, args...) \
  INTERNAL_SYSCALL_MAIN_INLINE(name, err, 5, args)
/* Each object using 6-argument inline syscalls must include a
   definition of __libc_do_syscall.  */
#if __GNUC_PREREQ (5,0)
# define INTERNAL_SYSCALL_MAIN_6(name, err, args...) \
  INTERNAL_SYSCALL_MAIN_INLINE(name, err, 6, args)
#else /* GCC 5 */
# define INTERNAL_SYSCALL_MAIN_6(name, err, arg1, arg2, arg3, \
				 arg4, arg5, arg6) \
  struct libc_do_syscall_args _xv = \
    { \
      (int) (arg1), \
      (int) (arg5), \
      (int) (arg6) \
    }; \
  asm volatile ( \
  "movl %1, %%eax\n\t" \
  "call __libc_do_syscall" \
  : "=a" (resultvar) \
  : "i" (__NR_##name), "c" (arg2), "d" (arg3), "S" (arg4), "D" (&_xv) \
  : "memory", "cc")
#endif /* GCC 5 */
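
/* Added note (not in the original header): in the pre-GCC-5 path above,
   a 6-argument call therefore passes arguments 2-4 in %ecx/%edx/%esi
   directly, while arguments 1, 5 and 6 travel through _xv (the ebx,
   edi and ebp slots of struct libc_do_syscall_args), whose address
   goes in %edi; the out-of-line __libc_do_syscall helper, defined in a
   separate assembly file, is then expected to save %ebx/%edi/%ebp,
   load them from that structure and enter the kernel.  */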
#define INTERNAL_SYSCALL(name, err, nr, args...) \
  ({ \
    register unsigned int resultvar; \
    INTERNAL_SYSCALL_MAIN_##nr (name, err, args); \
    (int) resultvar; })
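
/* For illustration only (hypothetical use, not in the original header),
   the usual caller-side idiom, relying on INTERNAL_SYSCALL_ERROR_P and
   INTERNAL_SYSCALL_ERRNO defined further below:

     INTERNAL_SYSCALL_DECL (err);
     int res = INTERNAL_SYSCALL (open, err, 3, file, flags, mode);
     if (INTERNAL_SYSCALL_ERROR_P (res, err))
       {
	 __set_errno (INTERNAL_SYSCALL_ERRNO (res, err));
	 res = -1;
       }

   Unlike INLINE_SYSCALL, the raw macro never touches errno itself.  */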
#ifdef I386_USE_SYSENTER
# if __GNUC_PREREQ (5,0)
#  ifdef SHARED
#   define INTERNAL_SYSCALL_MAIN_INLINE(name, err, nr, args...) \
  LOADREGS_##nr(args) \
  asm volatile ( \
  "call *%%gs:%P2" \
  : "=a" (resultvar) \
  : "a" (__NR_##name), "i" (offsetof (tcbhead_t, sysinfo)) \
    ASMARGS_##nr(args) : "memory", "cc")
#   define INTERNAL_SYSCALL_NCS(name, err, nr, args...) \
  ({ \
    register unsigned int resultvar; \
    LOADREGS_##nr(args) \
    asm volatile ( \
    "call *%%gs:%P2" \
    : "=a" (resultvar) \
    : "a" (name), "i" (offsetof (tcbhead_t, sysinfo)) \
      ASMARGS_##nr(args) : "memory", "cc"); \
    (int) resultvar; })
#  else
#   define INTERNAL_SYSCALL_MAIN_INLINE(name, err, nr, args...) \
  LOADREGS_##nr(args) \
  asm volatile ( \
  "call *_dl_sysinfo" \
  : "=a" (resultvar) \
  : "a" (__NR_##name) ASMARGS_##nr(args) : "memory", "cc")
#   define INTERNAL_SYSCALL_NCS(name, err, nr, args...) \
  ({ \
    register unsigned int resultvar; \
    LOADREGS_##nr(args) \
    asm volatile ( \
    "call *_dl_sysinfo" \
    : "=a" (resultvar) \
    : "a" (name) ASMARGS_##nr(args) : "memory", "cc"); \
    (int) resultvar; })
#  endif
# else /* GCC 5 */
#  ifdef SHARED
#   define INTERNAL_SYSCALL_MAIN_INLINE(name, err, nr, args...) \
  EXTRAVAR_##nr \
  asm volatile ( \
  LOADARGS_##nr \
  "movl %1, %%eax\n\t" \
  "call *%%gs:%P2\n\t" \
  RESTOREARGS_##nr \
  : "=a" (resultvar) \
  : "i" (__NR_##name), "i" (offsetof (tcbhead_t, sysinfo)) \
    ASMFMT_##nr(args) : "memory", "cc")
#   define INTERNAL_SYSCALL_NCS(name, err, nr, args...) \
  ({ \
    register unsigned int resultvar; \
    EXTRAVAR_##nr \
    asm volatile ( \
    LOADARGS_##nr \
    "call *%%gs:%P2\n\t" \
    RESTOREARGS_##nr \
    : "=a" (resultvar) \
    : "0" (name), "i" (offsetof (tcbhead_t, sysinfo)) \
      ASMFMT_##nr(args) : "memory", "cc"); \
    (int) resultvar; })
#  else
#   define INTERNAL_SYSCALL_MAIN_INLINE(name, err, nr, args...) \
  EXTRAVAR_##nr \
  asm volatile ( \
  LOADARGS_##nr \
  "movl %1, %%eax\n\t" \
  "call *_dl_sysinfo\n\t" \
  RESTOREARGS_##nr \
  : "=a" (resultvar) \
  : "i" (__NR_##name) ASMFMT_##nr(args) : "memory", "cc")
#   define INTERNAL_SYSCALL_NCS(name, err, nr, args...) \
  ({ \
    register unsigned int resultvar; \
    EXTRAVAR_##nr \
    asm volatile ( \
    LOADARGS_##nr \
    "call *_dl_sysinfo\n\t" \
    RESTOREARGS_##nr \
    : "=a" (resultvar) \
    : "0" (name) ASMFMT_##nr(args) : "memory", "cc"); \
    (int) resultvar; })
#  endif
# endif /* GCC 5 */
#else
# if __GNUC_PREREQ (5,0)
#  define INTERNAL_SYSCALL_MAIN_INLINE(name, err, nr, args...) \
  LOADREGS_##nr(args) \
  asm volatile ( \
  "int $0x80" \
  : "=a" (resultvar) \
  : "a" (__NR_##name) ASMARGS_##nr(args) : "memory", "cc")
#  define INTERNAL_SYSCALL_NCS(name, err, nr, args...) \
  ({ \
    register unsigned int resultvar; \
    LOADREGS_##nr(args) \
    asm volatile ( \
    "int $0x80" \
    : "=a" (resultvar) \
    : "a" (name) ASMARGS_##nr(args) : "memory", "cc"); \
    (int) resultvar; })
# else /* GCC 5 */
#  define INTERNAL_SYSCALL_MAIN_INLINE(name, err, nr, args...) \
  EXTRAVAR_##nr \
  asm volatile ( \
  LOADARGS_##nr \
  "movl %1, %%eax\n\t" \
  "int $0x80\n\t" \
  RESTOREARGS_##nr \
  : "=a" (resultvar) \
  : "i" (__NR_##name) ASMFMT_##nr(args) : "memory", "cc")
#  define INTERNAL_SYSCALL_NCS(name, err, nr, args...) \
  ({ \
    register unsigned int resultvar; \
    EXTRAVAR_##nr \
    asm volatile ( \
    LOADARGS_##nr \
    "int $0x80\n\t" \
    RESTOREARGS_##nr \
    : "=a" (resultvar) \
    : "0" (name) ASMFMT_##nr(args) : "memory", "cc"); \
    (int) resultvar; })
# endif /* GCC 5 */
#endif
#undef INTERNAL_SYSCALL_DECL
#define INTERNAL_SYSCALL_DECL(err) do { } while (0)

#undef INTERNAL_SYSCALL_ERROR_P
#define INTERNAL_SYSCALL_ERROR_P(val, err) \
  ((unsigned int) (val) >= 0xfffff001u)

#undef INTERNAL_SYSCALL_ERRNO
#define INTERNAL_SYSCALL_ERRNO(val, err)	(-(val))
#define LOADARGS_0
#ifdef __PIC__
# if defined I386_USE_SYSENTER && defined SHARED
#  define LOADARGS_1 \
    "bpushl .L__X'%k3, %k3\n\t"
#  define LOADARGS_5 \
    "movl %%ebx, %4\n\t" \
    "movl %3, %%ebx\n\t"
# else
#  define LOADARGS_1 \
    "bpushl .L__X'%k2, %k2\n\t"
#  define LOADARGS_5 \
    "movl %%ebx, %3\n\t" \
    "movl %2, %%ebx\n\t"
# endif
# define LOADARGS_2	LOADARGS_1
# define LOADARGS_3 \
    "xchgl %%ebx, %%edi\n\t"
# define LOADARGS_4	LOADARGS_3
#else
# define LOADARGS_1
# define LOADARGS_2
# define LOADARGS_3
# define LOADARGS_4
# define LOADARGS_5
#endif

#define RESTOREARGS_0
#ifdef __PIC__
# if defined I386_USE_SYSENTER && defined SHARED
#  define RESTOREARGS_1 \
    "bpopl .L__X'%k3, %k3\n\t"
#  define RESTOREARGS_5 \
    "movl %4, %%ebx"
# else
#  define RESTOREARGS_1 \
    "bpopl .L__X'%k2, %k2\n\t"
#  define RESTOREARGS_5 \
    "movl %3, %%ebx"
# endif
# define RESTOREARGS_2	RESTOREARGS_1
# define RESTOREARGS_3 \
    "xchgl %%edi, %%ebx\n\t"
# define RESTOREARGS_4	RESTOREARGS_3
#else
# define RESTOREARGS_1
# define RESTOREARGS_2
# define RESTOREARGS_3
# define RESTOREARGS_4
# define RESTOREARGS_5
#endif
#if __GNUC_PREREQ (5,0)
# define LOADREGS_0()
# define ASMARGS_0()
# define LOADREGS_1(arg1) \
	LOADREGS_0 ()
# define ASMARGS_1(arg1) \
	ASMARGS_0 (), "b" ((unsigned int) (arg1))
# define LOADREGS_2(arg1, arg2) \
	LOADREGS_1 (arg1)
# define ASMARGS_2(arg1, arg2) \
	ASMARGS_1 (arg1), "c" ((unsigned int) (arg2))
# define LOADREGS_3(arg1, arg2, arg3) \
	LOADREGS_2 (arg1, arg2)
# define ASMARGS_3(arg1, arg2, arg3) \
	ASMARGS_2 (arg1, arg2), "d" ((unsigned int) (arg3))
# define LOADREGS_4(arg1, arg2, arg3, arg4) \
	LOADREGS_3 (arg1, arg2, arg3)
# define ASMARGS_4(arg1, arg2, arg3, arg4) \
	ASMARGS_3 (arg1, arg2, arg3), "S" ((unsigned int) (arg4))
# define LOADREGS_5(arg1, arg2, arg3, arg4, arg5) \
	LOADREGS_4 (arg1, arg2, arg3, arg4)
# define ASMARGS_5(arg1, arg2, arg3, arg4, arg5) \
	ASMARGS_4 (arg1, arg2, arg3, arg4), "D" ((unsigned int) (arg5))
# define LOADREGS_6(arg1, arg2, arg3, arg4, arg5, arg6) \
	register unsigned int _a6 asm ("ebp") = (unsigned int) (arg6); \
	LOADREGS_5 (arg1, arg2, arg3, arg4, arg5)
# define ASMARGS_6(arg1, arg2, arg3, arg4, arg5, arg6) \
	ASMARGS_5 (arg1, arg2, arg3, arg4, arg5), "r" (_a6)
#endif /* GCC 5 */
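
/* For illustration only (hypothetical call, not in the original
   header): with GCC 5 or later, SHARED and sysenter enabled,
   INTERNAL_SYSCALL (foo, , 2, fd, buf) boils down to roughly

     asm volatile ("call *%%gs:%P2"
		   : "=a" (resultvar)
		   : "a" (__NR_foo), "i" (offsetof (tcbhead_t, sysinfo)),
		     "b" ((unsigned int) (fd)), "c" ((unsigned int) (buf))
		   : "memory", "cc");

   i.e. the ASMARGS_N chain simply binds argument N to its fixed
   register, and only LOADREGS_6 emits code (pinning argument 6 to
   %ebp via a register variable).  */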
#define ASMFMT_0()
#ifdef __PIC__
# define ASMFMT_1(arg1) \
	, "cd" (arg1)
# define ASMFMT_2(arg1, arg2) \
	, "d" (arg1), "c" (arg2)
# define ASMFMT_3(arg1, arg2, arg3) \
	, "D" (arg1), "c" (arg2), "d" (arg3)
# define ASMFMT_4(arg1, arg2, arg3, arg4) \
	, "D" (arg1), "c" (arg2), "d" (arg3), "S" (arg4)
# define ASMFMT_5(arg1, arg2, arg3, arg4, arg5) \
	, "0" (arg1), "m" (_xv), "c" (arg2), "d" (arg3), "S" (arg4), "D" (arg5)
#else
# define ASMFMT_1(arg1) \
	, "b" (arg1)
# define ASMFMT_2(arg1, arg2) \
	, "b" (arg1), "c" (arg2)
# define ASMFMT_3(arg1, arg2, arg3) \
	, "b" (arg1), "c" (arg2), "d" (arg3)
# define ASMFMT_4(arg1, arg2, arg3, arg4) \
	, "b" (arg1), "c" (arg2), "d" (arg3), "S" (arg4)
# define ASMFMT_5(arg1, arg2, arg3, arg4, arg5) \
	, "b" (arg1), "c" (arg2), "d" (arg3), "S" (arg4), "D" (arg5)
#endif

#define EXTRAVAR_0
#define EXTRAVAR_1
#define EXTRAVAR_2
#define EXTRAVAR_3
#define EXTRAVAR_4
#ifdef __PIC__
# define EXTRAVAR_5 int _xv;
#else
# define EXTRAVAR_5
#endif
/* Consistency check for position-independent code.  */
#if defined __PIC__ && !__GNUC_PREREQ (5,0)
# define check_consistency() \
  ({ int __res; \
     __asm__ __volatile__ \
       (LOAD_PIC_REG_STR (cx) ";" \
	"subl %%ebx, %%ecx;" \
	"je 1f;" \
	"ud2;" \
	"1:\n" \
	: "=c" (__res)); \
     __res; })
#endif
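
/* Added note (not in the original header): check_consistency recomputes
   the GOT pointer into %ecx via LOAD_PIC_REG_STR (defined elsewhere),
   subtracts the current %ebx, and executes ud2 (an invalid-opcode trap)
   if the two differ, i.e. if the PIC register has been clobbered.  */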
#endif	/* __ASSEMBLER__ */


/* Pointer mangling support.  */
#if IS_IN (rtld)
/* We cannot use the thread descriptor because in ld.so we use setjmp
   before the descriptor is initialized.  Using a global variable
   is too complicated here since we have no PC-relative addressing mode.  */
#else
# ifdef __ASSEMBLER__
#  define PTR_MANGLE(reg)	xorl %gs:POINTER_GUARD, reg; \
				roll $9, reg
#  define PTR_DEMANGLE(reg)	rorl $9, reg; \
				xorl %gs:POINTER_GUARD, reg
# else
#  define PTR_MANGLE(var)	asm ("xorl %%gs:%c2, %0\n" \
				     "roll $9, %0" \
				     : "=r" (var) \
				     : "0" (var), \
				       "i" (offsetof (tcbhead_t, \
						      pointer_guard)))
#  define PTR_DEMANGLE(var)	asm ("rorl $9, %0\n" \
				     "xorl %%gs:%c2, %0" \
				     : "=r" (var) \
				     : "0" (var), \
				       "i" (offsetof (tcbhead_t, \
						      pointer_guard)))
# endif
#endif
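
/* For illustration only (not in the original header): both variants
   implement the same round trip, so for any pointer-sized value

     uintptr_t v = (uintptr_t) some_function_pointer;
     PTR_MANGLE (v);     XOR with the per-thread pointer_guard, rotate left 9
     PTR_DEMANGLE (v);   rotate right 9, XOR again -> original value

   leaving mangled pointers meaningless to an attacker who does not
   know the thread's guard value.  */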
#endif /* linux/i386/sysdep.h */