/* Copyright (C) 1992,1993,1995-2000,2002-2006,2007
   Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper, <drepper@gnu.org>, August 1995.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#ifndef _LINUX_I386_SYSDEP_H
#define _LINUX_I386_SYSDEP_H 1

/* There is some commonality.  */
#include <sysdeps/unix/i386/sysdep.h>
#include <bp-sym.h>
#include <bp-asm.h>
/* Defines RTLD_PRIVATE_ERRNO and USE_DL_SYSINFO.  */
#include <dl-sysdep.h>
#include <tls.h>


/* For Linux we can use the system call table in the header file
	/usr/include/asm/unistd.h
   of the kernel.  But these symbols do not follow the SYS_* syntax
   so we have to redefine the `SYS_ify' macro here.  */
#undef SYS_ify
#define SYS_ify(syscall_name)  __NR_##syscall_name

#if defined USE_DL_SYSINFO \
    && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define I386_USE_SYSENTER 1
#else
# undef I386_USE_SYSENTER
#endif

#ifdef __ASSEMBLER__

/* Linux uses a negative return value to indicate syscall errors,
   unlike most Unices, which use the condition codes' carry flag.

   Since version 2.1 the return value of a system call might be
   negative even if the call succeeded.  E.g., the `lseek' system call
   might return a large offset.  Therefore we must no longer test
   for < 0, but test for a real error by making sure the value in %eax
   is a real error number.  Linus said he will make sure that no syscall
   returns a value in -1 .. -4095 as a valid result, so we can safely
   test with -4095.  */
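
/* A rough C-level sketch of the same check (illustrative only; `retval'
   is a hypothetical int holding the raw value the kernel left in %eax):

     if ((unsigned int) retval >= (unsigned int) -4095)
       {
         errno = -retval;
         retval = -1;
       }

   so a kernel result of -ENOENT sets errno to ENOENT and makes the call
   return -1.  This is the same test that the `cmpl $-4095, %eax' in
   PSEUDO and INTERNAL_SYSCALL_ERROR_P below perform.  */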

/* We don't want the label for the error handler to be global when we define
   it here.  */
#ifdef PIC
# define SYSCALL_ERROR_LABEL 0f
#else
# define SYSCALL_ERROR_LABEL syscall_error
#endif

#undef PSEUDO
#define PSEUDO(name, syscall_name, args) \
  .text; \
  ENTRY (name) \
    DO_CALL (syscall_name, args); \
    cmpl $-4095, %eax; \
    jae SYSCALL_ERROR_LABEL; \
  L(pseudo_end):

#undef PSEUDO_END
#define PSEUDO_END(name) \
  SYSCALL_ERROR_HANDLER \
  END (name)
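
/* For orientation, an illustrative three-argument stub such as

     PSEUDO (__my_open, open, 3)

   expands roughly (ignoring CFI directives and internal labels) to

     __my_open:
         pushl %ebx                  # save the call-saved %ebx
         movl 16(%esp), %edx         # argument 3
         movl 12(%esp), %ecx         # argument 2
         movl 8(%esp), %ebx          # argument 1
         movl $SYS_ify (open), %eax
         ENTER_KERNEL
         popl %ebx
         cmpl $-4095, %eax
         jae SYSCALL_ERROR_LABEL
     L(pseudo_end):

   The name __my_open is made up for this example.  */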

#undef PSEUDO_NOERRNO
#define PSEUDO_NOERRNO(name, syscall_name, args) \
  .text; \
  ENTRY (name) \
    DO_CALL (syscall_name, args)

#undef PSEUDO_END_NOERRNO
#define PSEUDO_END_NOERRNO(name) \
  END (name)

#define ret_NOERRNO ret

/* The function has to return the error code.  */
#undef PSEUDO_ERRVAL
#define PSEUDO_ERRVAL(name, syscall_name, args) \
  .text; \
  ENTRY (name) \
    DO_CALL (syscall_name, args); \
    negl %eax

#undef PSEUDO_END_ERRVAL
#define PSEUDO_END_ERRVAL(name) \
  END (name)

#define ret_ERRVAL ret
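
/* The three flavors above differ only in how the result is treated: the
   plain PSEUDO branches to the errno-setting error handler, PSEUDO_NOERRNO
   is for calls that cannot fail and checks nothing, and PSEUDO_ERRVAL
   negates %eax so the stub returns the positive error number itself
   (a kernel result of -EINVAL comes back as EINVAL).  */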

#ifndef PIC
# define SYSCALL_ERROR_HANDLER  /* Nothing here; code in sysdep.S is used.  */
#else

# if RTLD_PRIVATE_ERRNO
#  define SYSCALL_ERROR_HANDLER \
0:SETUP_PIC_REG(cx); \
  addl $_GLOBAL_OFFSET_TABLE_, %ecx; \
  xorl %edx, %edx; \
  subl %eax, %edx; \
  movl %edx, rtld_errno@GOTOFF(%ecx); \
  orl $-1, %eax; \
  jmp L(pseudo_end);

# elif defined _LIBC_REENTRANT

#  if USE___THREAD
#   ifndef NOT_IN_libc
#    define SYSCALL_ERROR_ERRNO __libc_errno
#   else
#    define SYSCALL_ERROR_ERRNO errno
#   endif
#   define SYSCALL_ERROR_HANDLER \
0:SETUP_PIC_REG (cx); \
  addl $_GLOBAL_OFFSET_TABLE_, %ecx; \
  movl SYSCALL_ERROR_ERRNO@GOTNTPOFF(%ecx), %ecx; \
  xorl %edx, %edx; \
  subl %eax, %edx; \
  SYSCALL_ERROR_HANDLER_TLS_STORE (%edx, %ecx); \
  orl $-1, %eax; \
  jmp L(pseudo_end);
#   ifndef NO_TLS_DIRECT_SEG_REFS
#    define SYSCALL_ERROR_HANDLER_TLS_STORE(src, destoff) \
  movl src, %gs:(destoff)
#   else
#    define SYSCALL_ERROR_HANDLER_TLS_STORE(src, destoff) \
  addl %gs:0, destoff; \
  movl src, (destoff)
#   endif
#  else
#   define SYSCALL_ERROR_HANDLER \
0:pushl %ebx; \
  cfi_adjust_cfa_offset (4); \
  cfi_rel_offset (ebx, 0); \
  SETUP_PIC_REG (bx); \
  addl $_GLOBAL_OFFSET_TABLE_, %ebx; \
  xorl %edx, %edx; \
  subl %eax, %edx; \
  pushl %edx; \
  cfi_adjust_cfa_offset (4); \
  PUSH_ERRNO_LOCATION_RETURN; \
  call BP_SYM (__errno_location)@PLT; \
  POP_ERRNO_LOCATION_RETURN; \
  popl %ecx; \
  cfi_adjust_cfa_offset (-4); \
  popl %ebx; \
  cfi_adjust_cfa_offset (-4); \
  cfi_restore (ebx); \
  movl %ecx, (%eax); \
  orl $-1, %eax; \
  jmp L(pseudo_end);
/* A quick note: it is assumed that the call to `__errno_location' does
   not modify the stack!  */
#  endif
# else
/* Store (- %eax) into errno through the GOT.  */
#  define SYSCALL_ERROR_HANDLER \
0:SETUP_PIC_REG(cx); \
  addl $_GLOBAL_OFFSET_TABLE_, %ecx; \
  xorl %edx, %edx; \
  subl %eax, %edx; \
  movl errno@GOT(%ecx), %ecx; \
  movl %edx, (%ecx); \
  orl $-1, %eax; \
  jmp L(pseudo_end);
# endif /* _LIBC_REENTRANT */
#endif /* PIC */


/* The original calling convention for system calls on Linux/i386 is
   to use int $0x80.  */
#ifdef I386_USE_SYSENTER
# ifdef SHARED
#  define ENTER_KERNEL call *%gs:SYSINFO_OFFSET
# else
#  define ENTER_KERNEL call *_dl_sysinfo
# endif
#else
# define ENTER_KERNEL int $0x80
#endif
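
/* Background note: SYSINFO_OFFSET in the TCB and the _dl_sysinfo variable
   both hold the address of the entry point the kernel exports through the
   vDSO (typically __kernel_vsyscall), which uses sysenter where the
   hardware supports it and falls back to int $0x80 otherwise.  The
   indirect call therefore selects the fastest available kernel entry at
   run time.  */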

/* Linux takes system call arguments in registers:

	syscall number	%eax	call-clobbered
	arg 1		%ebx	call-saved
	arg 2		%ecx	call-clobbered
	arg 3		%edx	call-clobbered
	arg 4		%esi	call-saved
	arg 5		%edi	call-saved

   The stack layout upon entering the function is:

	20(%esp)	Arg# 5
	16(%esp)	Arg# 4
	12(%esp)	Arg# 3
	 8(%esp)	Arg# 2
	 4(%esp)	Arg# 1
	  (%esp)	Return address

   (Of course a function with, say, 3 arguments does not have entries for
   arguments 4 and 5.)

   The following code tries hard to be optimal.  A general assumption
   (which is true according to the data books I have) is that

	2 * xchg	is more expensive than	pushl + movl + popl

   Besides this, a neat trick is used.  The Linux calling conventions
   say that, among the registers used for parameters, %ecx and %edx need
   not be saved.  Therefore we may clobber these registers even when
   they are not used for parameter passing.

   As a result one can see below that we save the content of the %ebx
   register in the %edx register when we have fewer than 3 arguments
   (2 * movl is less expensive than pushl + popl).

   Second, unlike for the other registers, we don't save the contents of
   %ecx and %edx when we have more than 1 and 2 arguments respectively.

   The code below might look a bit long, but we have to take care of the
   pipelined processors (i586).  There the `pushl' and `popl' instructions
   are marked as NP (not pairable), with the exception of two consecutive
   such instructions.  This gives no penalty on other processors though.  */

#undef DO_CALL
#define DO_CALL(syscall_name, args) \
    PUSHARGS_##args \
    DOARGS_##args \
    movl $SYS_ify (syscall_name), %eax; \
    ENTER_KERNEL \
    POPARGS_##args

#define PUSHARGS_0	/* No arguments to push.  */
#define DOARGS_0	/* No arguments to frob.  */
#define POPARGS_0	/* No arguments to pop.  */
#define _PUSHARGS_0	/* No arguments to push.  */
#define _DOARGS_0(n)	/* No arguments to frob.  */
#define _POPARGS_0	/* No arguments to pop.  */

#define PUSHARGS_1	movl %ebx, %edx; L(SAVEBX1): PUSHARGS_0
#define DOARGS_1	_DOARGS_1 (4)
#define POPARGS_1	POPARGS_0; movl %edx, %ebx; L(RESTBX1):
#define _PUSHARGS_1	pushl %ebx; cfi_adjust_cfa_offset (4); \
			cfi_rel_offset (ebx, 0); L(PUSHBX1): _PUSHARGS_0
#define _DOARGS_1(n)	movl n(%esp), %ebx; _DOARGS_0(n-4)
#define _POPARGS_1	_POPARGS_0; popl %ebx; cfi_adjust_cfa_offset (-4); \
			cfi_restore (ebx); L(POPBX1):

#define PUSHARGS_2	PUSHARGS_1
#define DOARGS_2	_DOARGS_2 (8)
#define POPARGS_2	POPARGS_1
#define _PUSHARGS_2	_PUSHARGS_1
#define _DOARGS_2(n)	movl n(%esp), %ecx; _DOARGS_1 (n-4)
#define _POPARGS_2	_POPARGS_1

#define PUSHARGS_3	_PUSHARGS_2
#define DOARGS_3	_DOARGS_3 (16)
#define POPARGS_3	_POPARGS_3
#define _PUSHARGS_3	_PUSHARGS_2
#define _DOARGS_3(n)	movl n(%esp), %edx; _DOARGS_2 (n-4)
#define _POPARGS_3	_POPARGS_2

#define PUSHARGS_4	_PUSHARGS_4
#define DOARGS_4	_DOARGS_4 (24)
#define POPARGS_4	_POPARGS_4
#define _PUSHARGS_4	pushl %esi; cfi_adjust_cfa_offset (4); \
			cfi_rel_offset (esi, 0); L(PUSHSI1): _PUSHARGS_3
#define _DOARGS_4(n)	movl n(%esp), %esi; _DOARGS_3 (n-4)
#define _POPARGS_4	_POPARGS_3; popl %esi; cfi_adjust_cfa_offset (-4); \
			cfi_restore (esi); L(POPSI1):

#define PUSHARGS_5	_PUSHARGS_5
#define DOARGS_5	_DOARGS_5 (32)
#define POPARGS_5	_POPARGS_5
#define _PUSHARGS_5	pushl %edi; cfi_adjust_cfa_offset (4); \
			cfi_rel_offset (edi, 0); L(PUSHDI1): _PUSHARGS_4
#define _DOARGS_5(n)	movl n(%esp), %edi; _DOARGS_4 (n-4)
#define _POPARGS_5	_POPARGS_4; popl %edi; cfi_adjust_cfa_offset (-4); \
			cfi_restore (edi); L(POPDI1):

#define PUSHARGS_6	_PUSHARGS_6
#define DOARGS_6	_DOARGS_6 (40)
#define POPARGS_6	_POPARGS_6
#define _PUSHARGS_6	pushl %ebp; cfi_adjust_cfa_offset (4); \
			cfi_rel_offset (ebp, 0); L(PUSHBP1): _PUSHARGS_5
#define _DOARGS_6(n)	movl n(%esp), %ebp; _DOARGS_5 (n-4)
#define _POPARGS_6	_POPARGS_5; popl %ebp; cfi_adjust_cfa_offset (-4); \
			cfi_restore (ebp); L(POPBP1):
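
/* As a worked example (not from the original sources), DO_CALL (access, 2)
   expands roughly (ignoring the internal labels) to

     movl %ebx, %edx              # PUSHARGS_2: park %ebx in %edx
     movl 8(%esp), %ecx           # argument 2
     movl 4(%esp), %ebx           # argument 1
     movl $SYS_ify (access), %eax
     ENTER_KERNEL
     movl %edx, %ebx              # POPARGS_2: restore %ebx

   With three or more arguments %edx is itself needed for argument 3, so
   %ebx is saved with a real pushl/popl pair instead, and %esi, %edi and
   %ebp are pushed as further arguments require them.  */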

#else	/* !__ASSEMBLER__ */

/* We need some help from the assembler to generate optimal code.  We
   define some macros here which will be used later.  */
asm (".L__X'%ebx = 1\n\t"
     ".L__X'%ecx = 2\n\t"
     ".L__X'%edx = 2\n\t"
     ".L__X'%eax = 3\n\t"
     ".L__X'%esi = 3\n\t"
     ".L__X'%edi = 3\n\t"
     ".L__X'%ebp = 3\n\t"
     ".L__X'%esp = 3\n\t"
     ".macro bpushl name reg\n\t"
     ".if 1 - \\name\n\t"
     ".if 2 - \\name\n\t"
     "error\n\t"
     ".else\n\t"
     "xchgl \\reg, %ebx\n\t"
     ".endif\n\t"
     ".endif\n\t"
     ".endm\n\t"
     ".macro bpopl name reg\n\t"
     ".if 1 - \\name\n\t"
     ".if 2 - \\name\n\t"
     "error\n\t"
     ".else\n\t"
     "xchgl \\reg, %ebx\n\t"
     ".endif\n\t"
     ".endif\n\t"
     ".endm\n\t");
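
/* What the asm above sets up: each .L__X'%reg symbol maps a register name
   to a small class code (%ebx is 1, %ecx and %edx are 2, everything else
   is 3).  `bpushl' and `bpopl' then expand to `xchgl reg, %ebx' when the
   operand landed in class 2, to nothing when it is already %ebx, and to
   the invalid mnemonic `error' otherwise, so an unusable register
   assignment is caught at assembly time instead of silently clobbering
   state.  These macros are used by the PIC LOADARGS_1 and RESTOREARGS_1
   definitions below.  */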

/* Define a macro which expands inline into the wrapper code for a system
   call.  */
#undef INLINE_SYSCALL
#define INLINE_SYSCALL(name, nr, args...) \
  ({ \
    unsigned int resultvar = INTERNAL_SYSCALL (name, , nr, args); \
    if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (resultvar, ), 0)) \
      { \
	__set_errno (INTERNAL_SYSCALL_ERRNO (resultvar, )); \
	resultvar = 0xffffffff; \
      } \
    (int) resultvar; })
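
/* Illustrative use (the function name is made up):

     int
     __my_close (int fd)
     {
       return INLINE_SYSCALL (close, 1, fd);
     }

   On success this returns the kernel's result unchanged; on failure the
   negated error code is stored in errno and the expression evaluates
   to -1.  */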

/* Define a macro which expands inline into the wrapper code for a system
   call.  This use is for internal calls that do not need to handle errors
   normally.  It will never touch errno.  This returns just what the kernel
   gave back.

   The _NCS variant allows non-constant syscall numbers, but it is not
   possible to use more than four parameters.  */
#undef INTERNAL_SYSCALL
#ifdef I386_USE_SYSENTER
# ifdef SHARED
#  define INTERNAL_SYSCALL(name, err, nr, args...) \
  ({ \
    register unsigned int resultvar; \
    EXTRAVAR_##nr \
    asm volatile ( \
    LOADARGS_##nr \
    "movl %1, %%eax\n\t" \
    "call *%%gs:%P2\n\t" \
    RESTOREARGS_##nr \
    : "=a" (resultvar) \
    : "i" (__NR_##name), "i" (offsetof (tcbhead_t, sysinfo)) \
      ASMFMT_##nr(args) : "memory", "cc"); \
    (int) resultvar; })
#  define INTERNAL_SYSCALL_NCS(name, err, nr, args...) \
  ({ \
    register unsigned int resultvar; \
    EXTRAVAR_##nr \
    asm volatile ( \
    LOADARGS_##nr \
    "call *%%gs:%P2\n\t" \
    RESTOREARGS_##nr \
    : "=a" (resultvar) \
    : "0" (name), "i" (offsetof (tcbhead_t, sysinfo)) \
      ASMFMT_##nr(args) : "memory", "cc"); \
    (int) resultvar; })
# else
#  define INTERNAL_SYSCALL(name, err, nr, args...) \
  ({ \
    register unsigned int resultvar; \
    EXTRAVAR_##nr \
    asm volatile ( \
    LOADARGS_##nr \
    "movl %1, %%eax\n\t" \
    "call *_dl_sysinfo\n\t" \
    RESTOREARGS_##nr \
    : "=a" (resultvar) \
    : "i" (__NR_##name) ASMFMT_##nr(args) : "memory", "cc"); \
    (int) resultvar; })
#  define INTERNAL_SYSCALL_NCS(name, err, nr, args...) \
  ({ \
    register unsigned int resultvar; \
    EXTRAVAR_##nr \
    asm volatile ( \
    LOADARGS_##nr \
    "call *_dl_sysinfo\n\t" \
    RESTOREARGS_##nr \
    : "=a" (resultvar) \
    : "0" (name) ASMFMT_##nr(args) : "memory", "cc"); \
    (int) resultvar; })
# endif
#else
# define INTERNAL_SYSCALL(name, err, nr, args...) \
  ({ \
    register unsigned int resultvar; \
    EXTRAVAR_##nr \
    asm volatile ( \
    LOADARGS_##nr \
    "movl %1, %%eax\n\t" \
    "int $0x80\n\t" \
    RESTOREARGS_##nr \
    : "=a" (resultvar) \
    : "i" (__NR_##name) ASMFMT_##nr(args) : "memory", "cc"); \
    (int) resultvar; })
# define INTERNAL_SYSCALL_NCS(name, err, nr, args...) \
  ({ \
    register unsigned int resultvar; \
    EXTRAVAR_##nr \
    asm volatile ( \
    LOADARGS_##nr \
    "int $0x80\n\t" \
    RESTOREARGS_##nr \
    : "=a" (resultvar) \
    : "0" (name) ASMFMT_##nr(args) : "memory", "cc"); \
    (int) resultvar; })
#endif

#undef INTERNAL_SYSCALL_DECL
#define INTERNAL_SYSCALL_DECL(err) do { } while (0)

#undef INTERNAL_SYSCALL_ERROR_P
#define INTERNAL_SYSCALL_ERROR_P(val, err) \
  ((unsigned int) (val) >= 0xfffff001u)

#undef INTERNAL_SYSCALL_ERRNO
#define INTERNAL_SYSCALL_ERRNO(val, err)	(-(val))
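
/* Illustrative use of the internal variants (identifiers such as `fd' are
   examples only):

     INTERNAL_SYSCALL_DECL (err);
     int res = INTERNAL_SYSCALL (close, err, 1, fd);
     if (INTERNAL_SYSCALL_ERROR_P (res, err))
       res = -INTERNAL_SYSCALL_ERRNO (res, err);

   On i386 the `err' argument is ignored: INTERNAL_SYSCALL_DECL expands to
   an empty statement and the error is recovered directly from the negative
   value in %eax, so errno is never touched.  */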

#define LOADARGS_0
#ifdef __PIC__
# if defined I386_USE_SYSENTER && defined SHARED
#  define LOADARGS_1 \
    "bpushl .L__X'%k3, %k3\n\t"
#  define LOADARGS_5 \
    "movl %%ebx, %4\n\t" \
    "movl %3, %%ebx\n\t"
# else
#  define LOADARGS_1 \
    "bpushl .L__X'%k2, %k2\n\t"
#  define LOADARGS_5 \
    "movl %%ebx, %3\n\t" \
    "movl %2, %%ebx\n\t"
# endif
# define LOADARGS_2	LOADARGS_1
# define LOADARGS_3 \
    "xchgl %%ebx, %%edi\n\t"
# define LOADARGS_4	LOADARGS_3
#else
# define LOADARGS_1
# define LOADARGS_2
# define LOADARGS_3
# define LOADARGS_4
# define LOADARGS_5
#endif

#define RESTOREARGS_0
#ifdef __PIC__
# if defined I386_USE_SYSENTER && defined SHARED
#  define RESTOREARGS_1 \
    "bpopl .L__X'%k3, %k3\n\t"
#  define RESTOREARGS_5 \
    "movl %4, %%ebx"
# else
#  define RESTOREARGS_1 \
    "bpopl .L__X'%k2, %k2\n\t"
#  define RESTOREARGS_5 \
    "movl %3, %%ebx"
# endif
# define RESTOREARGS_2	RESTOREARGS_1
# define RESTOREARGS_3 \
    "xchgl %%edi, %%ebx\n\t"
# define RESTOREARGS_4	RESTOREARGS_3
#else
# define RESTOREARGS_1
# define RESTOREARGS_2
# define RESTOREARGS_3
# define RESTOREARGS_4
# define RESTOREARGS_5
#endif

#define ASMFMT_0()
#ifdef __PIC__
# define ASMFMT_1(arg1) \
	, "cd" (arg1)
# define ASMFMT_2(arg1, arg2) \
	, "d" (arg1), "c" (arg2)
# define ASMFMT_3(arg1, arg2, arg3) \
	, "D" (arg1), "c" (arg2), "d" (arg3)
# define ASMFMT_4(arg1, arg2, arg3, arg4) \
	, "D" (arg1), "c" (arg2), "d" (arg3), "S" (arg4)
# define ASMFMT_5(arg1, arg2, arg3, arg4, arg5) \
	, "0" (arg1), "m" (_xv), "c" (arg2), "d" (arg3), "S" (arg4), "D" (arg5)
#else
# define ASMFMT_1(arg1) \
	, "b" (arg1)
# define ASMFMT_2(arg1, arg2) \
	, "b" (arg1), "c" (arg2)
# define ASMFMT_3(arg1, arg2, arg3) \
	, "b" (arg1), "c" (arg2), "d" (arg3)
# define ASMFMT_4(arg1, arg2, arg3, arg4) \
	, "b" (arg1), "c" (arg2), "d" (arg3), "S" (arg4)
# define ASMFMT_5(arg1, arg2, arg3, arg4, arg5) \
	, "b" (arg1), "c" (arg2), "d" (arg3), "S" (arg4), "D" (arg5)
#endif

#define EXTRAVAR_0
#define EXTRAVAR_1
#define EXTRAVAR_2
#define EXTRAVAR_3
#define EXTRAVAR_4
#ifdef __PIC__
# define EXTRAVAR_5 int _xv;
#else
# define EXTRAVAR_5
#endif
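
/* Note on the PIC variants above: when compiling position-independent
   code %ebx holds the GOT pointer and cannot be requested through a "b"
   constraint.  Argument 1 is therefore handed over in another register
   (%ecx/%edx for one or two arguments, %edi for three or four, and via
   %eax plus the temporary `_xv' for five), and the LOADARGS and
   RESTOREARGS sequences swap it into and out of %ebx only around the
   actual kernel entry.  */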

/* Consistency check for position-independent code.  */
#ifdef __PIC__
# define check_consistency() \
  ({ int __res; \
     __asm__ __volatile__ \
       ("call __i686.get_pc_thunk.cx;" \
	"addl $_GLOBAL_OFFSET_TABLE_, %%ecx;" \
	"subl %%ebx, %%ecx;" \
	"je 1f;" \
	"ud2;" \
	"1:\n" \
	".section .gnu.linkonce.t.__i686.get_pc_thunk.cx,\"ax\",@progbits;" \
	".globl __i686.get_pc_thunk.cx;" \
	".hidden __i686.get_pc_thunk.cx;" \
	".type __i686.get_pc_thunk.cx,@function;" \
	"__i686.get_pc_thunk.cx:" \
	"movl (%%esp), %%ecx;" \
	"ret;" \
	".previous" \
	: "=c" (__res)); \
     __res; })
#endif
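
/* What the check does: the thunk loads the return address (the current
   instruction pointer) into %ecx, the addl turns it into the GOT address,
   and the subl/je pair compares that with %ebx.  If %ebx does not hold
   the GOT pointer, the ud2 opcode raises SIGILL, so a miscompiled PIC
   caller is caught immediately rather than corrupting data later.  */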

#endif	/* __ASSEMBLER__ */


/* Pointer mangling support.  */
#if defined NOT_IN_libc && defined IS_IN_rtld
/* We cannot use the thread descriptor because in ld.so we use setjmp
   before the descriptor is initialized.  Using a global variable is
   too complicated here since we have no PC-relative addressing mode.  */
#else
# ifdef __ASSEMBLER__
#  define PTR_MANGLE(reg)	xorl %gs:POINTER_GUARD, reg; \
				roll $9, reg
#  define PTR_DEMANGLE(reg)	rorl $9, reg; \
				xorl %gs:POINTER_GUARD, reg
# else
#  define PTR_MANGLE(var)	asm ("xorl %%gs:%c2, %0\n" \
				     "roll $9, %0" \
				     : "=r" (var) \
				     : "0" (var), \
				       "i" (offsetof (tcbhead_t, \
						      pointer_guard)))
#  define PTR_DEMANGLE(var)	asm ("rorl $9, %0\n" \
				     "xorl %%gs:%c2, %0" \
				     : "=r" (var) \
				     : "0" (var), \
				       "i" (offsetof (tcbhead_t, \
						      pointer_guard)))
# endif
#endif
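
/* Illustrative use (hypothetical code, not from this file): a function
   pointer that has to sit in writable memory can be stored in mangled
   form, e.g.

     uintptr_t cb = (uintptr_t) callback;
     PTR_MANGLE (cb);
     saved_handler = cb;
     ...
     cb = saved_handler;
     PTR_DEMANGLE (cb);
     ((void (*) (void)) cb) ();

   The xor with the per-thread pointer_guard value plus the 9-bit rotation
   makes it hard for an attacker who can overwrite memory to plant a
   usable pointer.  */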

#endif /* linux/i386/sysdep.h */