1 # x86/x86_64 support for -fsplit-stack.
2 # Copyright (C) 2009-2018 Free Software Foundation, Inc.
3 # Contributed by Ian Lance Taylor <iant@google.com>.
5 # This file is part of GCC.
7 # GCC is free software; you can redistribute it and/or modify it under
8 # the terms of the GNU General Public License as published by the Free
9 # Software Foundation; either version 3, or (at your option) any later
12 # GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 # WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 # Under Section 7 of GPL version 3, you are granted additional
18 # permissions described in the GCC Runtime Library Exception, version
19 # 3.1, as published by the Free Software Foundation.
21 # You should have received a copy of the GNU General Public License and
22 # a copy of the GCC Runtime Library Exception along with this program;
23 # see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
24 # <http://www.gnu.org/licenses/>.
27 # Support for allocating more stack space when using -fsplit-stack.
28 # When a function discovers that it needs more stack space, it will
29 # call __morestack with the size of the stack frame and the size of
30 # the parameters to copy from the old stack frame to the new one.
31 # The __morestack function preserves the parameter registers and
32 # calls __generic_morestack to actually allocate the stack space.
34 # When this is called stack space is very low, but we ensure that
35 # there is enough space to push the parameter registers and to call
36 # __generic_morestack.
38 # When calling __generic_morestack, FRAME_SIZE points to the size of
39 # the desired frame when the function is called, and the function
40 # sets it to the size of the allocated stack. OLD_STACK points to
41 # the parameters on the old stack and PARAM_SIZE is the number of
42 # bytes of parameters to copy to the new stack. These are the
43 # parameters of the function that called __morestack. The
44 # __generic_morestack function returns the new stack pointer,
45 # pointing to the address of the first copied parameter. The return
46 # value minus the returned *FRAME_SIZE will be the first address on
47 # the stack which we should not use.
49 # void *__generic_morestack (size_t *frame_size, void *old_stack,
52 # The __morestack routine has to arrange for the caller to return to a
53 # stub on the new stack. The stub is responsible for restoring the
54 # old stack pointer and returning to the caller's caller. This calls
55 # __generic_releasestack to retrieve the old stack pointer and release
56 # the newly allocated stack.
58 # void *__generic_releasestack (size_t *available);
60 # We do a little dance so that the processor's call/return return
61 # address prediction works out. The compiler arranges for the caller
63 # call __generic_morestack
66 # // carry on with function
67 # After we allocate more stack, we call L, which is in our caller.
68 # When that returns (to the predicted instruction), we release the
69 # stack segment and reset the stack pointer. We then return to the
70 # predicted instruction, namely the ret instruction immediately after
71 # the call to __generic_morestack. That then returns to the caller of
72 # the original caller.
75 # The amount of extra space we ask for. In general this has to be
76 # enough for the dynamic loader to find a symbol and for a signal
80 #define BACKOFF (1024)
82 #define BACKOFF (1536)
86 # The amount of space we ask for when calling non-split-stack code.
87 #define NON_SPLIT_STACK 0x100000
89 # This entry point is for split-stack code which calls non-split-stack
90 # code. When the linker sees this case, it converts the call to
91 # __morestack to call __morestack_non_split instead. We just bump the
92 # requested stack space by NON_SPLIT_STACK (0x100000) bytes plus slop.
# __morestack_non_split: called (via linker rewriting) when split-stack
# code calls non-split-stack code.  Checks whether the current stack
# already has NON_SPLIT_STACK bytes of headroom beyond the requested
# frame; if so it returns directly, otherwise it bumps the request and
# falls through into __morestack.
# NOTE(review): extraction dropped interior lines here (the
# #ifdef/#else separating the 32- and 64-bit arms, the "2:" labels,
# and several instructions) — confirm against the full file.
96 .global __morestack_non_split
97 .hidden __morestack_non_split
100 .type __morestack_non_split,@function
103 __morestack_non_split:
108 # See below for an extended explanation of this.
111 pushl %eax # Save %eax in case it is a parameter.
113 .cfi_adjust_cfa_offset 4 # Account for pushed register.
115 movl %esp,%eax # Current stack,
116 subl 8(%esp),%eax # less required stack frame size,
117 subl $NON_SPLIT_STACK,%eax # less space for non-split code.
118 cmpl %gs:0x30,%eax # See if we have enough space.
119 jb 2f # Get more space if we need it.
# Stack layout at this point (fast path, enough space):
122 # %esp + 20: stack pointer after two returns
123 # %esp + 16: return address of morestack caller's caller
124 # %esp + 12: size of parameters
125 # %esp + 8: new stack frame size
126 # %esp + 4: return address of this function
129 # Since we aren't doing a full split stack, we don't need to
130 # do anything when our caller returns. So we return to our
131 # caller rather than calling it, and let it return as usual.
132 # To make that work we adjust the return address.
134 # This breaks call/return address prediction for the call to
135 # this function. I can't figure out a way to make it work
136 # short of copying the parameters down the stack, which will
137 # probably take more clock cycles than we will lose breaking
138 # call/return address prediction. We will only break
139 # prediction for this call, not for our caller.
141 movl 4(%esp),%eax # Increment the return address
142 cmpb $0xc3,(%eax) # to skip the ret instruction; 0xc3 is the ret opcode.
147 # If the instruction that we return to is
148 # leal 20(%ebp),{%eax,%ecx,%edx}
149 # then we have been called by a varargs function that expects
150 # %ebp to hold a real value. That can only work if we do the
151 # full stack split routine. FIXME: This is fragile.
164 movl %eax,4(%esp) # Update return address.
166 popl %eax # Restore %eax and stack.
168 .cfi_adjust_cfa_offset -4 # Account for popped register.
170 ret $8 # Return to caller, popping args.
# NOTE(review): the "2:" slow-path label targeted by "jb 2f" above was
# lost in extraction; it belongs just before the following directive.
173 .cfi_adjust_cfa_offset 4 # Back to where we were.
175 popl %eax # Restore %eax and stack.
177 .cfi_adjust_cfa_offset -4 # Account for popped register.
179 # Increment space we request.
180 addl $NON_SPLIT_STACK+0x1000+BACKOFF,4(%esp)
182 # Fall through into morestack.
# ---- 64-bit arm (the #else separating it was dropped in extraction) ----
186 # See below for an extended explanation of this.
189 pushq %rax # Save %rax in case caller is using
190 # it to preserve original %r10.
191 .cfi_adjust_cfa_offset 8 # Adjust for pushed register.
193 movq %rsp,%rax # Current stack,
194 subq %r10,%rax # less required stack frame size,
195 subq $NON_SPLIT_STACK,%rax # less space for non-split code.
198 cmpq %fs:0x70,%rax # See if we have enough space.
203 jb 2f # Get more space if we need it.
205 # If the instruction that we return to is
206 # leaq 24(%rbp), %r11
207 # then we have been called by a varargs function that expects
208 # %rbp to hold a real value. That can only work if we do the
209 # full stack split routine. FIXME: This is fragile.
211 incq %rax # Skip ret instruction in caller.
212 cmpl $0x185d8d4c,(%rax) # Encoding of "leaq 24(%rbp),%r11" (4c 8d 5d 18).
215 # This breaks call/return prediction, as described above.
216 incq 8(%rsp) # Increment the return address.
218 popq %rax # Restore register.
220 .cfi_adjust_cfa_offset -8 # Adjust for popped register.
222 ret # Return to caller.
# NOTE(review): the 64-bit "2:" slow-path label was lost in extraction.
225 popq %rax # Restore register.
227 .cfi_adjust_cfa_offset -8 # Adjust for popped register.
229 # Increment space we request.
230 addq $NON_SPLIT_STACK+0x1000+BACKOFF,%r10
232 # Fall through into morestack.
238 .size __morestack_non_split, . - __morestack_non_split
241 # __morestack_non_split falls through into __morestack.
244 # The __morestack function.
# __morestack (32-bit arm): allocates a new stack segment via
# __generic_morestack, switches to it, calls back into the caller's
# body, then releases the segment via __generic_releasestack on return.
# Signals are blocked around the allocator calls.
# NOTE(review): extraction dropped interior lines (labels such as
# .LEHB0/.LEHE0/.L1, pushes/pops of the parameter registers, and the
# surrounding #ifndef __x86_64__) — confirm against the full file.
250 .type __morestack,@function
261 # The 32-bit __morestack function.
263 # We use a cleanup to restore the stack guard if an exception
264 # is thrown through this code.
266 .cfi_personality 0,__gcc_personality_v0
269 .cfi_personality 0x9b,DW.ref.__gcc_personality_v0
270 .cfi_lsda 0x1b,.LLSDA1
273 # We return below with a ret $8. We will return to a single
274 # return instruction, which will return to the caller of our
275 # caller. We let the unwinder skip that single return
276 # instruction, and just return to the real caller.
278 # Here CFA points just past the return address on the stack,
279 # e.g., on function entry it is %esp + 4. The stack looks
281 # CFA + 12: stack pointer after two returns
282 # CFA + 8: return address of morestack caller's caller
283 # CFA + 4: size of parameters
284 # CFA: new stack frame size
285 # CFA - 4: return address of this function
286 # CFA - 8: previous value of %ebp; %ebp points here
287 # Setting the new CFA to be the current CFA + 12 (i.e., %esp +
288 # 16) will make the unwinder pick up the right return address.
293 .cfi_adjust_cfa_offset 4
294 .cfi_offset %ebp, -20
296 .cfi_def_cfa_register %ebp
298 # In 32-bit mode the parameters are pushed on the stack. The
299 # argument size is pushed then the new stack frame size is
302 # In the body of a non-leaf function, the stack pointer will
303 # be aligned to a 16-byte boundary. That is CFA + 12 in the
304 # stack picture above: (CFA + 12) % 16 == 0. At this point we
305 # have %esp == CFA - 8, so %esp % 16 == 12. We need some
306 # space for saving registers and passing parameters, and we
307 # need to wind up with %esp % 16 == 0.
310 # Because our cleanup code may need to clobber %ebx, we need
311 # to save it here so the unwinder can restore the value used
312 # by the caller. Note that we don't have to restore the
313 # register, since we don't change it, we just have to save it
316 .cfi_offset %ebx, -24
318 # In 32-bit mode the registers %eax, %edx, and %ecx may be
319 # used for parameters, depending on the regparm and fastcall
326 call __morestack_block_signals
328 movl 12(%ebp),%eax # The size of the parameters.
330 leal 20(%ebp),%eax # Address of caller's parameters.
332 addl $BACKOFF,8(%ebp) # Ask for backoff bytes.
333 leal 8(%ebp),%eax # The address of the new frame size.
336 call __generic_morestack
338 movl %eax,%esp # Switch to the new stack.
339 subl 8(%ebp),%eax # The end of the stack space.
340 addl $BACKOFF,%eax # Back off BACKOFF bytes.
343 # FIXME: The offset must match
344 # TARGET_THREAD_SPLIT_STACK_OFFSET in
345 # gcc/config/i386/linux.h.
346 movl %eax,%gs:0x30 # Save the new stack boundary.
348 call __morestack_unblock_signals
350 movl -12(%ebp),%edx # Restore registers.
353 movl 4(%ebp),%eax # Increment the return address
354 cmpb $0xc3,(%eax) # to skip the ret instruction; 0xc3 is the ret opcode.
359 movl %eax,-12(%ebp) # Store return address in an
362 movl -8(%ebp),%eax # Restore the last register.
364 call *-12(%ebp) # Call our caller!
366 # The caller will return here, as predicted.
368 # Save the registers which may hold a return value. We
369 # assume that __generic_releasestack does not touch any
370 # floating point or vector registers.
374 # Push the arguments to __generic_releasestack now so that the
375 # stack is at a 16-byte boundary for
376 # __morestack_block_signals.
377 pushl $0 # Where the available space is returned.
378 leal 0(%esp),%eax # Push its address.
381 call __morestack_block_signals
383 call __generic_releasestack
385 subl 4(%esp),%eax # Subtract available space.
386 addl $BACKOFF,%eax # Back off BACKOFF bytes.
388 movl %eax,%gs:0x30 # Save the new stack boundary.
390 addl $8,%esp # Remove values from stack.
392 # We need to restore the old stack pointer, which is in %ebp,
393 # before we unblock signals. We also need to restore %eax and
394 # %edx after we unblock signals but before we return. Do this
395 # by moving %eax and %edx from the current stack to the old
398 popl %edx # Pop return value from current stack.
401 movl %ebp,%esp # Restore stack pointer.
403 # As before, we now have %esp % 16 == 12.
405 pushl %eax # Push return value on old stack.
407 subl $4,%esp # Align stack to 16-byte boundary.
409 call __morestack_unblock_signals
412 popl %edx # Restore return value.
417 # We never changed %ebx, so we don't have to actually restore it.
422 .cfi_def_cfa %esp, 16
423 ret $8 # Return to caller, which will
424 # immediately return. Pop
425 # arguments as we go.
427 # This is the cleanup code called by the stack unwinder when unwinding
428 # through the code between .LEHB0 and .LEHE0 above.
432 subl $16,%esp # Maintain 16 byte alignment.
433 movl %eax,4(%esp) # Save exception header.
434 movl %ebp,(%esp) # Stack pointer after resume.
435 call __generic_findstack
436 movl %ebp,%ecx # Get the stack pointer.
437 subl %eax,%ecx # Subtract available space.
438 addl $BACKOFF,%ecx # Back off BACKOFF bytes.
439 movl %ecx,%gs:0x30 # Save new stack boundary.
440 movl 4(%esp),%eax # Function argument.
443 call __x86.get_pc_thunk.bx # %ebx may not be set up for us.
444 addl $_GLOBAL_OFFSET_TABLE_, %ebx
445 call _Unwind_Resume@PLT # Resume unwinding.
450 #else /* defined(__x86_64__) */
# __morestack (64-bit arm): same protocol as the 32-bit version but the
# new frame size arrives in %r10 and the argument size in %r11.
# NOTE(review): extraction dropped interior lines (register push/pop
# sequences, .LEHB0/.LEHE0/.L1 labels, and some #ifdef arms for the
# x32 TCB offset) — confirm against the full file.
453 # The 64-bit __morestack function.
455 # We use a cleanup to restore the stack guard if an exception
456 # is thrown through this code.
458 .cfi_personality 0x3,__gcc_personality_v0
459 .cfi_lsda 0x3,.LLSDA1
461 .cfi_personality 0x9b,DW.ref.__gcc_personality_v0
462 .cfi_lsda 0x1b,.LLSDA1
465 # We will return a single return instruction, which will
466 # return to the caller of our caller. Let the unwinder skip
467 # that single return instruction, and just return to the real
471 # Set up a normal backtrace.
473 .cfi_adjust_cfa_offset 8
474 .cfi_offset %rbp, -24
476 .cfi_def_cfa_register %rbp
478 # In 64-bit mode the new stack frame size is passed in r10
479 # and the argument size is passed in r11.
481 addq $BACKOFF,%r10 # Ask for backoff bytes.
482 pushq %r10 # Save new frame size.
484 # In 64-bit mode the registers %rdi, %rsi, %rdx, %rcx, %r8,
485 # and %r9 may be used for parameters. We also preserve %rax
486 # which the caller may use to hold %r10.
498 # We entered morestack with the stack pointer aligned to a
499 # 16-byte boundary (the call to morestack's caller used 8
500 # bytes, and the call to morestack used 8 bytes). We have now
501 # pushed 10 registers, so we are still aligned to a 16-byte
504 call __morestack_block_signals
506 leaq -8(%rbp),%rdi # Address of new frame size.
507 leaq 24(%rbp),%rsi # The caller's parameters.
508 popq %rdx # The size of the parameters.
510 subq $8,%rsp # Align stack.
512 call __generic_morestack
514 movq -8(%rbp),%r10 # Reload modified frame size
515 movq %rax,%rsp # Switch to the new stack.
516 subq %r10,%rax # The end of the stack space.
517 addq $BACKOFF,%rax # Back off BACKOFF bytes.
520 # FIXME: The offset must match
521 # TARGET_THREAD_SPLIT_STACK_OFFSET in
522 # gcc/config/i386/linux64.h.
523 # Macro to save the new stack boundary.
# The two #define arms below select the LP64 (%fs:0x70, 64-bit store)
# vs. x32 (%fs:0x40, 32-bit store) TCB slot; the #ifdef __LP64__ /
# #else lines between them were dropped in extraction.
525 #define X86_64_SAVE_NEW_STACK_BOUNDARY(reg) movq %r##reg,%fs:0x70
527 #define X86_64_SAVE_NEW_STACK_BOUNDARY(reg) movl %e##reg,%fs:0x40
529 X86_64_SAVE_NEW_STACK_BOUNDARY (ax)
531 call __morestack_unblock_signals
533 movq -24(%rbp),%rdi # Restore registers.
540 movq 8(%rbp),%r10 # Increment the return address
541 incq %r10 # to skip the ret instruction;
544 movq -16(%rbp),%rax # Restore caller's %rax.
546 call *%r10 # Call our caller!
548 # The caller will return here, as predicted.
550 # Save the registers which may hold a return value. We
551 # assume that __generic_releasestack does not touch any
552 # floating point or vector registers.
556 call __morestack_block_signals
558 pushq $0 # For alignment.
559 pushq $0 # Where the available space is returned.
560 leaq 0(%rsp),%rdi # Pass its address.
562 call __generic_releasestack
564 subq 0(%rsp),%rax # Subtract available space.
565 addq $BACKOFF,%rax # Back off BACKOFF bytes.
567 X86_64_SAVE_NEW_STACK_BOUNDARY (ax)
569 addq $16,%rsp # Remove values from stack.
571 # We need to restore the old stack pointer, which is in %rbp,
572 # before we unblock signals. We also need to restore %rax and
573 # %rdx after we unblock signals but before we return. Do this
574 # by moving %rax and %rdx from the current stack to the old
577 popq %rdx # Pop return value from current stack.
580 movq %rbp,%rsp # Restore stack pointer.
582 # Now %rsp % 16 == 8.
584 subq $8,%rsp # For alignment.
585 pushq %rax # Push return value on old stack.
588 call __morestack_unblock_signals
590 popq %rdx # Restore return value.
597 .cfi_def_cfa %rsp, 16
598 ret # Return to caller, which will
599 # immediately return.
601 # This is the cleanup code called by the stack unwinder when unwinding
602 # through the code between .LEHB0 and .LEHE0 above.
606 subq $16,%rsp # Maintain 16 byte alignment.
607 movq %rax,(%rsp) # Save exception header.
608 movq %rbp,%rdi # Stack pointer after resume.
609 call __generic_findstack
610 movq %rbp,%rcx # Get the stack pointer.
611 subq %rax,%rcx # Subtract available space.
612 addq $BACKOFF,%rcx # Back off BACKOFF bytes.
613 X86_64_SAVE_NEW_STACK_BOUNDARY (cx)
614 movq (%rsp),%rdi # Restore exception data for call.
# The PIC (@PLT) and non-PIC calls below are alternate #ifdef arms;
# the conditional lines between them were dropped in extraction.
616 call _Unwind_Resume@PLT # Resume unwinding.
618 call _Unwind_Resume # Resume unwinding.
621 #endif /* defined(__x86_64__) */
625 .size __morestack, . - __morestack
628 #if !defined(__x86_64__) && defined(__PIC__)
629 # Output the thunk to get PC into bx, since we use it above.
# Emitted in a COMDAT section so duplicate copies across objects merge.
630 .section .text.__x86.get_pc_thunk.bx,"axG",@progbits,__x86.get_pc_thunk.bx,comdat
631 .globl __x86.get_pc_thunk.bx
632 .hidden __x86.get_pc_thunk.bx
634 .type __x86.get_pc_thunk.bx, @function
636 __x86.get_pc_thunk.bx:
# NOTE(review): the thunk body (presumably "movl (%esp),%ebx; ret")
# was dropped in extraction — confirm against the full file.
642 .size __x86.get_pc_thunk.bx, . - __x86.get_pc_thunk.bx
646 # The exception table. This tells the personality routine to execute
647 # the exception handler.
649 .section .gcc_except_table,"a",@progbits
# LSDA header: no landing-pad base, no type table, uleb128 call sites.
652 .byte 0xff # @LPStart format (omit)
653 .byte 0xff # @TType format (omit)
654 .byte 0x1 # call-site format (uleb128)
655 .uleb128 .LLSDACSE1-.LLSDACSB1 # Call-site table length
# Single call-site entry covering the .LEHB0..LEHE0 region above,
# landing at .L1 (labels themselves were dropped in extraction).
657 .uleb128 .LEHB0-.LFB1 # region 0 start
658 .uleb128 .LEHE0-.LEHB0 # length
659 .uleb128 .L1-.LFB1 # landing pad
664 .global __gcc_personality_v0
666 # Build a position independent reference to the basic
667 # personality function.
668 .hidden DW.ref.__gcc_personality_v0
669 .weak DW.ref.__gcc_personality_v0
670 .section .data.DW.ref.__gcc_personality_v0,"awG",@progbits,DW.ref.__gcc_personality_v0,comdat
671 .type DW.ref.__gcc_personality_v0, @object
672 DW.ref.__gcc_personality_v0:
# The .size/.long and .size/.quad pairs below are the 32-bit and
# 64-bit #ifdef arms; the conditionals were dropped in extraction.
675 .size DW.ref.__gcc_personality_v0, 4
676 .long __gcc_personality_v0
679 .size DW.ref.__gcc_personality_v0, 8
680 .quad __gcc_personality_v0
684 #if defined __x86_64__ && defined __LP64__
686 # This entry point is used for the large model. With this entry point
687 # the upper 32 bits of %r10 hold the argument size and the lower 32
688 # bits hold the new stack frame size. There doesn't seem to be a way
689 # to know in the assembler code that we are assembling for the large
690 # model, and there doesn't seem to be a large model multilib anyhow.
691 # If one is developed, then the non-PIC code is probably OK since we
692 # will probably be close to the morestack code, but the PIC code
693 # almost certainly needs to be changed. FIXME.
696 .global __morestack_large_model
697 .hidden __morestack_large_model
700 .type __morestack_large_model,@function
703 __morestack_large_model:
# Keep only the frame size in %r10: a 32-bit register write
# zero-extends, clearing the argument-size bits in the upper half.
# NOTE(review): the instructions that move the argument size into its
# own register were dropped in extraction — confirm against full file.
709 andl $0xffffffff, %r10d
715 .size __morestack_large_model, . - __morestack_large_model
718 #endif /* __x86_64__ && __LP64__ */
720 # Initialize the stack test value when the program starts or when a
721 # new thread starts. We don't know how large the main stack is, so we
722 # guess conservatively. We might be able to use getrlimit here.
725 .global __stack_split_initialize
726 .hidden __stack_split_initialize
729 .type __stack_split_initialize, @function
732 __stack_split_initialize:
# 32-bit arm (its #ifndef __x86_64__ was dropped in extraction).
737 leal -16000(%esp),%eax # We should have at least 16K.
# NOTE(review): the store of %eax to %gs:0x30 and the argument pushes
# for the call below were dropped in extraction.
739 subl $4,%esp # Align stack.
# PIC vs non-PIC call: alternate #ifdef arms, conditionals dropped.
743 call __generic_morestack_set_initial_sp@PLT
745 call __generic_morestack_set_initial_sp
750 #else /* defined(__x86_64__) */
752 leaq -16000(%rsp),%rax # We should have at least 16K.
753 X86_64_SAVE_NEW_STACK_BOUNDARY (ax)
754 subq $8,%rsp # Align stack.
# PIC vs non-PIC call: alternate #ifdef arms, conditionals dropped.
758 call __generic_morestack_set_initial_sp@PLT
760 call __generic_morestack_set_initial_sp
765 #endif /* defined(__x86_64__) */
768 .size __stack_split_initialize, . - __stack_split_initialize
771 # Routines to get and set the guard, for __splitstack_getcontext,
772 # __splitstack_setcontext, and __splitstack_makecontext.
774 # void *__morestack_get_guard (void) returns the current stack guard.
776 .global __morestack_get_guard
777 .hidden __morestack_get_guard
780 .type __morestack_get_guard,@function
783 __morestack_get_guard:
# NOTE(review): the body (a load of the guard from the TCB slot into
# the return register) was dropped in extraction — confirm against
# the full file.
797 .size __morestack_get_guard, . - __morestack_get_guard
800 # void __morestack_set_guard (void *) sets the stack guard.
801 .global __morestack_set_guard
802 .hidden __morestack_set_guard
805 .type __morestack_set_guard,@function
808 __morestack_set_guard:
# 64-bit arm: store the first argument (%rdi) into the TCB guard slot.
# NOTE(review): the 32-bit arm and its #ifdef were dropped in
# extraction — confirm against the full file.
814 X86_64_SAVE_NEW_STACK_BOUNDARY (di)
819 .size __morestack_set_guard, . - __morestack_set_guard
822 # void *__morestack_make_guard (void *, size_t) returns the stack
823 # guard value for a stack.
824 .global __morestack_make_guard
825 .hidden __morestack_make_guard
828 .type __morestack_make_guard,@function
831 __morestack_make_guard:
# NOTE(review): the body (computing stack_base - size + BACKOFF into
# the return register, presumably) was dropped in extraction —
# confirm against the full file.
845 .size __morestack_make_guard, . - __morestack_make_guard
848 # Make __stack_split_initialize a high priority constructor. FIXME:
849 # This is ELF specific.
851 .section .ctors.65535,"aw",@progbits
# The .long and .quad pairs below are the 32-bit and 64-bit #ifdef
# arms (pointer-sized constructor entries); conditionals were dropped
# in extraction.
855 .long __stack_split_initialize
856 .long __morestack_load_mmap
859 .quad __stack_split_initialize
860 .quad __morestack_load_mmap
# Marker notes: non-executable stack, and flag this object as
# containing split-stack code.
864 .section .note.GNU-stack,"",@progbits
865 .section .note.GNU-split-stack,"",@progbits
866 .section .note.GNU-no-split-stack,"",@progbits