/* Library support for -fsplit-stack.  */
/* Copyright (C) 2009-2015 Free Software Foundation, Inc.
   Contributed by Ian Lance Taylor <iant@google.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

/* powerpc 32-bit not supported.  */
#if !defined __powerpc__ || defined __powerpc64__

#include "coretypes.h"
#include "libgcc_tm.h"

/* If inhibit_libc is defined, we can not compile this file.  The
   effect is that people will not be able to use -fsplit-stack.  That
   is much better than failing the build particularly since people
   will want to define inhibit_libc while building a compiler which
   can build glibc.  */

#ifndef inhibit_libc

#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/uio.h>

#include "generic-morestack.h"

typedef unsigned int uintptr_type __attribute__ ((mode (pointer)));

/* This file contains subroutines that are used by code compiled with
   the -fsplit-stack option.  */

/* Declare functions to avoid warnings--there is no header file for
   these internal functions.  We give most of these functions the
   flatten attribute in order to minimize their stack usage--here we
   must minimize stack usage even at the cost of code size, and in
   general inlining everything will do that.  */

extern void
__generic_morestack_set_initial_sp (void *sp, size_t len)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void *
__generic_morestack (size_t *frame_size, void *old_stack, size_t param_size)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void *
__generic_releasestack (size_t *pavailable)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void
__morestack_block_signals (void)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void
__morestack_unblock_signals (void)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern size_t
__generic_findstack (void *stack)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void
__morestack_load_mmap (void)
  __attribute__ ((no_split_stack, visibility ("hidden")));

extern void *
__morestack_allocate_stack_space (size_t size)
  __attribute__ ((visibility ("hidden")));

/* These are functions which -fsplit-stack code can call.  These are
   not called by the compiler, and are not hidden.  FIXME: These
   should be in some header file somewhere, somehow.  */

extern void *
__splitstack_find (void *, void *, size_t *, void **, void **, void **)
  __attribute__ ((visibility ("default")));

extern void
__splitstack_block_signals (int *, int *)
  __attribute__ ((visibility ("default")));

extern void
__splitstack_getcontext (void *context[10])
  __attribute__ ((no_split_stack, visibility ("default")));

extern void
__splitstack_setcontext (void *context[10])
  __attribute__ ((no_split_stack, visibility ("default")));

extern void *
__splitstack_makecontext (size_t, void *context[10], size_t *)
  __attribute__ ((visibility ("default")));

extern void *
__splitstack_resetcontext (void *context[10], size_t *)
  __attribute__ ((visibility ("default")));

extern void
__splitstack_releasecontext (void *context[10])
  __attribute__ ((visibility ("default")));

extern void
__splitstack_block_signals_context (void *context[10], int *, int *)
  __attribute__ ((visibility ("default")));

extern void *
__splitstack_find_context (void *context[10], size_t *, void **, void **,
                           void **)
  __attribute__ ((visibility ("default")));

/* These functions must be defined by the processor specific code.  */

extern void *__morestack_get_guard (void)
  __attribute__ ((no_split_stack, visibility ("hidden")));

extern void __morestack_set_guard (void *)
  __attribute__ ((no_split_stack, visibility ("hidden")));

extern void *__morestack_make_guard (void *, size_t)
  __attribute__ ((no_split_stack, visibility ("hidden")));

/* When we allocate a stack segment we put this header at the
   start.  */

struct stack_segment
{
  /* The previous stack segment--when a function running on this stack
     segment returns, it will run on the previous one.  */
  struct stack_segment *prev;
  /* The next stack segment, if it has been allocated--when a function
     is running on this stack segment, the next one is not being
     used.  */
  struct stack_segment *next;
  /* The total size of this stack segment.  */
  size_t size;
  /* The stack address when this stack was created.  This is used when
     popping the stack.  */
  void *old_stack;
  /* A list of memory blocks allocated by dynamic stack
     allocation--alloca or a variably sized array.  */
  struct dynamic_allocation_blocks *dynamic_allocation;
  /* A list of dynamic memory blocks no longer needed.  */
  struct dynamic_allocation_blocks *free_dynamic_allocation;
  /* An extra pointer in case we need some more information some
     day.  */
  void *extra;
};

/* This structure holds the (approximate) initial stack pointer and
   size for the system supplied stack for a thread.  This is set when
   the thread is created.  We also store a sigset_t here to hold the
   signal mask while splitting the stack, since we don't want to store
   that on the stack.  */

struct initial_sp
{
  /* The initial stack pointer.  */
  void *sp;
  /* The stack length.  */
  size_t len;
  /* A signal mask, put here so that the thread can use it without
     needing stack space.  */
  sigset_t mask;
  /* Non-zero if we should not block signals.  This is a reversed flag
     so that the default zero value is the safe value.  The type is
     uintptr_type because it replaced one of the void * pointers in
     extra.  */
  uintptr_type dont_block_signals;
  /* Some extra space for later extensibility.  */
  void *extra[4];
};

/* A list of memory blocks allocated by dynamic stack allocation.
   This is used for code that calls alloca or uses variably sized
   arrays.  */

struct dynamic_allocation_blocks
{
  /* The next block in the list.  */
  struct dynamic_allocation_blocks *next;
  /* The size of the allocated memory.  */
  size_t size;
  /* The allocated memory.  */
  void *block;
};

/* These thread local global variables must be shared by all split
   stack code across shared library boundaries.  Therefore, they have
   default visibility.  They have extensibility fields if needed for
   new versions.  If more radical changes are needed, new code can be
   written using new variable names, while still using the existing
   variables in a backward compatible manner.  Symbol versioning is
   also used, although, since these variables are only referenced by
   code in this file and generic-morestack-thread.c, it is likely that
   simply using new names will suffice.  */

/* The first stack segment allocated for this thread.  */

__thread struct stack_segment *__morestack_segments
  __attribute__ ((visibility ("default")));

/* The stack segment that we think we are currently using.  This will
   be correct in normal usage, but will be incorrect if an exception
   unwinds into a different stack segment or if longjmp jumps to a
   different stack segment.  */

__thread struct stack_segment *__morestack_current_segment
  __attribute__ ((visibility ("default")));

/* The initial stack pointer and size for this thread.  */

__thread struct initial_sp __morestack_initial_sp
  __attribute__ ((visibility ("default")));

/* A static signal mask, to avoid taking up stack space.  */

static sigset_t __morestack_fullmask;

/* Convert an integer to a decimal string without using much stack
   space.  Return a pointer to the part of the buffer to use.  We use
   this instead of sprintf because sprintf will require too much stack
   space.  */

static char *
print_int (int val, char *buf, int buflen, size_t *print_len)
{
  int is_negative;
  int i;
  unsigned int uval;

  uval = (unsigned int) val;
  if (val < 0)
    {
      is_negative = 1;
      uval = - uval;
    }
  else
    is_negative = 0;

  i = buflen;
  do
    {
      --i;
      buf[i] = '0' + (uval % 10);
      uval /= 10;
    }
  while (uval != 0 && i > 0);

  if (is_negative)
    {
      if (i > 0)
        --i;
      buf[i] = '-';
    }

  *print_len = buflen - i;
  return buf + i;
}

/* Print the string MSG/LEN, the errno number ERR, and a newline on
   stderr.  Then crash.  */

static void
__morestack_fail (const char *, size_t, int) __attribute__ ((noreturn));

static void
__morestack_fail (const char *msg, size_t len, int err)
{
  char buf[24];
  static const char nl[] = "\n";
  struct iovec iov[3];
  union { char *p; const char *cp; } const_cast;

  const_cast.cp = msg;
  iov[0].iov_base = const_cast.p;
  iov[0].iov_len = len;
  /* We can't call strerror, because it may try to translate the error
     message, and that would use too much stack space.  */
  iov[1].iov_base = print_int (err, buf, sizeof buf, &iov[1].iov_len);
  const_cast.cp = &nl[0];
  iov[2].iov_base = const_cast.p;
  iov[2].iov_len = sizeof nl - 1;
  /* FIXME: On systems without writev we need to issue three write
     calls, or punt on printing errno.  For now this is irrelevant
     since stack splitting only works on GNU/Linux anyhow.  */
  writev (2, iov, 3);
  abort ();
}

/* Allocate a new stack segment.  FRAME_SIZE is the required frame
   size.  */

static struct stack_segment *
allocate_segment (size_t frame_size)
{
  static unsigned int static_pagesize;
  static int use_guard_page;
  unsigned int pagesize;
  unsigned int overhead;
  unsigned int allocate;
  void *space;
  struct stack_segment *pss;

  pagesize = static_pagesize;
  if (pagesize == 0)
    {
      unsigned int p;

      pagesize = getpagesize ();

#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
      p = __sync_val_compare_and_swap (&static_pagesize, 0, pagesize);
#else
      /* Just hope this assignment is atomic.  */
      static_pagesize = pagesize;
      p = 0;
#endif

      use_guard_page = getenv ("SPLIT_STACK_GUARD") != 0;

      /* FIXME: I'm not sure this assert should be in the released
         code.  */
      assert (p == 0 || p == pagesize);
    }

  overhead = sizeof (struct stack_segment);

  allocate = pagesize;
  if (allocate < MINSIGSTKSZ)
    allocate = ((MINSIGSTKSZ + overhead + pagesize - 1)
                & ~ (pagesize - 1));
  if (allocate < frame_size)
    allocate = ((frame_size + overhead + pagesize - 1)
                & ~ (pagesize - 1));

  if (use_guard_page)
    allocate += pagesize;

  /* FIXME: If this binary requires an executable stack, then we need
     to set PROT_EXEC.  Unfortunately figuring that out is complicated
     and target dependent.  We would need to use dl_iterate_phdr to
     see if there is any object which does not have a PT_GNU_STACK
     phdr, though only for architectures which use that mechanism.  */
  space = mmap (NULL, allocate, PROT_READ | PROT_WRITE,
                MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (space == MAP_FAILED)
    {
      static const char msg[] =
        "unable to allocate additional stack space: errno ";
      __morestack_fail (msg, sizeof msg - 1, errno);
    }

  if (use_guard_page)
    {
      void *guard;

#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
      guard = space;
      space = (char *) space + pagesize;
#else
      guard = space + allocate - pagesize;
#endif

      mprotect (guard, pagesize, PROT_NONE);
      allocate -= pagesize;
    }

  pss = (struct stack_segment *) space;

  pss->prev = NULL;
  pss->next = NULL;
  pss->size = allocate - overhead;
  pss->dynamic_allocation = NULL;
  pss->free_dynamic_allocation = NULL;
  pss->extra = NULL;

  return pss;
}

/* Free a list of dynamic blocks.  */

static void
free_dynamic_blocks (struct dynamic_allocation_blocks *p)
{
  while (p != NULL)
    {
      struct dynamic_allocation_blocks *next;

      next = p->next;
      free (p->block);
      free (p);
      p = next;
    }
}

/* Merge two lists of dynamic blocks.  */

static struct dynamic_allocation_blocks *
merge_dynamic_blocks (struct dynamic_allocation_blocks *a,
                      struct dynamic_allocation_blocks *b)
{
  struct dynamic_allocation_blocks **pp;

  if (a == NULL)
    return b;
  if (b == NULL)
    return a;
  for (pp = &a->next; *pp != NULL; pp = &(*pp)->next)
    ;
  *pp = b;
  return a;
}

/* Release stack segments.  If FREE_DYNAMIC is non-zero, we also free
   any dynamic blocks.  Otherwise we return them.  */

struct dynamic_allocation_blocks *
__morestack_release_segments (struct stack_segment **pp, int free_dynamic)
{
  struct dynamic_allocation_blocks *ret;
  struct stack_segment *pss;

  ret = NULL;
  pss = *pp;
  while (pss != NULL)
    {
      struct stack_segment *next;
      unsigned int allocate;

      next = pss->next;

      if (pss->dynamic_allocation != NULL
          || pss->free_dynamic_allocation != NULL)
        {
          if (free_dynamic)
            {
              free_dynamic_blocks (pss->dynamic_allocation);
              free_dynamic_blocks (pss->free_dynamic_allocation);
            }
          else
            {
              ret = merge_dynamic_blocks (pss->dynamic_allocation, ret);
              ret = merge_dynamic_blocks (pss->free_dynamic_allocation, ret);
            }
        }

      allocate = pss->size + sizeof (struct stack_segment);
      if (munmap (pss, allocate) < 0)
        {
          static const char msg[] = "munmap of stack space failed: errno ";
          __morestack_fail (msg, sizeof msg - 1, errno);
        }

      pss = next;
    }
  *pp = NULL;

  return ret;
}

/* This function is called by a processor specific function to set the
   initial stack pointer for a thread.  The operating system will
   always create a stack for a thread.  Here we record a stack pointer
   near the base of that stack.  The size argument lets the processor
   specific code estimate how much stack space is available on this
   thread.  */

void
__generic_morestack_set_initial_sp (void *sp, size_t len)
{
  /* The stack pointer most likely starts on a page boundary.  Adjust
     to the nearest 512 byte boundary.  It's not essential that we be
     precise here; getting it wrong will just leave some stack space
     unused.  */
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
  sp = (void *) ((((__UINTPTR_TYPE__) sp + 511U) / 512U) * 512U);
#else
  sp = (void *) ((((__UINTPTR_TYPE__) sp - 511U) / 512U) * 512U);
#endif

  __morestack_initial_sp.sp = sp;
  __morestack_initial_sp.len = len;
  sigemptyset (&__morestack_initial_sp.mask);

  sigfillset (&__morestack_fullmask);
#if defined(__GLIBC__) && defined(__linux__)
  /* In glibc, the first two real time signals are used by the NPTL
     threading library.  By taking them out of the set of signals, we
     avoid copying the signal mask in pthread_sigmask.  More
     importantly, pthread_sigmask uses less stack space on x86_64.  */
  sigdelset (&__morestack_fullmask, __SIGRTMIN);
  sigdelset (&__morestack_fullmask, __SIGRTMIN + 1);
#endif
}

/* This function is called by a processor specific function which is
   run in the prologue when more stack is needed.  The processor
   specific function handles the details of saving registers and
   frobbing the actual stack pointer.  This function is responsible
   for allocating a new stack segment and for copying a parameter
   block from the old stack to the new one.  On function entry
   *PFRAME_SIZE is the size of the required stack frame--the returned
   stack must be at least this large.  On function exit *PFRAME_SIZE
   is the amount of space remaining on the allocated stack.  OLD_STACK
   points at the parameters on the old stack (really the current one
   while this function is running).  OLD_STACK is saved so that it can
   be returned by a later call to __generic_releasestack.  PARAM_SIZE
   is the size in bytes of parameters to copy to the new stack.  This
   function returns a pointer to the new stack segment, pointing to
   the memory after the parameters have been copied.  The returned
   value minus the returned *PFRAME_SIZE (or plus if the stack grows
   upward) is the first address on the stack which should not be used.

   This function is running on the old stack and has only a limited
   amount of stack space available.  */

void *
__generic_morestack (size_t *pframe_size, void *old_stack, size_t param_size)
{
  size_t frame_size = *pframe_size;
  struct stack_segment *current;
  struct stack_segment **pp;
  struct dynamic_allocation_blocks *dynamic;
  char *from;
  char *to;
  void *ret;
  size_t i;
  size_t aligned;

  current = __morestack_current_segment;

  pp = current != NULL ? &current->next : &__morestack_segments;
  if (*pp != NULL && (*pp)->size < frame_size)
    dynamic = __morestack_release_segments (pp, 0);
  else
    dynamic = NULL;
  current = *pp;

  if (current == NULL)
    {
      current = allocate_segment (frame_size + param_size);
      current->prev = __morestack_current_segment;
      *pp = current;
    }

  current->old_stack = old_stack;

  __morestack_current_segment = current;

  if (dynamic != NULL)
    {
      /* Move the free blocks onto our list.  We don't want to call
         free here, as we are short on stack space.  */
      current->free_dynamic_allocation =
        merge_dynamic_blocks (dynamic, current->free_dynamic_allocation);
    }

  *pframe_size = current->size - param_size;

  /* Align the returned stack to a 32-byte boundary.  */
  aligned = (param_size + 31) & ~ (size_t) 31;

#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
  {
    char *bottom = (char *) (current + 1) + current->size;
    to = bottom - aligned;
    ret = bottom - aligned;
  }
#else
  to = (char *) (current + 1);
  to += aligned - param_size;
  ret = (char *) (current + 1) + aligned;
#endif

  /* We don't call memcpy to avoid worrying about the dynamic linker
     trying to resolve it.  */
  from = (char *) old_stack;
  for (i = 0; i < param_size; i++)
    *to++ = *from++;

  return ret;
}

/* This function is called by a processor specific function when it is
   ready to release a stack segment.  We don't actually release the
   stack segment, we just move back to the previous one.  The current
   stack segment will still be available if we need it in
   __generic_morestack.  This returns a pointer to the new stack
   segment to use, which is the one saved by a previous call to
   __generic_morestack.  The processor specific function is then
   responsible for actually updating the stack pointer.  This sets
   *PAVAILABLE to the amount of stack space now available.  */

void *
__generic_releasestack (size_t *pavailable)
{
  struct stack_segment *current;
  void *old_stack;

  current = __morestack_current_segment;
  old_stack = current->old_stack;
  current = current->prev;
  __morestack_current_segment = current;

  if (current != NULL)
    {
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
      *pavailable = (char *) old_stack - (char *) (current + 1);
#else
      *pavailable = (char *) (current + 1) + current->size - (char *) old_stack;
#endif
    }
  else
    {
      size_t used;

      /* We have popped back to the original stack.  */
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
      if ((char *) old_stack >= (char *) __morestack_initial_sp.sp)
        used = 0;
      else
        used = (char *) __morestack_initial_sp.sp - (char *) old_stack;
#else
      if ((char *) old_stack <= (char *) __morestack_initial_sp.sp)
        used = 0;
      else
        used = (char *) old_stack - (char *) __morestack_initial_sp.sp;
#endif

      if (used > __morestack_initial_sp.len)
        *pavailable = 0;
      else
        *pavailable = __morestack_initial_sp.len - used;
    }

  return old_stack;
}

/* Block signals while splitting the stack.  This avoids trouble if we
   try to invoke a signal handler which itself wants to split the
   stack.  */

extern int pthread_sigmask (int, const sigset_t *, sigset_t *)
  __attribute__ ((weak));

void
__morestack_block_signals (void)
{
  if (__morestack_initial_sp.dont_block_signals)
    ;
  else if (pthread_sigmask)
    pthread_sigmask (SIG_BLOCK, &__morestack_fullmask,
                     &__morestack_initial_sp.mask);
  else
    sigprocmask (SIG_BLOCK, &__morestack_fullmask,
                 &__morestack_initial_sp.mask);
}

/* Unblock signals while splitting the stack.  */

void
__morestack_unblock_signals (void)
{
  if (__morestack_initial_sp.dont_block_signals)
    ;
  else if (pthread_sigmask)
    pthread_sigmask (SIG_SETMASK, &__morestack_initial_sp.mask, NULL);
  else
    sigprocmask (SIG_SETMASK, &__morestack_initial_sp.mask, NULL);
}

/* This function is called to allocate dynamic stack space, for alloca
   or a variably sized array.  This is a regular function with
   sufficient stack space, so we just use malloc to allocate the
   space.  We attach the allocated blocks to the current stack
   segment, so that they will eventually be reused or freed.  */

void *
__morestack_allocate_stack_space (size_t size)
{
  struct stack_segment *seg, *current;
  struct dynamic_allocation_blocks *p;

  /* We have to block signals to avoid getting confused if we get
     interrupted by a signal whose handler itself uses alloca or a
     variably sized array.  */
  __morestack_block_signals ();

  /* Since we don't want to call free while we are low on stack space,
     we may have a list of already allocated blocks waiting to be
     freed.  Release them all, unless we find one that is large
     enough.  We don't look at every block to see if one is large
     enough, just the first one, because we aren't trying to build a
     memory allocator here, we're just trying to speed up common
     cases.  */

  current = __morestack_current_segment;
  p = NULL;
  for (seg = __morestack_segments; seg != NULL; seg = seg->next)
    {
      p = seg->free_dynamic_allocation;
      if (p != NULL)
        {
          if (p->size >= size)
            {
              seg->free_dynamic_allocation = p->next;
              break;
            }

          free_dynamic_blocks (p);
          seg->free_dynamic_allocation = NULL;
          p = NULL;
        }
    }

  if (p == NULL)
    {
      /* We need to allocate additional memory.  */
      p = malloc (sizeof (*p));
      if (p == NULL)
        abort ();
      p->size = size;
      p->block = malloc (size);
      if (p->block == NULL)
        abort ();
    }

  /* If we are still on the initial stack, then we have a space leak.
     FIXME.  */
  if (current != NULL)
    {
      p->next = current->dynamic_allocation;
      current->dynamic_allocation = p;
    }

  __morestack_unblock_signals ();

  return p->block;
}

/* Find the stack segment for STACK and return the amount of space
   available.  This is used when unwinding the stack because of an
   exception, in order to reset the stack guard correctly.  */

size_t
__generic_findstack (void *stack)
{
  struct stack_segment *pss;
  size_t used;

  for (pss = __morestack_current_segment; pss != NULL; pss = pss->prev)
    {
      if ((char *) pss < (char *) stack
          && (char *) pss + pss->size > (char *) stack)
        {
          __morestack_current_segment = pss;
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
          return (char *) stack - (char *) (pss + 1);
#else
          return (char *) (pss + 1) + pss->size - (char *) stack;
#endif
        }
    }

  /* We have popped back to the original stack.  */

  if (__morestack_initial_sp.sp == NULL)
    return 0;

#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
  if ((char *) stack >= (char *) __morestack_initial_sp.sp)
    used = 0;
  else
    used = (char *) __morestack_initial_sp.sp - (char *) stack;
#else
  if ((char *) stack <= (char *) __morestack_initial_sp.sp)
    used = 0;
  else
    used = (char *) stack - (char *) __morestack_initial_sp.sp;
#endif

  if (used > __morestack_initial_sp.len)
    return 0;
  else
    return __morestack_initial_sp.len - used;
}

/* This function is called at program startup time to make sure that
   mmap, munmap, and getpagesize are resolved if linking dynamically.
   We want to resolve them while we have enough stack for them, rather
   than calling into the dynamic linker while low on stack space.  */

void
__morestack_load_mmap (void)
{
  /* Call with bogus values to run faster.  We don't care if the call
     fails.  Pass __MORESTACK_CURRENT_SEGMENT to make sure that any
     TLS accessor function is resolved.  */
  mmap (__morestack_current_segment, 0, PROT_READ, MAP_ANONYMOUS, -1, 0);
  mprotect (NULL, 0, 0);
  munmap (0, getpagesize ());
}

/* This function may be used to iterate over the stack segments.
   This can be called like this.
     void *next_segment = NULL;
     void *next_sp = NULL;
     void *initial_sp = NULL;
     void *stack;
     size_t stack_size;
     while ((stack = __splitstack_find (next_segment, next_sp, &stack_size,
                                        &next_segment, &next_sp,
                                        &initial_sp)) != NULL)
       {
         // Stack segment starts at stack and is stack_size bytes long.
       }

   There is no way to iterate over the stack segments of a different
   thread.  However, what is permitted is for one thread to call this
   with the first two values NULL, to pass next_segment, next_sp, and
   initial_sp to a different thread, and then to suspend one way or
   another.  A different thread may run the subsequent
   __splitstack_find iterations.  Of course, this will only work if the
   first thread is suspended during the __splitstack_find iterations.
   If not, the second thread will be looking at the stack while it is
   changing, and anything could happen.

   FIXME: This should be declared in some header file, but where?  */

void *
__splitstack_find (void *segment_arg, void *sp, size_t *len,
                   void **next_segment, void **next_sp,
                   void **initial_sp)
{
  struct stack_segment *segment;
  char *nsp;
  void *ret;

  if (segment_arg == (void *) (uintptr_type) 1)
    {
      char *isp = (char *) *initial_sp;

      if (isp == NULL)
        return NULL;

      *next_segment = (void *) (uintptr_type) 2;
      *next_sp = NULL;
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
      if ((char *) sp >= isp)
        return NULL;
      *len = (char *) isp - (char *) sp;
      return sp;
#else
      if ((char *) sp <= (char *) isp)
        return NULL;
      *len = (char *) sp - (char *) isp;
      return (void *) isp;
#endif
    }
  else if (segment_arg == (void *) (uintptr_type) 2)
    return NULL;
  else if (segment_arg != NULL)
    segment = (struct stack_segment *) segment_arg;
  else
    {
      *initial_sp = __morestack_initial_sp.sp;
      segment = __morestack_current_segment;
      sp = (void *) &segment;
      while (1)
        {
          if (segment == NULL)
            return __splitstack_find ((void *) (uintptr_type) 1, sp, len,
                                      next_segment, next_sp, initial_sp);
          if ((char *) sp >= (char *) (segment + 1)
              && (char *) sp <= (char *) (segment + 1) + segment->size)
            break;
          segment = segment->prev;
        }
    }

  if (segment->prev == NULL)
    *next_segment = (void *) (uintptr_type) 1;
  else
    *next_segment = segment->prev;

  /* The old_stack value is the address of the function parameters of
     the function which called __morestack.  So if f1 called f2 which
     called __morestack, the stack looks like this:

         parameters       <- old_stack
         return in f1
         return in f2
         registers pushed by __morestack

     The registers pushed by __morestack may not be visible on any
     other stack, if we are being called by a signal handler
     immediately after the call to __morestack_unblock_signals.  We
     want to adjust our return value to include those registers.  This
     is target dependent.  */

  nsp = (char *) segment->old_stack;

  if (nsp == NULL)
    {
      /* We've reached the top of the stack.  */
      *next_segment = (void *) (uintptr_type) 2;
    }
  else
    {
#if defined (__x86_64__)
      nsp -= 12 * sizeof (void *);
#elif defined (__i386__)
      nsp -= 6 * sizeof (void *);
#elif defined __powerpc64__
#else
#error "unrecognized target"
#endif

      *next_sp = (void *) nsp;
    }

#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
  *len = (char *) (segment + 1) + segment->size - (char *) sp;
  ret = (void *) sp;
#else
  *len = (char *) sp - (char *) (segment + 1);
  ret = (void *) (segment + 1);
#endif

  return ret;
}

/* Tell the split stack code whether it has to block signals while
   manipulating the stack.  This is for programs in which some threads
   block all signals.  If a thread already blocks signals, there is no
   need for the split stack code to block them as well.  If NEW is not
   NULL, then if *NEW is non-zero signals will be blocked while
   splitting the stack, otherwise they will not.  If OLD is not NULL,
   *OLD will be set to the old value.  */

void
__splitstack_block_signals (int *new, int *old)
{
  if (old != NULL)
    *old = __morestack_initial_sp.dont_block_signals ? 0 : 1;
  if (new != NULL)
    __morestack_initial_sp.dont_block_signals = *new ? 0 : 1;
}
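
/* A usage sketch, illustrative only and not part of the original
   interface documentation: a program whose threads already block all
   signals can turn the extra masking off.  The variable names are
   assumptions made for the example.

     int block = 0;   // do not block signals while splitting
     int old;
     __splitstack_block_signals (&block, &old);
     // ... code that may split the stack ...
     __splitstack_block_signals (&old, NULL);   // restore previous setting
*/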

/* The offsets into the arrays used by __splitstack_getcontext and
   __splitstack_setcontext.  */

enum __splitstack_context_offsets
{
  MORESTACK_SEGMENTS = 0,
  CURRENT_SEGMENT = 1,
  CURRENT_STACK = 2,
  STACK_GUARD = 3,
  INITIAL_SP = 4,
  INITIAL_SP_LEN = 5,
  BLOCK_SIGNALS = 6,

  NUMBER_OFFSETS = 10
};

/* Get the current split stack context.  This may be used for
   coroutine switching, similar to getcontext.  The argument should
   have at least 10 void *pointers for extensibility, although we
   don't currently use all of them.  This would normally be called
   immediately before a call to getcontext or swapcontext or setjmp.  */

void
__splitstack_getcontext (void *context[NUMBER_OFFSETS])
{
  memset (context, 0, NUMBER_OFFSETS * sizeof (void *));
  context[MORESTACK_SEGMENTS] = (void *) __morestack_segments;
  context[CURRENT_SEGMENT] = (void *) __morestack_current_segment;
  context[CURRENT_STACK] = (void *) &context;
  context[STACK_GUARD] = __morestack_get_guard ();
  context[INITIAL_SP] = (void *) __morestack_initial_sp.sp;
  context[INITIAL_SP_LEN] = (void *) (uintptr_type) __morestack_initial_sp.len;
  context[BLOCK_SIGNALS] = (void *) __morestack_initial_sp.dont_block_signals;
}

/* Set the current split stack context.  The argument should be a
   context previously passed to __splitstack_getcontext.  This would
   normally be called immediately after a call to getcontext or
   swapcontext or setjmp if something jumped to it.  */

void
__splitstack_setcontext (void *context[NUMBER_OFFSETS])
{
  __morestack_segments = (struct stack_segment *) context[MORESTACK_SEGMENTS];
  __morestack_current_segment =
    (struct stack_segment *) context[CURRENT_SEGMENT];
  __morestack_set_guard (context[STACK_GUARD]);
  __morestack_initial_sp.sp = context[INITIAL_SP];
  __morestack_initial_sp.len = (size_t) context[INITIAL_SP_LEN];
  __morestack_initial_sp.dont_block_signals =
    (uintptr_type) context[BLOCK_SIGNALS];
}
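
/* An illustrative sketch, not from the original sources, of the
   getcontext/setcontext pairing described above when coroutines are
   switched with swapcontext (<ucontext.h>); the struct and variable
   names are assumptions made for the example.

     struct coro { ucontext_t uc; void *ss[10]; };

     void
     coro_yield (struct coro *me, struct coro *other)
     {
       __splitstack_getcontext (&me->ss[0]);
       swapcontext (&me->uc, &other->uc);
       // Control returns here when something later switches back to
       // ME; restore the split-stack state saved above.
       __splitstack_setcontext (&me->ss[0]);
     }
*/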

/* Create a new split stack context.  This will allocate a new stack
   segment which may be used by a coroutine.  STACK_SIZE is the
   minimum size of the new stack.  The caller is responsible for
   actually setting the stack pointer.  This would normally be called
   before a call to makecontext, and the returned stack pointer and
   size would be used to set the uc_stack field.  A function called
   via makecontext on a stack created by __splitstack_makecontext may
   not return.  Note that the returned pointer points to the lowest
   address in the stack space, and thus may not be the value to which
   to set the stack pointer.  */

void *
__splitstack_makecontext (size_t stack_size, void *context[NUMBER_OFFSETS],
                          size_t *size)
{
  struct stack_segment *segment;
  void *initial_sp;

  memset (context, 0, NUMBER_OFFSETS * sizeof (void *));
  segment = allocate_segment (stack_size);
  context[MORESTACK_SEGMENTS] = segment;
  context[CURRENT_SEGMENT] = segment;
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
  initial_sp = (void *) ((char *) (segment + 1) + segment->size);
#else
  initial_sp = (void *) (segment + 1);
#endif
  context[STACK_GUARD] = __morestack_make_guard (initial_sp, segment->size);
  context[INITIAL_SP] = NULL;
  context[INITIAL_SP_LEN] = 0;
  *size = segment->size;
  return (void *) (segment + 1);
}
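
/* An illustrative sketch, not from the original sources, of using
   this function with makecontext; coroutine_entry and the 256K size
   are arbitrary assumptions for the example.

     ucontext_t uc;
     void *ss[10];
     size_t size;
     void *stack = __splitstack_makecontext (256 * 1024, &ss[0], &size);

     getcontext (&uc);
     uc.uc_stack.ss_sp = stack;
     uc.uc_stack.ss_size = size;
     uc.uc_link = NULL;
     makecontext (&uc, coroutine_entry, 0);

   As noted above, coroutine_entry must not return, and
   __splitstack_setcontext (&ss[0]) must be used when actually
   switching to the new context.  */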

/* Given an existing split stack context, reset it back to the start
   of the stack.  Return the stack pointer and size, appropriate for
   use with makecontext.  This may be used if a coroutine exits, in
   order to reuse the stack segments for a new coroutine.  */

void *
__splitstack_resetcontext (void *context[10], size_t *size)
{
  struct stack_segment *segment;
  void *initial_sp;
  size_t initial_size;
  void *ret;

  /* Reset the context assuming that MORESTACK_SEGMENTS, INITIAL_SP
     and INITIAL_SP_LEN are correct.  */

  segment = context[MORESTACK_SEGMENTS];
  context[CURRENT_SEGMENT] = segment;
  context[CURRENT_STACK] = NULL;
  if (segment == NULL)
    {
      initial_sp = context[INITIAL_SP];
      initial_size = (uintptr_type) context[INITIAL_SP_LEN];
      ret = initial_sp;
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
      ret = (void *) ((char *) ret - initial_size);
#endif
    }
  else
    {
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
      initial_sp = (void *) ((char *) (segment + 1) + segment->size);
#else
      initial_sp = (void *) (segment + 1);
#endif
      initial_size = segment->size;
      ret = (void *) (segment + 1);
    }
  context[STACK_GUARD] = __morestack_make_guard (initial_sp, initial_size);
  context[BLOCK_SIGNALS] = NULL;
  *size = initial_size;

  return ret;
}

/* Release all the memory associated with a splitstack context.  This
   may be used if a coroutine exits and the associated stack should be
   freed.  */

void
__splitstack_releasecontext (void *context[10])
{
  __morestack_release_segments (((struct stack_segment **)
                                 &context[MORESTACK_SEGMENTS]),
                                1);
}
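
/* Illustrative only (assumed names): when a coroutine created as in
   the earlier sketch finishes, its context array SS can either be
   recycled for a new coroutine or torn down entirely.

     size_t size;
     void *stack = __splitstack_resetcontext (&ss[0], &size);
     // ... reuse stack/size with makecontext for a fresh coroutine ...

   or, if the stack is no longer needed at all:

     __splitstack_releasecontext (&ss[0]);
*/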

/* Like __splitstack_block_signals, but operating on CONTEXT, rather
   than on the current state.  */

void
__splitstack_block_signals_context (void *context[NUMBER_OFFSETS], int *new,
                                    int *old)
{
  if (old != NULL)
    *old = ((uintptr_type) context[BLOCK_SIGNALS]) != 0 ? 0 : 1;
  if (new != NULL)
    context[BLOCK_SIGNALS] = (void *) (uintptr_type) (*new ? 0 : 1);
}

/* Find the stack segments associated with a split stack context.
   This will return the address of the first stack segment and set
   *STACK_SIZE to its size.  It will set next_segment, next_sp, and
   initial_sp which may be passed to __splitstack_find to find the
   remaining segments.  */

void *
__splitstack_find_context (void *context[NUMBER_OFFSETS], size_t *stack_size,
                           void **next_segment, void **next_sp,
                           void **initial_sp)
{
  void *sp;
  struct stack_segment *segment;

  *initial_sp = context[INITIAL_SP];

  sp = context[CURRENT_STACK];
  if (sp == NULL)
    {
      /* Most likely this context was created but was never used.  The
         value 2 is a code used by __splitstack_find to mean that we
         have reached the end of the list of stacks.  */
      *next_segment = (void *) (uintptr_type) 2;
      *next_sp = NULL;
      *initial_sp = NULL;
      return NULL;
    }

  segment = context[CURRENT_SEGMENT];
  if (segment == NULL)
    {
      /* Most likely this context was saved by a thread which was not
         created using __splitstack_makecontext and which has never
         split the stack.  The value 1 is a code used by
         __splitstack_find to look at the initial stack.  */
      segment = (struct stack_segment *) (uintptr_type) 1;
    }

  return __splitstack_find (segment, sp, stack_size, next_segment, next_sp,
                            initial_sp);
}
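
/* An illustrative sketch, not from the original sources, of scanning
   the stack of a saved context (for example from a garbage collector
   examining a suspended coroutine); SS is the context array saved by
   __splitstack_getcontext for that coroutine.

     void *seg, *sp, *initial_sp, *stack;
     size_t len;

     stack = __splitstack_find_context (&ss[0], &len, &seg, &sp,
                                        &initial_sp);
     while (stack != NULL)
       {
         // Examine the memory from stack to stack + len.
         stack = __splitstack_find (seg, sp, &len, &seg, &sp,
                                    &initial_sp);
       }
*/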

#endif /* !defined (inhibit_libc) */
#endif /* not powerpc 32-bit */