class.c (check_bases): Propagate non-literality.
[official-gcc.git] / libgcc / generic-morestack.c
blob3709d32086458488d2aeb18bc5018e29dbbb3f22
1 /* Library support for -fsplit-stack. */
2 /* Copyright (C) 2009, 2010 Free Software Foundation, Inc.
3 Contributed by Ian Lance Taylor <iant@google.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 Under Section 7 of GPL version 3, you are granted additional
18 permissions described in the GCC Runtime Library Exception, version
19 3.1, as published by the Free Software Foundation.
21 You should have received a copy of the GNU General Public License and
22 a copy of the GCC Runtime Library Exception along with this program;
23 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
24 <http://www.gnu.org/licenses/>. */
26 #include "tconfig.h"
27 #include "tsystem.h"
28 #include "coretypes.h"
29 #include "tm.h"
31 /* If inhibit_libc is defined, we can not compile this file. The
32 effect is that people will not be able to use -fsplit-stack. That
33 is much better than failing the build particularly since people
34 will want to define inhibit_libc while building a compiler which
35 can build glibc. */
37 #ifndef inhibit_libc
39 #include <assert.h>
40 #include <errno.h>
41 #include <signal.h>
42 #include <stdlib.h>
43 #include <unistd.h>
44 #include <sys/mman.h>
45 #include <sys/uio.h>
47 #include "generic-morestack.h"
49 /* This file contains subroutines that are used by code compiled with
50 -fsplit-stack. */
52 /* Declare functions to avoid warnings--there is no header file for
53 these internal functions. We give most of these functions the
54 flatten attribute in order to minimize their stack usage--here we
55 must minimize stack usage even at the cost of code size, and in
56 general inlining everything will do that. */
/* Declare functions to avoid warnings--there is no header file for
   these internal functions.  We give most of these functions the
   flatten attribute in order to minimize their stack usage--here we
   must minimize stack usage even at the cost of code size, and in
   general inlining everything will do that.  */

extern void
__generic_morestack_set_initial_sp (void *sp, size_t len)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void *
__generic_morestack (size_t *frame_size, void *old_stack, size_t param_size)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void *
__generic_releasestack (size_t *pavailable)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void
__morestack_block_signals (void)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void
__morestack_unblock_signals (void)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern size_t
__generic_findstack (void *stack)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void
__morestack_load_mmap (void)
  __attribute__ ((no_split_stack, visibility ("hidden")));

extern void *
__morestack_allocate_stack_space (size_t size)
  __attribute__ ((visibility ("hidden")));

/* This is a function which -fsplit-stack code can call to get a list
   of the stacks.  Since it is not called only by the compiler, it is
   not hidden.  */

extern void *
__splitstack_find (void *, void *, size_t *, void **, void **, void **)
  __attribute__ ((visibility ("default")));
/* When we allocate a stack segment we put this header at the
   start.  */

struct stack_segment
{
  /* The previous stack segment--when a function running on this stack
     segment returns, it will run on the previous one.  */
  struct stack_segment *prev;
  /* The next stack segment, if it has been allocated--when a function
     is running on this stack segment, the next one is not being
     used.  */
  struct stack_segment *next;
  /* The total size of this stack segment.  */
  size_t size;
  /* The stack address when this stack was created.  This is used when
     popping the stack.  */
  void *old_stack;
  /* A list of memory blocks allocated by dynamic stack
     allocation.  */
  struct dynamic_allocation_blocks *dynamic_allocation;
  /* A list of dynamic memory blocks no longer needed.  */
  struct dynamic_allocation_blocks *free_dynamic_allocation;
  /* An extra pointer in case we need some more information some
     day.  */
  void *extra;
};
125 /* This structure holds the (approximate) initial stack pointer and
126 size for the system supplied stack for a thread. This is set when
127 the thread is created. We also store a sigset_t here to hold the
128 signal mask while splitting the stack, since we don't want to store
129 that on the stack. */
131 struct initial_sp
133 /* The initial stack pointer. */
134 void *sp;
135 /* The stack length. */
136 size_t len;
137 /* A signal mask, put here so that the thread can use it without
138 needing stack space. */
139 sigset_t mask;
140 /* Some extra space for later extensibility. */
141 void *extra[5];
/* A list of memory blocks allocated by dynamic stack allocation.
   This is used for code that calls alloca or uses variably sized
   arrays.  */

struct dynamic_allocation_blocks
{
  /* The next block in the list.  */
  struct dynamic_allocation_blocks *next;
  /* The size of the allocated memory.  */
  size_t size;
  /* The allocated memory.  */
  void *block;
};
158 /* These thread local global variables must be shared by all split
159 stack code across shared library boundaries. Therefore, they have
160 default visibility. They have extensibility fields if needed for
161 new versions. If more radical changes are needed, new code can be
162 written using new variable names, while still using the existing
163 variables in a backward compatible manner. Symbol versioning is
164 also used, although, since these variables are only referenced by
165 code in this file and generic-morestack-thread.c, it is likely that
166 simply using new names will suffice. */
168 /* The first stack segment allocated for this thread. */
170 __thread struct stack_segment *__morestack_segments
171 __attribute__ ((visibility ("default")));
173 /* The stack segment that we think we are currently using. This will
174 be correct in normal usage, but will be incorrect if an exception
175 unwinds into a different stack segment or if longjmp jumps to a
176 different stack segment. */
178 __thread struct stack_segment *__morestack_current_segment
179 __attribute__ ((visibility ("default")));
181 /* The initial stack pointer and size for this thread. */
183 __thread struct initial_sp __morestack_initial_sp
184 __attribute__ ((visibility ("default")));
186 /* A static signal mask, to avoid taking up stack space. */
188 static sigset_t __morestack_fullmask;
/* Convert an integer to a decimal string without using much stack
   space.  Return a pointer to the part of the buffer to use.  We use
   this instead of sprintf because sprintf will require too much stack
   space.  */

static char *
print_int (int val, char *buf, int buflen, size_t *print_len)
{
  int is_negative;
  int i;
  unsigned int uval;

  /* Work on the magnitude as unsigned; negating in the unsigned
     domain is well-defined even for INT_MIN.  */
  uval = (unsigned int) val;
  if (val >= 0)
    is_negative = 0;
  else
    {
      is_negative = 1;
      uval = - uval;
    }

  /* Emit digits least-significant first, filling the buffer from the
     end.  The do-while guarantees at least one digit for zero.  */
  i = buflen;
  do
    {
      --i;
      buf[i] = '0' + (uval % 10);
      uval /= 10;
    }
  while (uval != 0 && i > 0);

  if (is_negative)
    {
      if (i > 0)
	--i;
      buf[i] = '-';
    }

  *print_len = buflen - i;
  return buf + i;
}
/* Print the string MSG/LEN, the errno number ERR, and a newline on
   stderr.  Then crash.  */

void
__morestack_fail (const char *, size_t, int) __attribute__ ((noreturn));

void
__morestack_fail (const char *msg, size_t len, int err)
{
  char buf[24];
  static const char nl[] = "\n";
  struct iovec iov[3];
  /* writev wants non-const iov_base pointers; launder the const away
     through a union rather than a cast.  */
  union { char *p; const char *cp; } const_cast;

  const_cast.cp = msg;
  iov[0].iov_base = const_cast.p;
  iov[0].iov_len = len;
  /* We can't call strerror, because it may try to translate the error
     message, and that would use too much stack space.  */
  iov[1].iov_base = print_int (err, buf, sizeof buf, &iov[1].iov_len);
  const_cast.cp = &nl[0];
  iov[2].iov_base = const_cast.p;
  iov[2].iov_len = sizeof nl - 1;
  /* FIXME: On systems without writev we need to issue three write
     calls, or punt on printing errno.  For now this is irrelevant
     since stack splitting only works on GNU/Linux anyhow.  */
  writev (2, iov, 3);
  abort ();
}
261 /* Allocate a new stack segment. FRAME_SIZE is the required frame
262 size. */
264 static struct stack_segment *
265 allocate_segment (size_t frame_size)
267 static unsigned int static_pagesize;
268 static int use_guard_page;
269 unsigned int pagesize;
270 unsigned int overhead;
271 unsigned int allocate;
272 void *space;
273 struct stack_segment *pss;
275 pagesize = static_pagesize;
276 if (pagesize == 0)
278 unsigned int p;
280 pagesize = getpagesize ();
282 #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
283 p = __sync_val_compare_and_swap (&static_pagesize, 0, pagesize);
284 #else
285 /* Just hope this assignment is atomic. */
286 static_pagesize = pagesize;
287 p = 0;
288 #endif
290 use_guard_page = getenv ("SPLIT_STACK_GUARD") != 0;
292 /* FIXME: I'm not sure this assert should be in the released
293 code. */
294 assert (p == 0 || p == pagesize);
297 overhead = sizeof (struct stack_segment);
299 allocate = pagesize;
300 if (allocate < MINSIGSTKSZ)
301 allocate = ((MINSIGSTKSZ + overhead + pagesize - 1)
302 & ~ (pagesize - 1));
303 if (allocate < frame_size)
304 allocate = ((frame_size + overhead + pagesize - 1)
305 & ~ (pagesize - 1));
307 if (use_guard_page)
308 allocate += pagesize;
310 /* FIXME: If this binary requires an executable stack, then we need
311 to set PROT_EXEC. Unfortunately figuring that out is complicated
312 and target dependent. We would need to use dl_iterate_phdr to
313 see if there is any object which does not have a PT_GNU_STACK
314 phdr, though only for architectures which use that mechanism. */
315 space = mmap (NULL, allocate, PROT_READ | PROT_WRITE,
316 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
317 if (space == MAP_FAILED)
319 static const char msg[] =
320 "unable to allocate additional stack space: errno ";
321 __morestack_fail (msg, sizeof msg - 1, errno);
324 if (use_guard_page)
326 void *guard;
328 #ifdef STACK_GROWS_DOWNWARD
329 guard = space;
330 space = (char *) space + pagesize;
331 #else
332 guard = space + allocate - pagesize;
333 #endif
335 mprotect (guard, pagesize, PROT_NONE);
336 allocate -= pagesize;
339 pss = (struct stack_segment *) space;
341 pss->prev = __morestack_current_segment;
342 pss->next = NULL;
343 pss->size = allocate - overhead;
344 pss->dynamic_allocation = NULL;
345 pss->free_dynamic_allocation = NULL;
346 pss->extra = NULL;
348 if (__morestack_current_segment != NULL)
349 __morestack_current_segment->next = pss;
350 else
351 __morestack_segments = pss;
353 return pss;
356 /* Free a list of dynamic blocks. */
358 static void
359 free_dynamic_blocks (struct dynamic_allocation_blocks *p)
361 while (p != NULL)
363 struct dynamic_allocation_blocks *next;
365 next = p->next;
366 free (p->block);
367 free (p);
368 p = next;
372 /* Merge two lists of dynamic blocks. */
374 static struct dynamic_allocation_blocks *
375 merge_dynamic_blocks (struct dynamic_allocation_blocks *a,
376 struct dynamic_allocation_blocks *b)
378 struct dynamic_allocation_blocks **pp;
380 if (a == NULL)
381 return b;
382 if (b == NULL)
383 return a;
384 for (pp = &a->next; *pp != NULL; pp = &(*pp)->next)
386 *pp = b;
387 return a;
390 /* Release stack segments. If FREE_DYNAMIC is non-zero, we also free
391 any dynamic blocks. Otherwise we return them. */
393 struct dynamic_allocation_blocks *
394 __morestack_release_segments (struct stack_segment **pp, int free_dynamic)
396 struct dynamic_allocation_blocks *ret;
397 struct stack_segment *pss;
399 ret = NULL;
400 pss = *pp;
401 while (pss != NULL)
403 struct stack_segment *next;
404 unsigned int allocate;
406 next = pss->next;
408 if (pss->dynamic_allocation != NULL
409 || pss->free_dynamic_allocation != NULL)
411 if (free_dynamic)
413 free_dynamic_blocks (pss->dynamic_allocation);
414 free_dynamic_blocks (pss->free_dynamic_allocation);
416 else
418 ret = merge_dynamic_blocks (pss->dynamic_allocation, ret);
419 ret = merge_dynamic_blocks (pss->free_dynamic_allocation, ret);
423 allocate = pss->size + sizeof (struct stack_segment);
424 if (munmap (pss, allocate) < 0)
426 static const char msg[] = "munmap of stack space failed: errno ";
427 __morestack_fail (msg, sizeof msg - 1, errno);
430 pss = next;
432 *pp = NULL;
434 return ret;
437 /* This function is called by a processor specific function to set the
438 initial stack pointer for a thread. The operating system will
439 always create a stack for a thread. Here we record a stack pointer
440 near the base of that stack. The size argument lets the processor
441 specific code estimate how much stack space is available on this
442 initial stack. */
444 void
445 __generic_morestack_set_initial_sp (void *sp, size_t len)
447 /* The stack pointer most likely starts on a page boundary. Adjust
448 to the nearest 512 byte boundary. It's not essential that we be
449 precise here; getting it wrong will just leave some stack space
450 unused. */
451 #ifdef STACK_GROWS_DOWNWARD
452 sp = (void *) ((((__UINTPTR_TYPE__) sp + 511U) / 512U) * 512U);
453 #else
454 sp = (void *) ((((__UINTPTR_TYPE__) sp - 511U) / 512U) * 512U);
455 #endif
457 __morestack_initial_sp.sp = sp;
458 __morestack_initial_sp.len = len;
459 sigemptyset (&__morestack_initial_sp.mask);
461 sigfillset (&__morestack_fullmask);
462 #ifdef __linux__
463 /* On Linux, the first two real time signals are used by the NPTL
464 threading library. By taking them out of the set of signals, we
465 avoiding copying the signal mask in pthread_sigmask. More
466 importantly, pthread_sigmask uses less stack space on x86_64. */
467 sigdelset (&__morestack_fullmask, __SIGRTMIN);
468 sigdelset (&__morestack_fullmask, __SIGRTMIN + 1);
469 #endif
472 /* This function is called by a processor specific function which is
473 run in the prologue when more stack is needed. The processor
474 specific function handles the details of saving registers and
475 frobbing the actual stack pointer. This function is responsible
476 for allocating a new stack segment and for copying a parameter
477 block from the old stack to the new one. On function entry
478 *PFRAME_SIZE is the size of the required stack frame--the returned
479 stack must be at least this large. On function exit *PFRAME_SIZE
480 is the amount of space remaining on the allocated stack. OLD_STACK
481 points at the parameters the old stack (really the current one
482 while this function is running). OLD_STACK is saved so that it can
483 be returned by a later call to __generic_releasestack. PARAM_SIZE
484 is the size in bytes of parameters to copy to the new stack. This
485 function returns a pointer to the new stack segment, pointing to
486 the memory after the parameters have been copied. The returned
487 value minus the returned *PFRAME_SIZE (or plus if the stack grows
488 upward) is the first address on the stack which should not be used.
490 This function is running on the old stack and has only a limited
491 amount of stack space available. */
493 void *
494 __generic_morestack (size_t *pframe_size, void *old_stack, size_t param_size)
496 size_t frame_size = *pframe_size;
497 struct stack_segment *current;
498 struct stack_segment **pp;
499 struct dynamic_allocation_blocks *dynamic;
500 char *from;
501 char *to;
502 void *ret;
503 size_t i;
505 current = __morestack_current_segment;
507 pp = current != NULL ? &current->next : &__morestack_segments;
508 if (*pp != NULL && (*pp)->size < frame_size)
509 dynamic = __morestack_release_segments (pp, 0);
510 else
511 dynamic = NULL;
512 current = *pp;
514 if (current == NULL)
515 current = allocate_segment (frame_size);
517 current->old_stack = old_stack;
519 __morestack_current_segment = current;
521 if (dynamic != NULL)
523 /* Move the free blocks onto our list. We don't want to call
524 free here, as we are short on stack space. */
525 current->free_dynamic_allocation =
526 merge_dynamic_blocks (dynamic, current->free_dynamic_allocation);
529 *pframe_size = current->size - param_size;
531 #ifdef STACK_GROWS_DOWNWARD
533 char *bottom = (char *) (current + 1) + current->size;
534 to = bottom - param_size;
535 ret = bottom - param_size;
537 #else
538 to = current + 1;
539 ret = (char *) (current + 1) + param_size;
540 #endif
542 /* We don't call memcpy to avoid worrying about the dynamic linker
543 trying to resolve it. */
544 from = (char *) old_stack;
545 for (i = 0; i < param_size; i++)
546 *to++ = *from++;
548 return ret;
551 /* This function is called by a processor specific function when it is
552 ready to release a stack segment. We don't actually release the
553 stack segment, we just move back to the previous one. The current
554 stack segment will still be available if we need it in
555 __generic_morestack. This returns a pointer to the new stack
556 segment to use, which is the one saved by a previous call to
557 __generic_morestack. The processor specific function is then
558 responsible for actually updating the stack pointer. This sets
559 *PAVAILABLE to the amount of stack space now available. */
561 void *
562 __generic_releasestack (size_t *pavailable)
564 struct stack_segment *current;
565 void *old_stack;
567 current = __morestack_current_segment;
568 old_stack = current->old_stack;
569 current = current->prev;
570 __morestack_current_segment = current;
572 if (current != NULL)
574 #ifdef STACK_GROWS_DOWNWARD
575 *pavailable = (char *) old_stack - (char *) (current + 1);
576 #else
577 *pavailable = (char *) (current + 1) + current->size - (char *) old_stack;
578 #endif
580 else
582 size_t used;
584 /* We have popped back to the original stack. */
585 #ifdef STACK_GROWS_DOWNWARD
586 if ((char *) old_stack >= (char *) __morestack_initial_sp.sp)
587 used = 0;
588 else
589 used = (char *) __morestack_initial_sp.sp - (char *) old_stack;
590 #else
591 if ((char *) old_stack <= (char *) __morestack_initial_sp.sp)
592 used = 0;
593 else
594 used = (char *) old_stack - (char *) __morestack_initial_sp.sp;
595 #endif
597 if (used > __morestack_initial_sp.len)
598 *pavailable = 0;
599 else
600 *pavailable = __morestack_initial_sp.len - used;
603 return old_stack;
606 /* Block signals while splitting the stack. This avoids trouble if we
607 try to invoke a signal handler which itself wants to split the
608 stack. */
610 extern int pthread_sigmask (int, const sigset_t *, sigset_t *)
611 __attribute__ ((weak));
613 void
614 __morestack_block_signals (void)
616 if (pthread_sigmask)
617 pthread_sigmask (SIG_BLOCK, &__morestack_fullmask,
618 &__morestack_initial_sp.mask);
619 else
620 sigprocmask (SIG_BLOCK, &__morestack_fullmask,
621 &__morestack_initial_sp.mask);
624 /* Unblock signals while splitting the stack. */
626 void
627 __morestack_unblock_signals (void)
629 if (pthread_sigmask)
630 pthread_sigmask (SIG_SETMASK, &__morestack_initial_sp.mask, NULL);
631 else
632 sigprocmask (SIG_SETMASK, &__morestack_initial_sp.mask, NULL);
635 /* This function is called to allocate dynamic stack space, for alloca
636 or a variably sized array. This is a regular function with
637 sufficient stack space, so we just use malloc to allocate the
638 space. We attach the allocated blocks to the current stack
639 segment, so that they will eventually be reused or freed. */
641 void *
642 __morestack_allocate_stack_space (size_t size)
644 struct stack_segment *seg, *current;
645 struct dynamic_allocation_blocks *p;
647 /* We have to block signals to avoid getting confused if we get
648 interrupted by a signal whose handler itself uses alloca or a
649 variably sized array. */
650 __morestack_block_signals ();
652 /* Since we don't want to call free while we are low on stack space,
653 we may have a list of already allocated blocks waiting to be
654 freed. Release them all, unless we find one that is large
655 enough. We don't look at every block to see if one is large
656 enough, just the first one, because we aren't trying to build a
657 memory allocator here, we're just trying to speed up common
658 cases. */
660 current = __morestack_current_segment;
661 p = NULL;
662 for (seg = __morestack_segments; seg != NULL; seg = seg->next)
664 p = seg->free_dynamic_allocation;
665 if (p != NULL)
667 if (p->size >= size)
669 seg->free_dynamic_allocation = p->next;
670 break;
673 free_dynamic_blocks (p);
674 seg->free_dynamic_allocation = NULL;
675 p = NULL;
679 if (p == NULL)
681 /* We need to allocate additional memory. */
682 p = malloc (sizeof (*p));
683 if (p == NULL)
684 abort ();
685 p->size = size;
686 p->block = malloc (size);
687 if (p->block == NULL)
688 abort ();
691 /* If we are still on the initial stack, then we have a space leak.
692 FIXME. */
693 if (current != NULL)
695 p->next = current->dynamic_allocation;
696 current->dynamic_allocation = p;
699 __morestack_unblock_signals ();
701 return p->block;
704 /* Find the stack segment for STACK and return the amount of space
705 available. This is used when unwinding the stack because of an
706 exception, in order to reset the stack guard correctly. */
708 size_t
709 __generic_findstack (void *stack)
711 struct stack_segment *pss;
712 size_t used;
714 for (pss = __morestack_current_segment; pss != NULL; pss = pss->prev)
716 if ((char *) pss < (char *) stack
717 && (char *) pss + pss->size > (char *) stack)
719 __morestack_current_segment = pss;
720 #ifdef STACK_GROWS_DOWNWARD
721 return (char *) stack - (char *) (pss + 1);
722 #else
723 return (char *) (pss + 1) + pss->size - (char *) stack;
724 #endif
728 /* We have popped back to the original stack. */
729 #ifdef STACK_GROWS_DOWNWARD
730 if ((char *) stack >= (char *) __morestack_initial_sp.sp)
731 used = 0;
732 else
733 used = (char *) __morestack_initial_sp.sp - (char *) stack;
734 #else
735 if ((char *) stack <= (char *) __morestack_initial_sp.sp)
736 used = 0;
737 else
738 used = (char *) stack - (char *) __morestack_initial_sp.sp;
739 #endif
741 if (used > __morestack_initial_sp.len)
742 return 0;
743 else
744 return __morestack_initial_sp.len - used;
747 /* This function is called at program startup time to make sure that
748 mmap, munmap, and getpagesize are resolved if linking dynamically.
749 We want to resolve them while we have enough stack for them, rather
750 than calling into the dynamic linker while low on stack space. */
752 void
753 __morestack_load_mmap (void)
755 /* Call with bogus values to run faster. We don't care if the call
756 fails. Pass __MORESTACK_CURRENT_SEGMENT to make sure that any
757 TLS accessor function is resolved. */
758 mmap (__morestack_current_segment, 0, PROT_READ, MAP_ANONYMOUS, -1, 0);
759 mprotect (NULL, 0, 0);
760 munmap (0, getpagesize ());
763 /* This function may be used to iterate over the stack segments.
764 This can be called like this.
765 void *next_segment = NULL;
766 void *next_sp = NULL;
767 void *initial_sp = NULL;
768 void *stack;
769 size_t stack_size;
770 while ((stack = __splitstack_find (next_segment, next_sp, &stack_size,
771 &next_segment, &next_sp,
772 &initial_sp)) != NULL)
774 // Stack segment starts at stack and is stack_size bytes long.
777 There is no way to iterate over the stack segments of a different
778 thread. However, what is permitted is for one thread to call this
779 with the first two values NULL, to pass next_segment, next_sp, and
780 initial_sp to a different thread, and then to suspend one way or
781 another. A different thread may run the subsequent
782 __morestack_find iterations. Of course, this will only work if the
783 first thread is suspended during the __morestack_find iterations.
784 If not, the second thread will be looking at the stack while it is
785 changing, and anything could happen.
787 FIXME: This should be declared in some header file, but where? */
789 void *
790 __splitstack_find (void *segment_arg, void *sp, size_t *len,
791 void **next_segment, void **next_sp,
792 void **initial_sp)
794 struct stack_segment *segment;
795 void *ret;
796 char *nsp;
798 if (segment_arg == (void *) 1)
800 char *isp = (char *) *initial_sp;
802 *next_segment = (void *) 2;
803 *next_sp = NULL;
804 #ifdef STACK_GROWS_DOWNWARD
805 if ((char *) sp >= isp)
806 return NULL;
807 *len = (char *) isp - (char *) sp;
808 return sp;
809 #else
810 if ((char *) sp <= (char *) isp)
811 return NULL;
812 *len = (char *) sp - (char *) isp;
813 return (void *) isp;
814 #endif
816 else if (segment_arg == (void *) 2)
817 return NULL;
818 else if (segment_arg != NULL)
819 segment = (struct stack_segment *) segment_arg;
820 else
822 *initial_sp = __morestack_initial_sp.sp;
823 segment = __morestack_current_segment;
824 sp = (void *) &segment;
825 while (1)
827 if (segment == NULL)
828 return __splitstack_find ((void *) 1, sp, len, next_segment,
829 next_sp, initial_sp);
830 if ((char *) sp >= (char *) (segment + 1)
831 && (char *) sp <= (char *) (segment + 1) + segment->size)
832 break;
833 segment = segment->prev;
837 if (segment->prev == NULL)
838 *next_segment = (void *) 1;
839 else
840 *next_segment = segment->prev;
842 /* The old_stack value is the address of the function parameters of
843 the function which called __morestack. So if f1 called f2 which
844 called __morestack, the stack looks like this:
846 parameters <- old_stack
847 return in f1
848 return in f2
849 data pushed by __morestack
851 On x86, the data pushed by __morestack includes the saved value
852 of the ebp/rbp register. We want our caller to be able to see
853 that value, which can not be found on any other stack. So we
854 adjust accordingly. This may need to be tweaked for other
855 targets. */
857 nsp = (char *) segment->old_stack;
858 #ifdef STACK_GROWS_DOWNWARD
859 nsp -= 3 * sizeof (void *);
860 #else
861 nsp += 3 * sizeof (void *);
862 #endif
863 *next_sp = (void *) nsp;
865 #ifdef STACK_GROWS_DOWNWARD
866 *len = (char *) (segment + 1) + segment->size - (char *) sp;
867 ret = (void *) sp;
868 #else
869 *len = (char *) sp - (char *) (segment + 1);
870 ret = (void *) (segment + 1);
871 #endif
873 return ret;
876 #endif /* !defined (inhibit_libc) */