/* Bring in all split-stack work done over on gccgo branch.
   (official-gcc.git: libgcc/generic-morestack.c,
   blob 7f96f04c1cb4d29a9e994c4d6c168a14ce4a5242.)  */
/* Library support for -fsplit-stack.  */
/* Copyright (C) 2009 Free Software Foundation, Inc.
   Contributed by Ian Lance Taylor <iant@google.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */
#include "tconfig.h"
#include "tsystem.h"
#include "coretypes.h"
#include "tm.h"

/* If inhibit_libc is defined, we can not compile this file.  The
   effect is that people will not be able to use -fsplit-stack.  That
   is much better than failing the build particularly since people
   will want to define inhibit_libc while building a compiler which
   can build glibc.  */

#ifndef inhibit_libc

#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/uio.h>

#include "generic-morestack.h"
/* This file contains subroutines that are used by code compiled with
   -fsplit-stack.  */

/* Declare functions to avoid warnings--there is no header file for
   these internal functions.  We give these functions the flatten
   attribute in order to minimize their stack usage--here we must
   minimize stack usage even at the cost of code size, and in general
   inlining everything will do that.  */

extern void
__generic_morestack_set_initial_sp (void *sp, size_t len)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void *
__generic_morestack (size_t *frame_size, void *old_stack, size_t param_size)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void *
__generic_releasestack (size_t *pavailable)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void
__morestack_block_signals (void)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void
__morestack_unblock_signals (void)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void *
__morestack_allocate_stack_space (size_t size)
  __attribute__ ((visibility ("default")));

extern size_t
__generic_findstack (void *stack)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void
__morestack_load_mmap (void)
  __attribute__ ((no_split_stack));

extern void *
__splitstack_find (void *, void *, size_t *, void **, void **, void **)
  __attribute__ ((visibility ("default")));
/* When we allocate a stack segment we put this header at the
   start.  */

struct stack_segment
{
  /* The previous stack segment--when a function running on this stack
     segment returns, it will run on the previous one.  */
  struct stack_segment *prev;
  /* The next stack segment, if it has been allocated--when a function
     is running on this stack segment, the next one is not being
     used.  */
  struct stack_segment *next;
  /* The total size of this stack segment.  */
  size_t size;
  /* The stack address when this stack was created.  This is used when
     popping the stack.  */
  void *old_stack;
  /* A list of memory blocks allocated by dynamic stack
     allocation.  */
  struct dynamic_allocation_blocks *dynamic_allocation;
  /* A list of dynamic memory blocks no longer needed.  */
  struct dynamic_allocation_blocks *free_dynamic_allocation;
};
/* A list of memory blocks allocated by dynamic stack allocation.
   This is used for code that calls alloca or uses variably sized
   arrays.  */

struct dynamic_allocation_blocks
{
  /* The next block in the list.  */
  struct dynamic_allocation_blocks *next;
  /* The size of the allocated memory.  */
  size_t size;
  /* The allocated memory.  */
  void *block;
};
132 /* The first stack segment allocated for this thread. */
134 __thread struct stack_segment *__morestack_segments
135 __attribute__ ((visibility ("default")));
137 /* The stack segment that we think we are currently using. This will
138 be correct in normal usage, but will be incorrect if an exception
139 unwinds into a different stack segment or if longjmp jumps to a
140 different stack segment. */
142 __thread struct stack_segment *__morestack_current_segment
143 __attribute__ ((visibility ("default")));
145 /* The (approximate) initial stack pointer and size for this thread on
146 the system supplied stack. This is set when the thread is created.
147 We also store a sigset_t here to hold the signal mask while
148 splitting the stack, since we don't want to store that on the
149 stack. */
151 struct initial_sp
153 void *sp;
154 size_t len;
155 sigset_t mask;
158 __thread struct initial_sp __morestack_initial_sp
159 __attribute__ ((visibility ("default")));
161 /* A static signal mask, to avoid taking up stack space. */
163 static sigset_t __morestack_fullmask;
/* Convert an integer to a decimal string without using much stack
   space.  Return a pointer to the part of the buffer to use.  We use
   this instead of sprintf because sprintf will require too much stack
   space.  *PRINT_LEN is set to the number of characters written.  */

static char *
print_int (int val, char *buf, int buflen, size_t *print_len)
{
  int is_negative;
  int i;
  unsigned int uval;

  uval = (unsigned int) val;
  if (val >= 0)
    is_negative = 0;
  else
    {
      is_negative = 1;
      /* Negating in unsigned arithmetic is well defined even for
         INT_MIN.  */
      uval = - uval;
    }

  /* Emit digits from the end of the buffer toward the front.  */
  i = buflen;
  do
    {
      --i;
      buf[i] = '0' + (uval % 10);
      uval /= 10;
    }
  while (uval != 0 && i > 0);

  if (is_negative)
    {
      if (i > 0)
        --i;
      buf[i] = '-';
    }

  *print_len = buflen - i;
  return buf + i;
}
/* Print the string MSG/LEN, the errno number ERR, and a newline on
   stderr.  Then crash.  */

void
__morestack_fail (const char *, size_t, int) __attribute__ ((noreturn));

void
__morestack_fail (const char *msg, size_t len, int err)
{
  char buf[24];
  static const char nl[] = "\n";
  struct iovec iov[3];
  /* Launder away const-ness without a cast expression, since
     iov_base is a plain void *.  */
  union { char *p; const char *cp; } const_cast;

  const_cast.cp = msg;
  iov[0].iov_base = const_cast.p;
  iov[0].iov_len = len;
  /* We can't call strerror, because it may try to translate the error
     message, and that would use too much stack space.  */
  iov[1].iov_base = print_int (err, buf, sizeof buf, &iov[1].iov_len);
  const_cast.cp = &nl[0];
  iov[2].iov_base = const_cast.p;
  iov[2].iov_len = sizeof nl - 1;
  /* FIXME: On systems without writev we need to issue three write
     calls, or punt on printing errno.  For now this is irrelevant
     since stack splitting only works on GNU/Linux anyhow.  */
  writev (2, iov, 3);
  abort ();
}
236 /* Allocate a new stack segment. FRAME_SIZE is the required frame
237 size. */
239 static struct stack_segment *
240 allocate_segment (size_t frame_size)
242 static unsigned int static_pagesize;
243 static int use_guard_page;
244 unsigned int pagesize;
245 unsigned int overhead;
246 unsigned int allocate;
247 void *space;
248 struct stack_segment *pss;
250 pagesize = static_pagesize;
251 if (pagesize == 0)
253 unsigned int p;
255 pagesize = getpagesize ();
257 #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
258 p = __sync_val_compare_and_swap (&static_pagesize, 0, pagesize);
259 #else
260 /* Just hope this assignment is atomic. */
261 static_pagesize = pagesize;
262 p = 0;
263 #endif
265 use_guard_page = getenv ("SPLIT_STACK_GUARD") != 0;
267 /* FIXME: I'm not sure this assert should be in the released
268 code. */
269 assert (p == 0 || p == pagesize);
272 overhead = sizeof (struct stack_segment);
274 allocate = pagesize;
275 if (allocate < MINSIGSTKSZ)
276 allocate = ((MINSIGSTKSZ + overhead + pagesize - 1)
277 & ~ (pagesize - 1));
278 if (allocate < frame_size)
279 allocate = ((frame_size + overhead + pagesize - 1)
280 & ~ (pagesize - 1));
282 if (use_guard_page)
283 allocate += pagesize;
285 /* FIXME: If this binary requires an executable stack, then we need
286 to set PROT_EXEC. Unfortunately figuring that out is complicated
287 and target dependent. We would need to use dl_iterate_phdr to
288 see if there is any object which does not have a PT_GNU_STACK
289 phdr, though only for architectures which use that mechanism. */
290 space = mmap (NULL, allocate, PROT_READ | PROT_WRITE,
291 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
292 if (space == MAP_FAILED)
294 static const char msg[] =
295 "unable to allocate additional stack space: errno ";
296 __morestack_fail (msg, sizeof msg - 1, errno);
299 if (use_guard_page)
301 void *guard;
303 #ifdef STACK_GROWS_DOWNWARD
304 guard = space;
305 space = (char *) space + pagesize;
306 #else
307 guard = space + allocate - pagesize;
308 #endif
310 mprotect (guard, pagesize, PROT_NONE);
311 allocate -= pagesize;
314 pss = (struct stack_segment *) space;
316 pss->prev = __morestack_current_segment;
317 pss->next = NULL;
318 pss->size = allocate - overhead;
319 pss->dynamic_allocation = NULL;
320 pss->free_dynamic_allocation = NULL;
322 if (__morestack_current_segment != NULL)
323 __morestack_current_segment->next = pss;
324 else
325 __morestack_segments = pss;
327 return pss;
330 /* Free a list of dynamic blocks. */
332 static void
333 free_dynamic_blocks (struct dynamic_allocation_blocks *p)
335 while (p != NULL)
337 struct dynamic_allocation_blocks *next;
339 next = p->next;
340 free (p->block);
341 free (p);
342 p = next;
346 /* Merge two lists of dynamic blocks. */
348 static struct dynamic_allocation_blocks *
349 merge_dynamic_blocks (struct dynamic_allocation_blocks *a,
350 struct dynamic_allocation_blocks *b)
352 struct dynamic_allocation_blocks **pp;
354 if (a == NULL)
355 return b;
356 if (b == NULL)
357 return a;
358 for (pp = &a->next; *pp != NULL; pp = &(*pp)->next)
360 *pp = b;
361 return a;
364 /* Release stack segments. If FREE_DYNAMIC is non-zero, we also free
365 any dynamic blocks. Otherwise we return them. */
367 struct dynamic_allocation_blocks *
368 __morestack_release_segments (struct stack_segment **pp, int free_dynamic)
370 struct dynamic_allocation_blocks *ret;
371 struct stack_segment *pss;
373 ret = NULL;
374 pss = *pp;
375 while (pss != NULL)
377 struct stack_segment *next;
378 unsigned int allocate;
380 next = pss->next;
382 if (pss->dynamic_allocation != NULL
383 || pss->free_dynamic_allocation != NULL)
385 if (free_dynamic)
387 free_dynamic_blocks (pss->dynamic_allocation);
388 free_dynamic_blocks (pss->free_dynamic_allocation);
390 else
392 ret = merge_dynamic_blocks (pss->dynamic_allocation, ret);
393 ret = merge_dynamic_blocks (pss->free_dynamic_allocation, ret);
397 allocate = pss->size + sizeof (struct stack_segment);
398 if (munmap (pss, allocate) < 0)
400 static const char msg[] = "munmap of stack space failed: errno ";
401 __morestack_fail (msg, sizeof msg - 1, errno);
404 pss = next;
406 *pp = NULL;
408 return ret;
411 /* This function is called by a processor specific function to set the
412 initial stack pointer for a thread. The operating system will
413 always create a stack for a thread. Here we record a stack pointer
414 near the base of that stack. The size argument lets the processor
415 specific code estimate how much stack space is available on this
416 initial stack. */
418 void
419 __generic_morestack_set_initial_sp (void *sp, size_t len)
421 /* The stack pointer most likely starts on a page boundary. Adjust
422 to the nearest 512 byte boundary. It's not essential that we be
423 precise here; getting it wrong will just leave some stack space
424 unused. */
425 #ifdef STACK_GROWS_DOWNWARD
426 sp = (void *) ((((__UINTPTR_TYPE__) sp + 511U) / 512U) * 512U);
427 #else
428 sp = (void *) ((((__UINTPTR_TYPE__) sp - 511U) / 512U) * 512U);
429 #endif
431 __morestack_initial_sp.sp = sp;
432 __morestack_initial_sp.len = len;
433 sigemptyset (&__morestack_initial_sp.mask);
435 sigfillset (&__morestack_fullmask);
436 #ifdef __linux__
437 /* On Linux, the first two real time signals are used by the NPTL
438 threading library. By taking them out of the set of signals, we
439 avoiding copying the signal mask in pthread_sigmask. More
440 importantly, pthread_sigmask uses less stack space on x86_64. */
441 sigdelset (&__morestack_fullmask, __SIGRTMIN);
442 sigdelset (&__morestack_fullmask, __SIGRTMIN + 1);
443 #endif
446 /* This function is called by a processor specific function which is
447 run in the prologue when more stack is needed. The processor
448 specific function handles the details of saving registers and
449 frobbing the actual stack pointer. This function is responsible
450 for allocating a new stack segment and for copying a parameter
451 block from the old stack to the new one. On function entry
452 *PFRAME_SIZE is the size of the required stack frame--the returned
453 stack must be at least this large. On function exit *PFRAME_SIZE
454 is the amount of space remaining on the allocated stack. OLD_STACK
455 points at the parameters the old stack (really the current one
456 while this function is running). OLD_STACK is saved so that it can
457 be returned by a later call to __generic_releasestack. PARAM_SIZE
458 is the size in bytes of parameters to copy to the new stack. This
459 function returns a pointer to the new stack segment, pointing to
460 the memory after the parameters have been copied. The returned
461 value minus the returned *PFRAME_SIZE (or plus if the stack grows
462 upward) is the first address on the stack which should not be used.
464 This function is running on the old stack and has only a limited
465 amount of stack space available. */
467 void *
468 __generic_morestack (size_t *pframe_size, void *old_stack, size_t param_size)
470 size_t frame_size = *pframe_size;
471 struct stack_segment *current;
472 struct stack_segment **pp;
473 struct dynamic_allocation_blocks *dynamic;
474 char *from;
475 char *to;
476 void *ret;
477 size_t i;
479 current = __morestack_current_segment;
481 pp = current != NULL ? &current->next : &__morestack_segments;
482 if (*pp != NULL && (*pp)->size < frame_size)
483 dynamic = __morestack_release_segments (pp, 0);
484 else
485 dynamic = NULL;
486 current = *pp;
488 if (current == NULL)
489 current = allocate_segment (frame_size);
491 current->old_stack = old_stack;
493 __morestack_current_segment = current;
495 if (dynamic != NULL)
497 /* Move the free blocks onto our list. We don't want to call
498 free here, as we are short on stack space. */
499 current->free_dynamic_allocation =
500 merge_dynamic_blocks (dynamic, current->free_dynamic_allocation);
503 *pframe_size = current->size - param_size;
505 #ifdef STACK_GROWS_DOWNWARD
507 char *bottom = (char *) (current + 1) + current->size;
508 to = bottom - param_size;
509 ret = bottom - param_size;
511 #else
512 to = current + 1;
513 ret = (char *) (current + 1) + param_size;
514 #endif
516 /* We don't call memcpy to avoid worrying about the dynamic linker
517 trying to resolve it. */
518 from = (char *) old_stack;
519 for (i = 0; i < param_size; i++)
520 *to++ = *from++;
522 return ret;
525 /* This function is called by a processor specific function when it is
526 ready to release a stack segment. We don't actually release the
527 stack segment, we just move back to the previous one. The current
528 stack segment will still be available if we need it in
529 __generic_morestack. This returns a pointer to the new stack
530 segment to use, which is the one saved by a previous call to
531 __generic_morestack. The processor specific function is then
532 responsible for actually updating the stack pointer. This sets
533 *PAVAILABLE to the amount of stack space now available. */
535 void *
536 __generic_releasestack (size_t *pavailable)
538 struct stack_segment *current;
539 void *old_stack;
541 current = __morestack_current_segment;
542 old_stack = current->old_stack;
543 current = current->prev;
544 __morestack_current_segment = current;
546 if (current != NULL)
548 #ifdef STACK_GROWS_DOWNWARD
549 *pavailable = (char *) old_stack - (char *) (current + 1);
550 #else
551 *pavailable = (char *) (current + 1) + current->size - (char *) old_stack;
552 #endif
554 else
556 size_t used;
558 /* We have popped back to the original stack. */
559 #ifdef STACK_GROWS_DOWNWARD
560 if ((char *) old_stack >= (char *) __morestack_initial_sp.sp)
561 used = 0;
562 else
563 used = (char *) __morestack_initial_sp.sp - (char *) old_stack;
564 #else
565 if ((char *) old_stack <= (char *) __morestack_initial_sp.sp)
566 used = 0;
567 else
568 used = (char *) old_stack - (char *) __morestack_initial_sp.sp;
569 #endif
571 if (used > __morestack_initial_sp.len)
572 *pavailable = 0;
573 else
574 *pavailable = __morestack_initial_sp.len - used;
577 return old_stack;
580 /* Block signals while splitting the stack. This avoids trouble if we
581 try to invoke a signal handler which itself wants to split the
582 stack. */
584 extern int pthread_sigmask (int, const sigset_t *, sigset_t *)
585 __attribute__ ((weak));
587 void
588 __morestack_block_signals (void)
590 if (pthread_sigmask)
591 pthread_sigmask (SIG_BLOCK, &__morestack_fullmask,
592 &__morestack_initial_sp.mask);
593 else
594 sigprocmask (SIG_BLOCK, &__morestack_fullmask,
595 &__morestack_initial_sp.mask);
598 /* Unblock signals while splitting the stack. */
600 void
601 __morestack_unblock_signals (void)
603 if (pthread_sigmask)
604 pthread_sigmask (SIG_SETMASK, &__morestack_initial_sp.mask, NULL);
605 else
606 sigprocmask (SIG_SETMASK, &__morestack_initial_sp.mask, NULL);
609 /* This function is called to allocate dynamic stack space, for alloca
610 or a variably sized array. This is a regular function with
611 sufficient stack space, so we just use malloc to allocate the
612 space. We attach the allocated blocks to the current stack
613 segment, so that they will eventually be reused or freed. */
615 void *
616 __morestack_allocate_stack_space (size_t size)
618 struct stack_segment *seg, *current;
619 struct dynamic_allocation_blocks *p;
621 /* We have to block signals to avoid getting confused if we get
622 interrupted by a signal whose handler itself uses alloca or a
623 variably sized array. */
624 __morestack_block_signals ();
626 /* Since we don't want to call free while we are low on stack space,
627 we may have a list of already allocated blocks waiting to be
628 freed. Release them all, unless we find one that is large
629 enough. We don't look at every block to see if one is large
630 enough, just the first one, because we aren't trying to build a
631 memory allocator here, we're just trying to speed up common
632 cases. */
634 current = __morestack_current_segment;
635 p = NULL;
636 for (seg = __morestack_segments; seg != NULL; seg = seg->next)
638 p = seg->free_dynamic_allocation;
639 if (p != NULL)
641 if (p->size >= size)
643 seg->free_dynamic_allocation = p->next;
644 break;
647 free_dynamic_blocks (p);
648 seg->free_dynamic_allocation = NULL;
649 p = NULL;
653 if (p == NULL)
655 /* We need to allocate additional memory. */
656 p = malloc (sizeof (*p));
657 if (p == NULL)
658 abort ();
659 p->size = size;
660 p->block = malloc (size);
661 if (p->block == NULL)
662 abort ();
665 /* If we are still on the initial stack, then we have a space leak.
666 FIXME. */
667 if (current != NULL)
669 p->next = current->dynamic_allocation;
670 current->dynamic_allocation = p;
673 __morestack_unblock_signals ();
675 return p->block;
678 /* Find the stack segment for STACK and return the amount of space
679 available. This is used when unwinding the stack because of an
680 exception, in order to reset the stack guard correctly. */
682 size_t
683 __generic_findstack (void *stack)
685 struct stack_segment *pss;
686 size_t used;
688 for (pss = __morestack_current_segment; pss != NULL; pss = pss->prev)
690 if ((char *) pss < (char *) stack
691 && (char *) pss + pss->size > (char *) stack)
693 __morestack_current_segment = pss;
694 #ifdef STACK_GROWS_DOWNWARD
695 return (char *) stack - (char *) (pss + 1);
696 #else
697 return (char *) (pss + 1) + pss->size - (char *) stack;
698 #endif
702 /* We have popped back to the original stack. */
703 #ifdef STACK_GROWS_DOWNWARD
704 if ((char *) stack >= (char *) __morestack_initial_sp.sp)
705 used = 0;
706 else
707 used = (char *) __morestack_initial_sp.sp - (char *) stack;
708 #else
709 if ((char *) stack <= (char *) __morestack_initial_sp.sp)
710 used = 0;
711 else
712 used = (char *) stack - (char *) __morestack_initial_sp.sp;
713 #endif
715 if (used > __morestack_initial_sp.len)
716 return 0;
717 else
718 return __morestack_initial_sp.len - used;
721 /* This function is called at program startup time to make sure that
722 mmap, munmap, and getpagesize are resolved if linking dynamically.
723 We want to resolve them while we have enough stack for them, rather
724 than calling into the dynamic linker while low on stack space. */
726 void
727 __morestack_load_mmap (void)
729 /* Call with bogus values to run faster. We don't care if the call
730 fails. Pass __MORESTACK_CURRENT_SEGMENT to make sure that any
731 TLS accessor function is resolved. */
732 mmap (__morestack_current_segment, 0, PROT_READ, MAP_ANONYMOUS, -1, 0);
733 mprotect (NULL, 0, 0);
734 munmap (0, getpagesize ());
737 /* This function may be used to iterate over the stack segments.
738 This can be called like this.
739 void *next_segment = NULL;
740 void *next_sp = NULL;
741 void *initial_sp = NULL;
742 void *stack;
743 size_t stack_size;
744 while ((stack = __splitstack_find (next_segment, next_sp, &stack_size,
745 &next_segment, &next_sp,
746 &initial_sp)) != NULL)
748 // Stack segment starts at stack and is stack_size bytes long.
751 There is no way to iterate over the stack segments of a different
752 thread. However, what is permitted is for one thread to call this
753 with the first two values NULL, to pass next_segment, next_sp, and
754 initial_sp to a different thread, and then to suspend one way or
755 another. A different thread may run the subsequent
756 __morestack_find iterations. Of course, this will only work if the
757 first thread is suspended during the __morestack_find iterations.
758 If not, the second thread will be looking at the stack while it is
759 changing, and anything could happen.
761 FIXME: This should be declared in some header file, but where? */
763 void *
764 __splitstack_find (void *segment_arg, void *sp, size_t *len,
765 void **next_segment, void **next_sp,
766 void **initial_sp)
768 struct stack_segment *segment;
769 void *ret;
770 char *nsp;
772 if (segment_arg == (void *) 1)
774 char *isp = (char *) *initial_sp;
776 *next_segment = (void *) 2;
777 *next_sp = NULL;
778 #ifdef STACK_GROWS_DOWNWARD
779 if ((char *) sp >= isp)
780 return NULL;
781 *len = (char *) isp - (char *) sp;
782 return sp;
783 #else
784 if ((char *) sp <= (char *) isp)
785 return NULL;
786 *len = (char *) sp - (char *) isp;
787 return (void *) isp;
788 #endif
790 else if (segment_arg == (void *) 2)
791 return NULL;
792 else if (segment_arg != NULL)
793 segment = (struct stack_segment *) segment_arg;
794 else
796 *initial_sp = __morestack_initial_sp.sp;
797 segment = __morestack_current_segment;
798 sp = (void *) &segment;
799 while (1)
801 if (segment == NULL)
802 return __splitstack_find ((void *) 1, sp, len, next_segment,
803 next_sp, initial_sp);
804 if ((char *) sp >= (char *) (segment + 1)
805 && (char *) sp <= (char *) (segment + 1) + segment->size)
806 break;
807 segment = segment->prev;
811 if (segment->prev == NULL)
812 *next_segment = (void *) 1;
813 else
814 *next_segment = segment->prev;
816 /* The old_stack value is the address of the function parameters of
817 the function which called __morestack. So if f1 called f2 which
818 called __morestack, the stack looks like this:
820 parameters <- old_stack
821 return in f1
822 return in f2
823 data pushed by __morestack
825 On x86, the data pushed by __morestack includes the saved value
826 of the ebp/rbp register. We want our caller to be able to see
827 that value, which can not be found on any other stack. So we
828 adjust accordingly. This may need to be tweaked for other
829 targets. */
831 nsp = (char *) segment->old_stack;
832 #ifdef STACK_GROWS_DOWNWARD
833 nsp -= 3 * sizeof (void *);
834 #else
835 nsp += 3 * sizeof (void *);
836 #endif
837 *next_sp = (void *) nsp;
839 #ifdef STACK_GROWS_DOWNWARD
840 *len = (char *) (segment + 1) + segment->size - (char *) sp;
841 ret = (void *) sp;
842 #else
843 *len = (char *) sp - (char *) (segment + 1);
844 ret = (void *) (segment + 1);
845 #endif
847 return ret;
#endif /* !defined (inhibit_libc) */