/* Allocate a stack suitable to be used with xclone or xsigaltstack.
   Copyright (C) 2021-2024 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <support/check.h>
#include <support/support.h>
#include <support/xunistd.h>
#include <stdint.h>
#include <string.h>
#include <stackinfo.h>
#include <sys/mman.h>
#include <sys/param.h> /* roundup, MAX */
#include <unistd.h>

#ifndef MAP_NORESERVE
# define MAP_NORESERVE 0
#endif
#ifndef MAP_STACK
# define MAP_STACK 0
#endif

struct support_stack
support_stack_alloc (size_t size)
{
  size_t pagesize = sysconf (_SC_PAGESIZE);
  if (pagesize == -1)
    FAIL_EXIT1 ("sysconf (_SC_PAGESIZE): %m\n");
  /* Always supply at least sysconf (_SC_SIGSTKSZ) space; passing 0
     as size means only that much space.  No matter what the number is,
     round it up to a whole number of pages.  */
  size_t stacksize = roundup (size + sysconf (_SC_SIGSTKSZ),
                              pagesize);
  /* The guard bands need to be large enough to intercept offset
     accesses from a stack address that might otherwise hit another
     mapping.  Make them at least twice as big as the stack itself, to
     defend against an offset by the entire size of a large
     stack-allocated array.  The minimum is 1MiB, which is arbitrarily
     chosen to be larger than any "typical" wild pointer offset.
     Again, no matter what the number is, round it up to a whole
     number of pages.  */
  size_t guardsize = roundup (MAX (2 * stacksize, 1024 * 1024), pagesize);
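  /* For illustration (assumed example values, not taken from any
     particular system): with 4 KiB pages, a 16 KiB request and an
     _SC_SIGSTKSZ of 8 KiB, stacksize is 24 KiB; the 1 MiB floor then
     dominates 2 * stacksize, so guardsize is 1 MiB and the whole
     mapping below is 2 MiB + 24 KiB.  */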
  size_t alloc_size = guardsize + stacksize + guardsize;
  /* Use MAP_NORESERVE so that RAM will not be wasted on the guard
     bands; touch all the pages of the actual stack before returning,
     so we know they are allocated.  */
  void *alloc_base = xmmap (0,
                            alloc_size,
                            PROT_NONE,
                            MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE|MAP_STACK,
                            -1);
  /* Some architectures still require an executable stack for the signal
     return trampoline, although PF_X could be overridden if PT_GNU_STACK
     is present.  However, since glibc does not export such information
     with a proper ABI, this code uses the historical permissions.  */
  int prot = PROT_READ | PROT_WRITE
             | (DEFAULT_STACK_PERMS & PF_X ? PROT_EXEC : 0);
  xmprotect (alloc_base + guardsize, stacksize, prot);
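  /* Note: besides touching every page so it is actually allocated, the
     non-zero 0xA5 fill makes reads of never-written stack memory easier
     to recognize.  */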
  memset (alloc_base + guardsize, 0xA5, stacksize);
  return (struct support_stack) { alloc_base + guardsize, stacksize,
                                  guardsize };
}

void
support_stack_free (struct support_stack *stack)
{
  void *alloc_base = (void *)((uintptr_t) stack->stack - stack->guardsize);
  size_t alloc_size = stack->size + 2 * stack->guardsize;
  xmunmap (alloc_base, alloc_size);
}
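
/* Usage sketch (illustrative only, not part of this file): a caller
   typically allocates the stack, points a signal stack or clone child
   at stack.stack/stack.size, and frees it when done.  This sketch
   assumes the xsigaltstack wrapper from <support/xsignal.h> takes the
   same arguments as sigaltstack:

     struct support_stack stack = support_stack_alloc (0);
     stack_t ss = { .ss_sp = stack.stack, .ss_size = stack.size,
                    .ss_flags = 0 };
     xsigaltstack (&ss, NULL);
     ... install a SA_ONSTACK handler and run the test ...
     support_stack_free (&stack);
*/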