/* mmap.c -- Memory allocation with mmap.
   Copyright (C) 2012-2015 Free Software Foundation, Inc.
   Written by Ian Lance Taylor, Google.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    (1) Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.

    (2) Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in
    the documentation and/or other materials provided with the
    distribution.

    (3) The name of the author may not be used to
    endorse or promote products derived from this software without
    specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.  */
#include "config.h"

#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "backtrace.h"
#include "internal.h"

/* Memory allocation on systems that provide anonymous mmap.  This
   permits the backtrace functions to be invoked from a signal
   handler, assuming that mmap is async-signal safe.  */

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif
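
/* Illustrative sketch (added; not part of the original file): the
   bare anonymous-mmap request that the allocator below is built on.
   No file descriptor is involved, which is why fd is -1, and the
   kernel hands back zero-filled pages.  Guarded out of the build;
   BACKTRACE_MMAP_EXAMPLE is a hypothetical macro used only here.  */

#ifdef BACKTRACE_MMAP_EXAMPLE

static void *
example_anonymous_page (size_t pagesize)
{
  void *page = mmap (NULL, pagesize, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  return page == MAP_FAILED ? NULL : page;
}

#endif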
/* A list of free memory blocks.  */

struct backtrace_freelist_struct
{
  /* Next on list.  */
  struct backtrace_freelist_struct *next;
  /* Size of this block, including this structure.  */
  size_t size;
};
/* Free memory allocated by backtrace_alloc.  */

static void
backtrace_free_locked (struct backtrace_state *state, void *addr, size_t size)
{
  /* Just leak small blocks.  We don't have to be perfect.  */
  if (size >= sizeof (struct backtrace_freelist_struct))
    {
      struct backtrace_freelist_struct *p;

      p = (struct backtrace_freelist_struct *) addr;
      p->next = state->freelist;
      p->size = size;
      state->freelist = p;
    }
}
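
/* Note (added): on a typical 64-bit target the freelist header is 16
   bytes (a pointer plus a size_t), so backtrace_free_locked simply
   leaks any freed block smaller than that.  */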
/* Allocate memory like malloc.  If ERROR_CALLBACK is NULL, don't
   report an error.  */

void *
backtrace_alloc (struct backtrace_state *state,
                 size_t size, backtrace_error_callback error_callback,
                 void *data)
{
  void *ret;
  int locked;
  struct backtrace_freelist_struct **pp;
  size_t pagesize;
  size_t asksize;
  void *page;

  ret = NULL;

  /* If we can acquire the lock, then see if there is space on the
     free list.  If we can't acquire the lock, drop straight into
     using mmap.  __sync_lock_test_and_set returns the old state of
     the lock, so we have acquired it if it returns 0.  */

  if (!state->threaded)
    locked = 1;
  else
    locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;

  if (locked)
    {
      for (pp = &state->freelist; *pp != NULL; pp = &(*pp)->next)
        {
          if ((*pp)->size >= size)
            {
              struct backtrace_freelist_struct *p;

              p = *pp;
              *pp = p->next;

              /* Round for alignment; we assume that no type we care about
                 is more than 8 bytes.  */
              size = (size + 7) & ~ (size_t) 7;
              if (size < p->size)
                backtrace_free_locked (state, (char *) p + size,
                                       p->size - size);

              ret = (void *) p;

              break;
            }
        }

      if (state->threaded)
        __sync_lock_release (&state->lock_alloc);
    }

  if (ret == NULL)
    {
      /* Allocate a new page.  */

      pagesize = getpagesize ();
      asksize = (size + pagesize - 1) & ~ (pagesize - 1);
      page = mmap (NULL, asksize, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (page == MAP_FAILED)
        {
          if (error_callback)
            error_callback (data, "mmap", errno);
        }
      else
        {
          size = (size + 7) & ~ (size_t) 7;
          if (size < asksize)
            backtrace_free (state, (char *) page + size, asksize - size,
                            error_callback, data);

          ret = page;
        }
    }

  return ret;
}
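
/* Illustrative sketch (added; not part of the original file): a
   typical allocate-and-release round trip.  The 8-byte rounding in
   backtrace_alloc means, e.g., a 13-byte request carves
   (13 + 7) & ~(size_t) 7 == 16 bytes off a recycled block.  Guarded
   out of the build; BACKTRACE_ALLOC_EXAMPLE is a hypothetical
   macro.  */

#ifdef BACKTRACE_ALLOC_EXAMPLE

static void
example_alloc_usage (struct backtrace_state *state,
                     backtrace_error_callback error_callback, void *data)
{
  char *buf;

  buf = (char *) backtrace_alloc (state, 128, error_callback, data);
  if (buf == NULL)
    return;
  memset (buf, 0, 128);
  /* Hand the block back; it lands on the freelist (or is leaked if
     the lock is contended).  */
  backtrace_free (state, buf, 128, error_callback, data);
}

#endif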
/* Free memory allocated by backtrace_alloc.  */

void
backtrace_free (struct backtrace_state *state, void *addr, size_t size,
                backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
                void *data ATTRIBUTE_UNUSED)
{
  int locked;

  /* If we are freeing a large aligned block, just release it back to
     the system.  This case arises when growing a vector for a large
     binary with lots of debug info.  Calling munmap here may cause us
     to call mmap again if there is also a large shared library; we
     just live with that.  */
  if (size >= 16 * 4096)
    {
      size_t pagesize;

      pagesize = getpagesize ();
      if (((uintptr_t) addr & (pagesize - 1)) == 0
          && (size & (pagesize - 1)) == 0)
        {
          /* If munmap fails for some reason, just add the block to
             the freelist.  */
          if (munmap (addr, size) == 0)
            return;
        }
    }

  /* If we can acquire the lock, add the new space to the free list.
     If we can't acquire the lock, just leak the memory.
     __sync_lock_test_and_set returns the old state of the lock, so we
     have acquired it if it returns 0.  */

  if (!state->threaded)
    locked = 1;
  else
    locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;

  if (locked)
    {
      backtrace_free_locked (state, addr, size);

      if (state->threaded)
        __sync_lock_release (&state->lock_alloc);
    }
}
/* Grow VEC by SIZE bytes.  */

void *
backtrace_vector_grow (struct backtrace_state *state, size_t size,
                       backtrace_error_callback error_callback,
                       void *data, struct backtrace_vector *vec)
{
  void *ret;

  if (size > vec->alc)
    {
      size_t pagesize;
      size_t alc;
      void *base;

      pagesize = getpagesize ();
      alc = vec->size + size;

      if (vec->size == 0)
        alc = 16 * size;
      else if (alc < pagesize)
        {
          alc *= 2;
          if (alc > pagesize)
            alc = pagesize;
        }
      else
        {
          alc *= 2;
          alc = (alc + pagesize - 1) & ~ (pagesize - 1);
        }

      base = backtrace_alloc (state, alc, error_callback, data);
      if (base == NULL)
        return NULL;

      if (vec->base != NULL)
        {
          memcpy (base, vec->base, vec->size);
          backtrace_free (state, vec->base, vec->size + vec->alc,
                          error_callback, data);
        }

      vec->base = base;
      vec->alc = alc - vec->size;
    }

  ret = (char *) vec->base + vec->size;
  vec->size += size;
  vec->alc -= size;
  return ret;
}
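
/* Illustrative sketch (added; not part of the original file):
   appending a string's bytes to a vector.  backtrace_vector_grow
   reserves SIZE bytes and returns a pointer to them, so the caller
   just copies into the returned space.  Guarded out of the build;
   BACKTRACE_VECTOR_EXAMPLE is a hypothetical macro.  */

#ifdef BACKTRACE_VECTOR_EXAMPLE

static char *
example_vector_append (struct backtrace_state *state,
                       struct backtrace_vector *vec, const char *s,
                       backtrace_error_callback error_callback, void *data)
{
  size_t len = strlen (s) + 1;
  char *p = (char *) backtrace_vector_grow (state, len, error_callback,
                                            data, vec);

  if (p == NULL)
    return NULL;
  memcpy (p, s, len);
  return p;
}

#endif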
/* Finish the current allocation on VEC.  */

void *
backtrace_vector_finish (struct backtrace_state *state ATTRIBUTE_UNUSED,
                         struct backtrace_vector *vec,
                         backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
                         void *data ATTRIBUTE_UNUSED)
{
  void *ret;

  ret = vec->base;
  vec->base = (char *) vec->base + vec->size;
  vec->size = 0;
  return ret;
}
/* Release any extra space allocated for VEC.  */

int
backtrace_vector_release (struct backtrace_state *state,
                          struct backtrace_vector *vec,
                          backtrace_error_callback error_callback,
                          void *data)
{
  size_t size;
  size_t alc;
  size_t aligned;

  /* Make sure that the block that we free is aligned on an 8-byte
     boundary.  */
  size = vec->size;
  alc = vec->alc;
  aligned = (size + 7) & ~ (size_t) 7;
  alc -= aligned - size;

  backtrace_free (state, (char *) vec->base + aligned, alc,
                  error_callback, data);
  vec->alc = 0;
  return 1;
}
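
/* Illustrative sketch (added; not part of the original file): sealing
   the bytes accumulated in a vector and trimming the slack.
   backtrace_vector_finish returns everything added since the last
   finish, and that pointer stays valid while VEC keeps growing;
   backtrace_vector_release then returns the unused capacity to the
   allocator.  Guarded out of the build; BACKTRACE_VECTOR_EXAMPLE is
   a hypothetical macro.  */

#ifdef BACKTRACE_VECTOR_EXAMPLE

static void *
example_vector_seal (struct backtrace_state *state,
                     struct backtrace_vector *vec,
                     backtrace_error_callback error_callback, void *data)
{
  void *ret = backtrace_vector_finish (state, vec, error_callback, data);

  backtrace_vector_release (state, vec, error_callback, data);
  return ret;
}

#endif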