/* mmap.c -- Memory allocation with mmap.
   Copyright (C) 2012-2022 Free Software Foundation, Inc.
   Written by Ian Lance Taylor, Google.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    (1) Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.

    (2) Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in
    the documentation and/or other materials provided with the
    distribution.

    (3) The name of the author may not be used to
    endorse or promote products derived from this software without
    specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.  */
#include "config.h"

#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "backtrace.h"
#include "internal.h"

#ifndef HAVE_DECL_GETPAGESIZE
extern int getpagesize (void);
#endif
/* Memory allocation on systems that provide anonymous mmap.  This
   permits the backtrace functions to be invoked from a signal
   handler, assuming that mmap is async-signal safe.  */

/* Some systems spell MAP_ANONYMOUS as MAP_ANON.  */
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

#ifndef MAP_FAILED
#define MAP_FAILED ((void *)-1)
#endif
/* A list of free memory blocks.  */

struct backtrace_freelist_struct
{
  /* Next on list.  */
  struct backtrace_freelist_struct *next;
  /* Size of this block, including this structure.  */
  size_t size;
};
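/* Note: the list header is stored in the freed memory itself, so only
   a block of at least sizeof (struct backtrace_freelist_struct) bytes
   can appear on the free list; anything smaller is simply leaked by
   backtrace_free_locked below.  */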
/* Free memory allocated by backtrace_alloc.  */

static void
backtrace_free_locked (struct backtrace_state *state, void *addr,
                       size_t size)
{
  /* Just leak small blocks.  We don't have to be perfect.  Don't put
     more than 16 entries on the free list, to avoid wasting time
     searching when allocating a block.  If we have more than 16
     entries, leak the smallest entry.  */

  if (size >= sizeof (struct backtrace_freelist_struct))
    {
      size_t c;
      struct backtrace_freelist_struct **ppsmall;
      struct backtrace_freelist_struct **pp;
      struct backtrace_freelist_struct *p;

      /* Walk the list, counting entries and remembering the smallest.  */
      c = 0;
      ppsmall = NULL;
      for (pp = &state->freelist; *pp != NULL; pp = &(*pp)->next)
        {
          if (ppsmall == NULL || (*pp)->size < (*ppsmall)->size)
            ppsmall = pp;
          ++c;
        }
      if (c >= 16)
        {
          /* The list is full; leak whichever is smaller, the new
             block or the smallest existing entry.  */
          if (size <= (*ppsmall)->size)
            return;
          *ppsmall = (*ppsmall)->next;
        }

      p = (struct backtrace_freelist_struct *) addr;
      p->next = state->freelist;
      p->size = size;
      state->freelist = p;
    }
}
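/* For example, assuming 8-byte pointers and an 8-byte size_t, the
   header is 16 bytes: freeing a 12-byte remainder leaks it, while a
   24-byte remainder is kept on the free list for reuse.  The exact
   threshold is sizeof (struct backtrace_freelist_struct) on the
   target.  */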
/* Allocate memory like malloc.  If ERROR_CALLBACK is NULL, don't
   report an error.  */

void *
backtrace_alloc (struct backtrace_state *state,
                 size_t size, backtrace_error_callback error_callback,
                 void *data)
{
  void *ret;
  int locked;
  struct backtrace_freelist_struct **pp;
  size_t pagesize;
  size_t asksize;
  void *page;

  ret = NULL;

  /* If we can acquire the lock, then see if there is space on the
     free list.  If we can't acquire the lock, drop straight into
     using mmap.  __sync_lock_test_and_set returns the old state of
     the lock, so we have acquired it if it returns 0.  */

  if (!state->threaded)
    locked = 1;
  else
    locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;

  if (locked)
    {
      /* First-fit scan of the free list.  */
      for (pp = &state->freelist; *pp != NULL; pp = &(*pp)->next)
        {
          if ((*pp)->size >= size)
            {
              struct backtrace_freelist_struct *p;

              p = *pp;
              *pp = p->next;

              /* Round for alignment; we assume that no type we care about
                 is more than 8 bytes.  */
              size = (size + 7) & ~ (size_t) 7;
              if (size < p->size)
                backtrace_free_locked (state, (char *) p + size,
                                       p->size - size);

              ret = (void *) p;

              break;
            }
        }

      if (state->threaded)
        __sync_lock_release (&state->lock_alloc);
    }

  if (ret == NULL)
    {
      /* Allocate a new page.  */

      pagesize = getpagesize ();
      asksize = (size + pagesize - 1) & ~ (pagesize - 1);
      page = mmap (NULL, asksize, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (page == MAP_FAILED)
        {
          if (error_callback)
            error_callback (data, "mmap", errno);
        }
      else
        {
          size = (size + 7) & ~ (size_t) 7;
          if (size < asksize)
            backtrace_free (state, (char *) page + size, asksize - size,
                            error_callback, data);

          ret = page;
        }
    }

  return ret;
}
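/* A minimal usage sketch (illustrative, not part of this file): pair
   each allocation with a free of the same size, since the allocator
   does not record block sizes on behalf of the caller:

     void *p = backtrace_alloc (state, 128, error_callback, data);
     if (p != NULL)
       {
         ... use the 128 bytes ...
         backtrace_free (state, p, 128, error_callback, data);
       }

   STATE, ERROR_CALLBACK, and DATA stand for whatever the caller has
   in scope.  */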
/* Free memory allocated by backtrace_alloc.  */

void
backtrace_free (struct backtrace_state *state, void *addr, size_t size,
                backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
                void *data ATTRIBUTE_UNUSED)
{
  int locked;

  /* If we are freeing a large aligned block, just release it back to
     the system.  This case arises when growing a vector for a large
     binary with lots of debug info.  Calling munmap here may cause us
     to call mmap again if there is also a large shared library; we
     just live with that.  */
  if (size >= 16 * 4096)
    {
      size_t pagesize;

      pagesize = getpagesize ();
      if (((uintptr_t) addr & (pagesize - 1)) == 0
          && (size & (pagesize - 1)) == 0)
        {
          /* If munmap fails for some reason, just add the block to
             the free list.  */
          if (munmap (addr, size) == 0)
            return;
        }
    }

  /* If we can acquire the lock, add the new space to the free list.
     If we can't acquire the lock, just leak the memory.
     __sync_lock_test_and_set returns the old state of the lock, so we
     have acquired it if it returns 0.  */

  if (!state->threaded)
    locked = 1;
  else
    locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;

  if (locked)
    {
      backtrace_free_locked (state, addr, size);

      if (state->threaded)
        __sync_lock_release (&state->lock_alloc);
    }
}
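/* The 16 * 4096 threshold above means blocks of at least 64 KiB are
   candidates for immediate munmap; the alignment checks then require
   the block to start and end on page boundaries, since munmap
   operates on whole pages.  */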
/* Grow VEC by SIZE bytes.  */

void *
backtrace_vector_grow (struct backtrace_state *state, size_t size,
                       backtrace_error_callback error_callback,
                       void *data, struct backtrace_vector *vec)
{
  void *ret;

  if (size > vec->alc)
    {
      size_t pagesize;
      size_t alc;
      void *base;

      pagesize = getpagesize ();
      alc = vec->size + size;

      if (vec->size == 0)
        alc = 16 * size;
      else if (alc < pagesize)
        {
          alc *= 2;
          if (alc > pagesize)
            alc = pagesize;
        }
      else
        {
          alc *= 2;
          alc = (alc + pagesize - 1) & ~ (pagesize - 1);
        }

      base = backtrace_alloc (state, alc, error_callback, data);
      if (base == NULL)
        return NULL;

      if (vec->base != NULL)
        {
          memcpy (base, vec->base, vec->size);
          backtrace_free (state, vec->base, vec->size + vec->alc,
                          error_callback, data);
        }

      vec->base = base;
      vec->alc = alc - vec->size;
    }

  ret = (char *) vec->base + vec->size;
  vec->size += size;
  vec->alc -= size;

  return ret;
}
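/* A minimal usage sketch (illustrative, not part of this file): grow
   the vector by the number of bytes needed, then copy into the space
   returned:

     struct backtrace_vector vec;
     char *p;

     memset (&vec, 0, sizeof vec);
     p = (char *) backtrace_vector_grow (state, len, error_callback,
                                         data, &vec);
     if (p != NULL)
       memcpy (p, buf, len);

   STATE, ERROR_CALLBACK, DATA, BUF, and LEN stand for whatever the
   caller has in scope.  */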
/* Finish the current allocation on VEC.  */

void *
backtrace_vector_finish (struct backtrace_state *state ATTRIBUTE_UNUSED,
                         struct backtrace_vector *vec,
                         backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
                         void *data ATTRIBUTE_UNUSED)
{
  void *ret;

  ret = vec->base;
  vec->base = (char *) vec->base + vec->size;
  vec->size = 0;
  return ret;
}
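/* Note that finishing does not copy or free anything: the finished
   bytes stay where they are, and the vector's base is simply advanced
   past them, so later grows append after the finished entry in the
   same underlying block.  */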
/* Release any extra space allocated for VEC.  */

int
backtrace_vector_release (struct backtrace_state *state,
                          struct backtrace_vector *vec,
                          backtrace_error_callback error_callback,
                          void *data)
{
  size_t size;
  size_t alc;
  size_t aligned;

  /* Make sure that the block that we free is aligned on an 8-byte
     boundary.  */
  size = vec->size;
  aligned = (size + 7) & ~ (size_t) 7;
  alc = vec->alc;
  /* If the current size is not 8-byte aligned, keep the bytes up to
     the alignment boundary with the retained block rather than
     freeing them.  */
  alc -= aligned - size;

  backtrace_free (state, (char *) vec->base + aligned, alc,
                  error_callback, data);
  vec->alc = 0;
  if (vec->size == 0)
    vec->base = NULL;
  return 1;
}