move __expand_heap into malloc.c
src/malloc/malloc.c (musl.git)
#define _GNU_SOURCE
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <stdint.h>
#include <errno.h>
#include <sys/mman.h>
#include "libc.h"
#include "atomic.h"
#include "pthread_impl.h"
#include "malloc_impl.h"

#if defined(__GNUC__) && defined(__PIC__)
#define inline inline __attribute__((always_inline))
#endif

static struct {
	volatile uint64_t binmap;
	struct bin bins[64];
	volatile int split_merge_lock[2];
} mal;

int __malloc_replaced;

/* Synchronization tools */

static inline void lock(volatile int *lk)
{
	int need_locks = libc.need_locks;
	if (need_locks) {
		while(a_swap(lk, 1)) __wait(lk, lk+1, 1, 1);
		if (need_locks < 0) libc.need_locks = 0;
	}
}

static inline void unlock(volatile int *lk)
{
	if (lk[0]) {
		a_store(lk, 0);
		if (lk[1]) __wake(lk, 1, 1);
	}
}

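/* Bins start out with null head/tail; on first use, point both at the
 * bin's own sentinel pseudo-chunk (BIN_TO_CHUNK) so the list is circular. */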
static inline void lock_bin(int i)
{
	lock(mal.bins[i].lock);
	if (!mal.bins[i].head)
		mal.bins[i].head = mal.bins[i].tail = BIN_TO_CHUNK(i);
}

static inline void unlock_bin(int i)
{
	unlock(mal.bins[i].lock);
}

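/* Index of the least significant set bit of a nonzero bin mask. */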
static int first_set(uint64_t x)
{
#if 1
	return a_ctz_64(x);
#else
	static const char debruijn64[64] = {
		0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28,
		62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11,
		63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10,
		51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12
	};
	static const char debruijn32[32] = {
		0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
		31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14
	};
	if (sizeof(long) < 8) {
		uint32_t y = x;
		if (!y) {
			y = x>>32;
			return 32 + debruijn32[(y&-y)*0x076be629 >> 27];
		}
		return debruijn32[(y&-y)*0x076be629 >> 27];
	}
	return debruijn64[(x&-x)*0x022fdd63cc95386dull >> 58];
#endif
}

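/* Size-class table for bin_index/bin_index_up below: the smallest sizes
 * get one bin per size, larger sizes share progressively coarser bins,
 * and the very largest all land in bin 63. */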
static const unsigned char bin_tab[60] = {
	            32,33,34,35,36,36,37,37,38,38,39,39,
	40,40,40,40,41,41,41,41,42,42,42,42,43,43,43,43,
	44,44,44,44,44,44,44,44,45,45,45,45,45,45,45,45,
	46,46,46,46,46,46,46,46,47,47,47,47,47,47,47,47,
};

static int bin_index(size_t x)
{
	x = x / SIZE_ALIGN - 1;
	if (x <= 32) return x;
	if (x < 512) return bin_tab[x/8-4];
	if (x > 0x1c00) return 63;
	return bin_tab[x/128-4] + 16;
}

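/* Like bin_index above, but rounds up, so every chunk binned at the
 * returned index is large enough for a request of size x. */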
static int bin_index_up(size_t x)
{
	x = x / SIZE_ALIGN - 1;
	if (x <= 32) return x;
	x--;
	if (x < 512) return bin_tab[x/8-4] + 1;
	return bin_tab[x/128-4] + 17;
}

#if 0
void __dump_heap(int x)
{
	struct chunk *c;
	int i;
	for (c = (void *)mal.heap; CHUNK_SIZE(c); c = NEXT_CHUNK(c))
		fprintf(stderr, "base %p size %zu (%d) flags %d/%d\n",
			c, CHUNK_SIZE(c), bin_index(CHUNK_SIZE(c)),
			c->csize & 15,
			NEXT_CHUNK(c)->psize & 15);
	for (i=0; i<64; i++) {
		if (mal.bins[i].head != BIN_TO_CHUNK(i) && mal.bins[i].head) {
			fprintf(stderr, "bin %d: %p\n", i, mal.bins[i].head);
			if (!(mal.binmap & 1ULL<<i))
				fprintf(stderr, "missing from binmap!\n");
		} else if (mal.binmap & 1ULL<<i)
			fprintf(stderr, "binmap wrongly contains %d!\n", i);
	}
}
#endif

/* This function returns true if the interval [old,new]
 * intersects the 'len'-sized interval below &libc.auxv
 * (interpreted as the main-thread stack) or below &b
 * (the current stack). It is used to defend against
 * buggy brk implementations that can cross the stack. */

static int traverses_stack_p(uintptr_t old, uintptr_t new)
{
	const uintptr_t len = 8<<20;
	uintptr_t a, b;

	b = (uintptr_t)libc.auxv;
	a = b > len ? b-len : 0;
	if (new>a && old<b) return 1;

	b = (uintptr_t)&b;
	a = b > len ? b-len : 0;
	if (new>a && old<b) return 1;

	return 0;
}

/* Expand the heap in-place if brk can be used, or otherwise via mmap,
 * using an exponential lower bound on growth by mmap to make
 * fragmentation asymptotically irrelevant. The size argument is both
 * an input and an output, since the caller needs to know the size
 * allocated, which will be larger than requested due to page alignment
 * and mmap minimum size rules. The caller is responsible for locking
 * to prevent concurrent calls. */

static void *__expand_heap(size_t *pn)
{
	static uintptr_t brk;
	static unsigned mmap_step;
	size_t n = *pn;

	if (n > SIZE_MAX/2 - PAGE_SIZE) {
		errno = ENOMEM;
		return 0;
	}
	n += -n & PAGE_SIZE-1;

	if (!brk) {
		brk = __syscall(SYS_brk, 0);
		brk += -brk & PAGE_SIZE-1;
	}

	if (n < SIZE_MAX-brk && !traverses_stack_p(brk, brk+n)
	    && __syscall(SYS_brk, brk+n)==brk+n) {
		*pn = n;
		brk += n;
		return (void *)(brk-n);
	}

	size_t min = (size_t)PAGE_SIZE << mmap_step/2;
	if (n < min) n = min;
	void *area = __mmap(0, n, PROT_READ|PROT_WRITE,
		MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
	if (area == MAP_FAILED) return 0;
	*pn = n;
	mmap_step++;
	return area;
}

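/* Grow the heap via __expand_heap and frame the new memory as a single
 * in-use chunk, maintaining zero-sized sentinel headers at both ends
 * of the heap region. */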
static struct chunk *expand_heap(size_t n)
{
	static void *end;
	void *p;
	struct chunk *w;

	/* The argument n already accounts for the caller's chunk
	 * overhead needs, but if the heap can't be extended in-place,
	 * we need room for an extra zero-sized sentinel chunk. */
	n += SIZE_ALIGN;

	p = __expand_heap(&n);
	if (!p) return 0;

	/* If not just expanding existing space, we need to make a
	 * new sentinel chunk below the allocated space. */
	if (p != end) {
		/* Valid/safe because of the prologue increment. */
		n -= SIZE_ALIGN;
		p = (char *)p + SIZE_ALIGN;
		w = MEM_TO_CHUNK(p);
		w->psize = 0 | C_INUSE;
	}

	/* Record new heap end and fill in footer. */
	end = (char *)p + n;
	w = MEM_TO_CHUNK(end);
	w->psize = n | C_INUSE;
	w->csize = 0 | C_INUSE;

	/* Fill in header, which may be new or may be replacing a
	 * zero-size sentinel header at the old end-of-heap. */
	w = MEM_TO_CHUNK(p);
	w->csize = n | C_INUSE;

	return w;
}

static int adjust_size(size_t *n)
{
	/* Result of pointer difference must fit in ptrdiff_t. */
	if (*n-1 > PTRDIFF_MAX - SIZE_ALIGN - PAGE_SIZE) {
		if (*n) {
			errno = ENOMEM;
			return -1;
		} else {
			*n = SIZE_ALIGN;
			return 0;
		}
	}
	*n = (*n + OVERHEAD + SIZE_ALIGN - 1) & SIZE_MASK;
	return 0;
}

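/* Remove chunk c from bin i, clearing the binmap bit if this empties
 * the bin, and mark the chunk in-use in its own header and in its
 * successor's psize footer. */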
static void unbin(struct chunk *c, int i)
{
	if (c->prev == c->next)
		a_and_64(&mal.binmap, ~(1ULL<<i));
	c->prev->next = c->next;
	c->next->prev = c->prev;
	c->csize |= C_INUSE;
	NEXT_CHUNK(c)->psize |= C_INUSE;
}

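/* Insert a free chunk at the tail of bin i, setting the binmap bit if
 * the bin was previously empty. */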
static void bin_chunk(struct chunk *self, int i)
{
	self->next = BIN_TO_CHUNK(i);
	self->prev = mal.bins[i].tail;
	self->next->prev = self;
	self->prev->next = self;
	if (self->prev == BIN_TO_CHUNK(i))
		a_or_64(&mal.binmap, 1ULL<<i);
}

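/* If chunk self exceeds size n by more than DONTCARE, split off the
 * excess as a separate free chunk and bin it. */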
static void trim(struct chunk *self, size_t n)
{
	size_t n1 = CHUNK_SIZE(self);
	struct chunk *next, *split;

	if (n >= n1 - DONTCARE) return;

	next = NEXT_CHUNK(self);
	split = (void *)((char *)self + n);

	split->psize = n | C_INUSE;
	split->csize = n1-n;
	next->psize = n1-n;
	self->csize = n | C_INUSE;

	int i = bin_index(n1-n);
	lock_bin(i);

	bin_chunk(split, i);

	unlock_bin(i);
}

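/* Allocation strategy: requests above MMAP_THRESHOLD are serviced
 * directly by mmap; otherwise try a close fit from the matching bin,
 * then split a chunk from the lowest usable larger bin, expanding the
 * heap only when no suitable free chunk exists. */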
void *malloc(size_t n)
{
	struct chunk *c;
	int i, j;
	uint64_t mask;

	if (adjust_size(&n) < 0) return 0;

	if (n > MMAP_THRESHOLD) {
		size_t len = n + OVERHEAD + PAGE_SIZE - 1 & -PAGE_SIZE;
		char *base = __mmap(0, len, PROT_READ|PROT_WRITE,
			MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
		if (base == (void *)-1) return 0;
		c = (void *)(base + SIZE_ALIGN - OVERHEAD);
		c->csize = len - (SIZE_ALIGN - OVERHEAD);
		c->psize = SIZE_ALIGN - OVERHEAD;
		return CHUNK_TO_MEM(c);
	}

	i = bin_index_up(n);
	if (i<63 && (mal.binmap & (1ULL<<i))) {
		lock_bin(i);
		c = mal.bins[i].head;
		if (c != BIN_TO_CHUNK(i) && CHUNK_SIZE(c)-n <= DONTCARE) {
			unbin(c, i);
			unlock_bin(i);
			return CHUNK_TO_MEM(c);
		}
		unlock_bin(i);
	}
	lock(mal.split_merge_lock);
	for (mask = mal.binmap & -(1ULL<<i); mask; mask -= (mask&-mask)) {
		j = first_set(mask);
		lock_bin(j);
		c = mal.bins[j].head;
		if (c != BIN_TO_CHUNK(j)) {
			unbin(c, j);
			unlock_bin(j);
			break;
		}
		unlock_bin(j);
	}
	if (!mask) {
		c = expand_heap(n);
		if (!c) {
			unlock(mal.split_merge_lock);
			return 0;
		}
	}
	trim(c, n);
	unlock(mal.split_merge_lock);
	return CHUNK_TO_MEM(c);
}

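/* Used by calloc for large non-mmapped allocations: working backwards
 * from the end, zero the memory while avoiding writes to pages that
 * are already all-zero, and return the length of the initial sub-page
 * portion left for the caller to clear. */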
static size_t mal0_clear(char *p, size_t pagesz, size_t n)
{
#ifdef __GNUC__
	typedef uint64_t __attribute__((__may_alias__)) T;
#else
	typedef unsigned char T;
#endif
	char *pp = p + n;
	size_t i = (uintptr_t)pp & (pagesz - 1);
	for (;;) {
		pp = memset(pp - i, 0, i);
		if (pp - p < pagesz) return pp - p;
		for (i = pagesz; i; i -= 2*sizeof(T), pp -= 2*sizeof(T))
			if (((T *)pp)[-1] | ((T *)pp)[-2])
				break;
	}
}

void *calloc(size_t m, size_t n)
{
	if (n && m > (size_t)-1/n) {
		errno = ENOMEM;
		return 0;
	}
	n *= m;
	void *p = malloc(n);
	if (!p) return p;
	if (!__malloc_replaced) {
		if (IS_MMAPPED(MEM_TO_CHUNK(p)))
			return p;
		if (n >= PAGE_SIZE)
			n = mal0_clear(p, PAGE_SIZE, n);
	}
	return memset(p, 0, n);
}

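/* Resize in place where possible: mmapped chunks via mremap, heap
 * chunks by absorbing a free successor chunk; otherwise fall back to
 * malloc, copy, and free. */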
void *realloc(void *p, size_t n)
{
	struct chunk *self, *next;
	size_t n0, n1;
	void *new;

	if (!p) return malloc(n);

	if (adjust_size(&n) < 0) return 0;

	self = MEM_TO_CHUNK(p);
	n1 = n0 = CHUNK_SIZE(self);

	if (n<=n0 && n0-n<=DONTCARE) return p;

	if (IS_MMAPPED(self)) {
		size_t extra = self->psize;
		char *base = (char *)self - extra;
		size_t oldlen = n0 + extra;
		size_t newlen = n + extra;
		/* Crash on realloc of freed chunk */
		if (extra & 1) a_crash();
		if (newlen < PAGE_SIZE && (new = malloc(n-OVERHEAD))) {
			n0 = n;
			goto copy_free_ret;
		}
		newlen = (newlen + PAGE_SIZE-1) & -PAGE_SIZE;
		if (oldlen == newlen) return p;
		base = __mremap(base, oldlen, newlen, MREMAP_MAYMOVE);
		if (base == (void *)-1)
			goto copy_realloc;
		self = (void *)(base + extra);
		self->csize = newlen - extra;
		return CHUNK_TO_MEM(self);
	}

	next = NEXT_CHUNK(self);

	/* Crash on corrupted footer (likely from buffer overflow) */
	if (next->psize != self->csize) a_crash();

	lock(mal.split_merge_lock);

	size_t nsize = next->csize & C_INUSE ? 0 : CHUNK_SIZE(next);
	if (n0+nsize >= n) {
		int i = bin_index(nsize);
		lock_bin(i);
		if (!(next->csize & C_INUSE)) {
			unbin(next, i);
			unlock_bin(i);
			next = NEXT_CHUNK(next);
			self->csize = next->psize = n0+nsize | C_INUSE;
			trim(self, n);
			unlock(mal.split_merge_lock);
			return CHUNK_TO_MEM(self);
		}
		unlock_bin(i);
	}
	unlock(mal.split_merge_lock);

copy_realloc:
	/* As a last resort, allocate a new chunk and copy to it. */
	new = malloc(n-OVERHEAD);
	if (!new) return 0;
copy_free_ret:
	memcpy(new, p, n0-OVERHEAD);
	free(CHUNK_TO_MEM(self));
	return new;
}

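/* Return a non-mmapped chunk to the free bins: coalesce with free
 * neighbors under split_merge_lock, bin the result, and madvise away
 * the interior pages of large free chunks. */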
void __bin_chunk(struct chunk *self)
{
	struct chunk *next = NEXT_CHUNK(self);

	/* Crash on corrupted footer (likely from buffer overflow) */
	if (next->psize != self->csize) a_crash();

	lock(mal.split_merge_lock);

	size_t osize = CHUNK_SIZE(self), size = osize;

	/* Since we hold split_merge_lock, only transition from free to
	 * in-use can race; in-use to free is impossible */
	size_t psize = self->psize & C_INUSE ? 0 : CHUNK_PSIZE(self);
	size_t nsize = next->csize & C_INUSE ? 0 : CHUNK_SIZE(next);

	if (psize) {
		int i = bin_index(psize);
		lock_bin(i);
		if (!(self->psize & C_INUSE)) {
			struct chunk *prev = PREV_CHUNK(self);
			unbin(prev, i);
			self = prev;
			size += psize;
		}
		unlock_bin(i);
	}
	if (nsize) {
		int i = bin_index(nsize);
		lock_bin(i);
		if (!(next->csize & C_INUSE)) {
			unbin(next, i);
			next = NEXT_CHUNK(next);
			size += nsize;
		}
		unlock_bin(i);
	}

	int i = bin_index(size);
	lock_bin(i);

	self->csize = size;
	next->psize = size;
	bin_chunk(self, i);
	unlock(mal.split_merge_lock);

	/* Replace middle of large chunks with fresh zero pages */
	if (size > RECLAIM && (size^(size-osize)) > size-osize) {
		uintptr_t a = (uintptr_t)self + SIZE_ALIGN+PAGE_SIZE-1 & -PAGE_SIZE;
		uintptr_t b = (uintptr_t)next - SIZE_ALIGN & -PAGE_SIZE;
#if 1
		__madvise((void *)a, b-a, MADV_DONTNEED);
#else
		__mmap((void *)a, b-a, PROT_READ|PROT_WRITE,
			MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, -1, 0);
#endif
	}

	unlock_bin(i);
}

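/* Release an allocation that was serviced directly by mmap. */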
static void unmap_chunk(struct chunk *self)
{
	size_t extra = self->psize;
	char *base = (char *)self - extra;
	size_t len = CHUNK_SIZE(self) + extra;
	/* Crash on double free */
	if (extra & 1) a_crash();
	__munmap(base, len);
}

void free(void *p)
{
	if (!p) return;

	struct chunk *self = MEM_TO_CHUNK(p);

	if (IS_MMAPPED(self))
		unmap_chunk(self);
	else
		__bin_chunk(self);
}

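/* Donate the memory range [start,end) to the heap as a single free
 * chunk, ignoring it if it is too small to hold the required alignment
 * padding and chunk headers. */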
void __malloc_donate(char *start, char *end)
{
	size_t align_start_up = (SIZE_ALIGN-1) & (-(uintptr_t)start - OVERHEAD);
	size_t align_end_down = (SIZE_ALIGN-1) & (uintptr_t)end;

	/* Getting past this condition ensures that the padding for alignment
	 * and header overhead will not overflow and will leave a nonzero
	 * multiple of SIZE_ALIGN bytes between start and end. */
	if (end - start <= OVERHEAD + align_start_up + align_end_down)
		return;
	start += align_start_up + OVERHEAD;
	end -= align_end_down;

	struct chunk *c = MEM_TO_CHUNK(start), *n = MEM_TO_CHUNK(end);
	c->psize = n->csize = C_INUSE;
	c->csize = n->psize = C_INUSE | (end-start);
	__bin_chunk(c);
}