/*
** Bundled memory allocator.
**
** Beware: this is a HEAVILY CUSTOMIZED version of dlmalloc.
** The original bears the following remark:
**
**   This is a version (aka dlmalloc) of malloc/free/realloc written by
**   Doug Lea and released to the public domain, as explained at
**   http://creativecommons.org/licenses/publicdomain.
**
**   * Version pre-2.8.4 Wed Mar 29 19:46:29 2006    (dl at gee)
**
** No additional copyright is claimed over the customizations.
** Please do NOT bother the original author about this version here!
**
** If you want to use dlmalloc in another project, you should get
** the original from: ftp://gee.cs.oswego.edu/pub/misc/
** For thread-safe derivatives, take a look at:
** - ptmalloc: http://www.malloc.de/
** - nedmalloc: http://www.nedprod.com/programs/portable/nedmalloc/
*/
#define lj_alloc_c
#define LUA_CORE

/* To get the mremap prototype. Must be defined before any system includes. */
#if defined(__linux__) && !defined(_GNU_SOURCE)
#define _GNU_SOURCE
#endif

#include "lj_def.h"
#include "lj_arch.h"
#include "lj_alloc.h"
#ifndef LUAJIT_USE_SYSMALLOC

#define MAX_SIZE_T		(~(size_t)0)
#define MALLOC_ALIGNMENT	((size_t)8U)

#define DEFAULT_GRANULARITY	((size_t)128U * (size_t)1024U)
#define DEFAULT_TRIM_THRESHOLD	((size_t)2U * (size_t)1024U * (size_t)1024U)
#define DEFAULT_MMAP_THRESHOLD	((size_t)128U * (size_t)1024U)
#define MAX_RELEASE_CHECK_RATE	255
/* ------------------- size_t and alignment properties -------------------- */

/* The byte and bit size of a size_t */
#define SIZE_T_SIZE		(sizeof(size_t))
#define SIZE_T_BITSIZE		(sizeof(size_t) << 3)

/* Some constants coerced to size_t */
/* Annoying but necessary to avoid errors on some platforms */
#define SIZE_T_ZERO		((size_t)0)
#define SIZE_T_ONE		((size_t)1)
#define SIZE_T_TWO		((size_t)2)
#define TWO_SIZE_T_SIZES	(SIZE_T_SIZE<<1)
#define FOUR_SIZE_T_SIZES	(SIZE_T_SIZE<<2)
#define SIX_SIZE_T_SIZES	(FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)

/* The bit mask value corresponding to MALLOC_ALIGNMENT */
#define CHUNK_ALIGN_MASK	(MALLOC_ALIGNMENT - SIZE_T_ONE)

/* the number of bytes to offset an address to align it */
#define align_offset(A)\
 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
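/* For instance, with the 8-byte MALLOC_ALIGNMENT above:
**   align_offset(0x1004) = (8 - (0x1004 & 7)) & 7 = 4
**   align_offset(0x1008) = 0  (already aligned)
*/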
/* -------------------------- MMAP support ------------------------------- */

#define MFAIL			((void *)(MAX_SIZE_T))
#define CMFAIL			((char *)(MFAIL)) /* defined for convenience */

#define IS_DIRECT_BIT		(SIZE_T_ONE)
#if LJ_TARGET_WINDOWS

#define WIN32_LEAN_AND_MEAN
#include <windows.h>

#if LJ_64

/* Undocumented, but hey, that's what we all love so much about Windows. */
typedef long (*PNTAVM)(HANDLE handle, void **addr, ULONG zbits,
		       size_t *size, ULONG alloctype, ULONG prot);
static PNTAVM ntavm;

/* Number of top bits of the lower 32 bits of an address that must be zero.
** Apparently 0 gives us full 64 bit addresses and 1 gives us the lower 2GB.
*/
#define NTAVM_ZEROBITS		1

static void INIT_MMAP(void)
{
  ntavm = (PNTAVM)GetProcAddress(GetModuleHandleA("ntdll.dll"),
				 "NtAllocateVirtualMemory");
}

/* Win64 32 bit MMAP via NtAllocateVirtualMemory. */
static LJ_AINLINE void *CALL_MMAP(size_t size)
{
  DWORD olderr = GetLastError();
  void *ptr = NULL;
  long st = ntavm(INVALID_HANDLE_VALUE, &ptr, NTAVM_ZEROBITS, &size,
		  MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
  SetLastError(olderr);
  return st == 0 ? ptr : MFAIL;
}

/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
static LJ_AINLINE void *DIRECT_MMAP(size_t size)
{
  DWORD olderr = GetLastError();
  void *ptr = NULL;
  long st = ntavm(INVALID_HANDLE_VALUE, &ptr, NTAVM_ZEROBITS, &size,
		  MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, PAGE_READWRITE);
  SetLastError(olderr);
  return st == 0 ? ptr : MFAIL;
}
#else

#define INIT_MMAP()		((void)0)

/* Win32 MMAP via VirtualAlloc */
static LJ_AINLINE void *CALL_MMAP(size_t size)
{
  DWORD olderr = GetLastError();
  void *ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
  SetLastError(olderr);
  return ptr ? ptr : MFAIL;
}

/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
static LJ_AINLINE void *DIRECT_MMAP(size_t size)
{
  DWORD olderr = GetLastError();
  void *ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
			   PAGE_READWRITE);
  SetLastError(olderr);
  return ptr ? ptr : MFAIL;
}

#endif
/* This function supports releasing coalesced segments */
static LJ_AINLINE int CALL_MUNMAP(void *ptr, size_t size)
{
  DWORD olderr = GetLastError();
  MEMORY_BASIC_INFORMATION minfo;
  char *cptr = (char *)ptr;
  while (size) {
    if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
      return -1;
    if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
	minfo.State != MEM_COMMIT || minfo.RegionSize > size)
      return -1;
    if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
      return -1;
    cptr += minfo.RegionSize;
    size -= minfo.RegionSize;
  }
  SetLastError(olderr);
  return 0;
}
#else

#include <errno.h>
#include <sys/mman.h>

#define MMAP_PROT		(PROT_READ|PROT_WRITE)
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS		MAP_ANON
#endif
#define MMAP_FLAGS		(MAP_PRIVATE|MAP_ANONYMOUS)
#if LJ_64
/* 64 bit mode needs special support for allocating memory in the lower 2GB. */

#if LJ_TARGET_LINUX

/* Actually this only gives us max. 1GB in current Linux kernels. */
static LJ_AINLINE void *CALL_MMAP(size_t size)
{
  int olderr = errno;
  void *ptr = mmap(NULL, size, MMAP_PROT, MAP_32BIT|MMAP_FLAGS, -1, 0);
  errno = olderr;
  return ptr;
}
#elif LJ_TARGET_OSX || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__OpenBSD__)

/* OSX and FreeBSD mmap() use a naive first-fit linear search.
** That's perfect for us. Except that -pagezero_size must be set for OSX,
** otherwise the lower 4GB are blocked. And the 32GB RLIMIT_DATA needs
** to be reduced to 250MB on FreeBSD.
*/
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__OpenBSD__)
#include <sys/resource.h>
#define MMAP_REGION_START	((uintptr_t)0x10000000)
#else
#define MMAP_REGION_START	((uintptr_t)0x10000)
#endif
#define MMAP_REGION_END		((uintptr_t)0x80000000)
static LJ_AINLINE void *CALL_MMAP(size_t size)
{
  int olderr = errno;
  /* Hint for next allocation. Doesn't need to be thread-safe. */
  static uintptr_t alloc_hint = MMAP_REGION_START;
  int retry = 0;
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
  static int rlimit_modified = 0;
  if (LJ_UNLIKELY(rlimit_modified == 0)) {
    struct rlimit rlim;
    rlim.rlim_cur = rlim.rlim_max = MMAP_REGION_START;
    setrlimit(RLIMIT_DATA, &rlim);  /* Ignore result. May fail below. */
    rlimit_modified = 1;
  }
#endif
  for (;;) {
    void *p = mmap((void *)alloc_hint, size, MMAP_PROT, MMAP_FLAGS, -1, 0);
    if ((uintptr_t)p >= MMAP_REGION_START &&
	(uintptr_t)p + size < MMAP_REGION_END) {
      alloc_hint = (uintptr_t)p + size;
      errno = olderr;
      return p;
    }
    if (p != CMFAIL) munmap(p, size);
    if (retry) break;
    retry = 1;
    alloc_hint = MMAP_REGION_START;
  }
  errno = olderr;
  return CMFAIL;
}
#else

#error "NYI: need an equivalent of MAP_32BIT for this 64 bit OS"

#endif

#else

/* 32 bit mode is easy. */
static LJ_AINLINE void *CALL_MMAP(size_t size)
{
  int olderr = errno;
  void *ptr = mmap(NULL, size, MMAP_PROT, MMAP_FLAGS, -1, 0);
  errno = olderr;
  return ptr;
}

#endif

#define INIT_MMAP()		((void)0)
#define DIRECT_MMAP(s)		CALL_MMAP(s)
static LJ_AINLINE int CALL_MUNMAP(void *ptr, size_t size)
{
  int olderr = errno;
  int ret = munmap(ptr, size);
  errno = olderr;
  return ret;
}

#if LJ_TARGET_LINUX
/* Need to define _GNU_SOURCE to get the mremap prototype. */
static LJ_AINLINE void *CALL_MREMAP_(void *ptr, size_t osz, size_t nsz,
				     int flags)
{
  int olderr = errno;
  ptr = mremap(ptr, osz, nsz, flags);
  errno = olderr;
  return ptr;
}

#define CALL_MREMAP(addr, osz, nsz, mv) CALL_MREMAP_((addr), (osz), (nsz), (mv))
#define CALL_MREMAP_NOMOVE	0
#define CALL_MREMAP_MAYMOVE	1
#if LJ_64
#define CALL_MREMAP_MV		CALL_MREMAP_NOMOVE
#else
#define CALL_MREMAP_MV		CALL_MREMAP_MAYMOVE
#endif
#endif

#endif

#ifndef CALL_MREMAP
#define CALL_MREMAP(addr, osz, nsz, mv) ((void)osz, MFAIL)
#endif
/* -----------------------  Chunk representations ------------------------ */

struct malloc_chunk {
  size_t               prev_foot;  /* Size of previous chunk (if free).  */
  size_t               head;       /* Size and inuse bits. */
  struct malloc_chunk *fd;         /* double links -- used only if free. */
  struct malloc_chunk *bk;
};

typedef struct malloc_chunk  mchunk;
typedef struct malloc_chunk *mchunkptr;
typedef struct malloc_chunk *sbinptr;  /* The type of bins of chunks */
typedef size_t bindex_t;               /* Described below */
typedef unsigned int binmap_t;         /* Described below */
typedef unsigned int flag_t;           /* The type of various bit flag sets */
/* ------------------- Chunk sizes and alignments ------------------------ */

#define MCHUNK_SIZE		(sizeof(mchunk))

#define CHUNK_OVERHEAD		(SIZE_T_SIZE)

/* Direct chunks need a second word of overhead ... */
#define DIRECT_CHUNK_OVERHEAD	(TWO_SIZE_T_SIZES)
/* ... and additional padding for fake next-chunk at foot */
#define DIRECT_FOOT_PAD		(FOUR_SIZE_T_SIZES)

/* The smallest size we can malloc is an aligned minimal chunk */
#define MIN_CHUNK_SIZE\
  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
/* conversion from malloc headers to user pointers, and back */
#define chunk2mem(p)		((void *)((char *)(p) + TWO_SIZE_T_SIZES))
#define mem2chunk(mem)		((mchunkptr)((char *)(mem) - TWO_SIZE_T_SIZES))
/* chunk associated with aligned address A */
#define align_as_chunk(A)	(mchunkptr)((A) + align_offset(chunk2mem(A)))

/* Bounds on request (not chunk) sizes. */
#define MAX_REQUEST		((~MIN_CHUNK_SIZE+1) << 2)
#define MIN_REQUEST		(MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)

/* pad request bytes into a usable size */
#define pad_request(req) \
  (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)

/* pad request, checking for minimum (but not maximum) */
#define request2size(req) \
  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
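/* E.g. on a 64-bit build (SIZE_T_SIZE == 8, CHUNK_ALIGN_MASK == 7):
**   request2size(100) = (100 + 8 + 7) & ~7 = 112
** i.e. a 100 byte request yields a 112 byte chunk: one word of header
** overhead plus alignment padding.
*/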
/* ------------------ Operations on head and foot fields ----------------- */

#define PINUSE_BIT		(SIZE_T_ONE)
#define CINUSE_BIT		(SIZE_T_TWO)
#define INUSE_BITS		(PINUSE_BIT|CINUSE_BIT)

/* Head value for fenceposts */
#define FENCEPOST_HEAD		(INUSE_BITS|SIZE_T_SIZE)

/* extraction of fields from head words */
#define cinuse(p)		((p)->head & CINUSE_BIT)
#define pinuse(p)		((p)->head & PINUSE_BIT)
#define chunksize(p)		((p)->head & ~(INUSE_BITS))

#define clear_pinuse(p)		((p)->head &= ~PINUSE_BIT)
#define clear_cinuse(p)		((p)->head &= ~CINUSE_BIT)

/* Treat space at ptr +/- offset as a chunk */
#define chunk_plus_offset(p, s)		((mchunkptr)(((char *)(p)) + (s)))
#define chunk_minus_offset(p, s)	((mchunkptr)(((char *)(p)) - (s)))

/* Ptr to next or previous physical malloc_chunk. */
#define next_chunk(p)	((mchunkptr)(((char *)(p)) + ((p)->head & ~INUSE_BITS)))
#define prev_chunk(p)	((mchunkptr)(((char *)(p)) - ((p)->prev_foot) ))

/* extract next chunk's pinuse bit */
#define next_pinuse(p)	((next_chunk(p)->head) & PINUSE_BIT)

/* Get/set size at footer */
#define get_foot(p, s)	(((mchunkptr)((char *)(p) + (s)))->prev_foot)
#define set_foot(p, s)	(((mchunkptr)((char *)(p) + (s)))->prev_foot = (s))

/* Set size, pinuse bit, and foot */
#define set_size_and_pinuse_of_free_chunk(p, s)\
  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))

/* Set size, pinuse bit, foot, and clear next pinuse */
#define set_free_with_pinuse(p, s, n)\
  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))

#define is_direct(p)\
  (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_DIRECT_BIT))

/* Get the internal overhead associated with chunk p */
#define overhead_for(p)\
  (is_direct(p)? DIRECT_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
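/* Example head word: a 48 byte in-use chunk whose predecessor is also in
** use has head = 48|PINUSE_BIT|CINUSE_BIT = 51; chunksize() masks the two
** low bits back off.
*/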
/* ---------------------- Overlaid data structures ----------------------- */

struct malloc_tree_chunk {
  /* The first four fields must be compatible with malloc_chunk */
  size_t                    prev_foot;
  size_t                    head;
  struct malloc_tree_chunk *fd;
  struct malloc_tree_chunk *bk;

  struct malloc_tree_chunk *child[2];
  struct malloc_tree_chunk *parent;
  bindex_t                  index;
};

typedef struct malloc_tree_chunk  tchunk;
typedef struct malloc_tree_chunk *tchunkptr;
typedef struct malloc_tree_chunk *tbinptr;  /* The type of bins of trees */

/* A little helper macro for trees */
#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
/* ----------------------------- Segments -------------------------------- */

struct malloc_segment {
  char        *base;             /* base address */
  size_t       size;             /* allocated size */
  struct malloc_segment *next;   /* ptr to next segment */
};

typedef struct malloc_segment  msegment;
typedef struct malloc_segment *msegmentptr;
/* ---------------------------- malloc_state ----------------------------- */

/* Bin types, widths and sizes */
#define NSMALLBINS		(32U)
#define NTREEBINS		(32U)
#define SMALLBIN_SHIFT		(3U)
#define SMALLBIN_WIDTH		(SIZE_T_ONE << SMALLBIN_SHIFT)
#define TREEBIN_SHIFT		(8U)
#define MIN_LARGE_SIZE		(SIZE_T_ONE << TREEBIN_SHIFT)
#define MAX_SMALL_SIZE		(MIN_LARGE_SIZE - SIZE_T_ONE)
#define MAX_SMALL_REQUEST	(MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
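/* So smallbins hold exact sizes 8*i for i < 32 (chunks below 256 bytes),
** e.g. a 112 byte chunk lives in smallbin 14, while anything of 256 bytes
** or more goes into one of the 32 treebins, two bins per power of two.
*/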
struct malloc_state {
  binmap_t   smallmap;
  binmap_t   treemap;
  size_t     dvsize;
  size_t     topsize;
  mchunkptr  dv;
  mchunkptr  top;
  size_t     trim_check;
  size_t     release_checks;
  mchunkptr  smallbins[(NSMALLBINS+1)*2];
  tbinptr    treebins[NTREEBINS];
  msegment   seg;
};

typedef struct malloc_state *mstate;

#define is_initialized(M)	((M)->top != 0)
/* -------------------------- system alloc setup ------------------------- */

/* page-align a size */
#define page_align(S)\
  (((S) + (LJ_PAGESIZE - SIZE_T_ONE)) & ~(LJ_PAGESIZE - SIZE_T_ONE))

/* granularity-align a size */
#define granularity_align(S)\
  (((S) + (DEFAULT_GRANULARITY - SIZE_T_ONE))\
   & ~(DEFAULT_GRANULARITY - SIZE_T_ONE))

#if LJ_TARGET_WINDOWS
#define mmap_align(S)	granularity_align(S)
#else
#define mmap_align(S)	page_align(S)
#endif
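/* E.g. with the 128KB DEFAULT_GRANULARITY:
**   granularity_align(1)      = 131072
**   granularity_align(131072) = 131072
**   granularity_align(131073) = 262144
*/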
/* True if segment S holds address A */
#define segment_holds(S, A)\
  ((char *)(A) >= S->base && (char *)(A) < S->base + S->size)

/* Return segment holding given address */
static msegmentptr segment_holding(mstate m, char *addr)
{
  msegmentptr sp = &m->seg;
  for (;;) {
    if (addr >= sp->base && addr < sp->base + sp->size)
      return sp;
    if ((sp = sp->next) == 0)
      return 0;
  }
}
/* Return true if segment contains a segment link */
static int has_segment_link(mstate m, msegmentptr ss)
{
  msegmentptr sp = &m->seg;
  for (;;) {
    if ((char *)sp >= ss->base && (char *)sp < ss->base + ss->size)
      return 1;
    if ((sp = sp->next) == 0)
      return 0;
  }
}
/*
  TOP_FOOT_SIZE is padding at the end of a segment, including space
  that may be needed to place segment records and fenceposts when new
  noncontiguous segments are added.
*/
#define TOP_FOOT_SIZE\
  (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
/* ---------------------------- Indexing Bins ---------------------------- */

#define is_small(s)		(((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
#define small_index(s)		((s) >> SMALLBIN_SHIFT)
#define small_index2size(i)	((i) << SMALLBIN_SHIFT)
#define MIN_SMALL_INDEX		(small_index(MIN_CHUNK_SIZE))

/* addressing by index. See above about smallbin repositioning */
#define smallbin_at(M, i)	((sbinptr)((char *)&((M)->smallbins[(i)<<1])))
#define treebin_at(M,i)		(&((M)->treebins[i]))
/* assign tree index for size S to variable I */
#define compute_tree_index(S, I)\
{\
  unsigned int X = (unsigned int)(S >> TREEBIN_SHIFT);\
  if (X == 0) {\
    I = 0;\
  } else if (X > 0xFFFF) {\
    I = NTREEBINS-1;\
  } else {\
    unsigned int K = lj_fls(X);\
    I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
  }\
}
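/* Worked example: for S = 768, X = 768 >> 8 = 3, K = lj_fls(3) = 1, so
** I = (1 << 1) + ((768 >> 8) & 1) = 3. Treebin 3 indeed spans [768, 1024),
** cf. minsize_for_tree_index(3) = 768 below.
*/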
/* Bit representing maximum resolved size in a treebin at i */
#define bit_for_tree_index(i) \
  (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)

/* Shift placing maximum resolved bit in a treebin at i as sign bit */
#define leftshift_for_tree_index(i) \
  ((i == NTREEBINS-1)? 0 : \
   ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))

/* The size of the smallest chunk held in bin with index i */
#define minsize_for_tree_index(i) \
  ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
   (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
/* ------------------------ Operations on bin maps ----------------------- */

/* bit corresponding to given index */
#define idx2bit(i)		((binmap_t)(1) << (i))

/* Mark/Clear bits with given index */
#define mark_smallmap(M,i)	((M)->smallmap |=  idx2bit(i))
#define clear_smallmap(M,i)	((M)->smallmap &= ~idx2bit(i))
#define smallmap_is_marked(M,i)	((M)->smallmap &   idx2bit(i))

#define mark_treemap(M,i)	((M)->treemap  |=  idx2bit(i))
#define clear_treemap(M,i)	((M)->treemap  &= ~idx2bit(i))
#define treemap_is_marked(M,i)	((M)->treemap  &   idx2bit(i))

/* mask with all bits to left of least bit of x on */
#define left_bits(x)		((x<<1) | (~(x<<1)+1))
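/* left_bits relies on two's complement: y | (~y+1) = y | -y keeps the
** lowest set bit of y and everything above it. With y = x<<1 that is all
** bits strictly left of x, e.g. left_bits(0x4) = 0xfffffff8 on a 32 bit
** binmap_t, i.e. every bin index greater than 2.
*/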
/* Set cinuse bit and pinuse bit of next chunk */
#define set_inuse(M,p,s)\
  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT)

/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
#define set_inuse_and_pinuse(M,p,s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT)

/* Set size, cinuse and pinuse bit of this chunk */
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
/* ----------------------- Operations on smallbins ----------------------- */

/* Link a free chunk into a smallbin */
#define insert_small_chunk(M, P, S) {\
  bindex_t I = small_index(S);\
  mchunkptr B = smallbin_at(M, I);\
  mchunkptr F = B;\
  if (!smallmap_is_marked(M, I))\
    mark_smallmap(M, I);\
  else\
    F = B->fd;\
  B->fd = P;\
  F->bk = P;\
  P->fd = F;\
  P->bk = B;\
}
/* Unlink a chunk from a smallbin */
#define unlink_small_chunk(M, P, S) {\
  mchunkptr F = P->fd;\
  mchunkptr B = P->bk;\
  bindex_t I = small_index(S);\
  if (F == B) {\
    clear_smallmap(M, I);\
  } else {\
    F->bk = B;\
    B->fd = F;\
  }\
}
/* Unlink the first chunk from a smallbin */
#define unlink_first_small_chunk(M, B, P, I) {\
  mchunkptr F = P->fd;\
  if (B == F) {\
    clear_smallmap(M, I);\
  } else {\
    B->fd = F;\
    F->bk = B;\
  }\
}
/* Replace dv node, binning the old one */
/* Used only when dvsize known to be small */
#define replace_dv(M, P, S) {\
  size_t DVS = M->dvsize;\
  if (DVS != 0) {\
    mchunkptr DV = M->dv;\
    insert_small_chunk(M, DV, DVS);\
  }\
  M->dvsize = S;\
  M->dv = P;\
}
/* ------------------------- Operations on trees ------------------------- */

/* Insert chunk into tree */
#define insert_large_chunk(M, X, S) {\
  tbinptr *H;\
  bindex_t I;\
  compute_tree_index(S, I);\
  H = treebin_at(M, I);\
  X->index = I;\
  X->child[0] = X->child[1] = 0;\
  if (!treemap_is_marked(M, I)) {\
    mark_treemap(M, I);\
    *H = X;\
    X->parent = (tchunkptr)H;\
    X->fd = X->bk = X;\
  } else {\
    tchunkptr T = *H;\
    size_t K = S << leftshift_for_tree_index(I);\
    for (;;) {\
      if (chunksize(T) != S) {\
	tchunkptr *C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
	K <<= 1;\
	if (*C != 0) {\
	  T = *C;\
	} else {\
	  *C = X;\
	  X->parent = T;\
	  X->fd = X->bk = X;\
	  break;\
	}\
      } else {\
	tchunkptr F = T->fd;\
	T->fd = F->bk = X;\
	X->fd = F;\
	X->bk = T;\
	X->parent = 0;\
	break;\
      }\
    }\
  }\
}
#define unlink_large_chunk(M, X) {\
  tchunkptr XP = X->parent;\
  tchunkptr R;\
  if (X->bk != X) {\
    tchunkptr F = X->fd;\
    R = X->bk;\
    F->bk = R;\
    R->fd = F;\
  } else {\
    tchunkptr *RP;\
    if (((R = *(RP = &(X->child[1]))) != 0) ||\
	((R = *(RP = &(X->child[0]))) != 0)) {\
      tchunkptr *CP;\
      while ((*(CP = &(R->child[1])) != 0) ||\
	     (*(CP = &(R->child[0])) != 0)) {\
	R = *(RP = CP);\
      }\
      *RP = 0;\
    }\
  }\
  if (XP != 0) {\
    tbinptr *H = treebin_at(M, X->index);\
    if (X == *H) {\
      if ((*H = R) == 0) \
	clear_treemap(M, X->index);\
    } else {\
      if (XP->child[0] == X) \
	XP->child[0] = R;\
      else \
	XP->child[1] = R;\
    }\
    if (R != 0) {\
      tchunkptr C0, C1;\
      R->parent = XP;\
      if ((C0 = X->child[0]) != 0) {\
	R->child[0] = C0;\
	C0->parent = R;\
      }\
      if ((C1 = X->child[1]) != 0) {\
	R->child[1] = C1;\
	C1->parent = R;\
      }\
    }\
  }\
}
/* Relays to large vs small bin operations */

#define insert_chunk(M, P, S)\
  if (is_small(S)) { insert_small_chunk(M, P, S)\
  } else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }

#define unlink_chunk(M, P, S)\
  if (is_small(S)) { unlink_small_chunk(M, P, S)\
  } else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
/* -----------------------  Direct-mmapping chunks ----------------------- */

static void *direct_alloc(size_t nb)
{
  size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  if (LJ_LIKELY(mmsize > nb)) {     /* Check for wrap around 0 */
    char *mm = (char *)(DIRECT_MMAP(mmsize));
    if (mm != CMFAIL) {
      size_t offset = align_offset(chunk2mem(mm));
      size_t psize = mmsize - offset - DIRECT_FOOT_PAD;
      mchunkptr p = (mchunkptr)(mm + offset);
      p->prev_foot = offset | IS_DIRECT_BIT;
      p->head = psize|CINUSE_BIT;
      chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
      chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
      return chunk2mem(p);
    }
  }
  return NULL;
}
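/* Resulting direct chunk layout (offsets from the mmap base mm):
**
**   p = mm + offset        p->prev_foot = offset|IS_DIRECT_BIT
**                          p->head      = psize|CINUSE_BIT (pinuse clear)
**   p + psize              fencepost (FENCEPOST_HEAD)
**   p + psize + word       fencepost (head = 0)
**
** A clear pinuse bit plus IS_DIRECT_BIT in prev_foot is what is_direct()
** tests, so free() can recover the mmap base as (char *)p - offset.
*/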
static mchunkptr direct_resize(mchunkptr oldp, size_t nb)
{
  size_t oldsize = chunksize(oldp);
  if (is_small(nb)) /* Can't shrink direct regions below small size */
    return NULL;
  /* Keep old chunk if big enough but not too big */
  if (oldsize >= nb + SIZE_T_SIZE &&
      (oldsize - nb) <= (DEFAULT_GRANULARITY << 1)) {
    return oldp;
  } else {
    size_t offset = oldp->prev_foot & ~IS_DIRECT_BIT;
    size_t oldmmsize = oldsize + offset + DIRECT_FOOT_PAD;
    size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
    char *cp = (char *)CALL_MREMAP((char *)oldp - offset,
				   oldmmsize, newmmsize, CALL_MREMAP_MV);
    if (cp != CMFAIL) {
      mchunkptr newp = (mchunkptr)(cp + offset);
      size_t psize = newmmsize - offset - DIRECT_FOOT_PAD;
      newp->head = psize|CINUSE_BIT;
      chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
      chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
      return newp;
    }
  }
  return NULL;
}
/* -------------------------- mspace management -------------------------- */

/* Initialize top chunk and its size */
static void init_top(mstate m, mchunkptr p, size_t psize)
{
  /* Ensure alignment */
  size_t offset = align_offset(chunk2mem(p));
  p = (mchunkptr)((char *)p + offset);
  psize -= offset;

  m->top = p;
  m->topsize = psize;
  p->head = psize | PINUSE_BIT;
  /* set size of fake trailing chunk holding overhead space only once */
  chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
  m->trim_check = DEFAULT_TRIM_THRESHOLD; /* reset on each update */
}
/* Initialize bins for a new mstate that is otherwise zeroed out */
static void init_bins(mstate m)
{
  /* Establish circular links for smallbins */
  bindex_t i;
  for (i = 0; i < NSMALLBINS; i++) {
    sbinptr bin = smallbin_at(m,i);
    bin->fd = bin->bk = bin;
  }
}
/* Allocate chunk and prepend remainder with chunk in successor base. */
static void *prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb)
{
  mchunkptr p = align_as_chunk(newbase);
  mchunkptr oldfirst = align_as_chunk(oldbase);
  size_t psize = (size_t)((char *)oldfirst - (char *)p);
  mchunkptr q = chunk_plus_offset(p, nb);
  size_t qsize = psize - nb;
  set_size_and_pinuse_of_inuse_chunk(m, p, nb);

  /* consolidate remainder with first chunk of old base */
  if (oldfirst == m->top) {
    size_t tsize = m->topsize += qsize;
    m->top = q;
    q->head = tsize | PINUSE_BIT;
  } else if (oldfirst == m->dv) {
    size_t dsize = m->dvsize += qsize;
    m->dv = q;
    set_size_and_pinuse_of_free_chunk(q, dsize);
  } else {
    if (!cinuse(oldfirst)) {
      size_t nsize = chunksize(oldfirst);
      unlink_chunk(m, oldfirst, nsize);
      oldfirst = chunk_plus_offset(oldfirst, nsize);
      qsize += nsize;
    }
    set_free_with_pinuse(q, qsize, oldfirst);
    insert_chunk(m, q, qsize);
  }

  return chunk2mem(p);
}
/* Add a segment to hold a new noncontiguous region */
static void add_segment(mstate m, char *tbase, size_t tsize)
{
  /* Determine locations and sizes of segment, fenceposts, old top */
  char *old_top = (char *)m->top;
  msegmentptr oldsp = segment_holding(m, old_top);
  char *old_end = oldsp->base + oldsp->size;
  size_t ssize = pad_request(sizeof(struct malloc_segment));
  char *rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  size_t offset = align_offset(chunk2mem(rawsp));
  char *asp = rawsp + offset;
  char *csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
  mchunkptr sp = (mchunkptr)csp;
  msegmentptr ss = (msegmentptr)(chunk2mem(sp));
  mchunkptr tnext = chunk_plus_offset(sp, ssize);
  mchunkptr p = tnext;

  /* reset top to new space */
  init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);

  /* Set up segment record */
  set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
  *ss = m->seg; /* Push current record */
  m->seg.base = tbase;
  m->seg.size = tsize;
  m->seg.next = ss;

  /* Insert trailing fenceposts */
  for (;;) {
    mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
    p->head = FENCEPOST_HEAD;
    if ((char *)(&(nextp->head)) < old_end)
      p = nextp;
    else
      break;
  }

  /* Insert the rest of old top into a bin as an ordinary free chunk */
  if (csp != old_top) {
    mchunkptr q = (mchunkptr)old_top;
    size_t psize = (size_t)(csp - old_top);
    mchunkptr tn = chunk_plus_offset(q, psize);
    set_free_with_pinuse(q, psize, tn);
    insert_chunk(m, q, psize);
  }
}
/* -------------------------- System allocation -------------------------- */

static void *alloc_sys(mstate m, size_t nb)
{
  char *tbase = CMFAIL;
  size_t tsize = 0;

  /* Directly map large chunks */
  if (LJ_UNLIKELY(nb >= DEFAULT_MMAP_THRESHOLD)) {
    void *mem = direct_alloc(nb);
    if (mem != 0)
      return mem;
  }

  {
    size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
    size_t rsize = granularity_align(req);
    if (LJ_LIKELY(rsize > nb)) { /* Fail if wraps around zero */
      char *mp = (char *)(CALL_MMAP(rsize));
      if (mp != CMFAIL) {
	tbase = mp;
	tsize = rsize;
      }
    }
  }

  if (tbase != CMFAIL) {
    msegmentptr sp = &m->seg;
    /* Try to merge with an existing segment */
    while (sp != 0 && tbase != sp->base + sp->size)
      sp = sp->next;
    if (sp != 0 && segment_holds(sp, m->top)) { /* append */
      sp->size += tsize;
      init_top(m, m->top, m->topsize + tsize);
    } else {
      sp = &m->seg;
      while (sp != 0 && sp->base != tbase + tsize)
	sp = sp->next;
      if (sp != 0) {
	char *oldbase = sp->base;
	sp->base = tbase;
	sp->size += tsize;
	return prepend_alloc(m, tbase, oldbase, nb);
      } else {
	add_segment(m, tbase, tsize);
      }
    }

    if (nb < m->topsize) { /* Allocate from new or extended top space */
      size_t rsize = m->topsize -= nb;
      mchunkptr p = m->top;
      mchunkptr r = m->top = chunk_plus_offset(p, nb);
      r->head = rsize | PINUSE_BIT;
      set_size_and_pinuse_of_inuse_chunk(m, p, nb);
      return chunk2mem(p);
    }
  }

  return NULL;
}
/* ----------------------- system deallocation -------------------------- */

/* Unmap and unlink any mmapped segments that don't contain used chunks */
static size_t release_unused_segments(mstate m)
{
  size_t released = 0;
  size_t nsegs = 0;
  msegmentptr pred = &m->seg;
  msegmentptr sp = pred->next;
  while (sp != 0) {
    char *base = sp->base;
    size_t size = sp->size;
    msegmentptr next = sp->next;
    nsegs++;
    {
      mchunkptr p = align_as_chunk(base);
      size_t psize = chunksize(p);
      /* Can unmap if first chunk holds entire segment and not pinned */
      if (!cinuse(p) && (char *)p + psize >= base + size - TOP_FOOT_SIZE) {
	tchunkptr tp = (tchunkptr)p;
	if (p == m->dv) {
	  m->dv = 0;
	  m->dvsize = 0;
	} else {
	  unlink_large_chunk(m, tp);
	}
	if (CALL_MUNMAP(base, size) == 0) {
	  released += size;
	  /* unlink obsoleted record */
	  sp = pred;
	  sp->next = next;
	} else { /* back out if cannot unmap */
	  insert_large_chunk(m, tp, psize);
	}
      }
    }
    pred = sp;
    sp = next;
  }
  /* Reset check counter */
  m->release_checks = nsegs > MAX_RELEASE_CHECK_RATE ?
		      nsegs : MAX_RELEASE_CHECK_RATE;
  return released;
}
static int alloc_trim(mstate m, size_t pad)
{
  size_t released = 0;
  if (pad < MAX_REQUEST && is_initialized(m)) {
    pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */

    if (m->topsize > pad) {
      /* Shrink top space in granularity-size units, keeping at least one */
      size_t unit = DEFAULT_GRANULARITY;
      size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
		      SIZE_T_ONE) * unit;
      msegmentptr sp = segment_holding(m, (char *)m->top);

      if (sp->size >= extra &&
	  !has_segment_link(m, sp)) { /* can't shrink if pinned */
	size_t newsize = sp->size - extra;
	/* Prefer mremap, fall back to munmap */
	if ((CALL_MREMAP(sp->base, sp->size, newsize, CALL_MREMAP_NOMOVE) != MFAIL) ||
	    (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
	  released = extra;
	}
      }

      if (released != 0) {
	sp->size -= released;
	init_top(m, m->top, m->topsize - released);
      }
    }

    /* Unmap any unused mmapped segments */
    released += release_unused_segments(m);

    /* On failure, disable autotrim to avoid repeated failed future calls */
    if (released == 0 && m->topsize > m->trim_check)
      m->trim_check = MAX_SIZE_T;
  }

  return (released != 0)? 1 : 0;
}
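/* E.g. with the 128KB granule and m->topsize - pad = 300KB:
**   extra = (ceil(300KB/128KB) - 1) * 128KB = 256KB
** so trimming always leaves at least one (partial) granule of top space.
*/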
/* ---------------------------- malloc support --------------------------- */

/* allocate a large request from the best fitting chunk in a treebin */
static void *tmalloc_large(mstate m, size_t nb)
{
  tchunkptr v = 0;
  size_t rsize = ~nb+1; /* Unsigned negation */
  tchunkptr t;
  bindex_t idx;
  compute_tree_index(nb, idx);

  if ((t = *treebin_at(m, idx)) != 0) {
    /* Traverse tree for this bin looking for node with size == nb */
    size_t sizebits = nb << leftshift_for_tree_index(idx);
    tchunkptr rst = 0;  /* The deepest untaken right subtree */
    for (;;) {
      tchunkptr rt;
      size_t trem = chunksize(t) - nb;
      if (trem < rsize) {
	v = t;
	if ((rsize = trem) == 0)
	  break;
      }
      rt = t->child[1];
      t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
      if (rt != 0 && rt != t)
	rst = rt;
      if (t == 0) {
	t = rst; /* set t to least subtree holding sizes > nb */
	break;
      }
      sizebits <<= 1;
    }
  }

  if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
    binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
    if (leftbits != 0)
      t = *treebin_at(m, lj_ffs(leftbits));
  }

  while (t != 0) { /* find smallest of tree or subtree */
    size_t trem = chunksize(t) - nb;
    if (trem < rsize) {
      rsize = trem;
      v = t;
    }
    t = leftmost_child(t);
  }

  /* If dv is a better fit, return NULL so malloc will use it */
  if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
    mchunkptr r = chunk_plus_offset(v, nb);
    unlink_large_chunk(m, v);
    if (rsize < MIN_CHUNK_SIZE) {
      set_inuse_and_pinuse(m, v, (rsize + nb));
    } else {
      set_size_and_pinuse_of_inuse_chunk(m, v, nb);
      set_size_and_pinuse_of_free_chunk(r, rsize);
      insert_chunk(m, r, rsize);
    }
    return chunk2mem(v);
  }
  return NULL;
}
/* allocate a small request from the best fitting chunk in a treebin */
static void *tmalloc_small(mstate m, size_t nb)
{
  tchunkptr t, v;
  mchunkptr r;
  size_t rsize;
  bindex_t i = lj_ffs(m->treemap);

  v = t = *treebin_at(m, i);
  rsize = chunksize(t) - nb;

  while ((t = leftmost_child(t)) != 0) {
    size_t trem = chunksize(t) - nb;
    if (trem < rsize) {
      rsize = trem;
      v = t;
    }
  }

  r = chunk_plus_offset(v, nb);
  unlink_large_chunk(m, v);
  if (rsize < MIN_CHUNK_SIZE) {
    set_inuse_and_pinuse(m, v, (rsize + nb));
  } else {
    set_size_and_pinuse_of_inuse_chunk(m, v, nb);
    set_size_and_pinuse_of_free_chunk(r, rsize);
    replace_dv(m, r, rsize);
  }
  return chunk2mem(v);
}

/* ----------------------------------------------------------------------- */
void *lj_alloc_create(void)
{
  size_t tsize = DEFAULT_GRANULARITY;
  char *tbase;
  INIT_MMAP();
  tbase = (char *)(CALL_MMAP(tsize));
  if (tbase != CMFAIL) {
    size_t msize = pad_request(sizeof(struct malloc_state));
    mchunkptr mn;
    mchunkptr msp = align_as_chunk(tbase);
    mstate m = (mstate)(chunk2mem(msp));
    memset(m, 0, msize);
    msp->head = (msize|PINUSE_BIT|CINUSE_BIT);
    m->seg.base = tbase;
    m->seg.size = tsize;
    m->release_checks = MAX_RELEASE_CHECK_RATE;
    init_bins(m);
    mn = next_chunk(mem2chunk(m));
    init_top(m, mn, (size_t)((tbase + tsize) - (char *)mn) - TOP_FOOT_SIZE);
    return m;
  }
  return NULL;
}
void lj_alloc_destroy(void *msp)
{
  mstate ms = (mstate)msp;
  msegmentptr sp = &ms->seg;
  while (sp != 0) {
    char *base = sp->base;
    size_t size = sp->size;
    sp = sp->next;
    CALL_MUNMAP(base, size);
  }
}
static LJ_NOINLINE void *lj_alloc_malloc(void *msp, size_t nsize)
{
  mstate ms = (mstate)msp;
  void *mem;
  size_t nb;
  if (nsize <= MAX_SMALL_REQUEST) {
    bindex_t idx;
    binmap_t smallbits;
    nb = (nsize < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(nsize);
    idx = small_index(nb);
    smallbits = ms->smallmap >> idx;

    if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
      mchunkptr b, p;
      idx += ~smallbits & 1;       /* Uses next bin if idx empty */
      b = smallbin_at(ms, idx);
      p = b->fd;
      unlink_first_small_chunk(ms, b, p, idx);
      set_inuse_and_pinuse(ms, p, small_index2size(idx));
      mem = chunk2mem(p);
      return mem;
    } else if (nb > ms->dvsize) {
      if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
	mchunkptr b, p, r;
	size_t rsize;
	binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
	bindex_t i = lj_ffs(leftbits);
	b = smallbin_at(ms, i);
	p = b->fd;
	unlink_first_small_chunk(ms, b, p, i);
	rsize = small_index2size(i) - nb;
	/* A fit here cannot be remainderless if size_t is 4 bytes */
	if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) {
	  set_inuse_and_pinuse(ms, p, small_index2size(i));
	} else {
	  set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
	  r = chunk_plus_offset(p, nb);
	  set_size_and_pinuse_of_free_chunk(r, rsize);
	  replace_dv(ms, r, rsize);
	}
	mem = chunk2mem(p);
	return mem;
      } else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
	return mem;
      }
    }
  } else if (nsize >= MAX_REQUEST) {
    nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
  } else {
    nb = pad_request(nsize);
    if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
      return mem;
    }
  }

  if (nb <= ms->dvsize) {
    size_t rsize = ms->dvsize - nb;
    mchunkptr p = ms->dv;
    if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
      mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
      ms->dvsize = rsize;
      set_size_and_pinuse_of_free_chunk(r, rsize);
      set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
    } else { /* exhaust dv */
      size_t dvs = ms->dvsize;
      ms->dvsize = 0;
      ms->dv = 0;
      set_inuse_and_pinuse(ms, p, dvs);
    }
    mem = chunk2mem(p);
    return mem;
  } else if (nb < ms->topsize) { /* Split top */
    size_t rsize = ms->topsize -= nb;
    mchunkptr p = ms->top;
    mchunkptr r = ms->top = chunk_plus_offset(p, nb);
    r->head = rsize | PINUSE_BIT;
    set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
    mem = chunk2mem(p);
    return mem;
  }

  return alloc_sys(ms, nb);
}
static LJ_NOINLINE void *lj_alloc_free(void *msp, void *ptr)
{
  if (ptr != 0) {
    mchunkptr p = mem2chunk(ptr);
    mstate fm = (mstate)msp;
    size_t psize = chunksize(p);
    mchunkptr next = chunk_plus_offset(p, psize);
    if (!pinuse(p)) {
      size_t prevsize = p->prev_foot;
      if ((prevsize & IS_DIRECT_BIT) != 0) {
	prevsize &= ~IS_DIRECT_BIT;
	psize += prevsize + DIRECT_FOOT_PAD;
	CALL_MUNMAP((char *)p - prevsize, psize);
	return NULL;
      } else {
	mchunkptr prev = chunk_minus_offset(p, prevsize);
	psize += prevsize;
	p = prev;
	/* consolidate backward */
	if (p != fm->dv) {
	  unlink_chunk(fm, p, prevsize);
	} else if ((next->head & INUSE_BITS) == INUSE_BITS) {
	  fm->dvsize = psize;
	  set_free_with_pinuse(p, psize, next);
	  return NULL;
	}
      }
    }
    if (!cinuse(next)) {  /* consolidate forward */
      if (next == fm->top) {
	size_t tsize = fm->topsize += psize;
	fm->top = p;
	p->head = tsize | PINUSE_BIT;
	if (p == fm->dv) {
	  fm->dv = 0;
	  fm->dvsize = 0;
	}
	if (tsize > fm->trim_check)
	  alloc_trim(fm, 0);
	return NULL;
      } else if (next == fm->dv) {
	size_t dsize = fm->dvsize += psize;
	fm->dv = p;
	set_size_and_pinuse_of_free_chunk(p, dsize);
	return NULL;
      } else {
	size_t nsize = chunksize(next);
	psize += nsize;
	unlink_chunk(fm, next, nsize);
	set_size_and_pinuse_of_free_chunk(p, psize);
	if (p == fm->dv) {
	  fm->dvsize = psize;
	  return NULL;
	}
      }
    } else {
      set_free_with_pinuse(p, psize, next);
    }

    if (is_small(psize)) {
      insert_small_chunk(fm, p, psize);
    } else {
      tchunkptr tp = (tchunkptr)p;
      insert_large_chunk(fm, tp, psize);
      if (--fm->release_checks == 0)
	release_unused_segments(fm);
    }
  }
  return NULL;
}
static LJ_NOINLINE void *lj_alloc_realloc(void *msp, void *ptr, size_t nsize)
{
  if (nsize >= MAX_REQUEST) {
    return NULL;
  } else {
    mstate m = (mstate)msp;
    mchunkptr oldp = mem2chunk(ptr);
    size_t oldsize = chunksize(oldp);
    mchunkptr next = chunk_plus_offset(oldp, oldsize);
    mchunkptr newp = 0;
    size_t nb = request2size(nsize);

    /* Try to either shrink or extend into top. Else malloc-copy-free */
    if (is_direct(oldp)) {
      newp = direct_resize(oldp, nb);  /* this may return NULL. */
    } else if (oldsize >= nb) { /* already big enough */
      size_t rsize = oldsize - nb;
      newp = oldp;
      if (rsize >= MIN_CHUNK_SIZE) {
	mchunkptr rem = chunk_plus_offset(newp, nb);
	set_inuse(m, newp, nb);
	set_inuse(m, rem, rsize);
	lj_alloc_free(m, chunk2mem(rem));
      }
    } else if (next == m->top && oldsize + m->topsize > nb) {
      /* Expand into top */
      size_t newsize = oldsize + m->topsize;
      size_t newtopsize = newsize - nb;
      mchunkptr newtop = chunk_plus_offset(oldp, nb);
      set_inuse(m, oldp, nb);
      newtop->head = newtopsize |PINUSE_BIT;
      m->top = newtop;
      m->topsize = newtopsize;
      newp = oldp;
    }

    if (newp != 0) {
      return chunk2mem(newp);
    } else {
      void *newmem = lj_alloc_malloc(m, nsize);
      if (newmem != 0) {
	size_t oc = oldsize - overhead_for(oldp);
	memcpy(newmem, ptr, oc < nsize ? oc : nsize);
	lj_alloc_free(m, ptr);
      }
      return newmem;
    }
  }
}
void *lj_alloc_f(void *msp, void *ptr, size_t osize, size_t nsize)
{
  (void)osize;
  if (nsize == 0) {
    return lj_alloc_free(msp, ptr);
  } else if (ptr == NULL) {
    return lj_alloc_malloc(msp, nsize);
  } else {
    return lj_alloc_realloc(msp, ptr, nsize);
  }
}
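/* A minimal usage sketch, assuming the standard lua_Alloc contract
** (lua_newstate and lua_close are from lua.h, not this file):
**
**   void *msp = lj_alloc_create();
**   if (msp) {
**     lua_State *L = lua_newstate(lj_alloc_f, msp);
**     ...
**     lua_close(L);
**     lj_alloc_destroy(msp);
**   }
*/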
#endif