/*
** Bundled memory allocator.
**
** Beware: this is a HEAVILY CUSTOMIZED version of dlmalloc.
** The original bears the following remark:
**
**   This is a version (aka dlmalloc) of malloc/free/realloc written by
**   Doug Lea and released to the public domain, as explained at
**   http://creativecommons.org/licenses/publicdomain.
**
**   * Version pre-2.8.4 Wed Mar 29 19:46:29 2006    (dl at gee)
**
** No additional copyright is claimed over the customizations.
** Please do NOT bother the original author about this version here!
**
** If you want to use dlmalloc in another project, you should get
** the original from: ftp://gee.cs.oswego.edu/pub/misc/
** For thread-safe derivatives, take a look at:
** - ptmalloc: http://www.malloc.de/
** - nedmalloc: http://www.nedprod.com/programs/portable/nedmalloc/
*/
/* To get the mremap prototype. Must be defined before any system includes. */
#if defined(__linux__) && !defined(_GNU_SOURCE)
#define _GNU_SOURCE
#endif
#ifndef LUAJIT_USE_SYSMALLOC

#define MAX_SIZE_T (~(size_t)0)
#define MALLOC_ALIGNMENT ((size_t)8U)

#define DEFAULT_GRANULARITY ((size_t)128U * (size_t)1024U)
#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
#define DEFAULT_MMAP_THRESHOLD ((size_t)128U * (size_t)1024U)
#define MAX_RELEASE_CHECK_RATE 255
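/* These tuning knobs mirror their dlmalloc counterparts: requests of
** DEFAULT_MMAP_THRESHOLD (128KB) or more bypass the bins and are mmapped
** directly (see alloc_sys below), segments grow in DEFAULT_GRANULARITY
** units, and top is only trimmed once it exceeds DEFAULT_TRIM_THRESHOLD.
*/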
/* ------------------- size_t and alignment properties -------------------- */

/* The byte and bit size of a size_t */
#define SIZE_T_SIZE (sizeof(size_t))
#define SIZE_T_BITSIZE (sizeof(size_t) << 3)

/* Some constants coerced to size_t */
/* Annoying but necessary to avoid errors on some platforms */
#define SIZE_T_ZERO ((size_t)0)
#define SIZE_T_ONE ((size_t)1)
#define SIZE_T_TWO ((size_t)2)
#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)

/* The bit mask value corresponding to MALLOC_ALIGNMENT */
#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)

/* the number of bytes to offset an address to align it */
#define align_offset(A)\
  ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
   ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
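/* For example, with the 8-byte MALLOC_ALIGNMENT above, align_offset(0x1003)
** is (8 - 3) & 7 = 5, so 0x1003 + 5 = 0x1008 is aligned, while
** align_offset(0x1008) is 0.
*/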
/* -------------------------- MMAP support ------------------------------- */

#define MFAIL ((void *)(MAX_SIZE_T))
#define CMFAIL ((char *)(MFAIL)) /* defined for convenience */

#define IS_DIRECT_BIT (SIZE_T_ONE)
#if LJ_TARGET_WINDOWS

#define WIN32_LEAN_AND_MEAN
#include <windows.h>

#if LJ_64 && !LJ_GC64
/* Undocumented, but hey, that's what we all love so much about Windows. */
typedef long (*PNTAVM)(HANDLE handle, void **addr, ULONG zbits,
                       size_t *size, ULONG alloctype, ULONG prot);
static PNTAVM ntavm;

/* Number of top bits of the lower 32 bits of an address that must be zero.
** Apparently 0 gives us full 64 bit addresses and 1 gives us the lower 2GB.
*/
#define NTAVM_ZEROBITS 1
static void INIT_MMAP(void)
{
  ntavm = (PNTAVM)GetProcAddress(GetModuleHandleA("ntdll.dll"),
                                 "NtAllocateVirtualMemory");
}
/* Win64 32 bit MMAP via NtAllocateVirtualMemory. */
static LJ_AINLINE void *CALL_MMAP(size_t size)
{
  DWORD olderr = GetLastError();
  void *ptr = NULL;
  long st = ntavm(INVALID_HANDLE_VALUE, &ptr, NTAVM_ZEROBITS, &size,
                  MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
  SetLastError(olderr);
  return st == 0 ? ptr : MFAIL;
}
/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
static LJ_AINLINE void *DIRECT_MMAP(size_t size)
{
  DWORD olderr = GetLastError();
  void *ptr = NULL;
  long st = ntavm(INVALID_HANDLE_VALUE, &ptr, NTAVM_ZEROBITS, &size,
                  MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, PAGE_READWRITE);
  SetLastError(olderr);
  return st == 0 ? ptr : MFAIL;
}
#else

#define INIT_MMAP() ((void)0)
/* Win32 MMAP via VirtualAlloc */
static LJ_AINLINE void *CALL_MMAP(size_t size)
{
  DWORD olderr = GetLastError();
  void *ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
  SetLastError(olderr);
  return ptr ? ptr : MFAIL;
}
/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
static LJ_AINLINE void *DIRECT_MMAP(size_t size)
{
  DWORD olderr = GetLastError();
  void *ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
                           PAGE_READWRITE);
  SetLastError(olderr);
  return ptr ? ptr : MFAIL;
}

#endif
/* This function supports releasing coalesced segments */
static LJ_AINLINE int CALL_MUNMAP(void *ptr, size_t size)
{
  DWORD olderr = GetLastError();
  MEMORY_BASIC_INFORMATION minfo;
  char *cptr = (char *)ptr;
  while (size) {
    if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
      return -1;
    if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
        minfo.State != MEM_COMMIT || minfo.RegionSize > size)
      return -1;
    if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
      return -1;
    cptr += minfo.RegionSize;
    size -= minfo.RegionSize;
  }
  SetLastError(olderr);
  return 0;
}

#else
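/* Why the loop: dlmalloc may hand back a coalesced range spanning several
** VirtualAlloc allocations, yet MEM_RELEASE frees exactly one allocation
** base at a time, so each underlying region is queried and released in turn.
*/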
#include <sys/mman.h>

#define MMAP_PROT (PROT_READ|PROT_WRITE)
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif
#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
#if LJ_64 && !LJ_GC64
/* 64 bit mode with 32 bit pointers needs special support for allocating
** memory in the lower 2GB.
*/

#if defined(MAP_32BIT)

#if defined(__sun__)
#define MMAP_REGION_START ((uintptr_t)0x1000)
#else
/* Actually this only gives us max. 1GB in current Linux kernels. */
#define MMAP_REGION_START ((uintptr_t)0)
#endif
static LJ_AINLINE void *CALL_MMAP(size_t size)
{
  int olderr = errno;
  void *ptr = mmap((void *)MMAP_REGION_START, size, MMAP_PROT,
                   MAP_32BIT|MMAP_FLAGS, -1, 0);
  errno = olderr;
  return ptr;
}
#elif LJ_TARGET_OSX || LJ_TARGET_PS4 || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) || defined(__sun__) || defined(__CYGWIN__)

/* OSX and FreeBSD mmap() use a naive first-fit linear search.
** That's perfect for us. Except that -pagezero_size must be set for OSX,
** otherwise the lower 4GB are blocked. And the 32GB RLIMIT_DATA needs
** to be reduced to 250MB on FreeBSD.
*/
#if LJ_TARGET_OSX || defined(__DragonFly__)
#define MMAP_REGION_START ((uintptr_t)0x10000)
#elif LJ_TARGET_PS4
#define MMAP_REGION_START ((uintptr_t)0x4000)
#else
#define MMAP_REGION_START ((uintptr_t)0x10000000)
#endif
#define MMAP_REGION_END ((uintptr_t)0x80000000)

#if (defined(__FreeBSD__) || defined(__FreeBSD_kernel__)) && !LJ_TARGET_PS4
#include <sys/resource.h>
#endif
static LJ_AINLINE void *CALL_MMAP(size_t size)
{
  int olderr = errno;
  /* Hint for next allocation. Doesn't need to be thread-safe. */
  static uintptr_t alloc_hint = MMAP_REGION_START;
  int retry = 0;
#if (defined(__FreeBSD__) || defined(__FreeBSD_kernel__)) && !LJ_TARGET_PS4
  static int rlimit_modified = 0;
  if (LJ_UNLIKELY(rlimit_modified == 0)) {
    struct rlimit rlim;
    rlim.rlim_cur = rlim.rlim_max = MMAP_REGION_START;
    setrlimit(RLIMIT_DATA, &rlim);  /* Ignore result. May fail below. */
    rlimit_modified = 1;
  }
#endif
  for (;;) {
    void *p = mmap((void *)alloc_hint, size, MMAP_PROT, MMAP_FLAGS, -1, 0);
    if ((uintptr_t)p >= MMAP_REGION_START &&
        (uintptr_t)p + size < MMAP_REGION_END) {
      alloc_hint = (uintptr_t)p + size;
      errno = olderr;
      return p;
    }
    if (p != CMFAIL) munmap(p, size);
#if defined(__sun__) || defined(__DragonFly__)
    alloc_hint += 0x1000000;  /* Need near-exhaustive linear scan. */
    if (alloc_hint + size < MMAP_REGION_END) continue;
#endif
    if (retry) break;
    retry = 1;
    alloc_hint = MMAP_REGION_START;
  }
  errno = olderr;
  return CMFAIL;
}
#else

#error "NYI: need an equivalent of MAP_32BIT for this 64 bit OS"

#endif
#else

/* 32 bit mode and GC64 mode are easy. */
static LJ_AINLINE void *CALL_MMAP(size_t size)
{
  int olderr = errno;
  void *ptr = mmap(NULL, size, MMAP_PROT, MMAP_FLAGS, -1, 0);
  errno = olderr;
  return ptr;
}

#endif
#define INIT_MMAP() ((void)0)
#define DIRECT_MMAP(s) CALL_MMAP(s)

static LJ_AINLINE int CALL_MUNMAP(void *ptr, size_t size)
{
  int olderr = errno;
  int ret = munmap(ptr, size);
  errno = olderr;
  return ret;
}
#if LJ_TARGET_LINUX
/* Need to define _GNU_SOURCE to get the mremap prototype. */
static LJ_AINLINE void *CALL_MREMAP_(void *ptr, size_t osz, size_t nsz,
                                     int flags)
{
  int olderr = errno;
  ptr = mremap(ptr, osz, nsz, flags);
  errno = olderr;
  return ptr;
}

#define CALL_MREMAP(addr, osz, nsz, mv) CALL_MREMAP_((addr), (osz), (nsz), (mv))
#define CALL_MREMAP_NOMOVE 0
#define CALL_MREMAP_MAYMOVE 1
#if LJ_64 && !LJ_GC64
#define CALL_MREMAP_MV CALL_MREMAP_NOMOVE
#else
#define CALL_MREMAP_MV CALL_MREMAP_MAYMOVE
#endif
#endif

#endif

#ifndef CALL_MREMAP
#define CALL_MREMAP(addr, osz, nsz, mv) ((void)osz, MFAIL)
#endif
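/* On targets without mremap() the CALL_MREMAP fallback above always yields
** MFAIL, so callers such as direct_resize and alloc_trim simply take their
** munmap/copy fallback paths instead.
*/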
/* -----------------------  Chunk representations ------------------------ */

struct malloc_chunk {
  size_t prev_foot;  /* Size of previous chunk (if free). */
  size_t head;       /* Size and inuse bits. */
  struct malloc_chunk *fd;  /* double links -- used only if free. */
  struct malloc_chunk *bk;
};

typedef struct malloc_chunk mchunk;
typedef struct malloc_chunk *mchunkptr;
typedef struct malloc_chunk *sbinptr;  /* The type of bins of chunks */
typedef size_t bindex_t;               /* Described below */
typedef unsigned int binmap_t;         /* Described below */
typedef unsigned int flag_t;           /* The type of various bit flag sets */
/* ------------------- Chunks sizes and alignments ----------------------- */

#define MCHUNK_SIZE (sizeof(mchunk))

#define CHUNK_OVERHEAD (SIZE_T_SIZE)

/* Direct chunks need a second word of overhead ... */
#define DIRECT_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
/* ... and additional padding for fake next-chunk at foot */
#define DIRECT_FOOT_PAD (FOUR_SIZE_T_SIZES)

/* The smallest size we can malloc is an aligned minimal chunk */
#define MIN_CHUNK_SIZE\
  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)

/* conversion from malloc headers to user pointers, and back */
#define chunk2mem(p) ((void *)((char *)(p) + TWO_SIZE_T_SIZES))
#define mem2chunk(mem) ((mchunkptr)((char *)(mem) - TWO_SIZE_T_SIZES))
/* chunk associated with aligned address A */
#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))

/* Bounds on request (not chunk) sizes. */
#define MAX_REQUEST ((~MIN_CHUNK_SIZE+1) << 2)
#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)

/* pad request bytes into a usable size */
#define pad_request(req) \
  (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)

/* pad request, checking for minimum (but not maximum) */
#define request2size(req) \
  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
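/* For example, on a 64-bit build (SIZE_T_SIZE == 8, CHUNK_ALIGN_MASK == 7),
** request2size(24) = (24 + 8 + 7) & ~7 = 32: one size_t of header overhead,
** rounded up to the 8-byte alignment.
*/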
/* ------------------ Operations on head and foot fields ----------------- */

#define PINUSE_BIT (SIZE_T_ONE)
#define CINUSE_BIT (SIZE_T_TWO)
#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)

/* Head value for fenceposts */
#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)

/* extraction of fields from head words */
#define cinuse(p) ((p)->head & CINUSE_BIT)
#define pinuse(p) ((p)->head & PINUSE_BIT)
#define chunksize(p) ((p)->head & ~(INUSE_BITS))
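/* Chunk sizes are always multiples of MALLOC_ALIGNMENT, so the low bits of
** head are free to hold flags: bit 0 (PINUSE_BIT) records that the previous
** chunk is in use, bit 1 (CINUSE_BIT) that this one is. chunksize() masks
** both off.
*/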
#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
#define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT)

/* Treat space at ptr +/- offset as a chunk */
#define chunk_plus_offset(p, s) ((mchunkptr)(((char *)(p)) + (s)))
#define chunk_minus_offset(p, s) ((mchunkptr)(((char *)(p)) - (s)))

/* Ptr to next or previous physical malloc_chunk. */
#define next_chunk(p) ((mchunkptr)(((char *)(p)) + ((p)->head & ~INUSE_BITS)))
#define prev_chunk(p) ((mchunkptr)(((char *)(p)) - ((p)->prev_foot) ))

/* extract next chunk's pinuse bit */
#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)

/* Get/set size at footer */
#define get_foot(p, s) (((mchunkptr)((char *)(p) + (s)))->prev_foot)
#define set_foot(p, s) (((mchunkptr)((char *)(p) + (s)))->prev_foot = (s))

/* Set size, pinuse bit, and foot */
#define set_size_and_pinuse_of_free_chunk(p, s)\
  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))

/* Set size, pinuse bit, foot, and clear next pinuse */
#define set_free_with_pinuse(p, s, n)\
  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))

#define is_direct(p)\
  (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_DIRECT_BIT))

/* Get the internal overhead associated with chunk p */
#define overhead_for(p)\
  (is_direct(p)? DIRECT_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
/* ---------------------- Overlaid data structures ----------------------- */

struct malloc_tree_chunk {
  /* The first four fields must be compatible with malloc_chunk */
  size_t prev_foot;
  size_t head;
  struct malloc_tree_chunk *fd;
  struct malloc_tree_chunk *bk;

  struct malloc_tree_chunk *child[2];
  struct malloc_tree_chunk *parent;
  bindex_t index;
};

typedef struct malloc_tree_chunk tchunk;
typedef struct malloc_tree_chunk *tchunkptr;
typedef struct malloc_tree_chunk *tbinptr;  /* The type of bins of trees */

/* A little helper macro for trees */
#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
/* ----------------------------- Segments -------------------------------- */

struct malloc_segment {
  char *base;  /* base address */
  size_t size; /* allocated size */
  struct malloc_segment *next;  /* ptr to next segment */
};

typedef struct malloc_segment msegment;
typedef struct malloc_segment *msegmentptr;
/* ---------------------------- malloc_state ----------------------------- */

/* Bin types, widths and sizes */
#define NSMALLBINS (32U)
#define NTREEBINS (32U)
#define SMALLBIN_SHIFT (3U)
#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
#define TREEBIN_SHIFT (8U)
#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
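/* Thus the 32 smallbins hold exact sizes spaced SMALLBIN_WIDTH = 8 bytes
** apart below MIN_LARGE_SIZE = 256, while each of the 32 treebins covers a
** range of sizes from 256 bytes upwards.
*/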
struct malloc_state {
  binmap_t smallmap;
  binmap_t treemap;
  size_t dvsize;
  size_t topsize;
  mchunkptr dv;
  mchunkptr top;
  size_t trim_check;
  size_t release_checks;
  mchunkptr smallbins[(NSMALLBINS+1)*2];
  tbinptr treebins[NTREEBINS];
  msegment seg;
};

typedef struct malloc_state *mstate;
#define is_initialized(M) ((M)->top != 0)

/* -------------------------- system alloc setup ------------------------- */

/* page-align a size */
#define page_align(S)\
  (((S) + (LJ_PAGESIZE - SIZE_T_ONE)) & ~(LJ_PAGESIZE - SIZE_T_ONE))

/* granularity-align a size */
#define granularity_align(S)\
  (((S) + (DEFAULT_GRANULARITY - SIZE_T_ONE))\
   & ~(DEFAULT_GRANULARITY - SIZE_T_ONE))

#if LJ_TARGET_WINDOWS
#define mmap_align(S) granularity_align(S)
#else
#define mmap_align(S) page_align(S)
#endif
/* True if segment S holds address A */
#define segment_holds(S, A)\
  ((char *)(A) >= S->base && (char *)(A) < S->base + S->size)

/* Return segment holding given address */
static msegmentptr segment_holding(mstate m, char *addr)
{
  msegmentptr sp = &m->seg;
  for (;;) {
    if (addr >= sp->base && addr < sp->base + sp->size)
      return sp;
    if ((sp = sp->next) == 0)
      return 0;
  }
}
/* Return true if segment contains a segment link */
static int has_segment_link(mstate m, msegmentptr ss)
{
  msegmentptr sp = &m->seg;
  for (;;) {
    if ((char *)sp >= ss->base && (char *)sp < ss->base + ss->size)
      return 1;
    if ((sp = sp->next) == 0)
      return 0;
  }
}
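/* Segment records are stored inside the segments they describe (see
** add_segment below), so a segment holding another segment's record is
** effectively pinned: alloc_trim checks has_segment_link() before
** shrinking one.
*/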
/*
** TOP_FOOT_SIZE is padding at the end of a segment, including space
** that may be needed to place segment records and fenceposts when new
** noncontiguous segments are added.
*/
#define TOP_FOOT_SIZE\
  (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
/* ---------------------------- Indexing Bins ---------------------------- */

#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
#define small_index(s) ((s) >> SMALLBIN_SHIFT)
#define small_index2size(i) ((i) << SMALLBIN_SHIFT)
#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))

/* addressing by index. See above about smallbin repositioning */
#define smallbin_at(M, i) ((sbinptr)((char *)&((M)->smallbins[(i)<<1])))
#define treebin_at(M,i) (&((M)->treebins[i]))
/* assign tree index for size S to variable I */
#define compute_tree_index(S, I)\
{\
  unsigned int X = (unsigned int)(S >> TREEBIN_SHIFT);\
  if (X == 0) {\
    I = 0;\
  } else if (X > 0xFFFF) {\
    I = NTREEBINS-1;\
  } else {\
    unsigned int K = lj_fls(X);\
    I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
  }\
}
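/* Example: for S = 768, X = 768 >> 8 = 3 and K = lj_fls(3) = 1, giving
** I = (1 << 1) + ((768 >> 8) & 1) = 3. Each pair of treebins splits one
** power-of-two range in half; minsize_for_tree_index(3) below recovers
** exactly 768.
*/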
/* Bit representing maximum resolved size in a treebin at i */
#define bit_for_tree_index(i) \
  (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)

/* Shift placing maximum resolved bit in a treebin at i as sign bit */
#define leftshift_for_tree_index(i) \
  ((i == NTREEBINS-1)? 0 : \
   ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))

/* The size of the smallest chunk held in bin with index i */
#define minsize_for_tree_index(i) \
  ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
   (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
/* ------------------------ Operations on bin maps ----------------------- */

/* bit corresponding to given index */
#define idx2bit(i) ((binmap_t)(1) << (i))

/* Mark/Clear bits with given index */
#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))

#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))

/* mask with all bits to left of least bit of x on */
#define left_bits(x) ((x<<1) | (~(x<<1)+1))
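/* E.g. left_bits(0x08) = 0x10 | -0x10, i.e. every bit above bit 3: ANDed
** with smallmap or treemap this selects the non-empty bins holding strictly
** larger sizes than a given index.
*/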
/* Set cinuse bit and pinuse bit of next chunk */
#define set_inuse(M,p,s)\
  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT)

/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
#define set_inuse_and_pinuse(M,p,s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT)

/* Set size, cinuse and pinuse bit of this chunk */
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
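/* Only free chunks carry the size footer (set_foot above); for an in-use
** chunk the neighbour's PINUSE_BIT is the sole record of its status and,
** as in stock dlmalloc, the footer word can be reused as user data.
*/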
/* ----------------------- Operations on smallbins ----------------------- */

/* Link a free chunk into a smallbin */
#define insert_small_chunk(M, P, S) {\
  bindex_t I = small_index(S);\
  mchunkptr B = smallbin_at(M, I);\
  mchunkptr F = B;\
  if (!smallmap_is_marked(M, I))\
    mark_smallmap(M, I);\
  else\
    F = B->fd;\
  B->fd = P;\
  F->bk = P;\
  P->fd = F;\
  P->bk = B;\
}

/* Unlink a chunk from a smallbin */
#define unlink_small_chunk(M, P, S) {\
  mchunkptr F = P->fd;\
  mchunkptr B = P->bk;\
  bindex_t I = small_index(S);\
  if (F == B) {\
    clear_smallmap(M, I);\
  } else {\
    F->bk = B;\
    B->fd = F;\
  }\
}

/* Unlink the first chunk from a smallbin */
#define unlink_first_small_chunk(M, B, P, I) {\
  mchunkptr F = P->fd;\
  if (B == F) {\
    clear_smallmap(M, I);\
  } else {\
    B->fd = F;\
    F->bk = B;\
  }\
}

/* Replace dv node, binning the old one */
/* Used only when dvsize known to be small */
#define replace_dv(M, P, S) {\
  size_t DVS = M->dvsize;\
  if (DVS != 0) {\
    mchunkptr DV = M->dv;\
    insert_small_chunk(M, DV, DVS);\
  }\
  M->dvsize = S;\
  M->dv = P;\
}
/* ------------------------- Operations on trees ------------------------- */

/* Insert chunk into tree */
#define insert_large_chunk(M, X, S) {\
  tbinptr *H;\
  bindex_t I;\
  compute_tree_index(S, I);\
  H = treebin_at(M, I);\
  X->index = I;\
  X->child[0] = X->child[1] = 0;\
  if (!treemap_is_marked(M, I)) {\
    mark_treemap(M, I);\
    *H = X;\
    X->parent = (tchunkptr)H;\
    X->fd = X->bk = X;\
  } else {\
    tchunkptr T = *H;\
    size_t K = S << leftshift_for_tree_index(I);\
    for (;;) {\
      if (chunksize(T) != S) {\
        tchunkptr *C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
        K <<= 1;\
        if (*C != 0) {\
          T = *C;\
        } else {\
          *C = X;\
          X->parent = T;\
          X->fd = X->bk = X;\
          break;\
        }\
      } else {\
        tchunkptr F = T->fd;\
        T->fd = F->bk = X;\
        X->fd = F;\
        X->bk = T;\
        X->parent = 0;\
        break;\
      }\
    }\
  }\
}
#define unlink_large_chunk(M, X) {\
  tchunkptr XP = X->parent;\
  tchunkptr R;\
  if (X->bk != X) {\
    tchunkptr F = X->fd;\
    R = X->bk;\
    F->bk = R;\
    R->fd = F;\
  } else {\
    tchunkptr *RP;\
    if (((R = *(RP = &(X->child[1]))) != 0) ||\
        ((R = *(RP = &(X->child[0]))) != 0)) {\
      tchunkptr *CP;\
      while ((*(CP = &(R->child[1])) != 0) ||\
             (*(CP = &(R->child[0])) != 0)) {\
        R = *(RP = CP);\
      }\
      *RP = 0;\
    }\
  }\
  if (XP != 0) {\
    tbinptr *H = treebin_at(M, X->index);\
    if (X == *H) {\
      if ((*H = R) == 0) \
        clear_treemap(M, X->index);\
    } else {\
      if (XP->child[0] == X) \
        XP->child[0] = R;\
      else \
        XP->child[1] = R;\
    }\
    if (R != 0) {\
      tchunkptr C0, C1;\
      R->parent = XP;\
      if ((C0 = X->child[0]) != 0) {\
        R->child[0] = C0;\
        C0->parent = R;\
      }\
      if ((C1 = X->child[1]) != 0) {\
        R->child[1] = C1;\
        C1->parent = R;\
      }\
    }\
  }\
}
/* Relays to large vs small bin operations */

#define insert_chunk(M, P, S)\
  if (is_small(S)) { insert_small_chunk(M, P, S)\
  } else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }

#define unlink_chunk(M, P, S)\
  if (is_small(S)) { unlink_small_chunk(M, P, S)\
  } else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
/* -----------------------  Direct-mmapping chunks ----------------------- */

static void *direct_alloc(size_t nb)
{
  size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  if (LJ_LIKELY(mmsize > nb)) {  /* Check for wrap around 0 */
    char *mm = (char *)(DIRECT_MMAP(mmsize));
    if (mm != CMFAIL) {
      size_t offset = align_offset(chunk2mem(mm));
      size_t psize = mmsize - offset - DIRECT_FOOT_PAD;
      mchunkptr p = (mchunkptr)(mm + offset);
      p->prev_foot = offset | IS_DIRECT_BIT;
      p->head = psize|CINUSE_BIT;
      chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
      chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
      return chunk2mem(p);
    }
  }
  return NULL;
}
static mchunkptr direct_resize(mchunkptr oldp, size_t nb)
{
  size_t oldsize = chunksize(oldp);
  if (is_small(nb))  /* Can't shrink direct regions below small size */
    return NULL;
  /* Keep old chunk if big enough but not too big */
  if (oldsize >= nb + SIZE_T_SIZE &&
      (oldsize - nb) <= (DEFAULT_GRANULARITY >> 1)) {
    return oldp;
  } else {
    size_t offset = oldp->prev_foot & ~IS_DIRECT_BIT;
    size_t oldmmsize = oldsize + offset + DIRECT_FOOT_PAD;
    size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
    char *cp = (char *)CALL_MREMAP((char *)oldp - offset,
                                   oldmmsize, newmmsize, CALL_MREMAP_MV);
    if (cp != CMFAIL) {
      mchunkptr newp = (mchunkptr)(cp + offset);
      size_t psize = newmmsize - offset - DIRECT_FOOT_PAD;
      newp->head = psize|CINUSE_BIT;
      chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
      chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
      return newp;
    }
  }
  return NULL;
}
/* -------------------------- mspace management -------------------------- */

/* Initialize top chunk and its size */
static void init_top(mstate m, mchunkptr p, size_t psize)
{
  /* Ensure alignment */
  size_t offset = align_offset(chunk2mem(p));
  p = (mchunkptr)((char *)p + offset);
  psize -= offset;

  m->top = p;
  m->topsize = psize;
  p->head = psize | PINUSE_BIT;
  /* set size of fake trailing chunk holding overhead space only once */
  chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
  m->trim_check = DEFAULT_TRIM_THRESHOLD;  /* reset on each update */
}
/* Initialize bins for a new mstate that is otherwise zeroed out */
static void init_bins(mstate m)
{
  /* Establish circular links for smallbins */
  bindex_t i;
  for (i = 0; i < NSMALLBINS; i++) {
    sbinptr bin = smallbin_at(m,i);
    bin->fd = bin->bk = bin;
  }
}
/* Allocate chunk and prepend remainder with chunk in successor base. */
static void *prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb)
{
  mchunkptr p = align_as_chunk(newbase);
  mchunkptr oldfirst = align_as_chunk(oldbase);
  size_t psize = (size_t)((char *)oldfirst - (char *)p);
  mchunkptr q = chunk_plus_offset(p, nb);
  size_t qsize = psize - nb;
  set_size_and_pinuse_of_inuse_chunk(m, p, nb);

  /* consolidate remainder with first chunk of old base */
  if (oldfirst == m->top) {
    size_t tsize = m->topsize += qsize;
    m->top = q;
    q->head = tsize | PINUSE_BIT;
  } else if (oldfirst == m->dv) {
    size_t dsize = m->dvsize += qsize;
    m->dv = q;
    set_size_and_pinuse_of_free_chunk(q, dsize);
  } else {
    if (!cinuse(oldfirst)) {
      size_t nsize = chunksize(oldfirst);
      unlink_chunk(m, oldfirst, nsize);
      oldfirst = chunk_plus_offset(oldfirst, nsize);
      qsize += nsize;
    }
    set_free_with_pinuse(q, qsize, oldfirst);
    insert_chunk(m, q, qsize);
  }

  return chunk2mem(p);
}
/* Add a segment to hold a new noncontiguous region */
static void add_segment(mstate m, char *tbase, size_t tsize)
{
  /* Determine locations and sizes of segment, fenceposts, old top */
  char *old_top = (char *)m->top;
  msegmentptr oldsp = segment_holding(m, old_top);
  char *old_end = oldsp->base + oldsp->size;
  size_t ssize = pad_request(sizeof(struct malloc_segment));
  char *rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  size_t offset = align_offset(chunk2mem(rawsp));
  char *asp = rawsp + offset;
  char *csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
  mchunkptr sp = (mchunkptr)csp;
  msegmentptr ss = (msegmentptr)(chunk2mem(sp));
  mchunkptr tnext = chunk_plus_offset(sp, ssize);
  mchunkptr p = tnext;

  /* reset top to new space */
  init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);

  /* Set up segment record */
  set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
  *ss = m->seg;  /* Push current record */
  m->seg.base = tbase;
  m->seg.size = tsize;
  m->seg.next = ss;

  /* Insert trailing fenceposts */
  for (;;) {
    mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
    p->head = FENCEPOST_HEAD;
    if ((char *)(&(nextp->head)) < old_end)
      p = nextp;
    else
      break;
  }

  /* Insert the rest of old top into a bin as an ordinary free chunk */
  if (csp != old_top) {
    mchunkptr q = (mchunkptr)old_top;
    size_t psize = (size_t)(csp - old_top);
    mchunkptr tn = chunk_plus_offset(q, psize);
    set_free_with_pinuse(q, psize, tn);
    insert_chunk(m, q, psize);
  }
}
/* -------------------------- System allocation -------------------------- */

static void *alloc_sys(mstate m, size_t nb)
{
  char *tbase = CMFAIL;
  size_t tsize = 0;

  /* Directly map large chunks */
  if (LJ_UNLIKELY(nb >= DEFAULT_MMAP_THRESHOLD)) {
    void *mem = direct_alloc(nb);
    if (mem != 0)
      return mem;
  }

  {
    size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
    size_t rsize = granularity_align(req);
    if (LJ_LIKELY(rsize > nb)) {  /* Fail if wraps around zero */
      char *mp = (char *)(CALL_MMAP(rsize));
      if (mp != CMFAIL) {
        tbase = mp;
        tsize = rsize;
      }
    }
  }

  if (tbase != CMFAIL) {
    msegmentptr sp = &m->seg;
    /* Try to merge with an existing segment */
    while (sp != 0 && tbase != sp->base + sp->size)
      sp = sp->next;
    if (sp != 0 && segment_holds(sp, m->top)) {  /* append */
      sp->size += tsize;
      init_top(m, m->top, m->topsize + tsize);
    } else {
      sp = &m->seg;
      while (sp != 0 && sp->base != tbase + tsize)
        sp = sp->next;
      if (sp != 0) {
        char *oldbase = sp->base;
        sp->base = tbase;
        sp->size += tsize;
        return prepend_alloc(m, tbase, oldbase, nb);
      } else {
        add_segment(m, tbase, tsize);
      }
    }

    if (nb < m->topsize) {  /* Allocate from new or extended top space */
      size_t rsize = m->topsize -= nb;
      mchunkptr p = m->top;
      mchunkptr r = m->top = chunk_plus_offset(p, nb);
      r->head = rsize | PINUSE_BIT;
      set_size_and_pinuse_of_inuse_chunk(m, p, nb);
      return chunk2mem(p);
    }
  }

  return NULL;
}
/* -----------------------  system deallocation -------------------------- */

/* Unmap and unlink any mmapped segments that don't contain used chunks */
static size_t release_unused_segments(mstate m)
{
  size_t released = 0;
  size_t nsegs = 0;
  msegmentptr pred = &m->seg;
  msegmentptr sp = pred->next;
  while (sp != 0) {
    char *base = sp->base;
    size_t size = sp->size;
    msegmentptr next = sp->next;
    nsegs++;
    {
      mchunkptr p = align_as_chunk(base);
      size_t psize = chunksize(p);
      /* Can unmap if first chunk holds entire segment and not pinned */
      if (!cinuse(p) && (char *)p + psize >= base + size - TOP_FOOT_SIZE) {
        tchunkptr tp = (tchunkptr)p;
        if (p == m->dv) {
          m->dv = 0;
          m->dvsize = 0;
        } else {
          unlink_large_chunk(m, tp);
        }
        if (CALL_MUNMAP(base, size) == 0) {
          released += size;
          /* unlink obsoleted record */
          sp = pred;
          sp->next = next;
        } else {  /* back out if cannot unmap */
          insert_large_chunk(m, tp, psize);
        }
      }
    }
    pred = sp;
    sp = next;
  }
  /* Reset check counter */
  m->release_checks = nsegs > MAX_RELEASE_CHECK_RATE ?
                      nsegs : MAX_RELEASE_CHECK_RATE;
  return released;
}
static int alloc_trim(mstate m, size_t pad)
{
  size_t released = 0;
  if (pad < MAX_REQUEST && is_initialized(m)) {
    pad += TOP_FOOT_SIZE;  /* ensure enough room for segment overhead */

    if (m->topsize > pad) {
      /* Shrink top space in granularity-size units, keeping at least one */
      size_t unit = DEFAULT_GRANULARITY;
      size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
                      SIZE_T_ONE) * unit;
      msegmentptr sp = segment_holding(m, (char *)m->top);

      if (sp->size >= extra &&
          !has_segment_link(m, sp)) {  /* can't shrink if pinned */
        size_t newsize = sp->size - extra;
        /* Prefer mremap, fall back to munmap */
        if ((CALL_MREMAP(sp->base, sp->size, newsize, CALL_MREMAP_NOMOVE) != MFAIL) ||
            (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
          released = extra;
        }
      }

      if (released != 0) {
        sp->size -= released;
        init_top(m, m->top, m->topsize - released);
      }
    }

    /* Unmap any unused mmapped segments */
    released += release_unused_segments(m);

    /* On failure, disable autotrim to avoid repeated failed future calls */
    if (released == 0 && m->topsize > m->trim_check)
      m->trim_check = MAX_SIZE_T;
  }

  return (released != 0)? 1 : 0;
}
/* ---------------------------- malloc support --------------------------- */

/* allocate a large request from the best fitting chunk in a treebin */
static void *tmalloc_large(mstate m, size_t nb)
{
  tchunkptr v = 0;
  size_t rsize = ~nb+1;  /* Unsigned negation */
  tchunkptr t;
  bindex_t idx;
  compute_tree_index(nb, idx);

  if ((t = *treebin_at(m, idx)) != 0) {
    /* Traverse tree for this bin looking for node with size == nb */
    size_t sizebits = nb << leftshift_for_tree_index(idx);
    tchunkptr rst = 0;  /* The deepest untaken right subtree */
    for (;;) {
      tchunkptr rt;
      size_t trem = chunksize(t) - nb;
      if (trem < rsize) {
        v = t;
        if ((rsize = trem) == 0)
          break;
      }
      rt = t->child[1];
      t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
      if (rt != 0 && rt != t)
        rst = rt;
      if (t == 0) {
        t = rst;  /* set t to least subtree holding sizes > nb */
        break;
      }
      sizebits <<= 1;
    }
  }

  if (t == 0 && v == 0) {  /* set t to root of next non-empty treebin */
    binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
    if (leftbits != 0)
      t = *treebin_at(m, lj_ffs(leftbits));
  }

  while (t != 0) {  /* find smallest of tree or subtree */
    size_t trem = chunksize(t) - nb;
    if (trem < rsize) {
      rsize = trem;
      v = t;
    }
    t = leftmost_child(t);
  }

  /* If dv is a better fit, return NULL so malloc will use it */
  if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
    mchunkptr r = chunk_plus_offset(v, nb);
    unlink_large_chunk(m, v);
    if (rsize < MIN_CHUNK_SIZE) {
      set_inuse_and_pinuse(m, v, (rsize + nb));
    } else {
      set_size_and_pinuse_of_inuse_chunk(m, v, nb);
      set_size_and_pinuse_of_free_chunk(r, rsize);
      insert_chunk(m, r, rsize);
    }
    return chunk2mem(v);
  }
  return NULL;
}
/* allocate a small request from the best fitting chunk in a treebin */
static void *tmalloc_small(mstate m, size_t nb)
{
  tchunkptr t, v;
  mchunkptr r;
  size_t rsize;
  bindex_t i = lj_ffs(m->treemap);

  v = t = *treebin_at(m, i);
  rsize = chunksize(t) - nb;

  while ((t = leftmost_child(t)) != 0) {
    size_t trem = chunksize(t) - nb;
    if (trem < rsize) {
      rsize = trem;
      v = t;
    }
  }

  r = chunk_plus_offset(v, nb);
  unlink_large_chunk(m, v);
  if (rsize < MIN_CHUNK_SIZE) {
    set_inuse_and_pinuse(m, v, (rsize + nb));
  } else {
    set_size_and_pinuse_of_inuse_chunk(m, v, nb);
    set_size_and_pinuse_of_free_chunk(r, rsize);
    replace_dv(m, r, rsize);
  }
  return chunk2mem(v);
}
/* ----------------------------------------------------------------------- */

void *lj_alloc_create(void)
{
  size_t tsize = DEFAULT_GRANULARITY;
  char *tbase;
  INIT_MMAP();
  tbase = (char *)(CALL_MMAP(tsize));
  if (tbase != CMFAIL) {
    size_t msize = pad_request(sizeof(struct malloc_state));
    mchunkptr mn;
    mchunkptr msp = align_as_chunk(tbase);
    mstate m = (mstate)(chunk2mem(msp));
    memset(m, 0, msize);
    msp->head = (msize|PINUSE_BIT|CINUSE_BIT);
    m->seg.base = tbase;
    m->seg.size = tsize;
    m->release_checks = MAX_RELEASE_CHECK_RATE;
    init_bins(m);
    mn = next_chunk(mem2chunk(m));
    init_top(m, mn, (size_t)((tbase + tsize) - (char *)mn) - TOP_FOOT_SIZE);
    return m;
  }
  return NULL;
}
void lj_alloc_destroy(void *msp)
{
  mstate ms = (mstate)msp;
  msegmentptr sp = &ms->seg;
  while (sp != 0) {
    char *base = sp->base;
    size_t size = sp->size;
    sp = sp->next;
    CALL_MUNMAP(base, size);
  }
}
static LJ_NOINLINE void *lj_alloc_malloc(void *msp, size_t nsize)
{
  mstate ms = (mstate)msp;
  void *mem;
  size_t nb;
  if (nsize <= MAX_SMALL_REQUEST) {
    bindex_t idx;
    binmap_t smallbits;
    nb = (nsize < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(nsize);
    idx = small_index(nb);
    smallbits = ms->smallmap >> idx;

    if ((smallbits & 0x3U) != 0) {  /* Remainderless fit to a smallbin. */
      mchunkptr b, p;
      idx += ~smallbits & 1;  /* Uses next bin if idx empty */
      b = smallbin_at(ms, idx);
      p = b->fd;
      unlink_first_small_chunk(ms, b, p, idx);
      set_inuse_and_pinuse(ms, p, small_index2size(idx));
      mem = chunk2mem(p);
      return mem;
    } else if (nb > ms->dvsize) {
      if (smallbits != 0) {  /* Use chunk in next nonempty smallbin */
        mchunkptr b, p, r;
        size_t rsize;
        binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
        bindex_t i = lj_ffs(leftbits);
        b = smallbin_at(ms, i);
        p = b->fd;
        unlink_first_small_chunk(ms, b, p, i);
        rsize = small_index2size(i) - nb;
        /* Fit here cannot be remainderless if 4byte sizes */
        if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) {
          set_inuse_and_pinuse(ms, p, small_index2size(i));
        } else {
          set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
          r = chunk_plus_offset(p, nb);
          set_size_and_pinuse_of_free_chunk(r, rsize);
          replace_dv(ms, r, rsize);
        }
        mem = chunk2mem(p);
        return mem;
      } else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
        return mem;
      }
    }
  } else if (nsize >= MAX_REQUEST) {
    nb = MAX_SIZE_T;  /* Too big to allocate. Force failure (in sys alloc) */
  } else {
    nb = pad_request(nsize);
    if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
      return mem;
    }
  }

  if (nb <= ms->dvsize) {
    size_t rsize = ms->dvsize - nb;
    mchunkptr p = ms->dv;
    if (rsize >= MIN_CHUNK_SIZE) {  /* split dv */
      mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
      ms->dvsize = rsize;
      set_size_and_pinuse_of_free_chunk(r, rsize);
      set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
    } else {  /* exhaust dv */
      size_t dvs = ms->dvsize;
      ms->dvsize = 0;
      ms->dv = 0;
      set_inuse_and_pinuse(ms, p, dvs);
    }
    mem = chunk2mem(p);
    return mem;
  } else if (nb < ms->topsize) {  /* Split top */
    size_t rsize = ms->topsize -= nb;
    mchunkptr p = ms->top;
    mchunkptr r = ms->top = chunk_plus_offset(p, nb);
    r->head = rsize | PINUSE_BIT;
    set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
    mem = chunk2mem(p);
    return mem;
  }
  return alloc_sys(ms, nb);
}
static LJ_NOINLINE void *lj_alloc_free(void *msp, void *ptr)
{
  if (ptr != 0) {
    mchunkptr p = mem2chunk(ptr);
    mstate fm = (mstate)msp;
    size_t psize = chunksize(p);
    mchunkptr next = chunk_plus_offset(p, psize);
    if (!pinuse(p)) {
      size_t prevsize = p->prev_foot;
      if ((prevsize & IS_DIRECT_BIT) != 0) {
        prevsize &= ~IS_DIRECT_BIT;
        psize += prevsize + DIRECT_FOOT_PAD;
        CALL_MUNMAP((char *)p - prevsize, psize);
        return NULL;
      } else {
        mchunkptr prev = chunk_minus_offset(p, prevsize);
        psize += prevsize;
        p = prev;
        /* consolidate backward */
        if (p != fm->dv) {
          unlink_chunk(fm, p, prevsize);
        } else if ((next->head & INUSE_BITS) == INUSE_BITS) {
          fm->dvsize = psize;
          set_free_with_pinuse(p, psize, next);
          return NULL;
        }
      }
    }
    if (!cinuse(next)) {  /* consolidate forward */
      if (next == fm->top) {
        size_t tsize = fm->topsize += psize;
        fm->top = p;
        p->head = tsize | PINUSE_BIT;
        if (p == fm->dv) {
          fm->dv = 0;
          fm->dvsize = 0;
        }
        if (tsize > fm->trim_check)
          alloc_trim(fm, 0);
        return NULL;
      } else if (next == fm->dv) {
        size_t dsize = fm->dvsize += psize;
        fm->dv = p;
        set_size_and_pinuse_of_free_chunk(p, dsize);
        return NULL;
      } else {
        size_t nsize = chunksize(next);
        psize += nsize;
        unlink_chunk(fm, next, nsize);
        set_size_and_pinuse_of_free_chunk(p, psize);
        if (p == fm->dv) {
          fm->dvsize = psize;
          return NULL;
        }
      }
    } else {
      set_free_with_pinuse(p, psize, next);
    }

    if (is_small(psize)) {
      insert_small_chunk(fm, p, psize);
    } else {
      tchunkptr tp = (tchunkptr)p;
      insert_large_chunk(fm, tp, psize);
      if (--fm->release_checks == 0)
        release_unused_segments(fm);
    }
  }
  return NULL;
}
static LJ_NOINLINE void *lj_alloc_realloc(void *msp, void *ptr, size_t nsize)
{
  if (nsize >= MAX_REQUEST) {
    return NULL;
  } else {
    mstate m = (mstate)msp;
    mchunkptr oldp = mem2chunk(ptr);
    size_t oldsize = chunksize(oldp);
    mchunkptr next = chunk_plus_offset(oldp, oldsize);
    mchunkptr newp = 0;
    size_t nb = request2size(nsize);

    /* Try to either shrink or extend into top. Else malloc-copy-free */
    if (is_direct(oldp)) {
      newp = direct_resize(oldp, nb);  /* this may return NULL. */
    } else if (oldsize >= nb) {  /* already big enough */
      size_t rsize = oldsize - nb;
      newp = oldp;
      if (rsize >= MIN_CHUNK_SIZE) {
        mchunkptr rem = chunk_plus_offset(newp, nb);
        set_inuse(m, newp, nb);
        set_inuse(m, rem, rsize);
        lj_alloc_free(m, chunk2mem(rem));
      }
    } else if (next == m->top && oldsize + m->topsize > nb) {
      /* Expand into top */
      size_t newsize = oldsize + m->topsize;
      size_t newtopsize = newsize - nb;
      mchunkptr newtop = chunk_plus_offset(oldp, nb);
      set_inuse(m, oldp, nb);
      newtop->head = newtopsize|PINUSE_BIT;
      m->top = newtop;
      m->topsize = newtopsize;
      newp = oldp;
    }

    if (newp != 0) {
      return chunk2mem(newp);
    } else {
      void *newmem = lj_alloc_malloc(m, nsize);
      if (newmem != 0) {
        size_t oc = oldsize - overhead_for(oldp);
        memcpy(newmem, ptr, oc < nsize ? oc : nsize);
        lj_alloc_free(m, ptr);
      }
      return newmem;
    }
  }
}

void *lj_alloc_f(void *msp, void *ptr, size_t osize, size_t nsize)
{
  (void)osize;
  if (nsize == 0) {
    return lj_alloc_free(msp, ptr);
  } else if (ptr == NULL) {
    return lj_alloc_malloc(msp, nsize);
  } else {
    return lj_alloc_realloc(msp, ptr, nsize);
  }
}

#endif
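/* Usage sketch (not from this file): lj_alloc_f follows the lua_Alloc
** convention, dispatching on (ptr, nsize) as above:
**
**   void *msp = lj_alloc_create();
**   void *p = lj_alloc_f(msp, NULL, 0, 100);   allocate 100 bytes
**   p = lj_alloc_f(msp, p, 100, 200);          grow to 200 bytes
**   lj_alloc_f(msp, p, 200, 0);                free
**   lj_alloc_destroy(msp);
*/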
);