/*
** Bundled memory allocator.
**
** Beware: this is a HEAVILY CUSTOMIZED version of dlmalloc.
** The original bears the following remark:
**
**   This is a version (aka dlmalloc) of malloc/free/realloc written by
**   Doug Lea and released to the public domain, as explained at
**   http://creativecommons.org/licenses/publicdomain.
**
**   * Version pre-2.8.4 Wed Mar 29 19:46:29 2006    (dl at gee)
**
** No additional copyright is claimed over the customizations.
** Please do NOT bother the original author about this version here!
**
** If you want to use dlmalloc in another project, you should get
** the original from: ftp://gee.cs.oswego.edu/pub/misc/
** For thread-safe derivatives, take a look at:
** - ptmalloc: http://www.malloc.de/
** - nedmalloc: http://www.nedprod.com/programs/portable/nedmalloc/
*/
#define lj_alloc_c
#define LUA_CORE

/* To get the mremap prototype. Must be defined before any system includes. */
#if defined(__linux__) && !defined(_GNU_SOURCE)
#define _GNU_SOURCE
#endif

#include "lj_def.h"
#include "lj_arch.h"
#include "lj_alloc.h"

#ifndef LUAJIT_USE_SYSMALLOC
#define MAX_SIZE_T		(~(size_t)0)
#define MALLOC_ALIGNMENT	((size_t)8U)

#define DEFAULT_GRANULARITY	((size_t)128U * (size_t)1024U)
#define DEFAULT_TRIM_THRESHOLD	((size_t)2U * (size_t)1024U * (size_t)1024U)
#define DEFAULT_MMAP_THRESHOLD	((size_t)128U * (size_t)1024U)
#define MAX_RELEASE_CHECK_RATE	255
/* ------------------- size_t and alignment properties -------------------- */

/* The byte and bit size of a size_t */
#define SIZE_T_SIZE		(sizeof(size_t))
#define SIZE_T_BITSIZE		(sizeof(size_t) << 3)

/* Some constants coerced to size_t */
/* Annoying but necessary to avoid errors on some platforms */
#define SIZE_T_ZERO		((size_t)0)
#define SIZE_T_ONE		((size_t)1)
#define SIZE_T_TWO		((size_t)2)
#define TWO_SIZE_T_SIZES	(SIZE_T_SIZE<<1)
#define FOUR_SIZE_T_SIZES	(SIZE_T_SIZE<<2)
#define SIX_SIZE_T_SIZES	(FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)

/* The bit mask value corresponding to MALLOC_ALIGNMENT */
#define CHUNK_ALIGN_MASK	(MALLOC_ALIGNMENT - SIZE_T_ONE)

/* the number of bytes to offset an address to align it */
#define align_offset(A)\
 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
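
/* Example (follows directly from the macros above): with the 8 byte
** MALLOC_ALIGNMENT, an address ending in 0x3 is 3 bytes past a boundary,
** so align_offset() yields (8 - 3) & 7 == 5, while an already aligned
** address takes the first branch and yields 0.
*/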
/* -------------------------- MMAP support ------------------------------- */

#define MFAIL			((void *)(MAX_SIZE_T))
#define CMFAIL			((char *)(MFAIL)) /* defined for convenience */

#define IS_DIRECT_BIT		(SIZE_T_ONE)

#if LJ_TARGET_WINDOWS

#define WIN32_LEAN_AND_MEAN
#include <windows.h>

#if LJ_64
/* Undocumented, but hey, that's what we all love so much about Windows. */
typedef long (*PNTAVM)(HANDLE handle, void **addr, ULONG zbits,
		       size_t *size, ULONG alloctype, ULONG prot);
static PNTAVM ntavm;

/* Number of top bits of the lower 32 bits of an address that must be zero.
** Apparently 0 gives us full 64 bit addresses and 1 gives us the lower 2GB.
*/
#define NTAVM_ZEROBITS		1
static void INIT_MMAP(void)
{
  ntavm = (PNTAVM)GetProcAddress(GetModuleHandleA("ntdll.dll"),
				 "NtAllocateVirtualMemory");
}
/* Win64 32 bit MMAP via NtAllocateVirtualMemory. */
static LJ_AINLINE void *CALL_MMAP(size_t size)
{
  DWORD olderr = GetLastError();
  void *ptr = NULL;
  long st = ntavm(INVALID_HANDLE_VALUE, &ptr, NTAVM_ZEROBITS, &size,
		  MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
  SetLastError(olderr);
  return st == 0 ? ptr : MFAIL;
}
/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
static LJ_AINLINE void *DIRECT_MMAP(size_t size)
{
  DWORD olderr = GetLastError();
  void *ptr = NULL;
  long st = ntavm(INVALID_HANDLE_VALUE, &ptr, NTAVM_ZEROBITS, &size,
		  MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, PAGE_READWRITE);
  SetLastError(olderr);
  return st == 0 ? ptr : MFAIL;
}
#else

#define INIT_MMAP()		((void)0)
/* Win32 MMAP via VirtualAlloc */
static LJ_AINLINE void *CALL_MMAP(size_t size)
{
  DWORD olderr = GetLastError();
  void *ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
  SetLastError(olderr);
  return ptr ? ptr : MFAIL;
}
/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
static LJ_AINLINE void *DIRECT_MMAP(size_t size)
{
  DWORD olderr = GetLastError();
  void *ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
			   PAGE_READWRITE);
  SetLastError(olderr);
  return ptr ? ptr : MFAIL;
}

#endif
/* This function supports releasing coalesced segments */
static LJ_AINLINE int CALL_MUNMAP(void *ptr, size_t size)
{
  DWORD olderr = GetLastError();
  MEMORY_BASIC_INFORMATION minfo;
  char *cptr = (char *)ptr;
  while (size) {
    if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
      return -1;
    if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
	minfo.State != MEM_COMMIT || minfo.RegionSize > size)
      return -1;
    if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
      return -1;
    cptr += minfo.RegionSize;
    size -= minfo.RegionSize;
  }
  SetLastError(olderr);
  return 0;
}

#else

#include <errno.h>
#include <sys/mman.h>

#define MMAP_PROT		(PROT_READ|PROT_WRITE)
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS		MAP_ANON
#endif
#define MMAP_FLAGS		(MAP_PRIVATE|MAP_ANONYMOUS)
#if LJ_64

/* 64 bit mode needs special support for allocating memory in the lower 2GB. */

#if defined(MAP_32BIT)

/* Actually this only gives us max. 1GB in current Linux kernels. */
static LJ_AINLINE void *CALL_MMAP(size_t size)
{
  int olderr = errno;
  void *ptr = mmap(NULL, size, MMAP_PROT, MAP_32BIT|MMAP_FLAGS, -1, 0);
  errno = olderr;
  return ptr;
}
#elif LJ_TARGET_OSX || LJ_TARGET_PS4 || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__sun__)

/* OSX and FreeBSD mmap() use a naive first-fit linear search.
** That's perfect for us. Except that -pagezero_size must be set for OSX,
** otherwise the lower 4GB are blocked. And the 32GB RLIMIT_DATA needs
** to be reduced to 250MB on FreeBSD.
*/
#if LJ_TARGET_OSX || defined(__NetBSD__) || defined(__OpenBSD__)
#define MMAP_REGION_START	((uintptr_t)0x10000)
#elif LJ_TARGET_PS4
#define MMAP_REGION_START	((uintptr_t)0x4000)
#else
#define MMAP_REGION_START	((uintptr_t)0x10000000)
#endif
#define MMAP_REGION_END		((uintptr_t)0x80000000)

#if (defined(__FreeBSD__) || defined(__FreeBSD_kernel__)) && !LJ_TARGET_PS4
#include <sys/resource.h>
#endif
static LJ_AINLINE void *CALL_MMAP(size_t size)
{
  int olderr = errno;
  /* Hint for next allocation. Doesn't need to be thread-safe. */
  static uintptr_t alloc_hint = MMAP_REGION_START;
  int retry = 0;
#if (defined(__FreeBSD__) || defined(__FreeBSD_kernel__)) && !LJ_TARGET_PS4
  static int rlimit_modified = 0;
  if (LJ_UNLIKELY(rlimit_modified == 0)) {
    struct rlimit rlim;
    rlim.rlim_cur = rlim.rlim_max = MMAP_REGION_START;
    setrlimit(RLIMIT_DATA, &rlim);  /* Ignore result. May fail below. */
    rlimit_modified = 1;
  }
#endif
  for (;;) {
    void *p = mmap((void *)alloc_hint, size, MMAP_PROT, MMAP_FLAGS, -1, 0);
    if ((uintptr_t)p >= MMAP_REGION_START &&
	(uintptr_t)p + size < MMAP_REGION_END) {
      alloc_hint = (uintptr_t)p + size;
      errno = olderr;
      return p;
    }
    if (p != CMFAIL) munmap(p, size);
#ifdef __sun__
    alloc_hint += 0x1000000;  /* Need near-exhaustive linear scan. */
    if (alloc_hint + size < MMAP_REGION_END) continue;
#endif
    if (retry) break;
    retry = 1;
    alloc_hint = MMAP_REGION_START;
  }
  errno = olderr;
  return CMFAIL;
}
#else

#error "NYI: need an equivalent of MAP_32BIT for this 64 bit OS"

#endif

#else

/* 32 bit mode is easy. */
static LJ_AINLINE void *CALL_MMAP(size_t size)
{
  int olderr = errno;
  void *ptr = mmap(NULL, size, MMAP_PROT, MMAP_FLAGS, -1, 0);
  errno = olderr;
  return ptr;
}

#endif
#define INIT_MMAP()		((void)0)
#define DIRECT_MMAP(s)		CALL_MMAP(s)

static LJ_AINLINE int CALL_MUNMAP(void *ptr, size_t size)
{
  int olderr = errno;
  int ret = munmap(ptr, size);
  errno = olderr;
  return ret;
}
#if LJ_TARGET_LINUX
/* Need to define _GNU_SOURCE to get the mremap prototype. */
static LJ_AINLINE void *CALL_MREMAP_(void *ptr, size_t osz, size_t nsz,
				     int flags)
{
  int olderr = errno;
  ptr = mremap(ptr, osz, nsz, flags);
  errno = olderr;
  return ptr;
}

#define CALL_MREMAP(addr, osz, nsz, mv) CALL_MREMAP_((addr), (osz), (nsz), (mv))
#define CALL_MREMAP_NOMOVE	0
#define CALL_MREMAP_MAYMOVE	1
#if LJ_64
#define CALL_MREMAP_MV		CALL_MREMAP_NOMOVE
#else
#define CALL_MREMAP_MV		CALL_MREMAP_MAYMOVE
#endif
#endif

#endif

#ifndef CALL_MREMAP
#define CALL_MREMAP(addr, osz, nsz, mv) ((void)osz, MFAIL)
#endif
/* -----------------------  Chunk representations ------------------------ */

struct malloc_chunk {
  size_t               prev_foot;  /* Size of previous chunk (if free).  */
  size_t               head;       /* Size and inuse bits. */
  struct malloc_chunk *fd;         /* double links -- used only if free. */
  struct malloc_chunk *bk;
};

typedef struct malloc_chunk  mchunk;
typedef struct malloc_chunk *mchunkptr;
typedef struct malloc_chunk *sbinptr;  /* The type of bins of chunks */
typedef size_t bindex_t;               /* Described below */
typedef unsigned int binmap_t;         /* Described below */
typedef unsigned int flag_t;           /* The type of various bit flag sets */
/* ------------------- Chunks sizes and alignments ----------------------- */

#define MCHUNK_SIZE		(sizeof(mchunk))

#define CHUNK_OVERHEAD		(SIZE_T_SIZE)

/* Direct chunks need a second word of overhead ... */
#define DIRECT_CHUNK_OVERHEAD	(TWO_SIZE_T_SIZES)
/* ... and additional padding for fake next-chunk at foot */
#define DIRECT_FOOT_PAD		(FOUR_SIZE_T_SIZES)

/* The smallest size we can malloc is an aligned minimal chunk */
#define MIN_CHUNK_SIZE\
  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)

/* conversion from malloc headers to user pointers, and back */
#define chunk2mem(p)		((void *)((char *)(p) + TWO_SIZE_T_SIZES))
#define mem2chunk(mem)		((mchunkptr)((char *)(mem) - TWO_SIZE_T_SIZES))
/* chunk associated with aligned address A */
#define align_as_chunk(A)	(mchunkptr)((A) + align_offset(chunk2mem(A)))

/* Bounds on request (not chunk) sizes. */
#define MAX_REQUEST		((~MIN_CHUNK_SIZE+1) << 2)
#define MIN_REQUEST		(MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)

/* pad request bytes into a usable size */
#define pad_request(req) \
   (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)

/* pad request, checking for minimum (but not maximum) */
#define request2size(req) \
  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
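
/* Worked example for the request padding above: on a 64 bit build a
** 17 byte request becomes (17 + 8 + 7) & ~7 == 32. A single word of
** CHUNK_OVERHEAD suffices because an in-use chunk may also use the
** prev_foot word of its successor, so the caller still gets 24 bytes.
*/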
/* ------------------ Operations on head and foot fields ----------------- */

#define PINUSE_BIT		(SIZE_T_ONE)
#define CINUSE_BIT		(SIZE_T_TWO)
#define INUSE_BITS		(PINUSE_BIT|CINUSE_BIT)

/* Head value for fenceposts */
#define FENCEPOST_HEAD		(INUSE_BITS|SIZE_T_SIZE)

/* extraction of fields from head words */
#define cinuse(p)		((p)->head & CINUSE_BIT)
#define pinuse(p)		((p)->head & PINUSE_BIT)
#define chunksize(p)		((p)->head & ~(INUSE_BITS))

#define clear_pinuse(p)		((p)->head &= ~PINUSE_BIT)
#define clear_cinuse(p)		((p)->head &= ~CINUSE_BIT)

/* Treat space at ptr +/- offset as a chunk */
#define chunk_plus_offset(p, s)		((mchunkptr)(((char *)(p)) + (s)))
#define chunk_minus_offset(p, s)	((mchunkptr)(((char *)(p)) - (s)))

/* Ptr to next or previous physical malloc_chunk. */
#define next_chunk(p)	((mchunkptr)(((char *)(p)) + ((p)->head & ~INUSE_BITS)))
#define prev_chunk(p)	((mchunkptr)(((char *)(p)) - ((p)->prev_foot) ))

/* extract next chunk's pinuse bit */
#define next_pinuse(p)	((next_chunk(p)->head) & PINUSE_BIT)

/* Get/set size at footer */
#define get_foot(p, s)	(((mchunkptr)((char *)(p) + (s)))->prev_foot)
#define set_foot(p, s)	(((mchunkptr)((char *)(p) + (s)))->prev_foot = (s))

/* Set size, pinuse bit, and foot */
#define set_size_and_pinuse_of_free_chunk(p, s)\
  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))

/* Set size, pinuse bit, foot, and clear next pinuse */
#define set_free_with_pinuse(p, s, n)\
  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))

#define is_direct(p)\
  (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_DIRECT_BIT))

/* Get the internal overhead associated with chunk p */
#define overhead_for(p)\
  (is_direct(p)? DIRECT_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
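
/* Example of the head encoding: a free 32 byte chunk whose predecessor is
** in use stores head == 32|PINUSE_BIT == 0x21; chunksize() masks off the
** two low bits to recover 32, and cinuse()/pinuse() test them directly.
*/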
/* ---------------------- Overlaid data structures ----------------------- */

struct malloc_tree_chunk {
  /* The first four fields must be compatible with malloc_chunk */
  size_t                    prev_foot;
  size_t                    head;
  struct malloc_tree_chunk *fd;
  struct malloc_tree_chunk *bk;

  struct malloc_tree_chunk *child[2];
  struct malloc_tree_chunk *parent;
  bindex_t                  index;
};

typedef struct malloc_tree_chunk  tchunk;
typedef struct malloc_tree_chunk *tchunkptr;
typedef struct malloc_tree_chunk *tbinptr;  /* The type of bins of trees */

/* A little helper macro for trees */
#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
/* ----------------------------- Segments -------------------------------- */

struct malloc_segment {
  char        *base;             /* base address */
  size_t       size;             /* allocated size */
  struct malloc_segment *next;   /* ptr to next segment */
};

typedef struct malloc_segment  msegment;
typedef struct malloc_segment *msegmentptr;
/* ---------------------------- malloc_state ----------------------------- */

/* Bin types, widths and sizes */
#define NSMALLBINS		(32U)
#define NTREEBINS		(32U)
#define SMALLBIN_SHIFT		(3U)
#define SMALLBIN_WIDTH		(SIZE_T_ONE << SMALLBIN_SHIFT)
#define TREEBIN_SHIFT		(8U)
#define MIN_LARGE_SIZE		(SIZE_T_ONE << TREEBIN_SHIFT)
#define MAX_SMALL_SIZE		(MIN_LARGE_SIZE - SIZE_T_ONE)
#define MAX_SMALL_REQUEST	(MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)

struct malloc_state {
  binmap_t   smallmap;
  binmap_t   treemap;
  size_t     dvsize;
  size_t     topsize;
  mchunkptr  dv;
  mchunkptr  top;
  size_t     trim_check;
  size_t     release_checks;
  mchunkptr  smallbins[(NSMALLBINS+1)*2];
  tbinptr    treebins[NTREEBINS];
  msegment   seg;
};

typedef struct malloc_state *mstate;

#define is_initialized(M)	((M)->top != 0)
/* -------------------------- system alloc setup ------------------------- */

/* page-align a size */
#define page_align(S)\
 (((S) + (LJ_PAGESIZE - SIZE_T_ONE)) & ~(LJ_PAGESIZE - SIZE_T_ONE))

/* granularity-align a size */
#define granularity_align(S)\
  (((S) + (DEFAULT_GRANULARITY - SIZE_T_ONE))\
   & ~(DEFAULT_GRANULARITY - SIZE_T_ONE))

#if LJ_TARGET_WINDOWS
#define mmap_align(S)	granularity_align(S)
#else
#define mmap_align(S)	page_align(S)
#endif
/* True if segment S holds address A */
#define segment_holds(S, A)\
  ((char *)(A) >= S->base && (char *)(A) < S->base + S->size)

/* Return segment holding given address */
static msegmentptr segment_holding(mstate m, char *addr)
{
  msegmentptr sp = &m->seg;
  for (;;) {
    if (addr >= sp->base && addr < sp->base + sp->size)
      return sp;
    if ((sp = sp->next) == 0)
      return 0;
  }
}
/* Return true if segment contains a segment link */
static int has_segment_link(mstate m, msegmentptr ss)
{
  msegmentptr sp = &m->seg;
  for (;;) {
    if ((char *)sp >= ss->base && (char *)sp < ss->base + ss->size)
      return 1;
    if ((sp = sp->next) == 0)
      return 0;
  }
}
/*
  TOP_FOOT_SIZE is padding at the end of a segment, including space
  that may be needed to place segment records and fenceposts when new
  noncontiguous segments are added.
*/
#define TOP_FOOT_SIZE\
  (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
/* ---------------------------- Indexing Bins ---------------------------- */

#define is_small(s)		(((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
#define small_index(s)		((s) >> SMALLBIN_SHIFT)
#define small_index2size(i)	((i) << SMALLBIN_SHIFT)
#define MIN_SMALL_INDEX		(small_index(MIN_CHUNK_SIZE))

/* addressing by index. See above about smallbin repositioning */
#define smallbin_at(M, i)	((sbinptr)((char *)&((M)->smallbins[(i)<<1])))
#define treebin_at(M,i)		(&((M)->treebins[i]))
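
/* Example: with SMALLBIN_SHIFT == 3 the smallbins are spaced 8 bytes
** apart, so a 24 byte chunk maps to small_index(24) == 3 and
** small_index2size(3) == 24 recovers the bin's chunk size.
*/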
/* assign tree index for size S to variable I */
#define compute_tree_index(S, I)\
{\
  unsigned int X = (unsigned int)(S >> TREEBIN_SHIFT);\
  if (X == 0) {\
    I = 0;\
  } else if (X > 0xFFFF) {\
    I = NTREEBINS-1;\
  } else {\
    unsigned int K = lj_fls(X);\
    I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
  }\
}
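
/* Resulting spacing (with TREEBIN_SHIFT == 8): sizes 256..383 map to tree
** index 0, 384..511 to index 1, 512..767 to index 2, and so on -- two
** bins per power of two, split at the midpoint.
*/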
/* Bit representing maximum resolved size in a treebin at i */
#define bit_for_tree_index(i) \
   (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)

/* Shift placing maximum resolved bit in a treebin at i as sign bit */
#define leftshift_for_tree_index(i) \
   ((i == NTREEBINS-1)? 0 : \
    ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))

/* The size of the smallest chunk held in bin with index i */
#define minsize_for_tree_index(i) \
   ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
   (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
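
/* Consistency check with the spacing noted above:
** minsize_for_tree_index(2) == (1<<9)|0 == 512 and
** minsize_for_tree_index(3) == 512|256 == 768.
*/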
/* ------------------------ Operations on bin maps ----------------------- */

/* bit corresponding to given index */
#define idx2bit(i)		((binmap_t)(1) << (i))

/* Mark/Clear bits with given index */
#define mark_smallmap(M,i)	((M)->smallmap |=  idx2bit(i))
#define clear_smallmap(M,i)	((M)->smallmap &= ~idx2bit(i))
#define smallmap_is_marked(M,i)	((M)->smallmap &   idx2bit(i))

#define mark_treemap(M,i)	((M)->treemap  |=  idx2bit(i))
#define clear_treemap(M,i)	((M)->treemap  &= ~idx2bit(i))
#define treemap_is_marked(M,i)	((M)->treemap  &   idx2bit(i))

/* mask with all bits to left of least bit of x on */
#define left_bits(x)		((x<<1) | (~(x<<1)+1))
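
/* Example: for a 32 bit binmap_t, left_bits(0x8) == 0xFFFFFFF0. The
** (x<<1) term sets the bit just above x and the two's complement term
** sets all bits above that, selecting every bin for larger sizes.
*/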
/* Set cinuse bit and pinuse bit of next chunk */
#define set_inuse(M,p,s)\
  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT)

/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
#define set_inuse_and_pinuse(M,p,s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT)

/* Set size, cinuse and pinuse bit of this chunk */
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
/* ----------------------- Operations on smallbins ----------------------- */

/* Link a free chunk into a smallbin */
#define insert_small_chunk(M, P, S) {\
  bindex_t I = small_index(S);\
  mchunkptr B = smallbin_at(M, I);\
  mchunkptr F = B;\
  if (!smallmap_is_marked(M, I))\
    mark_smallmap(M, I);\
  else\
    F = B->fd;\
  B->fd = P;\
  F->bk = P;\
  P->fd = F;\
  P->bk = B;\
}
/* Unlink a chunk from a smallbin */
#define unlink_small_chunk(M, P, S) {\
  mchunkptr F = P->fd;\
  mchunkptr B = P->bk;\
  bindex_t I = small_index(S);\
  if (F == B) {\
    clear_smallmap(M, I);\
  } else {\
    F->bk = B;\
    B->fd = F;\
  }\
}
/* Unlink the first chunk from a smallbin */
#define unlink_first_small_chunk(M, B, P, I) {\
  mchunkptr F = P->fd;\
  if (B == F) {\
    clear_smallmap(M, I);\
  } else {\
    B->fd = F;\
    F->bk = B;\
  }\
}
/* Replace dv node, binning the old one */
/* Used only when dvsize known to be small */
#define replace_dv(M, P, S) {\
  size_t DVS = M->dvsize;\
  if (DVS != 0) {\
    mchunkptr DV = M->dv;\
    insert_small_chunk(M, DV, DVS);\
  }\
  M->dvsize = S;\
  M->dv = P;\
}
/* ------------------------- Operations on trees ------------------------- */

/* Insert chunk into tree */
#define insert_large_chunk(M, X, S) {\
  tbinptr *H;\
  bindex_t I;\
  compute_tree_index(S, I);\
  H = treebin_at(M, I);\
  X->index = I;\
  X->child[0] = X->child[1] = 0;\
  if (!treemap_is_marked(M, I)) {\
    mark_treemap(M, I);\
    *H = X;\
    X->parent = (tchunkptr)H;\
    X->fd = X->bk = X;\
  } else {\
    tchunkptr T = *H;\
    size_t K = S << leftshift_for_tree_index(I);\
    for (;;) {\
      if (chunksize(T) != S) {\
	tchunkptr *C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
	K <<= 1;\
	if (*C != 0) {\
	  T = *C;\
	} else {\
	  *C = X;\
	  X->parent = T;\
	  X->fd = X->bk = X;\
	  break;\
	}\
      } else {\
	tchunkptr F = T->fd;\
	T->fd = F->bk = X;\
	X->fd = F;\
	X->bk = T;\
	X->parent = 0;\
	break;\
      }\
    }\
  }\
}
#define unlink_large_chunk(M, X) {\
  tchunkptr XP = X->parent;\
  tchunkptr R;\
  if (X->bk != X) {\
    tchunkptr F = X->fd;\
    R = X->bk;\
    F->bk = R;\
    R->fd = F;\
  } else {\
    tchunkptr *RP;\
    if (((R = *(RP = &(X->child[1]))) != 0) ||\
	((R = *(RP = &(X->child[0]))) != 0)) {\
      tchunkptr *CP;\
      while ((*(CP = &(R->child[1])) != 0) ||\
	     (*(CP = &(R->child[0])) != 0)) {\
	R = *(RP = CP);\
      }\
      *RP = 0;\
    }\
  }\
  if (XP != 0) {\
    tbinptr *H = treebin_at(M, X->index);\
    if (X == *H) {\
      if ((*H = R) == 0)\
	clear_treemap(M, X->index);\
    } else {\
      if (XP->child[0] == X)\
	XP->child[0] = R;\
      else\
	XP->child[1] = R;\
    }\
    if (R != 0) {\
      tchunkptr C0, C1;\
      R->parent = XP;\
      if ((C0 = X->child[0]) != 0) {\
	R->child[0] = C0;\
	C0->parent = R;\
      }\
      if ((C1 = X->child[1]) != 0) {\
	R->child[1] = C1;\
	C1->parent = R;\
      }\
    }\
  }\
}
/* Relays to large vs small bin operations */

#define insert_chunk(M, P, S)\
  if (is_small(S)) { insert_small_chunk(M, P, S)\
  } else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }

#define unlink_chunk(M, P, S)\
  if (is_small(S)) { unlink_small_chunk(M, P, S)\
  } else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
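
/* Usage note: both relays expand to a complete if/else statement
** (insert_small_chunk() and unlink_small_chunk() supply their own
** braces), so the trailing semicolon in a call such as
** insert_chunk(m, q, qsize); is merely an empty statement.
*/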
/* -----------------------  Direct-mmapping chunks ----------------------- */

static void *direct_alloc(size_t nb)
{
  size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  if (LJ_LIKELY(mmsize > nb)) {  /* Check for wrap around 0 */
    char *mm = (char *)(DIRECT_MMAP(mmsize));
    if (mm != CMFAIL) {
      size_t offset = align_offset(chunk2mem(mm));
      size_t psize = mmsize - offset - DIRECT_FOOT_PAD;
      mchunkptr p = (mchunkptr)(mm + offset);
      p->prev_foot = offset | IS_DIRECT_BIT;
      p->head = psize|CINUSE_BIT;
      chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
      chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
      return chunk2mem(p);
    }
  }
  return NULL;
}
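
/* Resulting direct-chunk layout: prev_foot holds the alignment offset
** tagged with IS_DIRECT_BIT, and the two fencepost heads written above
** fake a permanently in-use successor, so the free path below can recover
** the original mapping base and length from the chunk alone.
*/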
static mchunkptr direct_resize(mchunkptr oldp, size_t nb)
{
  size_t oldsize = chunksize(oldp);
  if (is_small(nb))  /* Can't shrink direct regions below small size */
    return NULL;
  /* Keep old chunk if big enough but not too big */
  if (oldsize >= nb + SIZE_T_SIZE &&
      (oldsize - nb) <= (DEFAULT_GRANULARITY >> 1)) {
    return oldp;
  } else {
    size_t offset = oldp->prev_foot & ~IS_DIRECT_BIT;
    size_t oldmmsize = oldsize + offset + DIRECT_FOOT_PAD;
    size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
    char *cp = (char *)CALL_MREMAP((char *)oldp - offset,
				   oldmmsize, newmmsize, CALL_MREMAP_MV);
    if (cp != CMFAIL) {
      mchunkptr newp = (mchunkptr)(cp + offset);
      size_t psize = newmmsize - offset - DIRECT_FOOT_PAD;
      newp->head = psize|CINUSE_BIT;
      chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
      chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
      return newp;
    }
  }
  return NULL;
}
/* -------------------------- mspace management -------------------------- */

/* Initialize top chunk and its size */
static void init_top(mstate m, mchunkptr p, size_t psize)
{
  /* Ensure alignment */
  size_t offset = align_offset(chunk2mem(p));
  p = (mchunkptr)((char *)p + offset);
  psize -= offset;

  m->top = p;
  m->topsize = psize;
  p->head = psize | PINUSE_BIT;
  /* set size of fake trailing chunk holding overhead space only once */
  chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
  m->trim_check = DEFAULT_TRIM_THRESHOLD;  /* reset on each update */
}
/* Initialize bins for a new mstate that is otherwise zeroed out */
static void init_bins(mstate m)
{
  /* Establish circular links for smallbins */
  bindex_t i;
  for (i = 0; i < NSMALLBINS; i++) {
    sbinptr bin = smallbin_at(m,i);
    bin->fd = bin->bk = bin;
  }
}
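
/* Sketch of the smallbin aliasing trick used above: smallbin_at() points
** into the smallbins[] array itself, so each bin header is a fake chunk
** whose fd/bk fields overlap the next bin's two slots. Since only fd/bk
** of a bin header are ever accessed, (NSMALLBINS+1)*2 pointers suffice
** instead of NSMALLBINS full chunk headers.
*/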
/* Allocate chunk and prepend remainder with chunk in successor base. */
static void *prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb)
{
  mchunkptr p = align_as_chunk(newbase);
  mchunkptr oldfirst = align_as_chunk(oldbase);
  size_t psize = (size_t)((char *)oldfirst - (char *)p);
  mchunkptr q = chunk_plus_offset(p, nb);
  size_t qsize = psize - nb;
  set_size_and_pinuse_of_inuse_chunk(m, p, nb);

  /* consolidate remainder with first chunk of old base */
  if (oldfirst == m->top) {
    size_t tsize = m->topsize += qsize;
    m->top = q;
    q->head = tsize | PINUSE_BIT;
  } else if (oldfirst == m->dv) {
    size_t dsize = m->dvsize += qsize;
    m->dv = q;
    set_size_and_pinuse_of_free_chunk(q, dsize);
  } else {
    if (!cinuse(oldfirst)) {
      size_t nsize = chunksize(oldfirst);
      unlink_chunk(m, oldfirst, nsize);
      oldfirst = chunk_plus_offset(oldfirst, nsize);
      qsize += nsize;
    }
    set_free_with_pinuse(q, qsize, oldfirst);
    insert_chunk(m, q, qsize);
  }

  return chunk2mem(p);
}
/* Add a segment to hold a new noncontiguous region */
static void add_segment(mstate m, char *tbase, size_t tsize)
{
  /* Determine locations and sizes of segment, fenceposts, old top */
  char *old_top = (char *)m->top;
  msegmentptr oldsp = segment_holding(m, old_top);
  char *old_end = oldsp->base + oldsp->size;
  size_t ssize = pad_request(sizeof(struct malloc_segment));
  char *rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  size_t offset = align_offset(chunk2mem(rawsp));
  char *asp = rawsp + offset;
  char *csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
  mchunkptr sp = (mchunkptr)csp;
  msegmentptr ss = (msegmentptr)(chunk2mem(sp));
  mchunkptr tnext = chunk_plus_offset(sp, ssize);
  mchunkptr p = tnext;

  /* reset top to new space */
  init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);

  /* Set up segment record */
  set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
  *ss = m->seg;  /* Push current record */
  m->seg.base = tbase;
  m->seg.size = tsize;
  m->seg.next = ss;

  /* Insert trailing fenceposts */
  for (;;) {
    mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
    p->head = FENCEPOST_HEAD;
    if ((char *)(&(nextp->head)) < old_end)
      p = nextp;
    else
      break;
  }

  /* Insert the rest of old top into a bin as an ordinary free chunk */
  if (csp != old_top) {
    mchunkptr q = (mchunkptr)old_top;
    size_t psize = (size_t)(csp - old_top);
    mchunkptr tn = chunk_plus_offset(q, psize);
    set_free_with_pinuse(q, psize, tn);
    insert_chunk(m, q, psize);
  }
}
/* -------------------------- System allocation -------------------------- */

static void *alloc_sys(mstate m, size_t nb)
{
  char *tbase = CMFAIL;
  size_t tsize = 0;

  /* Directly map large chunks */
  if (LJ_UNLIKELY(nb >= DEFAULT_MMAP_THRESHOLD)) {
    void *mem = direct_alloc(nb);
    if (mem != 0)
      return mem;
  }

  {
    size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
    size_t rsize = granularity_align(req);
    if (LJ_LIKELY(rsize > nb)) {  /* Fail if wraps around zero */
      char *mp = (char *)(CALL_MMAP(rsize));
      if (mp != CMFAIL) {
	tbase = mp;
	tsize = rsize;
      }
    }
  }

  if (tbase != CMFAIL) {
    msegmentptr sp = &m->seg;
    /* Try to merge with an existing segment */
    while (sp != 0 && tbase != sp->base + sp->size)
      sp = sp->next;
    if (sp != 0 && segment_holds(sp, m->top)) {  /* append */
      sp->size += tsize;
      init_top(m, m->top, m->topsize + tsize);
    } else {
      sp = &m->seg;
      while (sp != 0 && sp->base != tbase + tsize)
	sp = sp->next;
      if (sp != 0) {
	char *oldbase = sp->base;
	sp->base = tbase;
	sp->size += tsize;
	return prepend_alloc(m, tbase, oldbase, nb);
      } else {
	add_segment(m, tbase, tsize);
      }
    }

    if (nb < m->topsize) {  /* Allocate from new or extended top space */
      size_t rsize = m->topsize -= nb;
      mchunkptr p = m->top;
      mchunkptr r = m->top = chunk_plus_offset(p, nb);
      r->head = rsize | PINUSE_BIT;
      set_size_and_pinuse_of_inuse_chunk(m, p, nb);
      return chunk2mem(p);
    }
  }

  return NULL;
}
/* -----------------------  system deallocation -------------------------- */

/* Unmap and unlink any mmapped segments that don't contain used chunks */
static size_t release_unused_segments(mstate m)
{
  size_t released = 0;
  size_t nsegs = 0;
  msegmentptr pred = &m->seg;
  msegmentptr sp = pred->next;
  while (sp != 0) {
    char *base = sp->base;
    size_t size = sp->size;
    msegmentptr next = sp->next;
    nsegs++;
    {
      mchunkptr p = align_as_chunk(base);
      size_t psize = chunksize(p);
      /* Can unmap if first chunk holds entire segment and not pinned */
      if (!cinuse(p) && (char *)p + psize >= base + size - TOP_FOOT_SIZE) {
	tchunkptr tp = (tchunkptr)p;
	if (p == m->dv) {
	  m->dv = 0;
	  m->dvsize = 0;
	} else {
	  unlink_large_chunk(m, tp);
	}
	if (CALL_MUNMAP(base, size) == 0) {
	  released += size;
	  /* unlink obsoleted record */
	  sp = pred;
	  sp->next = next;
	} else {  /* back out if cannot unmap */
	  insert_large_chunk(m, tp, psize);
	}
      }
    }
    pred = sp;
    sp = next;
  }
  /* Reset check counter */
  m->release_checks = nsegs > MAX_RELEASE_CHECK_RATE ?
		      nsegs : MAX_RELEASE_CHECK_RATE;
  return released;
}
static int alloc_trim(mstate m, size_t pad)
{
  size_t released = 0;
  if (pad < MAX_REQUEST && is_initialized(m)) {
    pad += TOP_FOOT_SIZE;  /* ensure enough room for segment overhead */

    if (m->topsize > pad) {
      /* Shrink top space in granularity-size units, keeping at least one */
      size_t unit = DEFAULT_GRANULARITY;
      size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
		      SIZE_T_ONE) * unit;
      msegmentptr sp = segment_holding(m, (char *)m->top);

      if (sp->size >= extra &&
	  !has_segment_link(m, sp)) {  /* can't shrink if pinned */
	size_t newsize = sp->size - extra;
	/* Prefer mremap, fall back to munmap */
	if ((CALL_MREMAP(sp->base, sp->size, newsize, CALL_MREMAP_NOMOVE) != MFAIL) ||
	    (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
	  released = extra;
	}
      }

      if (released != 0) {
	sp->size -= released;
	init_top(m, m->top, m->topsize - released);
      }
    }

    /* Unmap any unused mmapped segments */
    released += release_unused_segments(m);

    /* On failure, disable autotrim to avoid repeated failed future calls */
    if (released == 0 && m->topsize > m->trim_check)
      m->trim_check = MAX_SIZE_T;
  }

  return (released != 0)? 1 : 0;
}
/* ---------------------------- malloc support --------------------------- */

/* allocate a large request from the best fitting chunk in a treebin */
static void *tmalloc_large(mstate m, size_t nb)
{
  tchunkptr v = 0;
  size_t rsize = ~nb+1;  /* Unsigned negation */
  tchunkptr t;
  bindex_t idx;
  compute_tree_index(nb, idx);

  if ((t = *treebin_at(m, idx)) != 0) {
    /* Traverse tree for this bin looking for node with size == nb */
    size_t sizebits = nb << leftshift_for_tree_index(idx);
    tchunkptr rst = 0;  /* The deepest untaken right subtree */
    for (;;) {
      tchunkptr rt;
      size_t trem = chunksize(t) - nb;
      if (trem < rsize) {
	v = t;
	if ((rsize = trem) == 0)
	  break;
      }
      rt = t->child[1];
      t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
      if (rt != 0 && rt != t)
	rst = rt;
      if (t == 0) {
	t = rst;  /* set t to least subtree holding sizes > nb */
	break;
      }
      sizebits <<= 1;
    }
  }

  if (t == 0 && v == 0) {  /* set t to root of next non-empty treebin */
    binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
    if (leftbits != 0)
      t = *treebin_at(m, lj_ffs(leftbits));
  }

  while (t != 0) {  /* find smallest of tree or subtree */
    size_t trem = chunksize(t) - nb;
    if (trem < rsize) {
      rsize = trem;
      v = t;
    }
    t = leftmost_child(t);
  }

  /* If dv is a better fit, return NULL so malloc will use it */
  if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
    mchunkptr r = chunk_plus_offset(v, nb);
    unlink_large_chunk(m, v);
    if (rsize < MIN_CHUNK_SIZE) {
      set_inuse_and_pinuse(m, v, (rsize + nb));
    } else {
      set_size_and_pinuse_of_inuse_chunk(m, v, nb);
      set_size_and_pinuse_of_free_chunk(r, rsize);
      insert_chunk(m, r, rsize);
    }
    return chunk2mem(v);
  }

  return NULL;
}
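
/* A subtlety in the dv comparison above: m->dvsize - nb is evaluated in
** size_t arithmetic, so when dv is too small the subtraction wraps to a
** huge value and the test also succeeds whenever dv cannot possibly
** satisfy the request.
*/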
/* allocate a small request from the best fitting chunk in a treebin */
static void *tmalloc_small(mstate m, size_t nb)
{
  tchunkptr t, v;
  mchunkptr r;
  size_t rsize;
  bindex_t i = lj_ffs(m->treemap);

  v = t = *treebin_at(m, i);
  rsize = chunksize(t) - nb;

  while ((t = leftmost_child(t)) != 0) {
    size_t trem = chunksize(t) - nb;
    if (trem < rsize) {
      rsize = trem;
      v = t;
    }
  }

  r = chunk_plus_offset(v, nb);
  unlink_large_chunk(m, v);
  if (rsize < MIN_CHUNK_SIZE) {
    set_inuse_and_pinuse(m, v, (rsize + nb));
  } else {
    set_size_and_pinuse_of_inuse_chunk(m, v, nb);
    set_size_and_pinuse_of_free_chunk(r, rsize);
    replace_dv(m, r, rsize);
  }
  return chunk2mem(v);
}
/* ----------------------------------------------------------------------- */

void *lj_alloc_create(void)
{
  size_t tsize = DEFAULT_GRANULARITY;
  char *tbase;
  INIT_MMAP();
  tbase = (char *)(CALL_MMAP(tsize));
  if (tbase != CMFAIL) {
    size_t msize = pad_request(sizeof(struct malloc_state));
    mchunkptr mn;
    mchunkptr msp = align_as_chunk(tbase);
    mstate m = (mstate)(chunk2mem(msp));
    memset(m, 0, msize);
    msp->head = (msize|PINUSE_BIT|CINUSE_BIT);
    m->seg.base = tbase;
    m->seg.size = tsize;
    m->release_checks = MAX_RELEASE_CHECK_RATE;
    init_bins(m);
    mn = next_chunk(mem2chunk(m));
    init_top(m, mn, (size_t)((tbase + tsize) - (char *)mn) - TOP_FOOT_SIZE);
    return m;
  }
  return NULL;
}
void lj_alloc_destroy(void *msp)
{
  mstate ms = (mstate)msp;
  msegmentptr sp = &ms->seg;
  while (sp != 0) {
    char *base = sp->base;
    size_t size = sp->size;
    sp = sp->next;
    CALL_MUNMAP(base, size);
  }
}
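
/* Illustrative call sequence (a sketch; the actual call sites live in the
** rest of the LuaJIT core, which passes lj_alloc_f() around as its
** lua_Alloc-style allocator callback):
**
**   void *msp = lj_alloc_create();
**   void *p = lj_alloc_f(msp, NULL, 0, 100);    allocates 100 bytes
**   p = lj_alloc_f(msp, p, 100, 200);           grows to 200 bytes
**   lj_alloc_f(msp, p, 200, 0);                 frees
**   lj_alloc_destroy(msp);
*/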
static LJ_NOINLINE void *lj_alloc_malloc(void *msp, size_t nsize)
{
  mstate ms = (mstate)msp;
  void *mem;
  size_t nb;
  if (nsize <= MAX_SMALL_REQUEST) {
    bindex_t idx;
    binmap_t smallbits;
    nb = (nsize < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(nsize);
    idx = small_index(nb);
    smallbits = ms->smallmap >> idx;

    if ((smallbits & 0x3U) != 0) {  /* Remainderless fit to a smallbin. */
      mchunkptr b, p;
      idx += ~smallbits & 1;  /* Uses next bin if idx empty */
      b = smallbin_at(ms, idx);
      p = b->fd;
      unlink_first_small_chunk(ms, b, p, idx);
      set_inuse_and_pinuse(ms, p, small_index2size(idx));
      mem = chunk2mem(p);
      return mem;
    } else if (nb > ms->dvsize) {
      if (smallbits != 0) {  /* Use chunk in next nonempty smallbin */
	mchunkptr b, p, r;
	size_t rsize;
	binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
	bindex_t i = lj_ffs(leftbits);
	b = smallbin_at(ms, i);
	p = b->fd;
	unlink_first_small_chunk(ms, b, p, i);
	rsize = small_index2size(i) - nb;
	/* Fit here cannot be remainderless if 4byte sizes */
	if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) {
	  set_inuse_and_pinuse(ms, p, small_index2size(i));
	} else {
	  set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
	  r = chunk_plus_offset(p, nb);
	  set_size_and_pinuse_of_free_chunk(r, rsize);
	  replace_dv(ms, r, rsize);
	}
	mem = chunk2mem(p);
	return mem;
      } else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
	return mem;
      }
    }
  } else if (nsize >= MAX_REQUEST) {
    nb = MAX_SIZE_T;  /* Too big to allocate. Force failure (in sys alloc) */
  } else {
    nb = pad_request(nsize);
    if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
      return mem;
    }
  }

  if (nb <= ms->dvsize) {
    size_t rsize = ms->dvsize - nb;
    mchunkptr p = ms->dv;
    if (rsize >= MIN_CHUNK_SIZE) {  /* split dv */
      mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
      ms->dvsize = rsize;
      set_size_and_pinuse_of_free_chunk(r, rsize);
      set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
    } else {  /* exhaust dv */
      size_t dvs = ms->dvsize;
      ms->dvsize = 0;
      ms->dv = 0;
      set_inuse_and_pinuse(ms, p, dvs);
    }
    mem = chunk2mem(p);
    return mem;
  } else if (nb < ms->topsize) {  /* Split top */
    size_t rsize = ms->topsize -= nb;
    mchunkptr p = ms->top;
    mchunkptr r = ms->top = chunk_plus_offset(p, nb);
    r->head = rsize | PINUSE_BIT;
    set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
    mem = chunk2mem(p);
    return mem;
  }

  return alloc_sys(ms, nb);
}
static LJ_NOINLINE void *lj_alloc_free(void *msp, void *ptr)
{
  if (ptr != 0) {
    mchunkptr p = mem2chunk(ptr);
    mstate fm = (mstate)msp;
    size_t psize = chunksize(p);
    mchunkptr next = chunk_plus_offset(p, psize);
    if (!pinuse(p)) {
      size_t prevsize = p->prev_foot;
      if ((prevsize & IS_DIRECT_BIT) != 0) {
	prevsize &= ~IS_DIRECT_BIT;
	psize += prevsize + DIRECT_FOOT_PAD;
	CALL_MUNMAP((char *)p - prevsize, psize);
	return NULL;
      } else {
	mchunkptr prev = chunk_minus_offset(p, prevsize);
	psize += prevsize;
	p = prev;
	/* consolidate backward */
	if (p != fm->dv) {
	  unlink_chunk(fm, p, prevsize);
	} else if ((next->head & INUSE_BITS) == INUSE_BITS) {
	  fm->dvsize = psize;
	  set_free_with_pinuse(p, psize, next);
	  return NULL;
	}
      }
    }
    if (!cinuse(next)) {  /* consolidate forward */
      if (next == fm->top) {
	size_t tsize = fm->topsize += psize;
	fm->top = p;
	p->head = tsize | PINUSE_BIT;
	if (p == fm->dv) {
	  fm->dv = 0;
	  fm->dvsize = 0;
	}
	if (tsize > fm->trim_check)
	  alloc_trim(fm, 0);
	return NULL;
      } else if (next == fm->dv) {
	size_t dsize = fm->dvsize += psize;
	fm->dv = p;
	set_size_and_pinuse_of_free_chunk(p, dsize);
	return NULL;
      } else {
	size_t nsize = chunksize(next);
	psize += nsize;
	unlink_chunk(fm, next, nsize);
	set_size_and_pinuse_of_free_chunk(p, psize);
	if (p == fm->dv) {
	  fm->dvsize = psize;
	  return NULL;
	}
      }
    } else {
      set_free_with_pinuse(p, psize, next);
    }

    if (is_small(psize)) {
      insert_small_chunk(fm, p, psize);
    } else {
      tchunkptr tp = (tchunkptr)p;
      insert_large_chunk(fm, tp, psize);
      if (--fm->release_checks == 0)
	release_unused_segments(fm);
    }
  }
  return NULL;
}
static LJ_NOINLINE void *lj_alloc_realloc(void *msp, void *ptr, size_t nsize)
{
  if (nsize >= MAX_REQUEST) {
    return NULL;
  } else {
    mstate m = (mstate)msp;
    mchunkptr oldp = mem2chunk(ptr);
    size_t oldsize = chunksize(oldp);
    mchunkptr next = chunk_plus_offset(oldp, oldsize);
    mchunkptr newp = 0;
    size_t nb = request2size(nsize);

    /* Try to either shrink or extend into top. Else malloc-copy-free */
    if (is_direct(oldp)) {
      newp = direct_resize(oldp, nb);  /* this may return NULL. */
    } else if (oldsize >= nb) {  /* already big enough */
      size_t rsize = oldsize - nb;
      newp = oldp;
      if (rsize >= MIN_CHUNK_SIZE) {
	mchunkptr rem = chunk_plus_offset(newp, nb);
	set_inuse(m, newp, nb);
	set_inuse(m, rem, rsize);
	lj_alloc_free(m, chunk2mem(rem));
      }
    } else if (next == m->top && oldsize + m->topsize > nb) {
      /* Expand into top */
      size_t newsize = oldsize + m->topsize;
      size_t newtopsize = newsize - nb;
      mchunkptr newtop = chunk_plus_offset(oldp, nb);
      set_inuse(m, oldp, nb);
      newtop->head = newtopsize|PINUSE_BIT;
      m->top = newtop;
      m->topsize = newtopsize;
      newp = oldp;
    }

    if (newp != 0) {
      return chunk2mem(newp);
    } else {
      void *newmem = lj_alloc_malloc(m, nsize);
      if (newmem != 0) {
	size_t oc = oldsize - overhead_for(oldp);
	memcpy(newmem, ptr, oc < nsize ? oc : nsize);
	lj_alloc_free(m, ptr);
      }
      return newmem;
    }
  }
}
void *lj_alloc_f(void *msp, void *ptr, size_t osize, size_t nsize)
{
  (void)osize;
  if (nsize == 0) {
    return lj_alloc_free(msp, ptr);
  } else if (ptr == NULL) {
    return lj_alloc_malloc(msp, nsize);
  } else {
    return lj_alloc_realloc(msp, ptr, nsize);
  }
}

#endif