/* Alternative malloc implementation for multiple threads without
lock contention based on dlmalloc. (C) 2005-2006 Niall Douglas

Boost Software License - Version 1.0 - August 17th, 2003

Permission is hereby granted, free of charge, to any person or organization
obtaining a copy of the software and accompanying documentation covered by
this license (the "Software") to use, reproduce, display, distribute,
execute, and transmit the Software, and to prepare derivative works of the
Software, and to permit third-parties to whom the Software is furnished to
do so, all subject to the following:

The copyright notices in the Software and this entire statement, including
the above license grant, this restriction and the following disclaimer,
must be included in all copies of the Software, in whole or in part, and
all derivative works of the Software, unless such copies or derivative
works are solely in the form of machine-executable object code generated by
a source language processor.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*/
/* Enable full aliasing on MSVC */
/*#pragma optimize("a", on)*/

/*#define FULLSANITYCHECKS*/

#include "nedmalloc.h"
#define ONLY_MSPACES 1
#define FOOTERS 1		/* Need to enable footers so frees lock the right mspace */
#undef DEBUG			/* dlmalloc wants DEBUG either 0 or 1 */
#ifdef NDEBUG			/* Disable assert checking on release builds */
 #undef DEBUG
#endif
/* The default of 64Kb means we spend too much time kernel-side */
#ifndef DEFAULT_GRANULARITY
#define DEFAULT_GRANULARITY (1*1024*1024)
#endif
/*#define USE_SPIN_LOCKS 0*/

/*#define FORCEINLINE*/
#include "malloc.c.h"		/* the dlmalloc sources, configured by the settings above */
#ifdef NDEBUG			/* Disable assert checking on release builds */
 #undef DEBUG
#endif
/* The maximum concurrent threads in a pool possible */
#ifndef MAXTHREADSINPOOL
#define MAXTHREADSINPOOL 16
#endif
/* The maximum number of threadcaches which can be allocated */
#ifndef THREADCACHEMAXCACHES
#define THREADCACHEMAXCACHES 256
#endif
/* The maximum size to be allocated from the thread cache */
#ifndef THREADCACHEMAX
#define THREADCACHEMAX 8192
#endif
#if 0
/* The number of cache entries for finer grained bins. This is (topbitpos(THREADCACHEMAX)-4)*2 */
#define THREADCACHEMAXBINS ((13-4)*2)
#else
/* The number of cache entries. This is (topbitpos(THREADCACHEMAX)-4) */
#define THREADCACHEMAXBINS (13-4)
#endif
/* Point at which the free space in a thread cache is garbage collected */
#ifndef THREADCACHEMAXFREESPACE
#define THREADCACHEMAXFREESPACE (512*1024)
#endif
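
/* Tuning sketch (illustrative, not part of the build): the knobs above are
   #ifndef-guarded, so they can be overridden at compile time, e.g. something
   like
       cc -DTHREADCACHEMAX=16384 "-DTHREADCACHEMAXFREESPACE=(1024*1024)" ...
   would raise the largest cached block size to 16Kb and the per-thread
   garbage collection threshold to 1Mb. Note that THREADCACHEMAXBINS must be
   kept in sync with topbitpos(THREADCACHEMAX)-4 if THREADCACHEMAX changes. */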
#ifdef WIN32
 #define TLSVAR			DWORD
 #define TLSALLOC(k)	(*(k)=TlsAlloc(), TLS_OUT_OF_INDEXES==*(k))
 #define TLSFREE(k)		(!TlsFree(k))
 #define TLSGET(k)		TlsGetValue(k)
 #define TLSSET(k, a)	(!TlsSetValue(k, a))
 #ifdef DEBUG
static LPVOID ChkedTlsGetValue(DWORD idx)
{
	LPVOID ret=TlsGetValue(idx);
	assert(S_OK==GetLastError());
	return ret;
}
  #undef TLSGET
  #define TLSGET(k) ChkedTlsGetValue(k)
 #endif
#else
 #define TLSVAR			pthread_key_t
 #define TLSALLOC(k)	pthread_key_create(k, 0)
 #define TLSFREE(k)		pthread_key_delete(k)
 #define TLSGET(k)		pthread_getspecific(k)
 #define TLSSET(k, a)	pthread_setspecific(k, a)
#endif
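
/* Usage sketch (illustrative only, excluded from the build): the macros
   above present one portable surface over Win32 TLS and pthreads TLS.
   Every macro except TLSGET returns non-zero on failure; TLSGET returns
   the stored pointer. The key and value here are hypothetical. */
#if 0
static TLSVAR examplekey;
static void tlsexample(void)
{
	if(TLSALLOC(&examplekey)) abort();				/* allocate a key */
	if(TLSSET(examplekey, (void *) 42)) abort();	/* store a per-thread value */
	assert((void *) 42==TLSGET(examplekey));		/* read it back on this thread */
	if(TLSFREE(examplekey)) abort();				/* release the key */
}
#endif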
#if 0
/* Only enable if testing with valgrind. Causes misoperation */
#define mspace_malloc(p, s) malloc(s)
#define mspace_realloc(p, m, s) realloc(m, s)
#define mspace_calloc(p, n, s) calloc(n, s)
#define mspace_free(p, m) free(m)
#endif
#if defined(__cplusplus)
#if !defined(NO_NED_NAMESPACE)
namespace nedalloc {
#else
extern "C" {
#endif
#endif
size_t nedblksize(void *mem) THROWSPEC
{
#if 0
	/* Only enable if testing with valgrind. Causes misoperation */
	return THREADCACHEMAX;
#else
	if(mem)
	{
		mchunkptr p=mem2chunk(mem);
		assert(cinuse(p));	/* If this fails, someone tried to free a block twice */
		return chunksize(p)-overhead_for(p);
	}
	return 0;
#endif
}
void nedsetvalue(void *v) THROWSPEC					{ nedpsetvalue(0, v); }
void * nedmalloc(size_t size) THROWSPEC				{ return nedpmalloc(0, size); }
void * nedcalloc(size_t no, size_t size) THROWSPEC	{ return nedpcalloc(0, no, size); }
void * nedrealloc(void *mem, size_t size) THROWSPEC	{ return nedprealloc(0, mem, size); }
void   nedfree(void *mem) THROWSPEC					{ nedpfree(0, mem); }
void * nedmemalign(size_t alignment, size_t bytes) THROWSPEC { return nedpmemalign(0, alignment, bytes); }
struct mallinfo nedmallinfo(void) THROWSPEC			{ return nedpmallinfo(0); }
int    nedmallopt(int parno, int value) THROWSPEC	{ return nedpmallopt(0, parno, value); }
int    nedmalloc_trim(size_t pad) THROWSPEC			{ return nedpmalloc_trim(0, pad); }
void   nedmalloc_stats(void) THROWSPEC				{ nedpmalloc_stats(0); }
size_t nedmalloc_footprint(void) THROWSPEC			{ return nedpmalloc_footprint(0); }
void **nedindependent_calloc(size_t elemsno, size_t elemsize, void **chunks) THROWSPEC	{ return nedpindependent_calloc(0, elemsno, elemsize, chunks); }
void **nedindependent_comalloc(size_t elems, size_t *sizes, void **chunks) THROWSPEC	{ return nedpindependent_comalloc(0, elems, sizes, chunks); }
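
/* Usage sketch (illustrative only, excluded from the build): the wrappers
   above route everything through the system pool, so they act as drop-in
   replacements for the libc allocation calls. Blocks must be returned via
   nedfree(), never libc free(). */
#if 0
static void apiexample(void)
{
	void *mem=nedmalloc(100);		/* served from this thread's cache or mspace */
	mem=nedrealloc(mem, 200);
	nedfree(mem);
	printf("footprint=%lu\n", (unsigned long) nedmalloc_footprint());
}
#endif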
struct threadcacheblk_t;
typedef struct threadcacheblk_t threadcacheblk;
struct threadcacheblk_t
{	/* Keep less than 16 bytes on 32 bit systems and 32 bytes on 64 bit systems */
#ifdef FULLSANITYCHECKS
	unsigned int magic;
#endif
	unsigned int lastUsed, size;
	threadcacheblk *next, *prev;
};
typedef struct threadcache_t
{
#ifdef FULLSANITYCHECKS
	unsigned int magic1;
#endif
	int mymspace;						/* Last mspace entry this thread used */
	long threadid;
	unsigned int mallocs, frees, successes;
	size_t freeInCache;					/* How much free space is stored in this cache */
	threadcacheblk *bins[(THREADCACHEMAXBINS+1)*2];
#ifdef FULLSANITYCHECKS
	unsigned int magic2;
#endif
} threadcache;
struct nedpool_t
{
	MLOCK_T mutex;
	void *uservalue;
	int threads;						/* Max entries in m to use */
	threadcache *caches[THREADCACHEMAXCACHES];
	TLSVAR mycache;						/* Thread cache for this thread. 0 for unset, negative for use mspace-1 directly, otherwise is cache-1 */
	mstate m[MAXTHREADSINPOOL+1];		/* mspace entries for this pool */
};
static nedpool syspool;
static FORCEINLINE unsigned int size2binidx(size_t _size) THROWSPEC
{	/* 8=1000	16=10000	20=10100	24=11000	32=100000	48=110000	4096=1000000000000 */
	unsigned int topbit, size=(unsigned int)(_size>>4);
	/* 16=1		20=1	24=1	32=10	48=11	64=100	96=110	128=1000	4096=100000000 */

#if defined(__GNUC__)
	topbit = sizeof(size)*__CHAR_BIT__ - 1 - __builtin_clz(size);
#elif defined(_MSC_VER) && _MSC_VER>=1300
	{
		unsigned long bsrTopBit;
		_BitScanReverse(&bsrTopBit, size);
		topbit = bsrTopBit;
	}
#else
#if 0
	union {
		unsigned asInt[2];
		double asDouble;
	};

	asDouble = (double)size + 0.5;
	topbit = (asInt[!FOX_BIGENDIAN] >> 20) - 1023;
#else
	{	/* Branch-free fallback: smear the top bit downwards, then find its
		position via a parallel popcount of the inverted result */
		unsigned int x=size;
		x = x | (x >> 1);
		x = x | (x >> 2);
		x = x | (x >> 4);
		x = x | (x >> 8);
		x = x | (x >>16);
		x = ~x;
		x = x - ((x >> 1) & 0x55555555);
		x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
		x = (x + (x >> 4)) & 0x0F0F0F0F;
		x = x + (x << 8);
		x = x + (x << 16);
		topbit = 31 - (x >> 24);
	}
#endif
#endif
	return topbit;
}
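
/* Worked example (illustrative only, excluded from the build): sizes are
   scaled down by 16 and the bin index is the resulting top bit position,
   so 16..31 -> 0, 32..63 -> 1, 256 -> 4 and 8192 -> 9, which is why
   THREADCACHEMAXBINS is topbitpos(THREADCACHEMAX)-4. */
#if 0
static void binidxexample(void)
{
	assert(0==size2binidx(16));
	assert(1==size2binidx(32));
	assert(4==size2binidx(256));
	assert(THREADCACHEMAXBINS==size2binidx(THREADCACHEMAX));
}
#endif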
#ifdef FULLSANITYCHECKS
static void tcsanitycheck(threadcacheblk **ptr) THROWSPEC
{
	assert((ptr[0] && ptr[1]) || (!ptr[0] && !ptr[1]));
	if(ptr[0] && ptr[1])
	{
		assert(nedblksize(ptr[0])>=sizeof(threadcacheblk));
		assert(nedblksize(ptr[1])>=sizeof(threadcacheblk));
		assert(*(unsigned int *) "NEDN"==ptr[0]->magic);
		assert(*(unsigned int *) "NEDN"==ptr[1]->magic);
		assert(!ptr[0]->prev);
		assert(!ptr[1]->next);
		if(ptr[0]==ptr[1])
		{
			assert(!ptr[0]->next);
			assert(!ptr[1]->prev);
		}
	}
}
static void tcfullsanitycheck(threadcache *tc) THROWSPEC
{
	threadcacheblk **tcbptr=tc->bins;
	int n;
	for(n=0; n<=THREADCACHEMAXBINS; n++, tcbptr+=2)
	{
		threadcacheblk *b, *ob=0;
		tcsanitycheck(tcbptr);
		for(b=tcbptr[0]; b; ob=b, b=b->next)
		{
			assert(*(unsigned int *) "NEDN"==b->magic);
			assert(!ob || ob->next==b);
			assert(!ob || b->prev==ob);
		}
	}
}
#endif
static NOINLINE void RemoveCacheEntries(nedpool *p, threadcache *tc, unsigned int age) THROWSPEC
{
#ifdef FULLSANITYCHECKS
	tcfullsanitycheck(tc);
#endif
	if(tc->freeInCache)
	{
		threadcacheblk **tcbptr=tc->bins;
		int n;
		for(n=0; n<=THREADCACHEMAXBINS; n++, tcbptr+=2)
		{
			threadcacheblk **tcb=tcbptr+1;		/* come from oldest end of list */
			/*tcsanitycheck(tcbptr);*/
			for(; *tcb && tc->frees-(*tcb)->lastUsed>=age; )
			{
				threadcacheblk *f=*tcb;
				size_t blksize=f->size;		/*nedblksize(f);*/
				assert(blksize<=nedblksize(f));
#ifdef FULLSANITYCHECKS
				assert(*(unsigned int *) "NEDN"==(*tcb)->magic);
#endif
				*tcb=(*tcb)->prev;
				if(*tcb)
					(*tcb)->next=0;
				else
					tcbptr[0]=0;
				tc->freeInCache-=blksize;
				assert((long) tc->freeInCache>=0);
				mspace_free(0, f);
				/*tcsanitycheck(tcbptr);*/
			}
		}
	}
#ifdef FULLSANITYCHECKS
	tcfullsanitycheck(tc);
#endif
}
static void DestroyCaches(nedpool *p) THROWSPEC
{
	if(p->caches)
	{
		threadcache *tc;
		int n;
		for(n=0; n<THREADCACHEMAXCACHES; n++)
		{
			if((tc=p->caches[n]))
			{
				tc->frees++;
				RemoveCacheEntries(p, tc, 0);
				assert(!tc->freeInCache);
				tc->mymspace=-1;
				tc->threadid=0;
				mspace_free(0, tc);
				p->caches[n]=0;
			}
		}
	}
}
static NOINLINE threadcache *AllocCache(nedpool *p) THROWSPEC
{
	threadcache *tc=0;
	int n, end;
	ACQUIRE_LOCK(&p->mutex);
	for(n=0; n<THREADCACHEMAXCACHES && p->caches[n]; n++);
	if(THREADCACHEMAXCACHES==n)
	{	/* List exhausted, so disable for this thread */
		RELEASE_LOCK(&p->mutex);
		return 0;
	}
	tc=p->caches[n]=(threadcache *) mspace_calloc(p->m[0], 1, sizeof(threadcache));
	if(!tc)
	{
		RELEASE_LOCK(&p->mutex);
		return 0;
	}
#ifdef FULLSANITYCHECKS
	tc->magic1=*(unsigned int *)"NEDMALC1";
	tc->magic2=*(unsigned int *)"NEDMALC2";
#endif
	tc->threadid=(long)(size_t)CURRENT_THREAD;
	for(end=0; p->m[end]; end++);
	tc->mymspace=tc->threadid % end;
	RELEASE_LOCK(&p->mutex);
	if(TLSSET(p->mycache, (void *)(size_t)(n+1))) abort();
	return tc;
}
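
/* Note: "tc->threadid % end" above hashes the thread id across the mspaces
   created so far, so new threads are spread over the available mspaces
   rather than all piling onto mspace zero. */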
static void *threadcache_malloc(nedpool *p, threadcache *tc, size_t *size) THROWSPEC
{
	void *ret=0;
	unsigned int bestsize;
	unsigned int idx=size2binidx(*size);
	size_t blksize=0;
	threadcacheblk *blk, **binsptr;
#ifdef FULLSANITYCHECKS
	tcfullsanitycheck(tc);
#endif
	/* Calculate best fit bin size */
	bestsize=1<<(idx+4);
#if 0
	/* Finer grained bin fit */
	idx<<=1;
	if(*size>bestsize)
	{
		idx++;
		bestsize+=bestsize>>1;
	}
	if(*size>bestsize)
	{
		idx++;
		bestsize=1<<(4+(idx>>1));
	}
#else
	if(*size>bestsize)
	{
		idx++;
		bestsize<<=1;
	}
#endif
	assert(bestsize>=*size);
	if(*size<bestsize) *size=bestsize;
	assert(*size<=THREADCACHEMAX);
	assert(idx<=THREADCACHEMAXBINS);
	binsptr=&tc->bins[idx*2];
	/* Try to match close, but move up a bin if necessary */
	blk=*binsptr;
	if(!blk || blk->size<*size)
	{	/* Bump it up a bin */
		if(idx<THREADCACHEMAXBINS)
		{
			idx++;
			binsptr+=2;
			blk=*binsptr;
		}
	}
	if(blk)
	{
		blksize=blk->size;		/*nedblksize(blk);*/
		assert(nedblksize(blk)>=blksize);
		assert(blksize>=*size);
		if(blk->next)
			blk->next->prev=0;
		*binsptr=blk->next;
		if(!*binsptr)
			binsptr[1]=0;
#ifdef FULLSANITYCHECKS
		blk->magic=0;
#endif
		assert(binsptr[0]!=blk && binsptr[1]!=blk);
		assert(nedblksize(blk)>=sizeof(threadcacheblk) && nedblksize(blk)<=THREADCACHEMAX+CHUNK_OVERHEAD);
		/*printf("malloc: %p, %p, %p, %lu\n", p, tc, blk, (long) size);*/
		ret=(void *) blk;
	}
	++tc->mallocs;
	if(ret)
	{
		assert(blksize>=*size);
		++tc->successes;
		tc->freeInCache-=blksize;
		assert((long) tc->freeInCache>=0);
	}
#if defined(DEBUG) && 0
	if(!(tc->mallocs & 0xfff))
	{
		printf("*** threadcache=%u, mallocs=%u (%f), free=%u (%f), freeInCache=%u\n", (unsigned int) tc->threadid, tc->mallocs,
			(float) tc->successes/tc->mallocs, tc->frees, (float) tc->successes/tc->frees, (unsigned int) tc->freeInCache);
	}
#endif
#ifdef FULLSANITYCHECKS
	tcfullsanitycheck(tc);
#endif
	return ret;
}
static NOINLINE void ReleaseFreeInCache(nedpool *p, threadcache *tc, int mymspace) THROWSPEC
{
	unsigned int age=THREADCACHEMAXFREESPACE/8192;
	/*ACQUIRE_LOCK(&p->m[mymspace]->mutex);*/
	while(age && tc->freeInCache>=THREADCACHEMAXFREESPACE)
	{
		RemoveCacheEntries(p, tc, age);
		/*printf("*** Removing cache entries older than %u (%u)\n", age, (unsigned int) tc->freeInCache);*/
		age>>=1;
	}
	/*RELEASE_LOCK(&p->m[mymspace]->mutex);*/
}
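
/* Note: the loop above halves the age cutoff on every pass, so eviction
   starts with the longest-unused cached blocks and reaches progressively
   younger ones only while the cache still exceeds THREADCACHEMAXFREESPACE. */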
static void threadcache_free(nedpool *p, threadcache *tc, int mymspace, void *mem, size_t size) THROWSPEC
{
	unsigned int bestsize;
	unsigned int idx=size2binidx(size);
	threadcacheblk **binsptr, *tck=(threadcacheblk *) mem;
	assert(size>=sizeof(threadcacheblk) && size<=THREADCACHEMAX+CHUNK_OVERHEAD);
#ifdef DEBUG
	{	/* Make sure this is a valid memory block */
		mchunkptr p  = mem2chunk(mem);
		mstate fm = get_mstate_for(p);
		if(!ok_magic(fm))
		{
			USAGE_ERROR_ACTION(fm, p);
			return;
		}
	}
#endif
#ifdef FULLSANITYCHECKS
	tcfullsanitycheck(tc);
#endif
	/* Calculate best fit bin size */
	bestsize=1<<(idx+4);
#if 0
	/* Finer grained bin fit */
	idx<<=1;
	if(size>bestsize)
	{
		unsigned int biggerbestsize=bestsize+(bestsize>>1);	/* 1.5x bestsize, matching the malloc path */
		if(size>=biggerbestsize)
		{
			idx++;
			bestsize=biggerbestsize;
		}
	}
#endif
	if(bestsize!=size)	/* dlmalloc can round up, so we round down to preserve indexing */
		size=bestsize;
	binsptr=&tc->bins[idx*2];
	assert(idx<=THREADCACHEMAXBINS);
	if(tck==*binsptr)
	{
		fprintf(stderr, "Attempt to free already freed memory block %p - aborting!\n", tck);
		abort();
	}
#ifdef FULLSANITYCHECKS
	tck->magic=*(unsigned int *) "NEDN";
#endif
	tck->lastUsed=++tc->frees;
	tck->size=(unsigned int) size;
	tck->next=*binsptr;
	tck->prev=0;
	if(tck->next)
		tck->next->prev=tck;
	else
		binsptr[1]=tck;
	assert(!*binsptr || (*binsptr)->size==tck->size);
	*binsptr=tck;
	assert(tck==tc->bins[idx*2]);
	assert(tc->bins[idx*2+1]==tck || binsptr[0]->next->prev==tck);
	/*printf("free: %p, %p, %p, %lu\n", p, tc, mem, (long) size);*/
	tc->freeInCache+=size;
#ifdef FULLSANITYCHECKS
	tcfullsanitycheck(tc);
#endif
	if(tc->freeInCache>=THREADCACHEMAXFREESPACE)
		ReleaseFreeInCache(p, tc, mymspace);
}
static NOINLINE int InitPool(nedpool *p, size_t capacity, int threads) THROWSPEC
{	/* threads is -1 for system pool */
	ensure_initialization();
	ACQUIRE_MALLOC_GLOBAL_LOCK();
	if(p->threads) goto done;
	if(INITIAL_LOCK(&p->mutex)) goto err;
	if(TLSALLOC(&p->mycache)) goto err;
	if(!(p->m[0]=(mstate) create_mspace(capacity, 1))) goto err;
	p->m[0]->extp=p;
	p->threads=(threads<1 || threads>MAXTHREADSINPOOL) ? MAXTHREADSINPOOL : threads;
done:
	RELEASE_MALLOC_GLOBAL_LOCK();
	return 1;
err:
	if(threads<0)
		abort();		/* If you can't allocate for system pool, we're screwed */
	DestroyCaches(p);
	if(p->m[0])
	{
		destroy_mspace(p->m[0]);
		p->m[0]=0;
	}
	if(p->mycache)
	{
		if(TLSFREE(p->mycache)) abort();
		p->mycache=0;
	}
	RELEASE_MALLOC_GLOBAL_LOCK();
	return 0;
}
static NOINLINE mstate FindMSpace(nedpool *p, threadcache *tc, int *lastUsed, size_t size) THROWSPEC
{	/* Gets called when thread's last used mspace is in use. The strategy
	is to run through the list of all available mspaces looking for an
	unlocked one and if we fail, we create a new one so long as we don't
	exceed p->threads */
	int n, end;
	for(n=end=*lastUsed+1; p->m[n]; end=++n)
	{
		if(TRY_LOCK(&p->m[n]->mutex)) goto found;
	}
	for(n=0; n<*lastUsed && p->m[n]; n++)
	{
		if(TRY_LOCK(&p->m[n]->mutex)) goto found;
	}
	if(end<p->threads)
	{
		mstate temp;
		if(!(temp=(mstate) create_mspace(size, 1)))
			goto badexit;
		/* Now we're ready to modify the lists, we lock */
		ACQUIRE_LOCK(&p->mutex);
		while(p->m[end] && end<p->threads)
			end++;
		if(end>=p->threads)
		{	/* Drat, must destroy it now */
			RELEASE_LOCK(&p->mutex);
			destroy_mspace((mspace) temp);
			goto badexit;
		}
		/* We really want to make sure this goes into memory now but we
		have to be careful of breaking aliasing rules, so write it twice */
		{
			volatile struct malloc_state **_m=(volatile struct malloc_state **) &p->m[end];
			*_m=(p->m[end]=temp);
		}
		ACQUIRE_LOCK(&p->m[end]->mutex);
		/*printf("Created mspace idx %d\n", end);*/
		RELEASE_LOCK(&p->mutex);
		n=end;
		goto found;
	}
badexit:
	/* Let it lock on the last one it used */
	ACQUIRE_LOCK(&p->m[*lastUsed]->mutex);
	return p->m[*lastUsed];
found:
	*lastUsed=n;
	if(tc)
		tc->mymspace=n;
	else
	{
		if(TLSSET(p->mycache, (void *)(size_t)(-(n+1)))) abort();
	}
	return p->m[n];
}
nedpool *nedcreatepool(size_t capacity, int threads) THROWSPEC
{
	nedpool *ret;
	if(!(ret=(nedpool *) nedpcalloc(0, 1, sizeof(nedpool)))) return 0;
	if(!InitPool(ret, capacity, threads))
	{
		nedpfree(0, ret);
		return 0;
	}
	return ret;
}
void neddestroypool(nedpool *p) THROWSPEC
{
	int n;
	ACQUIRE_LOCK(&p->mutex);
	DestroyCaches(p);
	for(n=0; p->m[n]; n++)
	{
		destroy_mspace(p->m[n]);
		p->m[n]=0;
	}
	RELEASE_LOCK(&p->mutex);
	if(TLSFREE(p->mycache)) abort();
	nedpfree(0, p);
}
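
/* Usage sketch (illustrative only, excluded from the build): a private
   pool with hypothetical parameters; capacity is only an initial reserve
   and threads caps how many mspaces the pool may grow. */
#if 0
static void poolexample(void)
{
	nedpool *pool=nedcreatepool(1024*1024, 2);	/* ~1Mb initial, at most 2 mspaces */
	void *mem=nedpmalloc(pool, 256);
	nedpfree(pool, mem);
	neddestroypool(pool);		/* tears down all mspaces and thread caches */
}
#endif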
void nedpsetvalue(nedpool *p, void *v) THROWSPEC
{
	if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); }
	p->uservalue=v;
}
void *nedgetvalue(nedpool **p, void *mem) THROWSPEC
{
	nedpool *np=0;
	mchunkptr mcp=mem2chunk(mem);
	mstate fm;
	if(!(is_aligned(chunk2mem(mcp))) && mcp->head != FENCEPOST_HEAD) return 0;
	if(!cinuse(mcp)) return 0;
	if(!next_pinuse(mcp)) return 0;
	if(!is_mmapped(mcp) && !pinuse(mcp))
	{
		if(next_chunk(prev_chunk(mcp))!=mcp) return 0;
	}
	fm=get_mstate_for(mcp);
	if(!ok_magic(fm)) return 0;
	if(!ok_address(fm, mcp)) return 0;
	if(!fm->extp) return 0;
	np=(nedpool *) fm->extp;
	if(p) *p=np;
	return np->uservalue;
}
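
/* Usage sketch (illustrative only, excluded from the build): nedpsetvalue
   tags a pool with an arbitrary pointer and nedgetvalue recovers both the
   tag and the owning pool from any block allocated out of it, which is
   handy for routing deallocations. The tag here is hypothetical. */
#if 0
static void valueexample(nedpool *pool)
{
	static int tag;
	nedpool *owner=0;
	void *mem;
	nedpsetvalue(pool, &tag);
	mem=nedpmalloc(pool, 64);
	assert(&tag==nedgetvalue(&owner, mem));
	assert(pool==owner);
	nedpfree(pool, mem);
}
#endif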
void neddisablethreadcache(nedpool *p) THROWSPEC
{
	int mycache;
	if(!p)
	{
		p=&syspool;
		if(!syspool.threads) InitPool(&syspool, 0, -1);
	}
	mycache=(int)(size_t) TLSGET(p->mycache);
	if(!mycache)
	{	/* Set to mspace 0 */
		if(TLSSET(p->mycache, (void *)-1)) abort();
	}
	else if(mycache>0)
	{	/* Set to last used mspace */
		threadcache *tc=p->caches[mycache-1];
#if defined(DEBUG)
		printf("Threadcache utilisation: %lf%% in cache with %lf%% lost to other threads\n",
			100.0*tc->successes/tc->mallocs, 100.0*((double) tc->mallocs-tc->frees)/tc->mallocs);
#endif
		if(TLSSET(p->mycache, (void *)(size_t)(-tc->mymspace-1))) abort();	/* offset by -1 so mspace 0 is not confused with "unset" */
		tc->frees++;
		RemoveCacheEntries(p, tc, 0);
		assert(!tc->freeInCache);
		tc->mymspace=-1;
		tc->threadid=0;
		mspace_free(0, p->caches[mycache-1]);
		p->caches[mycache-1]=0;
	}
}
#define GETMSPACE(m,p,tc,ms,s,action)            \
	do                                           \
	{                                            \
		mstate m = GetMSpace((p),(tc),(ms),(s)); \
		action;                                  \
		RELEASE_LOCK(&m->mutex);                 \
	} while (0)

static FORCEINLINE mstate GetMSpace(nedpool *p, threadcache *tc, int mymspace, size_t size) THROWSPEC
{	/* Returns a locked and ready for use mspace */
	mstate m=p->m[mymspace];
	assert(m);
	if(!TRY_LOCK(&p->m[mymspace]->mutex)) m=FindMSpace(p, tc, &mymspace, size);
	/*assert(IS_LOCKED(&p->m[mymspace]->mutex));*/
	return m;
}
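
/* Illustrative expansion: GETMSPACE(m, p, tc, mymspace, size, ret=mspace_malloc(m, size))
   locks the preferred mspace (diverting through FindMSpace() to another, or
   a brand new, mspace if it is contended), runs the action with m bound to
   the locked mspace, then releases the lock. */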
static FORCEINLINE void GetThreadCache(nedpool **p, threadcache **tc, int *mymspace, size_t *size) THROWSPEC
{
	int mycache;
	if(size && *size<sizeof(threadcacheblk)) *size=sizeof(threadcacheblk);
	if(!*p)
	{
		*p=&syspool;
		if(!syspool.threads) InitPool(&syspool, 0, -1);
	}
	mycache=(int)(size_t) TLSGET((*p)->mycache);
	if(mycache>0)
	{
		*tc=(*p)->caches[mycache-1];
		*mymspace=(*tc)->mymspace;
	}
	else if(!mycache)
	{
		*tc=AllocCache(*p);
		if(!*tc)
		{	/* Disable */
			if(TLSSET((*p)->mycache, (void *)-1)) abort();
			*mymspace=0;
		}
		else
			*mymspace=(*tc)->mymspace;
	}
	else
	{
		*tc=0;
		*mymspace=-mycache-1;
	}
	assert(*mymspace>=0);
	assert(!(*tc) || (long)(size_t)CURRENT_THREAD==(*tc)->threadid);
#ifdef FULLSANITYCHECKS
	if(*tc)
	{
		if(*(unsigned int *)"NEDMALC1"!=(*tc)->magic1 || *(unsigned int *)"NEDMALC2"!=(*tc)->magic2)
		{
			abort();
		}
	}
#endif
}
void * nedpmalloc(nedpool *p, size_t size) THROWSPEC
{
	void *ret=0;
	threadcache *tc;
	int mymspace;
	GetThreadCache(&p, &tc, &mymspace, &size);
#if THREADCACHEMAX
	if(tc && size<=THREADCACHEMAX)
	{	/* Use the thread cache */
		ret=threadcache_malloc(p, tc, &size);
	}
#endif
	if(!ret)
	{	/* Use this thread's mspace */
		GETMSPACE(m, p, tc, mymspace, size,
			ret=mspace_malloc(m, size));
	}
	return ret;
}
void * nedpcalloc(nedpool *p, size_t no, size_t size) THROWSPEC
{
	size_t rsize=size*no;
	void *ret=0;
	threadcache *tc;
	int mymspace;
	GetThreadCache(&p, &tc, &mymspace, &rsize);
#if THREADCACHEMAX
	if(tc && rsize<=THREADCACHEMAX)
	{	/* Use the thread cache */
		if((ret=threadcache_malloc(p, tc, &rsize)))
			memset(ret, 0, rsize);
	}
#endif
	if(!ret)
	{	/* Use this thread's mspace */
		GETMSPACE(m, p, tc, mymspace, rsize,
			ret=mspace_calloc(m, 1, rsize));
	}
	return ret;
}
void * nedprealloc(nedpool *p, void *mem, size_t size) THROWSPEC
{
	void *ret=0;
	threadcache *tc;
	int mymspace;
	if(!mem) return nedpmalloc(p, size);
	GetThreadCache(&p, &tc, &mymspace, &size);
#if THREADCACHEMAX
	if(tc && size && size<=THREADCACHEMAX)
	{	/* Use the thread cache */
		size_t memsize=nedblksize(mem);
		assert(memsize);
		if((ret=threadcache_malloc(p, tc, &size)))
		{	/* Copy, then free the old block into the cache or its mspace */
			memcpy(ret, mem, memsize<size ? memsize : size);
			if(memsize<=THREADCACHEMAX)
				threadcache_free(p, tc, mymspace, mem, memsize);
			else
				mspace_free(0, mem);
		}
	}
#endif
	if(!ret)
	{	/* Reallocs always happen in the mspace they happened in, so skip
		locking the preferred mspace for this thread */
		ret=mspace_realloc(0, mem, size);
	}
	return ret;
}
void   nedpfree(nedpool *p, void *mem) THROWSPEC
{	/* Frees always happen in the mspace they happened in, so skip
	locking the preferred mspace for this thread */
	threadcache *tc;
	int mymspace;
	size_t memsize;
	assert(mem);
	GetThreadCache(&p, &tc, &mymspace, 0);
#if THREADCACHEMAX
	memsize=nedblksize(mem);
	assert(memsize);
	if(mem && tc && memsize<=(THREADCACHEMAX+CHUNK_OVERHEAD))
		threadcache_free(p, tc, mymspace, mem, memsize);
	else
#endif
		mspace_free(0, mem);
}
void * nedpmemalign(nedpool *p, size_t alignment, size_t bytes) THROWSPEC
{
	void *ret;
	threadcache *tc;
	int mymspace;
	GetThreadCache(&p, &tc, &mymspace, &bytes);
	{	/* Use this thread's mspace */
		GETMSPACE(m, p, tc, mymspace, bytes,
			ret=mspace_memalign(m, alignment, bytes));
	}
	return ret;
}
struct mallinfo nedpmallinfo(nedpool *p) THROWSPEC
{
	int n;
	struct mallinfo ret={0};
	if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); }
	for(n=0; p->m[n]; n++)
	{	/* Sum the statistics over every mspace in the pool */
		struct mallinfo t=mspace_mallinfo(p->m[n]);
		ret.arena+=t.arena;
		ret.ordblks+=t.ordblks;
		ret.hblkhd+=t.hblkhd;
		ret.usmblks+=t.usmblks;
		ret.uordblks+=t.uordblks;
		ret.fordblks+=t.fordblks;
		ret.keepcost+=t.keepcost;
	}
	return ret;
}
int    nedpmallopt(nedpool *p, int parno, int value) THROWSPEC
{
	return mspace_mallopt(parno, value);
}
int    nedpmalloc_trim(nedpool *p, size_t pad) THROWSPEC
{
	int n, ret=0;
	if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); }
	for(n=0; p->m[n]; n++)
	{
		ret+=mspace_trim(p->m[n], pad);
	}
	return ret;
}
void   nedpmalloc_stats(nedpool *p) THROWSPEC
{
	int n;
	if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); }
	for(n=0; p->m[n]; n++)
	{
		mspace_malloc_stats(p->m[n]);
	}
}
size_t nedpmalloc_footprint(nedpool *p) THROWSPEC
{
	size_t ret=0;
	int n;
	if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); }
	for(n=0; p->m[n]; n++)
	{
		ret+=mspace_footprint(p->m[n]);
	}
	return ret;
}
void **nedpindependent_calloc(nedpool *p, size_t elemsno, size_t elemsize, void **chunks) THROWSPEC
{
	void **ret;
	threadcache *tc;
	int mymspace;
	GetThreadCache(&p, &tc, &mymspace, &elemsize);
	GETMSPACE(m, p, tc, mymspace, elemsno*elemsize,
		ret=mspace_independent_calloc(m, elemsno, elemsize, chunks));
	return ret;
}
void **nedpindependent_comalloc(nedpool *p, size_t elems, size_t *sizes, void **chunks) THROWSPEC
{
	void **ret;
	threadcache *tc;
	int mymspace;
	size_t i, *adjustedsizes=(size_t *) alloca(elems*sizeof(size_t));
	if(!adjustedsizes) return 0;
	for(i=0; i<elems; i++)
		adjustedsizes[i]=sizes[i]<sizeof(threadcacheblk) ? sizeof(threadcacheblk) : sizes[i];
	GetThreadCache(&p, &tc, &mymspace, 0);
	GETMSPACE(m, p, tc, mymspace, 0,
		ret=mspace_independent_comalloc(m, elems, adjustedsizes, chunks));
	return ret;
}
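
/* Usage sketch (illustrative only, excluded from the build): comalloc
   carves several differently sized, separately freeable blocks out of one
   underlying allocation, which suits node-based structures built in one
   go. The sizes here are hypothetical. */
#if 0
static void comallocexample(nedpool *pool)
{
	size_t sizes[3]={24, 56, 88};
	void *chunks[3];
	if(nedpindependent_comalloc(pool, 3, sizes, chunks))
	{	/* each chunks[n] may be freed independently */
		nedpfree(pool, chunks[0]);
		nedpfree(pool, chunks[1]);
		nedpfree(pool, chunks[2]);
	}
}
#endif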
951 #ifdef OVERRIDE_STRDUP
953 * This implementation is purely there to override the libc version, to
954 * avoid a crash due to allocation and free on different 'heaps'.
956 char *strdup(const char *s1
)
960 s2
= malloc(strlen(s1
) + 1);
967 #if defined(__cplusplus)