/*
This version of ptmalloc3 is hacked in the following ways:
- Renamed to file memory-gnu.c
- Use mm_* routine names, as defined below.
- Add UPDATE_MEMUSAGE
- Add definitions for ONLY_MSPACES, MSPACES, USE_LOCKS
- Rename malloc.c to memory-gnu-internal.c and include here
- Merge thread files to generate memory-gnu-threads.h
*/

#define malloc mm_malloc
#define free mm_free
#define calloc mm_calloc
#define cfree mm_cfree
#define realloc mm_realloc
#define memalign mm_memalign
#define valloc mm_valloc
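/* Note: with the renames above, the public allocator entry points defined in
   this file compile as mm_malloc(), mm_free(), and so on; the matching
   #undef lines at the very end of the file restore the standard names.
   (It is assumed here that Charm's converse memory layer dispatches to these
   mm_* wrappers.) */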
extern CMK_TYPEDEF_UINT8 _memory_allocated;
extern CMK_TYPEDEF_UINT8 _memory_allocated_max;
extern CMK_TYPEDEF_UINT8 _memory_allocated_min;

#define UPDATE_MEMUSAGE \
  if(_memory_allocated > _memory_allocated_max) \
    _memory_allocated_max=_memory_allocated; \
  if(_memory_allocated < _memory_allocated_min) \
    _memory_allocated_min=_memory_allocated;
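/* Illustrative usage of the counters above (a condensed sketch of the
   pattern used by the public wrappers later in this file, not additional
   functionality):

     victim = mspace_malloc(msp, bytes);
     if (victim != NULL) {
       _memory_allocated += chunksize(mem2chunk(victim));
     }
     UPDATE_MEMUSAGE

   realloc additionally subtracts chunksize() of the old chunk first, so
   _memory_allocated tracks the net number of bytes currently handed out,
   while UPDATE_MEMUSAGE maintains the high- and low-water marks. */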
#define ONLY_MSPACES 1
#define MSPACES 1
#define USE_LOCKS 0
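/* These three macros configure the dlmalloc code pulled in below from
   memory-gnu-internal.c: ONLY_MSPACES/MSPACES expose only the mspace_*
   entry points (no global malloc state), and USE_LOCKS 0 disables
   dlmalloc's own internal locking, since each arena defined in this file
   carries its own mutex. */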
/*
 * $Id: ptmalloc3.c,v 1.8 2006/03/31 15:57:28 wg Exp $
 *

 ptmalloc3 -- wrapper for Doug Lea's malloc-2.8.3 with concurrent
              allocations

 Copyright (c) 2005, 2006 Wolfram Gloger <ptmalloc@malloc.de>

 Permission to use, copy, modify, distribute, and sell this software
 and its documentation for any purpose is hereby granted without fee,
 provided that (i) the above copyright notices and this permission
 notice appear in all copies of the software and related documentation,
 and (ii) the name of Wolfram Gloger may not be used in any advertising
 or publicity relating to the software.

 THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND,
 EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY
 WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.

 IN NO EVENT SHALL WOLFRAM GLOGER BE LIABLE FOR ANY SPECIAL,
 INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, OR ANY
 DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
 WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY
 OF LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 PERFORMANCE OF THIS SOFTWARE.
*/

/*
 * TODO: optimization / better integration with malloc.c (partly done)
 *       malloc_{get,set}_state (probably hard to keep compatibility)
 *       debugging hooks
 *       better mstats
 */
#include <sys/types.h>  /* For size_t */
#include <sys/mman.h>   /* for mmap */
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>     /* for memset */
#include "converse.h"

#include "memory-gnu-threads.h"

/* ----------------------------------------------------------------------- */

/* The following section is replicated from malloc.c */

#include "memory-gnu-internal.c"
/* head field is or'ed with NON_MAIN_ARENA if the chunk was obtained
   from a non-main arena.  This is only set immediately before handing
   the chunk to the user, if necessary. */
#define NON_MAIN_ARENA (SIZE_T_FOUR)

/* end of definitions replicated from malloc.c */

#define munmap_chunk(mst, p) do {                           \
  size_t prevsize = (p)->prev_foot & ~IS_MMAPPED_BIT;       \
  size_t psize = chunksize(p) + prevsize + MMAP_FOOT_PAD;   \
  if (CALL_MUNMAP((char*)(p) - prevsize, psize) == 0)       \
    ((struct malloc_state*)(mst))->footprint -= psize;      \
} while (0)
/* ---------------------------------------------------------------------- */

/* Minimum size for a newly created arena. */
#ifndef ARENA_SIZE_MIN
# define ARENA_SIZE_MIN (128*1024)
#endif
#define HAVE_MEMCPY 1

/* If THREAD_STATS is non-zero, some statistics on mutex locking are
   computed. */
#ifndef THREAD_STATS
# define THREAD_STATS 0
#endif

#ifndef MALLOC_DEBUG
# define MALLOC_DEBUG 0
#endif

#define my_powerof2(x) ((((x)-1)&(x))==0)

/* Already initialized? */
int __malloc_initialized = -1;

#ifndef RETURN_ADDRESS
# define RETURN_ADDRESS(X_) (NULL)
#endif

#if THREAD_STATS
# define THREAD_STAT(x) x
#else
# define THREAD_STAT(x) do ; while(0)
#endif
#ifdef _LIBC

/* Special defines for the GNU C library. */
#define public_cALLOc __libc_calloc
#define public_fREe __libc_free
#define public_cFREe __libc_cfree
#define public_mALLOc __libc_malloc
#define public_mEMALIGn __libc_memalign
#define public_rEALLOc __libc_realloc
#define public_vALLOc __libc_valloc
#define public_pVALLOc __libc_pvalloc
#define public_pMEMALIGn __posix_memalign
#define public_mALLINFo __libc_mallinfo
#define public_mALLOPt __libc_mallopt
#define public_mTRIm __malloc_trim
#define public_mSTATs __malloc_stats
#define public_mUSABLe __malloc_usable_size
#define public_iCALLOc __libc_independent_calloc
#define public_iCOMALLOc __libc_independent_comalloc
#define public_gET_STATe __malloc_get_state
#define public_sET_STATe __malloc_set_state
#define malloc_getpagesize __getpagesize()
#define open __open
#define mmap __mmap
#define munmap __munmap
#define mremap __mremap
#define mprotect __mprotect
#define MORECORE (*__morecore)
#define MORECORE_FAILURE 0

void * __default_morecore (ptrdiff_t);
void *(*__morecore)(ptrdiff_t) = __default_morecore;

#else /* !_LIBC */

#define public_cALLOc calloc
#define public_fREe free
#define public_cFREe cfree
#define public_mALLOc malloc
#define public_mEMALIGn memalign
#define public_rEALLOc realloc
#define public_vALLOc valloc
#define public_pVALLOc pvalloc
#define public_pMEMALIGn posix_memalign
#define public_mALLINFo mallinfo
#define public_mALLOPt mallopt
#define public_mTRIm malloc_trim
#define public_mSTATs malloc_stats
#define public_mUSABLe malloc_usable_size
#define public_iCALLOc independent_calloc
#define public_iCOMALLOc independent_comalloc
#define public_gET_STATe malloc_get_state
#define public_sET_STATe malloc_set_state

#endif /* _LIBC */
/* CHARM++ ADD BEGIN */
/* Forward Declarations */
void* public_mALLOc(size_t bytes);
void public_fREe(void* mem);
void* public_rEALLOc(void* oldmem, size_t bytes);
void* public_mEMALIGn(size_t alignment, size_t bytes);
void* public_vALLOc(size_t bytes);
int public_pMEMALIGn (void **memptr, size_t alignment, size_t size);
void* public_cALLOc(size_t n_elements, size_t elem_size);
void** public_iCALLOc(size_t n, size_t elem_size, void* chunks[]);
void** public_iCOMALLOc(size_t n, size_t sizes[], void* chunks[]);
int public_mTRIm(size_t s);
size_t public_mUSABLe(void* mem);
int public_mALLOPt(int p, int v);
void public_mSTATs(void);
/* CHARM++ ADD END */

#if !defined _LIBC && (!defined __GNUC__ || __GNUC__<3)
#define __builtin_expect(expr, val) (expr)
#endif

#if MALLOC_DEBUG
#include <assert.h>
#else
#undef assert
#define assert(x) ((void)0)
#endif
/* USE_STARTER determines if and when the special "starter" hook
   functions are used: not at all (0), during ptmalloc_init (first bit
   set), or from the beginning until an explicit call to ptmalloc_init
   (second bit set).  This is necessary if thread-related
   initialization functions (e.g. pthread_key_create) require
   malloc() calls (set USE_STARTER=1), or if those functions initially
   cannot be used at all (set USE_STARTER=2 and perform an explicit
   ptmalloc_init() when the thread library is ready, typically at the
   start of main()). */

#ifndef USE_STARTER
# ifndef _LIBC
#  define USE_STARTER 1
# else
#  if USE___THREAD || (defined USE_TLS && !defined SHARED)
    /* These routines are never needed in this configuration. */
#   define USE_STARTER 0
#  else
#   define USE_STARTER (USE_TLS ? 4 : 1)
#  endif
# endif
#endif
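/* In this Charm build _LIBC is not defined, so the branch above leaves
   USE_STARTER at 1: the starter hooks are installed only for the duration
   of ptmalloc_init(), in case thread-library initialization itself calls
   malloc(). */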
/*----------------------------------------------------------------------*/

/* Arenas */
static tsd_key_t arena_key;
static mutex_t list_lock;

/* Arena structure */
struct malloc_arena {
  /* Serialize access. */
  mutex_t mutex;

  /* Statistics for locking.  Only used if THREAD_STATS is defined. */
  long stat_lock_direct, stat_lock_loop, stat_lock_wait;
  long stat_starter;

  /* Linked list */
  struct malloc_arena *next;

  /* Space for mstate.  The size is just the minimum such that
     create_mspace_with_base can be successfully called. */
  char buf_[pad_request(sizeof(struct malloc_state)) + TOP_FOOT_SIZE +
            CHUNK_ALIGN_MASK + 1];
};

#define MSPACE_OFFSET (((offsetof(struct malloc_arena, buf_) \
                         + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK))
#define arena_to_mspace(a) ((void *)chunk2mem((char*)(a) + MSPACE_OFFSET))

/* check for chunk from non-main arena */
#define chunk_non_main_arena(p) ((p)->head & NON_MAIN_ARENA)

static struct malloc_arena* _int_new_arena(size_t size);

/* Buffer for the main arena. */
static struct malloc_arena main_arena;

/* For now, store arena in footer.  This means typically 4 bytes more
   overhead for each non-main-arena chunk, but is fast and easy to
   compute.  Note that the pointer stored in the extra footer must be
   properly aligned, though. */
#define FOOTER_OVERHEAD \
 (2*sizeof(struct malloc_arena*) - SIZE_T_SIZE)

#define arena_for_chunk(ptr) \
 (chunk_non_main_arena(ptr) ? *(struct malloc_arena**) \
  ((char*)(ptr) + chunksize(ptr) - (FOOTER_OVERHEAD - SIZE_T_SIZE)) \
  : &main_arena)

/* special because of extra overhead */
#define arena_for_mmap_chunk(ptr) \
 (chunk_non_main_arena(ptr) ? *(struct malloc_arena**) \
  ((char*)(ptr) + chunksize(ptr) - sizeof(struct malloc_arena*)) \
  : &main_arena)

#define set_non_main_arena(mem, ar_ptr) do {                               \
  mchunkptr P = mem2chunk(mem);                                            \
  size_t SZ = chunksize(P) - (is_mmapped(P) ? sizeof(struct malloc_arena*) \
                              : (FOOTER_OVERHEAD - SIZE_T_SIZE));          \
  assert((unsigned long)((char*)(P) + SZ)%sizeof(struct malloc_arena*) == 0); \
  *(struct malloc_arena**)((char*)(P) + SZ) = (ar_ptr);                    \
  P->head |= NON_MAIN_ARENA;                                               \
} while (0)
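/* Sketch of the resulting layout for a non-main-arena chunk (derived from
   the macros above, shown only as an illustration): the owning arena
   pointer is written into the last properly aligned pointer-sized slot of
   the chunk -- sizeof(struct malloc_arena*) from the end for mmapped
   chunks, FOOTER_OVERHEAD - SIZE_T_SIZE from the end otherwise -- and the
   NON_MAIN_ARENA bit in the head field marks that the footer is present.
   arena_for_chunk()/arena_for_mmap_chunk() read the pointer back from the
   same slot; main-arena chunks carry no footer at all. */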
/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena. */

#define arena_get(ptr, size) do { \
  void *vptr = NULL; \
  ptr = (struct malloc_arena*)tsd_getspecific(arena_key, vptr); \
  if(ptr && !mutex_trylock(&ptr->mutex)) { \
    THREAD_STAT(++(ptr->stat_lock_direct)); \
  } else \
    ptr = arena_get2(ptr, (size)); \
} while(0)
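/* Typical call pattern (a condensed sketch of what every public wrapper
   below does; the names are illustrative only):

     struct malloc_arena* ar_ptr;
     void* victim;
     arena_get(ar_ptr, bytes + FOOTER_OVERHEAD);  -- returns with ar_ptr->mutex held
     if (!ar_ptr)
       return 0;
     victim = mspace_malloc(arena_to_mspace(ar_ptr), bytes);
     if (victim && ar_ptr != &main_arena)
       set_non_main_arena(victim, ar_ptr);
     (void)mutex_unlock(&ar_ptr->mutex);
*/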
static struct malloc_arena*
arena_get2(struct malloc_arena* a_tsd, size_t size)
{
  struct malloc_arena* a;
  int err;

  if(!a_tsd)
    a = a_tsd = &main_arena;
  else {
    a = a_tsd->next;
    if(!a) {
      /* This can only happen while initializing the new arena. */
      (void)mutex_lock(&main_arena.mutex);
      THREAD_STAT(++(main_arena.stat_lock_wait));
      return &main_arena;
    }
  }

  /* Check the global, circularly linked list for available arenas. */
 repeat:
  do {
    if(!mutex_trylock(&a->mutex)) {
      THREAD_STAT(++(a->stat_lock_loop));
      tsd_setspecific(arena_key, (void *)a);
      return a;
    }
    a = a->next;
  } while(a != a_tsd);

  /* If not even the list_lock can be obtained, try again.  This can
     happen during `atfork', or for example on systems where thread
     creation makes it temporarily impossible to obtain _any_
     locks. */
  if(mutex_trylock(&list_lock)) {
    a = a_tsd;
    goto repeat;
  }
  (void)mutex_unlock(&list_lock);

  /* Nothing immediately available, so generate a new arena. */
  a = _int_new_arena(size);
  if(!a)
    return 0;

  tsd_setspecific(arena_key, (void *)a);
  mutex_init(&a->mutex);
  err = mutex_lock(&a->mutex); /* remember result */

  /* Add the new arena to the global list. */
  (void)mutex_lock(&list_lock);
  a->next = main_arena.next;
  atomic_write_barrier ();
  main_arena.next = a;
  (void)mutex_unlock(&list_lock);

  if(err) /* locking failed; keep arena for further attempts later */
    return 0;

  THREAD_STAT(++(a->stat_lock_loop));
  return a;
}
/* Create a new arena with room for a chunk of size "size". */

static struct malloc_arena*
_int_new_arena(size_t size)
{
  struct malloc_arena* a;
  size_t mmap_sz = sizeof(*a) + pad_request(size);
  void *m;

  if (mmap_sz < ARENA_SIZE_MIN)
    mmap_sz = ARENA_SIZE_MIN;
  /* conservative estimate for page size */
  mmap_sz = (mmap_sz + 8191) & ~(size_t)8191;
  a = CALL_MMAP(mmap_sz);
  if ((char*)a == (char*)-1)
    return 0;

  m = create_mspace_with_base((char*)a + MSPACE_OFFSET,
                              mmap_sz - MSPACE_OFFSET,
                              0);

  if (!m) {
    CALL_MUNMAP(a, mmap_sz);
    a = 0;
  } else {
    /*a->next = NULL;*/
    /*a->system_mem = a->max_system_mem = h->size;*/
  }

  return a;
}
/*------------------------------------------------------------------------*/

/* Hook mechanism for proper initialization and atfork support. */

/* Define and initialize the hook variables.  These weak definitions must
   appear before any use of the variables in a function. */
#ifndef weak_variable
#ifndef _LIBC
#define weak_variable /**/
#else
/* In GNU libc we want the hook variables to be weak definitions to
   avoid a problem with Emacs. */
#define weak_variable weak_function
#endif
#endif

#if !(USE_STARTER & 2)
# define free_hook_ini NULL
/* Forward declarations. */
static void* malloc_hook_ini (size_t sz, const void *caller);
static void* realloc_hook_ini (void* ptr, size_t sz, const void* caller);
static void* memalign_hook_ini (size_t alignment, size_t sz,
                                const void* caller);
#else
# define free_hook_ini free_starter
# define malloc_hook_ini malloc_starter
# define realloc_hook_ini NULL
# define memalign_hook_ini memalign_starter
#endif

void weak_variable (*__malloc_initialize_hook) (void) = NULL;
void weak_variable (*__free_hook) (void * __ptr, const void *)
     = free_hook_ini;
void * weak_variable (*__malloc_hook) (size_t __size, const void *)
     = malloc_hook_ini;
void * weak_variable (*__realloc_hook)
     (void * __ptr, size_t __size, const void *) = realloc_hook_ini;
void * weak_variable (*__memalign_hook)
     (size_t __alignment, size_t __size, const void *) = memalign_hook_ini;
/*void weak_variable (*__after_morecore_hook) (void) = NULL;*/
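/* Net effect of the definitions above in this configuration
   (USE_STARTER == 1, so the second bit is clear): before ptmalloc_init()
   has run, the first call to malloc/realloc/memalign goes through the
   *_hook_ini functions below, which clear the hook, run ptmalloc_init(),
   and then retry through the public wrapper; __free_hook starts out NULL. */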
/* The initial hooks just call the initialization routine, then do the
   normal work. */

#if !(USE_STARTER & 2)
static
#endif
void ptmalloc_init(void);

#if !(USE_STARTER & 2)

static void*
malloc_hook_ini(size_t sz, const void * caller)
{
  __malloc_hook = NULL;
  ptmalloc_init();
  return public_mALLOc(sz);
}

static void *
realloc_hook_ini(void *ptr, size_t sz, const void * caller)
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init();
  return public_rEALLOc(ptr, sz);
}

static void*
memalign_hook_ini(size_t alignment, size_t sz, const void * caller)
{
  __memalign_hook = NULL;
  ptmalloc_init();
  return public_mEMALIGn(alignment, sz);
}

#endif /* !(USE_STARTER & 2) */
/*----------------------------------------------------------------------*/

#if !defined NO_THREADS && USE_STARTER

/* The following hooks are used when the global initialization in
   ptmalloc_init() hasn't completed yet. */

static void*
malloc_starter(size_t sz, const void *caller)
{
  void* victim;

  /*ptmalloc_init_minimal();*/
  victim = mspace_malloc(arena_to_mspace(&main_arena), sz);
  THREAD_STAT(++main_arena.stat_starter);

  return victim;
}

static void*
memalign_starter(size_t align, size_t sz, const void *caller)
{
  void* victim;

  /*ptmalloc_init_minimal();*/
  victim = mspace_memalign(arena_to_mspace(&main_arena), align, sz);
  THREAD_STAT(++main_arena.stat_starter);

  return victim;
}

static void
free_starter(void* mem, const void *caller)
{
  if (mem) {
    mchunkptr p = mem2chunk(mem);
    void *msp = arena_to_mspace(&main_arena);
    if (is_mmapped(p))
      munmap_chunk(msp, p);
    else
      mspace_free(msp, mem);
  }
  THREAD_STAT(++main_arena.stat_starter);
}

#endif /* !defined NO_THREADS && USE_STARTER */
/*----------------------------------------------------------------------*/

#ifndef NO_THREADS

/* atfork support. */

static void * (*save_malloc_hook) (size_t __size, const void *);
# if !defined _LIBC || !defined USE_TLS || (defined SHARED && !USE___THREAD)
static void * (*save_memalign_hook) (size_t __align, size_t __size,
                                     const void *);
# endif
static void (*save_free_hook) (void * __ptr, const void *);
static void* save_arena;

/* Magic value for the thread-specific arena pointer when
   malloc_atfork() is in use. */

#define ATFORK_ARENA_PTR ((void*)-1)

/* The following hooks are used while the `atfork' handling mechanism
   is active. */

static void*
malloc_atfork(size_t sz, const void *caller)
{
  void *vptr = NULL;

  tsd_getspecific(arena_key, vptr);
  if(vptr == ATFORK_ARENA_PTR) {
    /* We are the only thread that may allocate at all. */
    return mspace_malloc(arena_to_mspace(&main_arena), sz);
  } else {
    /* Suspend the thread until the `atfork' handlers have completed.
       By that time, the hooks will have been reset as well, so that
       mALLOc() can be used again. */
    (void)mutex_lock(&list_lock);
    (void)mutex_unlock(&list_lock);
    return public_mALLOc(sz);
  }
}
static void
free_atfork(void* mem, const void *caller)
{
  void *vptr = NULL;
  struct malloc_arena *ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  if (mem == 0)                         /* free(0) has no effect */
    return;

  p = mem2chunk(mem);

  if (is_mmapped(p)) {                  /* release mmapped memory. */
    ar_ptr = arena_for_mmap_chunk(p);
    munmap_chunk(arena_to_mspace(ar_ptr), p);
    return;
  }

  ar_ptr = arena_for_chunk(p);
  tsd_getspecific(arena_key, vptr);
  if(vptr != ATFORK_ARENA_PTR)
    (void)mutex_lock(&ar_ptr->mutex);
  mspace_free(arena_to_mspace(ar_ptr), mem);
  if(vptr != ATFORK_ARENA_PTR)
    (void)mutex_unlock(&ar_ptr->mutex);
}
/* The following two functions are registered via thread_atfork() to
   make sure that the mutexes remain in a consistent state in the
   fork()ed version of a thread.  Also adapt the malloc and free hooks
   temporarily, because the `atfork' handler mechanism may use
   malloc/free internally (e.g. in LinuxThreads). */
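/* Lock ordering used by these handlers (visible in the code below, noted
   here for clarity): ptmalloc_lock_all() takes list_lock first and then
   every arena mutex in list order, starting from main_arena; the unlock
   handlers release (or, in the child after fork, re-initialize) the arena
   mutexes in the same traversal order and handle list_lock last. */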
static void
ptmalloc_lock_all (void)
{
  struct malloc_arena* ar_ptr;

  if(__malloc_initialized < 1)
    return;
  (void)mutex_lock(&list_lock);
  for(ar_ptr = &main_arena;;) {
    (void)mutex_lock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena)
      break;
  }
  save_malloc_hook = __malloc_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_atfork;
  __free_hook = free_atfork;
  /* Only the current thread may perform malloc/free calls now. */
  tsd_getspecific(arena_key, save_arena);
  tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
}

static void
ptmalloc_unlock_all (void)
{
  struct malloc_arena *ar_ptr;

  if(__malloc_initialized < 1)
    return;
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
  for(ar_ptr = &main_arena;;) {
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  (void)mutex_unlock(&list_lock);
}
#ifdef __linux__

/* In LinuxThreads, unlocking a mutex in the child process after a
   fork() is currently unsafe, whereas re-initializing it is safe and
   does not leak resources.  Therefore, a special atfork handler is
   installed for the child. */

static void
ptmalloc_unlock_all2(void)
{
  struct malloc_arena *ar_ptr;

  if(__malloc_initialized < 1)
    return;
#if defined _LIBC || 1 /*defined MALLOC_HOOKS*/
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
#endif
  for(ar_ptr = &main_arena;;) {
    (void)mutex_init(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  (void)mutex_init(&list_lock);
}

#else

#define ptmalloc_unlock_all2 ptmalloc_unlock_all

#endif

#endif /* !defined NO_THREADS */
/*---------------------------------------------------------------------*/

#if !(USE_STARTER & 2)
static
#endif
void
ptmalloc_init(void)
{
  const char* s;
  int secure = 0;
  void *mspace;

  if(__malloc_initialized >= 0) return;
  __malloc_initialized = 0;

  /*if (mp_.pagesize == 0)
    ptmalloc_init_minimal();*/

#ifndef NO_THREADS
# if USE_STARTER & 1
  /* With some threads implementations, creating thread-specific data
     or initializing a mutex may call malloc() itself.  Provide a
     simple starter version (realloc() won't work). */
  save_malloc_hook = __malloc_hook;
  save_memalign_hook = __memalign_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_starter;
  __memalign_hook = memalign_starter;
  __free_hook = free_starter;
#  ifdef _LIBC
  /* Initialize the pthreads interface. */
  if (__pthread_initialize != NULL)
    __pthread_initialize();
#  endif /* !defined _LIBC */
# endif /* USE_STARTER & 1 */
#endif /* !defined NO_THREADS */
  mutex_init(&main_arena.mutex);
  main_arena.next = &main_arena;
  mspace = create_mspace_with_base((char*)&main_arena + MSPACE_OFFSET,
                                   sizeof(main_arena) - MSPACE_OFFSET,
                                   0);
  assert(mspace == arena_to_mspace(&main_arena));

  mutex_init(&list_lock);
  tsd_key_create(&arena_key, NULL);
  tsd_setspecific(arena_key, (void *)&main_arena);
  thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
#ifndef NO_THREADS
# if USE_STARTER & 1
  __malloc_hook = save_malloc_hook;
  __memalign_hook = save_memalign_hook;
  __free_hook = save_free_hook;
# endif
# if USE_STARTER & 2
  __malloc_hook = 0;
  __memalign_hook = 0;
  __free_hook = 0;
# endif
#endif
#ifdef _LIBC
  secure = __libc_enable_secure;
#else
  if (! secure) {
    if ((s = getenv("MALLOC_TRIM_THRESHOLD_")))
      public_mALLOPt(M_TRIM_THRESHOLD, atoi(s));
    if ((s = getenv("MALLOC_TOP_PAD_")) ||
        (s = getenv("MALLOC_GRANULARITY_")))
      public_mALLOPt(M_GRANULARITY, atoi(s));
    if ((s = getenv("MALLOC_MMAP_THRESHOLD_")))
      public_mALLOPt(M_MMAP_THRESHOLD, atoi(s));
    /*if ((s = getenv("MALLOC_MMAP_MAX_"))) this is no longer available
      public_mALLOPt(M_MMAP_MAX, atoi(s));*/
  }
  s = getenv("MALLOC_CHECK_");
#endif
  if (s) {
    /*if(s[0]) mALLOPt(M_CHECK_ACTION, (int)(s[0] - '0'));
      __malloc_check_init();*/
  }
  if (__malloc_initialize_hook != NULL)
    (*__malloc_initialize_hook)();
  __malloc_initialized = 1;
}
/*------------------------ Public wrappers. --------------------------------*/
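/* Every wrapper below follows the same shape: honor the corresponding
   __*_hook if one is installed, acquire an arena with arena_get() (which
   returns with its mutex held), pad the request by FOOTER_OVERHEAD for
   non-main arenas, call the matching mspace_* routine, tag the chunk with
   set_non_main_arena() when needed, unlock, and finally update the Charm
   counters (_memory_allocated plus UPDATE_MEMUSAGE). */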
void*
public_mALLOc(size_t bytes)
{
  struct malloc_arena* ar_ptr;
  void *victim;
  void * (*hook) (size_t, const void *) = __malloc_hook;
  if (hook != NULL)
    return (*hook)(bytes, RETURN_ADDRESS (0));

  arena_get(ar_ptr, bytes + FOOTER_OVERHEAD);
  if (!ar_ptr)
    return 0;
  if (ar_ptr != &main_arena)
    bytes += FOOTER_OVERHEAD;
  victim = mspace_malloc(arena_to_mspace(ar_ptr), bytes);
  if (victim && ar_ptr != &main_arena)
    set_non_main_arena(victim, ar_ptr);
  (void)mutex_unlock(&ar_ptr->mutex);
  assert(!victim || is_mmapped(mem2chunk(victim)) ||
         ar_ptr == arena_for_chunk(mem2chunk(victim)));

  /* CHARM++ ADD BEGIN */
  if (victim != NULL) {
    _memory_allocated += chunksize(mem2chunk(victim));
  }
  UPDATE_MEMUSAGE
  /* CHARM++ ADD END */
  return victim;
}
#ifdef libc_hidden_def
libc_hidden_def(public_mALLOc)
#endif
void
public_fREe(void* mem)
{
  struct malloc_arena* ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  void (*hook) (void *, const void *) = __free_hook;
  if (hook != NULL) {
    (*hook)(mem, RETURN_ADDRESS (0));
    return;
  }

  if (mem == 0)                         /* free(0) has no effect */
    return;

  p = mem2chunk(mem);

  if (is_mmapped(p)) {                  /* release mmapped memory. */
    ar_ptr = arena_for_mmap_chunk(p);
    munmap_chunk(arena_to_mspace(ar_ptr), p);
    return;
  }

  ar_ptr = arena_for_chunk(p);
#if THREAD_STATS
  if(!mutex_trylock(&ar_ptr->mutex))
    ++(ar_ptr->stat_lock_direct);
  else {
    (void)mutex_lock(&ar_ptr->mutex);
    ++(ar_ptr->stat_lock_wait);
  }
#else
  (void)mutex_lock(&ar_ptr->mutex);
#endif
  mspace_free(arena_to_mspace(ar_ptr), mem);
  (void)mutex_unlock(&ar_ptr->mutex);
}
#ifdef libc_hidden_def
libc_hidden_def (public_fREe)
#endif
void*
public_rEALLOc(void* oldmem, size_t bytes)
{
  struct malloc_arena* ar_ptr;

  mchunkptr oldp;             /* chunk corresponding to oldmem */

  void* newp;                 /* chunk to return */

  void * (*hook) (void *, size_t, const void *) = __realloc_hook;
  if (hook != NULL)
    return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));

#if REALLOC_ZERO_BYTES_FREES
  if (bytes == 0 && oldmem != NULL) { public_fREe(oldmem); return 0; }
#endif

  /* realloc of null is supposed to be same as malloc */
  if (oldmem == 0)
    return public_mALLOc(bytes);

  oldp = mem2chunk(oldmem);

  /* CHARM++ ADD BEGIN */
  _memory_allocated -= chunksize(oldp);
  /* CHARM++ ADD END */

  if (is_mmapped(oldp))
    ar_ptr = arena_for_mmap_chunk(oldp); /* FIXME: use mmap_resize */
  else
    ar_ptr = arena_for_chunk(oldp);
#if THREAD_STATS
  if(!mutex_trylock(&ar_ptr->mutex))
    ++(ar_ptr->stat_lock_direct);
  else {
    (void)mutex_lock(&ar_ptr->mutex);
    ++(ar_ptr->stat_lock_wait);
  }
#else
  (void)mutex_lock(&ar_ptr->mutex);
#endif

#ifndef NO_THREADS
  /* As in malloc(), remember this arena for the next allocation. */
  tsd_setspecific(arena_key, (void *)ar_ptr);
#endif

  if (ar_ptr != &main_arena)
    bytes += FOOTER_OVERHEAD;
  newp = mspace_realloc(arena_to_mspace(ar_ptr), oldmem, bytes);

  if (newp && ar_ptr != &main_arena)
    set_non_main_arena(newp, ar_ptr);
  (void)mutex_unlock(&ar_ptr->mutex);

  assert(!newp || is_mmapped(mem2chunk(newp)) ||
         ar_ptr == arena_for_chunk(mem2chunk(newp)));

  /* CHARM++ ADD BEGIN */
  if (newp != NULL) {
    _memory_allocated += chunksize(mem2chunk(newp));
  }
  UPDATE_MEMUSAGE
  /* CHARM++ ADD END */

  return newp;
}
#ifdef libc_hidden_def
libc_hidden_def (public_rEALLOc)
#endif
void*
public_mEMALIGn(size_t alignment, size_t bytes)
{
  struct malloc_arena* ar_ptr;
  void *p;

  void * (*hook) (size_t, size_t, const void *) = __memalign_hook;
  if (hook != NULL)
    return (*hook)(alignment, bytes, RETURN_ADDRESS (0));

  /* If need less alignment than we give anyway, just relay to malloc */
  if (alignment <= MALLOC_ALIGNMENT) return public_mALLOc(bytes);

  /* Otherwise, ensure that it is at least a minimum chunk size */
  if (alignment < MIN_CHUNK_SIZE)
    alignment = MIN_CHUNK_SIZE;

  arena_get(ar_ptr,
            bytes + FOOTER_OVERHEAD + alignment + MIN_CHUNK_SIZE);
  if(!ar_ptr)
    return 0;

  if (ar_ptr != &main_arena)
    bytes += FOOTER_OVERHEAD;
  p = mspace_memalign(arena_to_mspace(ar_ptr), alignment, bytes);

  if (p && ar_ptr != &main_arena)
    set_non_main_arena(p, ar_ptr);
  (void)mutex_unlock(&ar_ptr->mutex);

  assert(!p || is_mmapped(mem2chunk(p)) ||
         ar_ptr == arena_for_chunk(mem2chunk(p)));

  /* CHARM++ ADD BEGIN */
  if (p != NULL) {
    _memory_allocated += chunksize(mem2chunk(p));
  }
  UPDATE_MEMUSAGE
  /* CHARM++ ADD END */

  return p;
}
#ifdef libc_hidden_def
libc_hidden_def (public_mEMALIGn)
#endif
void*
public_vALLOc(size_t bytes)
{
  struct malloc_arena* ar_ptr;
  void *p;

  if(__malloc_initialized < 0)
    ptmalloc_init ();
  arena_get(ar_ptr, bytes + FOOTER_OVERHEAD + MIN_CHUNK_SIZE);
  if(!ar_ptr)
    return 0;
  if (ar_ptr != &main_arena)
    bytes += FOOTER_OVERHEAD;
  p = mspace_memalign(arena_to_mspace(ar_ptr), 4096, bytes);

  if (p && ar_ptr != &main_arena)
    set_non_main_arena(p, ar_ptr);
  (void)mutex_unlock(&ar_ptr->mutex);

  /* CHARM++ ADD BEGIN */
  if (p != NULL) {
    _memory_allocated += chunksize(mem2chunk(p));
  }
  UPDATE_MEMUSAGE
  /* CHARM++ ADD END */

  return p;
}

int
public_pMEMALIGn (void **memptr, size_t alignment, size_t size)
{
  void *mem;

  /* Test whether the ALIGNMENT argument is valid.  It must be a power of
     two multiple of sizeof (void *). */
  if (alignment % sizeof (void *) != 0
      || !my_powerof2 (alignment / sizeof (void *)) != 0
      || alignment == 0)
    return EINVAL;

  mem = public_mEMALIGn (alignment, size);

  if (mem != NULL) {
    *memptr = mem;
    return 0;
  }

  return ENOMEM;
}
void*
public_cALLOc(size_t n_elements, size_t elem_size)
{
  struct malloc_arena* ar_ptr;
  size_t bytes, sz;
  void* mem;
  void * (*hook) (size_t, const void *) = __malloc_hook;

  /* size_t is unsigned so the behavior on overflow is defined. */
  bytes = n_elements * elem_size;
#define HALF_INTERNAL_SIZE_T \
  (((size_t) 1) << (8 * sizeof (size_t) / 2))
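  /* Worked example of the guard below (illustration only): on a 64-bit
     system HALF_INTERNAL_SIZE_T is 1 << 32.  If both n_elements and
     elem_size are below 2^32, their product is below 2^64 and cannot wrap
     around, so the division check is only needed when at least one operand
     has a high bit set -- which is what the cheap (n_elements | elem_size)
     test detects. */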
  if (__builtin_expect ((n_elements | elem_size) >= HALF_INTERNAL_SIZE_T, 0)) {
    if (elem_size != 0 && bytes / elem_size != n_elements) {
      /*MALLOC_FAILURE_ACTION;*/
      return 0;
    }
  }

  if (hook != NULL) {
    sz = bytes;
    mem = (*hook)(sz, RETURN_ADDRESS (0));
    if(mem == 0)
      return 0;
#ifdef HAVE_MEMCPY
    return memset(mem, 0, sz);
#else
    while(sz > 0) ((char*)mem)[--sz] = 0; /* rather inefficient */
    return mem;
#endif
  }

  arena_get(ar_ptr, bytes + FOOTER_OVERHEAD);
  if(!ar_ptr)
    return 0;

  if (ar_ptr != &main_arena)
    bytes += FOOTER_OVERHEAD;
  mem = mspace_calloc(arena_to_mspace(ar_ptr), bytes, 1);

  if (mem && ar_ptr != &main_arena)
    set_non_main_arena(mem, ar_ptr);
  (void)mutex_unlock(&ar_ptr->mutex);

  assert(!mem || is_mmapped(mem2chunk(mem)) ||
         ar_ptr == arena_for_chunk(mem2chunk(mem)));

  /* CHARM++ ADD BEGIN */
  if (mem != NULL) {
    _memory_allocated += chunksize(mem2chunk(mem));
  }
  UPDATE_MEMUSAGE
  /* CHARM++ ADD END */

  return mem;
}
void**
public_iCALLOc(size_t n, size_t elem_size, void* chunks[])
{
  struct malloc_arena* ar_ptr;
  void** m;

  arena_get(ar_ptr, n*(elem_size + FOOTER_OVERHEAD));
  if (!ar_ptr)
    return 0;

  if (ar_ptr != &main_arena)
    elem_size += FOOTER_OVERHEAD;
  m = mspace_independent_calloc(arena_to_mspace(ar_ptr), n, elem_size, chunks);

  if (m && ar_ptr != &main_arena) {
    while (n > 0)
      set_non_main_arena(m[--n], ar_ptr);
  }
  (void)mutex_unlock(&ar_ptr->mutex);

  /* CHARM++ ADD BEGIN */
  if (m != NULL) {
    _memory_allocated += chunksize(mem2chunk(m));
  }
  UPDATE_MEMUSAGE
  /* CHARM++ ADD END */

  return m;
}
void**
public_iCOMALLOc(size_t n, size_t sizes[], void* chunks[])
{
  struct malloc_arena* ar_ptr;
  size_t* m_sizes;
  size_t i;
  void** m;

  arena_get(ar_ptr, n*sizeof(size_t));
  if (!ar_ptr)
    return 0;

  if (ar_ptr != &main_arena) {
    /* Temporary m_sizes[] array is ugly but it would be surprising to
       change the original sizes[]... */
    m_sizes = mspace_malloc(arena_to_mspace(ar_ptr), n*sizeof(size_t));
    if (!m_sizes) {
      (void)mutex_unlock(&ar_ptr->mutex);
      return 0;
    }
    for (i=0; i<n; ++i)
      m_sizes[i] = sizes[i] + FOOTER_OVERHEAD;
    if (!chunks) {
      chunks = mspace_malloc(arena_to_mspace(ar_ptr),
                             n*sizeof(void*)+FOOTER_OVERHEAD);
      if (!chunks) {
        mspace_free(arena_to_mspace(ar_ptr), m_sizes);
        (void)mutex_unlock(&ar_ptr->mutex);
        return 0;
      }
      set_non_main_arena(chunks, ar_ptr);
    }
  } else
    m_sizes = sizes;

  m = mspace_independent_comalloc(arena_to_mspace(ar_ptr), n, m_sizes, chunks);

  if (ar_ptr != &main_arena) {
    mspace_free(arena_to_mspace(ar_ptr), m_sizes);
    if (m)
      for (i=0; i<n; ++i)
        set_non_main_arena(m[i], ar_ptr);
  }
  (void)mutex_unlock(&ar_ptr->mutex);

  /* CHARM++ ADD BEGIN */
  if (m != NULL) {
    _memory_allocated += chunksize(mem2chunk(m));
  }
  UPDATE_MEMUSAGE
  /* CHARM++ ADD END */

  return m;
}
void
public_cFREe(void* m)
{
  public_fREe(m);
}

int
public_mTRIm(size_t s)
{
  int result;

  (void)mutex_lock(&main_arena.mutex);
  result = mspace_trim(arena_to_mspace(&main_arena), s);
  (void)mutex_unlock(&main_arena.mutex);
  return result;
}

size_t
public_mUSABLe(void* mem)
{
  if (mem != 0) {
    mchunkptr p = mem2chunk(mem);
    if (cinuse(p))
      return chunksize(p) - overhead_for(p);
  }
  return 0;
}

int
public_mALLOPt(int p, int v)
{
  int result;
  result = mspace_mallopt(p, v);
  return result;
}
void
public_mSTATs(void)
{
  int i;
  struct malloc_arena* ar_ptr;
  /*unsigned long in_use_b, system_b, avail_b;*/
#if THREAD_STATS
  long stat_lock_direct = 0, stat_lock_loop = 0, stat_lock_wait = 0;
#endif

  if(__malloc_initialized < 0)
    ptmalloc_init ();
  for (i=0, ar_ptr = &main_arena;; ++i) {
    struct malloc_state* msp = arena_to_mspace(ar_ptr);

    fprintf(stderr, "Arena %d:\n", i);
    mspace_malloc_stats(msp);
#if THREAD_STATS
    stat_lock_direct += ar_ptr->stat_lock_direct;
    stat_lock_loop += ar_ptr->stat_lock_loop;
    stat_lock_wait += ar_ptr->stat_lock_wait;
#endif
    if (MALLOC_DEBUG > 1) {
      struct malloc_segment* mseg = &msp->seg;
      while (mseg) {
        fprintf(stderr, " seg %08lx-%08lx\n", (unsigned long)mseg->base,
                (unsigned long)(mseg->base + mseg->size));
        mseg = mseg->next;
      }
    }
    ar_ptr = ar_ptr->next;
    if (ar_ptr == &main_arena)
      break;
  }
#if THREAD_STATS
  fprintf(stderr, "locked directly = %10ld\n", stat_lock_direct);
  fprintf(stderr, "locked in loop  = %10ld\n", stat_lock_loop);
  fprintf(stderr, "locked waiting  = %10ld\n", stat_lock_wait);
  fprintf(stderr, "locked total    = %10ld\n",
          stat_lock_direct + stat_lock_loop + stat_lock_wait);
  if (main_arena.stat_starter > 0)
    fprintf(stderr, "starter hooks   = %10ld\n", main_arena.stat_starter);
#endif
}

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */
#undef malloc
#undef free
#undef calloc
#undef cfree
#undef realloc
#undef memalign
#undef valloc