/* Declarations for `malloc' and friends.
   Copyright (C) 1990-1993, 1995-1996, 1999, 2002-2007, 2013-2016 Free
   Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#include <config.h>

#if defined HAVE_PTHREAD && !defined HYBRID_MALLOC
#define USE_PTHREAD
#endif

#include <stddef.h>
#include <string.h>
#include <limits.h>
#include <stdint.h>
#include <unistd.h>

#ifdef USE_PTHREAD
#include <pthread.h>
#endif

#ifdef WINDOWSNT
#include <w32heap.h>	/* for sbrk */
#endif

#ifdef emacs
# include "lisp.h"
#endif

#ifdef HAVE_MALLOC_H
# if GNUC_PREREQ (4, 2, 0)
#  pragma GCC diagnostic ignored "-Wdeprecated-declarations"
# endif
# include <malloc.h>
#endif
#ifndef __MALLOC_HOOK_VOLATILE
# define __MALLOC_HOOK_VOLATILE volatile
#endif
#ifndef HAVE_MALLOC_H
extern void (*__MALLOC_HOOK_VOLATILE __after_morecore_hook) (void);
extern void (*__MALLOC_HOOK_VOLATILE __malloc_initialize_hook) (void);
extern void *(*__morecore) (ptrdiff_t);
#endif
/* If HYBRID_MALLOC is defined, then temacs will use malloc,
   realloc... as defined in this file (and renamed gmalloc,
   grealloc... via the macros that follow).  The dumped emacs,
   however, will use the system malloc, realloc....  In other source
   files, malloc, realloc... are renamed hybrid_malloc,
   hybrid_realloc... via macros in conf_post.h.  hybrid_malloc and
   friends are wrapper functions defined later in this file.  */
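/* Concretely, a call to malloc in some other source file thus resolves
   as: malloc -> hybrid_malloc (via conf_post.h) -> gmalloc before
   dumping, or the system malloc after dumping.  */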
#undef malloc
#undef realloc
#undef calloc
#undef aligned_alloc
#undef free
#define malloc gmalloc
#define realloc grealloc
#define calloc gcalloc
#define aligned_alloc galigned_alloc
#define free gfree
#define malloc_info gmalloc_info

#ifdef HYBRID_MALLOC
# include "sheap.h"
# define DUMPED bss_sbrk_did_unexec
static bool
ALLOCATED_BEFORE_DUMPING (char *p)
{
  return bss_sbrk_buffer <= p && p < bss_sbrk_buffer + STATIC_HEAP_SIZE;
}
#endif
#ifdef __cplusplus
extern "C"
{
#endif

#ifdef HYBRID_MALLOC
#define extern static
#endif

/* Allocate SIZE bytes of memory.  */
extern void *malloc (size_t size) ATTRIBUTE_MALLOC_SIZE ((1));
/* Re-allocate the previously allocated block
   in ptr, making the new block SIZE bytes long.  */
extern void *realloc (void *ptr, size_t size) ATTRIBUTE_ALLOC_SIZE ((2));
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern void *calloc (size_t nmemb, size_t size) ATTRIBUTE_MALLOC_SIZE ((1,2));
/* Free a block.  */
extern void free (void *ptr);
/* Allocate SIZE bytes aligned to a multiple of ALIGNMENT.  */
extern void *aligned_alloc (size_t, size_t);
#ifdef MSDOS
extern void *memalign (size_t, size_t);
extern int posix_memalign (void **, size_t, size_t);
#endif
/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define BLOCKLOG	(INT_WIDTH > 16 ? 12 : 9)
#define BLOCKSIZE	(1 << BLOCKLOG)
#define BLOCKIFY(SIZE)	(((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
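/* For example, when INT_WIDTH > 16, BLOCKLOG is 12, so BLOCKSIZE is
   4096 bytes and BLOCKIFY (5000) rounds up to 2 blocks; any request of
   at most BLOCKSIZE / 2 bytes is served from a fragment instead.  */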
/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP	(INT_WIDTH > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS	8
/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
	/* Zero for a large (multiblock) object, or positive giving the
	   logarithm to the base two of the fragment size.  */
	int type;
	union
	  {
	    struct
	      {
		size_t nfree;	/* Free frags in a fragmented block.  */
		size_t first;	/* First free fragment of the block.  */
	      } frag;
	    /* For a large object, in its first block, this has the number
	       of blocks in the object.  In the other blocks, this has a
	       negative number which says how far back the first block is.  */
	    ptrdiff_t size;
	  } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
	size_t size;	/* Size (in blocks) of a free cluster.  */
	size_t next;	/* Index of next free cluster.  */
	size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;
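/* For instance, a 3-block object starting at block B is recorded as
   busy.type == 0 and busy.info.size == 3 in _heapinfo[B], while
   _heapinfo[B + 1].busy.info.size == -1 and
   _heapinfo[B + 2].busy.info.size == -2, so the first block can be
   found from any of the others by adding the (negative) size.  */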
/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A)	(((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B)	((void *) (((B) - 1) * BLOCKSIZE + _heapbase))
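/* Block numbers start at 1, so BLOCK (_heapbase) == 1 and
   ADDRESS (1) == _heapbase; in general ADDRESS (BLOCK (p)) yields the
   start of the block containing p.  */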
/* Current search index for the heap table.  */
extern size_t _heapindex;

/* Limit of valid info table indices.  */
extern size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];
/* List of blocks allocated with aligned_alloc and friends.  */
struct alignlist
  {
    struct alignlist *next;
    void *aligned;	/* The address that aligned_alloc returned.  */
    void *exact;	/* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;

/* Instrumentation.  */
extern size_t _chunks_used;
extern size_t _bytes_used;
extern size_t _chunks_free;
extern size_t _bytes_free;
/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern void *_malloc_internal (size_t);
extern void *_realloc_internal (void *, size_t);
extern void _free_internal (void *);
extern void *_malloc_internal_nolock (size_t);
extern void *_realloc_internal_nolock (void *, size_t);
extern void _free_internal_nolock (void *);

#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;
#define LOCK()					\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_lock (&_malloc_mutex);	\
  } while (0)
#define UNLOCK()				\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_unlock (&_malloc_mutex);	\
  } while (0)
#define LOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_aligned_blocks_mutex);	\
  } while (0)
#define UNLOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_aligned_blocks_mutex);	\
  } while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif
/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize (void);

#ifdef GC_MCHECK

/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck (void (*abortfunc) (enum mcheck_status));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe (void *ptr);

/* Activate a standard collection of tracing hooks.  */
extern void mtrace (void);
extern void muntrace (void);

/* Statistics available to the user.  */
struct mstats
  {
    size_t bytes_total;	/* Total size of the heap.  */
    size_t chunks_used;	/* Chunks allocated by the user.  */
    size_t bytes_used;	/* Byte total of user-allocated chunks.  */
    size_t chunks_free;	/* Chunks in the free list.  */
    size_t bytes_free;	/* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats (void);

#endif

#undef extern

#ifdef __cplusplus
}
#endif
/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#include <errno.h>

/* Debugging hook for 'malloc'.  */
static void *(*__MALLOC_HOOK_VOLATILE gmalloc_hook) (size_t);

/* Replacements for traditional glibc malloc hooks, for platforms that
   do not already have these hooks.  Platforms with these hooks all
   used relaxed ref/def, so it is OK to define them here too.  */
void (*__MALLOC_HOOK_VOLATILE __malloc_initialize_hook) (void);
void (*__MALLOC_HOOK_VOLATILE __after_morecore_hook) (void);
void *(*__morecore) (ptrdiff_t);
#ifndef HYBRID_MALLOC

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Search index in the info table.  */
size_t _heapindex;

/* Limit of valid info table indices.  */
size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
size_t _chunks_used;
size_t _bytes_used;
size_t _chunks_free;
size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

#else

static struct list _fraghead[BLOCKLOG];

#endif /* HYBRID_MALLOC */

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
#if defined DOUG_LEA_MALLOC || defined HYBRID_MALLOC || defined SYSTEM_MALLOC
static
#endif
size_t __malloc_extra_blocks;

/* Number of info entries.  */
static size_t heapsize;
#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static size_t last_state_size;
static malloc_info *last_heapinfo;

void
protect_malloc_state (int protect_p)
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
		    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
	abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif
/* Aligned allocation.  */
static void *
align (size_t size)
{
  void *result;
  ptrdiff_t adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows the
     ptrdiff_t type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if (PTRDIFF_MAX < size)
    result = 0;
  else
    result = (*__morecore) (size);
  adj = (uintptr_t) result % BLOCKSIZE;
  if (adj != 0)
    {
      adj = BLOCKSIZE - adj;
      (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}
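/* A sketch of the adjustment above: if __morecore hands back core at,
   say, 0x100 bytes past a block boundary, adj becomes BLOCKSIZE - 0x100;
   requesting that many extra bytes advances the break so RESULT can be
   bumped forward to the next multiple of BLOCKSIZE, and the bytes
   skipped over are simply left unused.  */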
/* Get SIZE bytes, if we can get them starting at END.
   Return the address of the space we got.
   If we cannot get space at END, fail and return 0.  */
static void *
get_contiguous_space (ptrdiff_t size, void *position)
{
  void *before;
  void *after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}
/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void
register_heapinfo (void)
{
  size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}
#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;

static void
malloc_atfork_handler_prepare (void)
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}
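/* The handlers above hold both malloc locks across fork so that the
   child does not inherit a mutex locked by some other thread; the
   parent and child handlers then release them in reverse order of
   acquisition.  */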
/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread (void)
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
		  malloc_atfork_handler_parent,
		  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif /* USE_PTHREAD */
static void
malloc_initialize_1 (void)
{
#ifdef GC_MCHECK
  mcheck (NULL);
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}
/* Set everything up and remember that we have.
   main will call malloc, which calls this function.  That is before any
   threads or signal handlers have been set up, so we don't need thread
   protection.  */
int
__malloc_initialize (void)
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}
static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static void *
morecore_nolock (size_t size)
{
  void *result;
  malloc_info *newinfo, *oldinfo;
  size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
	 added blocks in the table itself, as we hope to place them in
	 existing free space, which is already covered by part of the
	 existing table.  */
      newsize = heapsize;
      do
	newsize *= 2;
      while ((size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
	 from realloc in the case of growing a large block, because the
	 block being grown is momentarily marked as free.  In this case
	 _heaplimit is zero so we know not to reuse space for internal
	 allocation.  */
      if (_heaplimit != 0)
	{
	  /* First try to allocate the new info table in core we already
	     have, in the usual way using realloc.  If realloc cannot
	     extend it in place or relocate it to existing sufficient core,
	     we will get called again, and the code above will notice the
	     `morecore_recursing' flag and return null.  */
	  int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
	  morecore_recursing = 1;
	  newinfo = _realloc_internal_nolock (_heapinfo,
					      newsize * sizeof (malloc_info));
	  morecore_recursing = 0;
	  if (newinfo == NULL)
	    errno = save;
	  else
	    {
	      /* We found some space in core, and realloc has put the old
		 table's blocks on the free list.  Now zero the new part
		 of the table and install the new table location.  */
	      memset (&newinfo[heapsize], 0,
		      (newsize - heapsize) * sizeof (malloc_info));
	      _heapinfo = newinfo;
	      heapsize = newsize;
	      goto got_heap;
	    }
	}

      /* Allocate new space for the malloc info table.  */
      while (1)
	{
	  newinfo = align (newsize * sizeof (malloc_info));

	  /* Did it fail?  */
	  if (newinfo == NULL)
	    {
	      (*__morecore) (-size);
	      return NULL;
	    }

	  /* Is it big enough to record status for its own space?
	     If so, we win.  */
	  if ((size_t) BLOCK ((char *) newinfo
			      + newsize * sizeof (malloc_info))
	      < newsize)
	    break;

	  /* Must try again.  First give back most of what we just got.  */
	  (*__morecore) (- newsize * sizeof (malloc_info));
	  newsize *= 2;
	}

      /* Copy the old table to the beginning of the new,
	 and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
	      (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
	 it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}
/* Allocate memory from the heap.  */
void *
_malloc_internal_nolock (size_t size)
{
  void *result;
  size_t block, blocks, lastblocks, start;
  register size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size.  */
      register size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;
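      /* Example: a request for 100 bytes gives size == 99 after the
	 decrement; the loop then leaves log == 7, so the fragment
	 handed out is 1 << 7 == 128 bytes, the smallest power of two
	 that fits the request.  */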
      /* Look in the fragment lists for a
	 free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters.  */
	  result = next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    _heapinfo[block].busy.info.frag.first =
	      (uintptr_t) next->next % BLOCKSIZE >> log;

	  /* Update the statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
	  result = _malloc_internal_nolock (BLOCKSIZE);
	  PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
	  result = _malloc_internal_nolock (BLOCKSIZE);
#else
	  result = malloc (BLOCKSIZE);
#endif
	  if (result == NULL)
	    {
	      PROTECT_MALLOC_STATE (1);
	      goto out;
	    }

	  /* Link all fragments but the first into the free list.  */
	  next = (struct list *) ((char *) result + (1 << log));
	  next->next = NULL;
	  next->prev = &_fraghead[log];
	  _fraghead[log].next = next;

	  for (i = 2; i < (size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      next->next->prev = next;
	    }

	  /* Initialize the nfree and first counters for this block.  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Get a little extra.  */
	      size_t wantblocks = blocks + __malloc_extra_blocks;
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      /* Check to see if the new core will be contiguous with the
		 final free block; if so we don't need to get as much.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  /* We can't do this if we will have to make the heap info
		     table bigger to accommodate the new space.  */
		  block + wantblocks <= heapsize &&
		  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
					ADDRESS (block + lastblocks)))
		{
		  /* We got it contiguously.  Which block we are extending
		     (the `final free block' referred to above) might have
		     changed, if it got combined with a freed info table.  */
		  block = _heapinfo[0].free.prev;
		  _heapinfo[block].free.size += (wantblocks - lastblocks);
		  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
		  _heaplimit += wantblocks - lastblocks;
		  continue;
		}
	      result = morecore_nolock (wantblocks * BLOCKSIZE);
	      if (result == NULL)
		goto out;
	      block = BLOCK (result);
	      /* Put the new block at the end of the free list.  */
	      _heapinfo[block].free.size = wantblocks;
	      _heapinfo[block].free.prev = _heapinfo[0].free.prev;
	      _heapinfo[block].free.next = 0;
	      _heapinfo[0].free.prev = block;
	      _heapinfo[_heapinfo[block].free.prev].free.next = block;
	      ++_chunks_free;
	      /* Now loop to use some of that block for this allocation.  */
	    }
	}

      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list.  */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list.  */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
	 first with a negative number so you can find the first block by
	 adding that adjustment.  */
      while (--blocks > 0)
	_heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}
void *
_malloc_internal (size_t size)
{
  void *result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}
void *
malloc (size_t size)
{
  void *(*hook) (size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of gmalloc_hook to an automatic variable in case
     gmalloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not the right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to gmalloc_hook) for now.  */
  hook = gmalloc_hook;
  return (hook != NULL ? *hook : _malloc_internal) (size);
}
#if !(defined (_LIBC) || defined (HYBRID_MALLOC))

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

extern void *_malloc (size_t);
extern void _free (void *);
extern void *_realloc (void *, size_t);

void *
_malloc (size_t size)
{
  return malloc (size);
}

void
_free (void *ptr)
{
  free (ptr);
}

void *
_realloc (void *ptr, size_t size)
{
  return realloc (ptr, size);
}

#endif
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
/* Debugging hook for free.  */
static void (*__MALLOC_HOOK_VOLATILE gfree_hook) (void *);

#ifndef HYBRID_MALLOC

/* List of blocks allocated by aligned_alloc.  */
struct alignlist *_aligned_blocks = NULL;
#endif
/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (void *ptr)
{
  int type;
  size_t block, blocks;
  register size_t i;
  struct list *prev, *next;
  void *curbrk;
  const size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;
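  /* For example, if __malloc_extra_blocks is left at its default of
     zero, the threshold is just FINAL_FREE_BLOCKS (8 blocks); callers
     that bump __malloc_extra_blocks to reduce __morecore traffic raise
     this threshold in step, which damps shrink/grow oscillation at the
     end of the heap.  */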
  register struct alignlist *l;

  if (ptr == NULL)
    return;

  PROTECT_MALLOC_STATE (0);

  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}

      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

	  size_t info_block = BLOCK (_heapinfo);
	  size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  size_t prev_block = _heapinfo[block].free.prev;
	  size_t prev_blocks = _heapinfo[prev_block].free.size;
	  size_t next_block = _heapinfo[block].free.next;
	  size_t next_blocks = _heapinfo[next_block].free.size;

	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold))))
	    {
	      malloc_info *newinfo;
	      size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal_nolock (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      newinfo = _malloc_internal_nolock (info_blocks * BLOCKSIZE);
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }

	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      (*__morecore) (-bytes);
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
	  _free_internal_nolock (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block.  */
	  next = ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block.  */
	  prev = ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first =
	    (uintptr_t) ptr % BLOCKSIZE >> type;
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
}
/* Return memory to the heap.
   Like 'free' but don't call a hook if there is one.  */
void
_free_internal (void *ptr)
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}

/* Return memory to the heap.  */

void
free (void *ptr)
{
  void (*hook) (void *) = gfree_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}
#ifndef HYBRID_MALLOC
/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (void *ptr)
{
  free (ptr);
}
#endif
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifndef min
#define min(a, b) ((a) < (b) ? (a) : (b))
#endif

/* Debugging hook for realloc.  */
static void *(*grealloc_hook) (void *, size_t);
/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.  */
void *
_realloc_internal_nolock (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
	{
	  result = _malloc_internal_nolock (size);
	  if (result != NULL)
	    {
	      memcpy (result, ptr, size);
	      _free_internal_nolock (ptr);
	      goto out;
	    }
	}

      /* The new size is a large allocation as well;
	 see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
	{
	  /* The new size is smaller; return
	     excess memory to the free list.  */
	  _heapinfo[block + blocks].busy.type = 0;
	  _heapinfo[block + blocks].busy.info.size
	    = _heapinfo[block].busy.info.size - blocks;
	  _heapinfo[block].busy.info.size = blocks;
	  /* We have just created a new chunk by splitting a chunk in two.
	     Now we will free this chunk; increment the statistics counter
	     so it doesn't become wrong when _free_internal decrements it.  */
	  ++_chunks_used;
	  _free_internal_nolock (ADDRESS (block + blocks));
	  result = ptr;
	}
      else if (blocks == _heapinfo[block].busy.info.size)
	/* No size change necessary.  */
	result = ptr;
      else
	{
	  /* Won't fit, so allocate a new region that will.
	     Free the old region first in case there is sufficient
	     adjacent free space to grow without moving.  */
	  blocks = _heapinfo[block].busy.info.size;
	  /* Prevent free from actually returning memory to the system.  */
	  oldlimit = _heaplimit;
	  _heaplimit = 0;
	  _free_internal_nolock (ptr);
	  result = _malloc_internal_nolock (size);
	  PROTECT_MALLOC_STATE (0);
	  if (_heaplimit == 0)
	    _heaplimit = oldlimit;
	  if (result == NULL)
	    {
	      /* Now we're really in trouble.  We have to unfree
		 the thing we just freed.  Unfortunately it might
		 have been coalesced with its neighbors.  */
	      if (_heapindex == block)
		(void) _malloc_internal_nolock (blocks * BLOCKSIZE);
	      else
		{
		  void *previous
		    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
		  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
		  _free_internal_nolock (previous);
		}
	      goto out;
	    }
	  if (ptr != result)
	    memmove (result, ptr, blocks * BLOCKSIZE);
	}
      break;

    default:
      /* Old size is a fragment; type is logarithm
	 to base two of the fragment size.  */
      if (size > (size_t) (1 << (type - 1)) &&
	  size <= (size_t) (1 << type))
	/* The new size is the same kind of fragment.  */
	result = ptr;
      else
	{
	  /* The new size is different; allocate a new space,
	     and copy the lesser of the new size and the old.  */
	  result = _malloc_internal_nolock (size);
	  if (result == NULL)
	    goto out;
	  memcpy (result, ptr, min (size, (size_t) 1 << type));
	  _free_internal_nolock (ptr);
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}
void *
_realloc_internal (void *ptr, size_t size)
{
  void *result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}

void *
realloc (void *ptr, size_t size)
{
  void *(*hook) (void *, size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  hook = grealloc_hook;
  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
void *
calloc (size_t nmemb, size_t size)
{
  void *result;
  size_t bytes = nmemb * size;

  if (size != 0 && bytes / size != nmemb)
    {
      errno = ENOMEM;
      return NULL;
    }

  result = malloc (bytes);
  if (result)
    return memset (result, 0, bytes);
  return result;
}
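/* The overflow check above matters: on a 64-bit host a call like
   calloc (SIZE_MAX, 2) wraps BYTES around to SIZE_MAX - 1, and since
   (SIZE_MAX - 1) / 2 != SIZE_MAX the function fails cleanly with
   ENOMEM instead of returning an undersized buffer.  */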
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

The GNU C Library is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with the GNU C Library.  If not, see <http://www.gnu.org/licenses/>.  */

/* uClibc defines __GNU_LIBRARY__, but it is not completely
   compatible.  */
#if !defined (__GNU_LIBRARY__) || defined (__UCLIBC__)
#define	__sbrk	sbrk
#else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

extern void *__sbrk (ptrdiff_t increment);
#endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
static void *
gdefault_morecore (ptrdiff_t increment)
{
  void *result;
#ifdef HYBRID_MALLOC
  if (!DUMPED)
    {
      return bss_sbrk (increment);
    }
#endif
  result = (void *) __sbrk (increment);
  if (result == (void *) -1)
    return NULL;
  return result;
}

void *(*__morecore) (ptrdiff_t) = gdefault_morecore;
/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.  */
void *
aligned_alloc (size_t alignment, size_t size)
{
  void *result;
  size_t adj, lastadj;

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  if (- size < alignment)
    {
      errno = ENOMEM;
      return NULL;
    }
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = alignment - (uintptr_t) result % alignment;
  if (adj == alignment)
    adj = 0;

  if (adj != alignment - 1)
    {
      do
	{
	  /* Reallocate the block with only as much excess as it
	     needs.  */
	  free (result);
	  result = malloc (size + adj);
	  if (result == NULL)	/* Impossible unless interrupted.  */
	    return NULL;

	  lastadj = adj;
	  adj = alignment - (uintptr_t) result % alignment;
	  if (adj == alignment)
	    adj = 0;
	  /* It's conceivable we might have been so unlucky as to get
	     a different block with weaker alignment.  If so, this
	     block is too short to contain SIZE after alignment
	     correction.  So we must try again and get another block,
	     slightly larger.  */
	} while (adj > lastadj);
    }

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  l = malloc (sizeof *l);
	  if (l != NULL)
	    {
	      l->next = _aligned_blocks;
	      _aligned_blocks = l;
	    }
	}
      if (l != NULL)
	{
	  l->exact = result;
	  result = l->aligned = (char *) result + adj;
	}
      UNLOCK_ALIGNED_BLOCKS ();
      if (l == NULL)
	{
	  free (result);
	  result = NULL;
	}
    }

  return result;
}
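/* Example of the padding logic above: with ALIGNMENT == 64, a result
   ending in 0x30 gives adj == 0x10, so the caller's pointer is bumped
   16 bytes forward and the (aligned, exact) pair is stashed in
   _aligned_blocks, letting free recover the exact pointer later.  */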
/* Note that memalign and posix_memalign are not used in Emacs.  */
#ifndef HYBRID_MALLOC
/* An obsolete alias for aligned_alloc, for any old libraries that use
   this alias.  */

void *
memalign (size_t alignment, size_t size)
{
  return aligned_alloc (alignment, size);
}

/* If HYBRID_MALLOC is defined, we may want to use the system
   posix_memalign below.  */
int
posix_memalign (void **memptr, size_t alignment, size_t size)
{
  void *mem;

  if (alignment == 0
      || alignment % sizeof (void *) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = aligned_alloc (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;

  return 0;
}
#endif
/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifndef HYBRID_MALLOC

# ifndef HAVE_MALLOC_H
/* Allocate SIZE bytes on a page boundary.  */
extern void *valloc (size_t);
# endif

# if defined _SC_PAGESIZE || !defined HAVE_GETPAGESIZE
#  include "getpagesize.h"
# elif !defined getpagesize
extern int getpagesize (void);
# endif

static size_t pagesize;

void *
valloc (size_t size)
{
  if (pagesize == 0)
    pagesize = getpagesize ();

  return aligned_alloc (pagesize, size);
}
#endif /* HYBRID_MALLOC */
#undef malloc
#undef realloc
#undef calloc
#undef aligned_alloc
#undef free

#ifdef HYBRID_MALLOC
/* Declare system malloc and friends.  */
extern void *malloc (size_t size);
extern void *realloc (void *ptr, size_t size);
extern void *calloc (size_t nmemb, size_t size);
extern void free (void *ptr);
#ifdef HAVE_ALIGNED_ALLOC
extern void *aligned_alloc (size_t alignment, size_t size);
#elif defined HAVE_POSIX_MEMALIGN
extern int posix_memalign (void **memptr, size_t alignment, size_t size);
#endif
/* See the comments near the beginning of this file for explanations
   of the following functions.  */

void *
hybrid_malloc (size_t size)
{
  if (DUMPED)
    return malloc (size);
  return gmalloc (size);
}

void *
hybrid_calloc (size_t nmemb, size_t size)
{
  if (DUMPED)
    return calloc (nmemb, size);
  return gcalloc (nmemb, size);
}

void
hybrid_free (void *ptr)
{
  if (!DUMPED)
    gfree (ptr);
  else if (!ALLOCATED_BEFORE_DUMPING (ptr))
    free (ptr);
  /* Otherwise the dumped emacs is trying to free something allocated
     before dumping; do nothing.  */
  return;
}

#if defined HAVE_ALIGNED_ALLOC || defined HAVE_POSIX_MEMALIGN
void *
hybrid_aligned_alloc (size_t alignment, size_t size)
{
  if (!DUMPED)
    return galigned_alloc (alignment, size);
  /* The following is copied from alloc.c  */
#ifdef HAVE_ALIGNED_ALLOC
  return aligned_alloc (alignment, size);
#else  /* HAVE_POSIX_MEMALIGN */
  void *p;
  return posix_memalign (&p, alignment, size) == 0 ? p : 0;
#endif
}
#endif
void *
hybrid_realloc (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, oldsize;

  if (!DUMPED)
    return grealloc (ptr, size);
  if (!ALLOCATED_BEFORE_DUMPING (ptr))
    return realloc (ptr, size);

  /* The dumped emacs is trying to realloc storage allocated before
     dumping.  We just malloc new space and copy the data.  */
  if (size == 0 || ptr == NULL)
    return malloc (size);
  block = ((char *) ptr - _heapbase) / BLOCKSIZE + 1;
  type = _heapinfo[block].busy.type;
  oldsize =
    type == 0 ? _heapinfo[block].busy.info.size * BLOCKSIZE
    : (size_t) 1 << type;
  result = malloc (size);
  if (result)
    return memcpy (result, ptr, min (oldsize, size));
  return result;
}
#else	/* ! HYBRID_MALLOC */

void *
malloc (size_t size)
{
  return gmalloc (size);
}

void *
calloc (size_t nmemb, size_t size)
{
  return gcalloc (nmemb, size);
}

void
free (void *ptr)
{
  gfree (ptr);
}

void *
aligned_alloc (size_t alignment, size_t size)
{
  return galigned_alloc (alignment, size);
}

void *
realloc (void *ptr, size_t size)
{
  return grealloc (ptr, size);
}

#endif	/* HYBRID_MALLOC */
#ifdef GC_MCHECK

/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#include <stdio.h>

/* Old hook values.  */
static void (*old_free_hook) (void *ptr);
static void *(*old_malloc_hook) (size_t size);
static void *(*old_realloc_hook) (void *ptr, size_t size);

/* Function to call when something awful happens.  */
static void (*abortfunc) (enum mcheck_status);

/* Arbitrary magical numbers.  */
#define MAGICWORD	(SIZE_MAX / 11 ^ SIZE_MAX / 13 << 3)
#define MAGICFREE	(SIZE_MAX / 17 ^ SIZE_MAX / 19 << 4)
#define MAGICBYTE	((char) 0xd7)
#define MALLOCFLOOD	((char) 0x93)
#define FREEFLOOD	((char) 0x95)

struct hdr
  {
    size_t size;	/* Exact size requested by user.  */
    size_t magic;	/* Magic number to check header integrity.  */
  };
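/* Layout used by the hooks below: [struct hdr][user data][MAGICBYTE].
   mallochook over-allocates by sizeof (struct hdr) + 1, stores the
   requested size and MAGICWORD in the header, and plants MAGICBYTE
   just past the user data, so checkhdr can detect writes off either
   end of the block.  */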
static enum mcheck_status
checkhdr (const struct hdr *hdr)
{
  enum mcheck_status status;
  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
	status = MCHECK_TAIL;
      else
	status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}
static void
freehook (void *ptr)
{
  struct hdr *hdr;

  if (ptr)
    {
      struct alignlist *l;

      /* If the block was allocated by aligned_alloc, its real pointer
	 to free is recorded in _aligned_blocks; find that.  */
      PROTECT_MALLOC_STATE (0);
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == ptr)
	  {
	    l->aligned = NULL;	/* Mark the slot in the list as free.  */
	    ptr = l->exact;
	    break;
	  }
      UNLOCK_ALIGNED_BLOCKS ();
      PROTECT_MALLOC_STATE (1);

      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      memset (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  gfree_hook = old_free_hook;
  free (hdr);
  gfree_hook = freehook;
}
static void *
mallochook (size_t size)
{
  struct hdr *hdr;

  gmalloc_hook = old_malloc_hook;
  hdr = malloc (sizeof *hdr + size + 1);
  gmalloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  return memset (hdr + 1, MALLOCFLOOD, size);
}
static void *
reallochook (void *ptr, size_t size)
{
  struct hdr *hdr = NULL;
  size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
	memset ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  gfree_hook = old_free_hook;
  gmalloc_hook = old_malloc_hook;
  grealloc_hook = old_realloc_hook;
  hdr = realloc (hdr, sizeof *hdr + size + 1);
  gfree_hook = freehook;
  gmalloc_hook = mallochook;
  grealloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    memset ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return hdr + 1;
}
static void
mabort (enum mcheck_status status)
{
  const char *msg;
  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
# ifdef emacs
  emacs_abort ();
# else
  abort ();
# endif
#endif
}
static int mcheck_used = 0;

int
mcheck (void (*func) (enum mcheck_status))
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = gfree_hook;
      gfree_hook = freehook;
      old_malloc_hook = gmalloc_hook;
      gmalloc_hook = mallochook;
      old_realloc_hook = grealloc_hook;
      grealloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}
enum mcheck_status
mprobe (void *ptr)
{
  return mcheck_used ? checkhdr (ptr) : MCHECK_DISABLED;
}

#endif /* GC_MCHECK */