/* Declarations for `malloc' and friends.
   Copyright (C) 1990-1993, 1995-1996, 1999, 2002-2007, 2013-2016 Free
   Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#include <config.h>

#if defined HAVE_PTHREAD && !defined HYBRID_MALLOC
#define USE_PTHREAD
#endif

#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <stdint.h>
#include <unistd.h>

#ifdef USE_PTHREAD
#include <pthread.h>
#endif

#ifdef emacs
# include "lisp.h"
#endif

#ifdef HAVE_MALLOC_H
# if GNUC_PREREQ (4, 2, 0)
#  pragma GCC diagnostic ignored "-Wdeprecated-declarations"
# endif
# include <malloc.h>
#endif
#ifndef __MALLOC_HOOK_VOLATILE
# define __MALLOC_HOOK_VOLATILE volatile
#endif
#ifndef HAVE_MALLOC_H
extern void (*__MALLOC_HOOK_VOLATILE __after_morecore_hook) (void);
extern void (*__MALLOC_HOOK_VOLATILE __malloc_initialize_hook) (void);
extern void *(*__morecore) (ptrdiff_t);
#endif
/* If HYBRID_MALLOC is defined, then temacs will use malloc,
   realloc... as defined in this file (and renamed gmalloc,
   grealloc... via the macros that follow).  The dumped emacs,
   however, will use the system malloc, realloc....  In other source
   files, malloc, realloc... are renamed hybrid_malloc,
   hybrid_realloc... via macros in conf_post.h.  hybrid_malloc and
   friends are wrapper functions defined later in this file.  */
#undef malloc
#undef realloc
#undef calloc
#undef aligned_alloc
#undef free
#define malloc gmalloc
#define realloc grealloc
#define calloc gcalloc
#define aligned_alloc galigned_alloc
#define free gfree
#define malloc_info gmalloc_info

#ifdef HYBRID_MALLOC
# include "sheap.h"
# define DUMPED bss_sbrk_did_unexec
static bool
ALLOCATED_BEFORE_DUMPING (char *p)
{
  return bss_sbrk_buffer <= p && p < bss_sbrk_buffer + STATIC_HEAP_SIZE;
}
#endif
#ifdef __cplusplus
extern "C"
{
#endif

#ifdef HYBRID_MALLOC
#define extern static
#endif

/* Allocate SIZE bytes of memory.  */
extern void *malloc (size_t size) ATTRIBUTE_MALLOC_SIZE ((1));
/* Re-allocate the previously allocated block
   in ptr, making the new block SIZE bytes long.  */
extern void *realloc (void *ptr, size_t size) ATTRIBUTE_ALLOC_SIZE ((2));
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern void *calloc (size_t nmemb, size_t size) ATTRIBUTE_MALLOC_SIZE ((1,2));
/* Free a block.  */
extern void free (void *ptr);

/* Allocate SIZE bytes aligned to ALIGNMENT bytes.  */
extern void *aligned_alloc (size_t, size_t);
#ifdef MSDOS
extern void *memalign (size_t, size_t);
extern int posix_memalign (void **, size_t, size_t);
#endif
/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define BLOCKLOG	(INT_WIDTH > 16 ? 12 : 9)
#define BLOCKSIZE	(1 << BLOCKLOG)
#define BLOCKIFY(SIZE)	(((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
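
/* Worked example (illustrative only, not part of the original code):
   on a host with INT_WIDTH > 16, BLOCKLOG is 12, so BLOCKSIZE is
   1 << 12 == 4096 bytes.  BLOCKIFY rounds a byte count up to whole
   blocks: BLOCKIFY (1) == 1, BLOCKIFY (4096) == 1, and
   BLOCKIFY (10000) == (10000 + 4095) / 4096 == 3 blocks.  Requests of
   at most BLOCKSIZE / 2 (2048) bytes are served from power-of-two
   fragments rather than whole blocks.  */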
/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP	(INT_WIDTH > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS	8
/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
	/* Zero for a large (multiblock) object, or positive giving the
	   logarithm to the base two of the fragment size.  */
	int type;
	union
	  {
	    struct
	      {
		size_t nfree;	/* Free frags in a fragmented block.  */
		size_t first;	/* First free fragment of the block.  */
	      } frag;
	    /* For a large object, in its first block, this has the number
	       of blocks in the object.  In the other blocks, this has a
	       negative number which says how far back the first block is.  */
	    ptrdiff_t size;
	  } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
	size_t size;	/* Size (in blocks) of a free cluster.  */
	size_t next;	/* Index of next free cluster.  */
	size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;
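
/* Illustrative reading of this structure (a hedged gloss, not part of
   the original code): with BLOCKSIZE == 4096, a block carved into
   128-byte fragments has busy.type == 7 (since 1 << 7 == 128); its
   busy.info.frag.nfree counts the free fragments and
   busy.info.frag.first records the first free one.  A 5-block large
   object stores busy.info.size == 5 in its first block and -1, -2,
   -3, -4 in the following blocks, so any interior block can locate
   the start by adding its (negative) size to its own index.  */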
/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A)	(((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B)	((void *) (((B) - 1) * BLOCKSIZE + _heapbase))
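
/* Example (illustrative, not original code): block numbers are
   1-based, so BLOCK (_heapbase) == 1 and ADDRESS (1) == _heapbase.
   With BLOCKSIZE == 4096, the address _heapbase + 8192 maps to
   BLOCK == 3, and ADDRESS (3) maps back to _heapbase + 8192.  */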
/* Current search index for the heap table.  */
extern size_t _heapindex;

/* Limit of valid info table indices.  */
extern size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];

/* List of blocks allocated with aligned_alloc and friends.  */
struct alignlist
  {
    struct alignlist *next;
    void *aligned;	/* The address that aligned_alloc returned.  */
    void *exact;	/* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;

/* Instrumentation.  */
extern size_t _chunks_used;
extern size_t _bytes_used;
extern size_t _chunks_free;
extern size_t _bytes_free;

/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern void *_malloc_internal (size_t);
extern void *_realloc_internal (void *, size_t);
extern void _free_internal (void *);
extern void *_malloc_internal_nolock (size_t);
extern void *_realloc_internal_nolock (void *, size_t);
extern void _free_internal_nolock (void *);

#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;
#define LOCK()					\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_lock (&_malloc_mutex);	\
  } while (0)
#define UNLOCK()				\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_unlock (&_malloc_mutex);	\
  } while (0)
#define LOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_aligned_blocks_mutex);	\
  } while (0)
#define UNLOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_aligned_blocks_mutex);	\
  } while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize (void);

#ifdef GC_MCHECK

/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck (void (*abortfunc) (enum mcheck_status));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe (void *ptr);

/* Activate a standard collection of tracing hooks.  */
extern void mtrace (void);
extern void muntrace (void);

/* Statistics available to the user.  */
struct mstats
  {
    size_t bytes_total;	/* Total size of the heap.  */
    size_t chunks_used;	/* Chunks allocated by the user.  */
    size_t bytes_used;	/* Byte total of user-allocated chunks.  */
    size_t chunks_free;	/* Chunks in the free list.  */
    size_t bytes_free;	/* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats (void);
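
/* Usage sketch (hypothetical caller, not original code; mstats itself
   is defined elsewhere):

     struct mstats s = mstats ();
     fprintf (stderr, "heap: %zu bytes, %zu used / %zu free chunks\n",
	      s.bytes_total, s.chunks_used, s.chunks_free);

   The fields mirror the _bytes_used/_chunks_used instrumentation that
   malloc and free keep up to date below.  */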
#endif /* GC_MCHECK */

#undef extern

#ifdef __cplusplus
}
#endif

/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#include <errno.h>

/* Debugging hook for 'malloc'.  */
static void *(*__MALLOC_HOOK_VOLATILE gmalloc_hook) (size_t);

/* Replacements for traditional glibc malloc hooks, for platforms that
   do not already have these hooks.  Platforms with these hooks all
   used relaxed ref/def, so it is OK to define them here too.  */
void (*__MALLOC_HOOK_VOLATILE __malloc_initialize_hook) (void);
void (*__MALLOC_HOOK_VOLATILE __after_morecore_hook) (void);
void *(*__morecore) (ptrdiff_t);

#ifndef HYBRID_MALLOC

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Search index in the info table.  */
size_t _heapindex;

/* Limit of valid info table indices.  */
size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
size_t _chunks_used;
size_t _bytes_used;
size_t _chunks_free;
size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

#else

static struct list _fraghead[BLOCKLOG];

#endif /* HYBRID_MALLOC */

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
#if defined DOUG_LEA_MALLOC || defined HYBRID_MALLOC || defined SYSTEM_MALLOC
static
#endif
size_t __malloc_extra_blocks;

/* Number of info entries.  */
static size_t heapsize;

#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static size_t last_state_size;
static malloc_info *last_heapinfo;

void
protect_malloc_state (int protect_p)
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo   = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
		    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
	abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif
/* Aligned allocation.  */
static void *
align (size_t size)
{
  void *result;
  ptrdiff_t adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows the
     ptrdiff_t type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if (PTRDIFF_MAX < size)
    result = 0;
  else
    result = (*__morecore) (size);
  adj = (uintptr_t) result % BLOCKSIZE;
  if (adj != 0)
    {
      adj = BLOCKSIZE - adj;
      (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}
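
/* Example of the rounding above (illustrative, not original code): if
   __morecore returns 0x601010 and BLOCKSIZE is 4096, then
   adj == 0x601010 % 4096 == 0x10, so a further 4096 - 0x10 bytes are
   requested and the result is bumped to the block boundary 0x602000.
   Every heap address handed out by this allocator is therefore
   block-aligned.  */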
/* Get SIZE bytes, if we can get them starting at END.
   Return the address of the space we got.
   If we cannot get space at END, fail and return 0.  */
static void *
get_contiguous_space (ptrdiff_t size, void *position)
{
  void *before;
  void *after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}
/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void
register_heapinfo (void)
{
  size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}
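
/* For instance (illustrative, not original code): if the info table
   starts at block 10 and occupies 3 blocks, the loop above stores
   size 3 at block 10 and back-pointers -1 and -2 at blocks 11 and 12,
   so BLOCK + _heapinfo[BLOCK].busy.info.size recovers 10 from either
   interior block.  */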
#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;

static void
malloc_atfork_handler_prepare (void)
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread (void)
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
		  malloc_atfork_handler_parent,
		  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif /* USE_PTHREAD */
static void
malloc_initialize_1 (void)
{
#ifdef GC_MCHECK
  mcheck (NULL);
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}
/* Set everything up and remember that we have.
   main will call malloc which calls this function.  That is before any
   threads or signal handlers have been set up, so we don't need thread
   protection.  */
int
__malloc_initialize (void)
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}
static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static void *
morecore_nolock (size_t size)
{
  void *result;
  malloc_info *newinfo, *oldinfo;
  size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
	 added blocks in the table itself, as we hope to place them in
	 existing free space, which is already covered by part of the
	 existing table.  */
      newsize = heapsize;
      do
	newsize *= 2;
      while ((size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
	 from realloc in the case of growing a large block, because the
	 block being grown is momentarily marked as free.  In this case
	 _heaplimit is zero so we know not to reuse space for internal
	 allocation.  */
      if (_heaplimit != 0)
	{
	  /* First try to allocate the new info table in core we already
	     have, in the usual way using realloc.  If realloc cannot
	     extend it in place or relocate it to existing sufficient core,
	     we will get called again, and the code above will notice the
	     `morecore_recursing' flag and return null.  */
	  int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
	  morecore_recursing = 1;
	  newinfo = _realloc_internal_nolock (_heapinfo,
					      newsize * sizeof (malloc_info));
	  morecore_recursing = 0;
	  if (newinfo == NULL)
	    errno = save;
	  else
	    {
	      /* We found some space in core, and realloc has put the old
		 table's blocks on the free list.  Now zero the new part
		 of the table and install the new table location.  */
	      memset (&newinfo[heapsize], 0,
		      (newsize - heapsize) * sizeof (malloc_info));
	      _heapinfo = newinfo;
	      heapsize = newsize;
	      goto got_heap;
	    }
	}

      /* Allocate new space for the malloc info table.  */
      while (1)
	{
	  newinfo = align (newsize * sizeof (malloc_info));

	  /* Did it fail?  */
	  if (newinfo == NULL)
	    {
	      (*__morecore) (-size);
	      return NULL;
	    }

	  /* Is it big enough to record status for its own space?
	     If so, we win.  */
	  if ((size_t) BLOCK ((char *) newinfo
			      + newsize * sizeof (malloc_info))
	      < newsize)
	    break;

	  /* Must try again.  First give back most of what we just got.  */
	  (*__morecore) (- newsize * sizeof (malloc_info));
	  newsize *= 2;
	}

      /* Copy the old table to the beginning of the new,
	 and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
	      (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
	 it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}
/* Allocate memory from the heap.  */
void *
_malloc_internal_nolock (size_t size)
{
  void *result;
  size_t block, blocks, lastblocks, start;
  register size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size.  */
      register size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;
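
      /* Worked example (illustrative, not original code): a request of
	 100 bytes becomes size == 99 after the decrement; 99 halves to
	 49, 24, 12, 6, 3, 1, 0, incrementing log six times, so
	 log == 7 and the fragment size is 1 << 7 == 128 bytes, the
	 smallest power of two that holds the original request.  */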
      /* Look in the fragment lists for a
	 free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters.  */
	  result = next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    _heapinfo[block].busy.info.frag.first =
	      (uintptr_t) next->next % BLOCKSIZE >> log;

	  /* Update the statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
	  result = _malloc_internal_nolock (BLOCKSIZE);
	  PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
	  result = _malloc_internal_nolock (BLOCKSIZE);
#else
	  result = malloc (BLOCKSIZE);
#endif
	  if (result == NULL)
	    {
	      PROTECT_MALLOC_STATE (1);
	      goto out;
	    }

	  /* Link all fragments but the first into the free list.  */
	  next = (struct list *) ((char *) result + (1 << log));
	  next->next = NULL;
	  next->prev = &_fraghead[log];
	  _fraghead[log].next = next;

	  for (i = 2; i < (size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      next->next->prev = next;
	    }

	  /* Initialize the nfree and first counters for this block.  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Get a little extra.  */
	      size_t wantblocks = blocks + __malloc_extra_blocks;
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      /* Check to see if the new core will be contiguous with the
		 final free block; if so we don't need to get as much.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  /* We can't do this if we will have to make the heap info
		     table bigger to accommodate the new space.  */
		  block + wantblocks <= heapsize &&
		  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
					ADDRESS (block + lastblocks)))
		{
		  /* We got it contiguously.  Which block we are extending
		     (the `final free block' referred to above) might have
		     changed, if it got combined with a freed info table.  */
		  block = _heapinfo[0].free.prev;
		  _heapinfo[block].free.size += (wantblocks - lastblocks);
		  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
		  _heaplimit += wantblocks - lastblocks;
		  continue;
		}
	      result = morecore_nolock (wantblocks * BLOCKSIZE);
	      if (result == NULL)
		goto out;
	      block = BLOCK (result);
	      /* Put the new block at the end of the free list.  */
	      _heapinfo[block].free.size = wantblocks;
	      _heapinfo[block].free.prev = _heapinfo[0].free.prev;
	      _heapinfo[block].free.next = 0;
	      _heapinfo[0].free.prev = block;
	      _heapinfo[_heapinfo[block].free.prev].free.next = block;
	      ++_chunks_free;
	      /* Now loop to use some of that block for this allocation.  */
	    }
	}

      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list.  */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list.  */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
	 first with a negative number so you can find the first block by
	 adding that adjustment.  */
      while (--blocks > 0)
	_heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}
void *
_malloc_internal (size_t size)
{
  void *result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}

void *
malloc (size_t size)
{
  void *(*hook) (size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of gmalloc_hook to an automatic variable in case
     gmalloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not the right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to gmalloc_hook) for now.  */
  hook = gmalloc_hook;
  return (hook != NULL ? *hook : _malloc_internal) (size);
}
#if !(defined (_LIBC) || defined (HYBRID_MALLOC))

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

extern void *_malloc (size_t);
extern void _free (void *);
extern void *_realloc (void *, size_t);

void *
_malloc (size_t size)
{
  return malloc (size);
}

void
_free (void *ptr)
{
  free (ptr);
}

void *
_realloc (void *ptr, size_t size)
{
  return realloc (ptr, size);
}

#endif
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

/* Debugging hook for free.  */
static void (*__MALLOC_HOOK_VOLATILE gfree_hook) (void *);

#ifndef HYBRID_MALLOC

/* List of blocks allocated by aligned_alloc.  */
struct alignlist *_aligned_blocks = NULL;
#endif
/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (void *ptr)
{
  int type;
  size_t block, blocks;
  register size_t i;
  struct list *prev, *next;
  void *curbrk;
  const size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

  PROTECT_MALLOC_STATE (0);

  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}

      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

	  size_t info_block = BLOCK (_heapinfo);
	  size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  size_t prev_block = _heapinfo[block].free.prev;
	  size_t prev_blocks = _heapinfo[prev_block].free.size;
	  size_t next_block = _heapinfo[block].free.next;
	  size_t next_blocks = _heapinfo[next_block].free.size;

	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold))))
	    {
	      malloc_info *newinfo;
	      size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal_nolock (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      newinfo = _malloc_internal_nolock (info_blocks * BLOCKSIZE);
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }

	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      (*__morecore) (-bytes);
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
	  _free_internal_nolock (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block.  */
	  next = ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block.  */
	  prev = ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first =
	    (uintptr_t) ptr % BLOCKSIZE >> type;
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
}
/* Return memory to the heap.
   Like 'free' but don't call a hook if there is one.  */
void
_free_internal (void *ptr)
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}

/* Return memory to the heap.  */

void
free (void *ptr)
{
  void (*hook) (void *) = gfree_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}

#ifndef HYBRID_MALLOC
/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (void *ptr)
{
  free (ptr);
}
#endif
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef min
#define min(a, b) ((a) < (b) ? (a) : (b))
#endif

/* Debugging hook for realloc.  */
static void *(*grealloc_hook) (void *, size_t);
/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.  */
void *
_realloc_internal_nolock (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
	{
	  result = _malloc_internal_nolock (size);
	  if (result != NULL)
	    {
	      memcpy (result, ptr, size);
	      _free_internal_nolock (ptr);
	      goto out;
	    }
	}

      /* The new size is a large allocation as well;
	 see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
	{
	  /* The new size is smaller; return
	     excess memory to the free list.  */
	  _heapinfo[block + blocks].busy.type = 0;
	  _heapinfo[block + blocks].busy.info.size
	    = _heapinfo[block].busy.info.size - blocks;
	  _heapinfo[block].busy.info.size = blocks;
	  /* We have just created a new chunk by splitting a chunk in two.
	     Now we will free this chunk; increment the statistics counter
	     so it doesn't become wrong when _free_internal decrements it.  */
	  ++_chunks_used;
	  _free_internal_nolock (ADDRESS (block + blocks));
	  result = ptr;
	}
      else if (blocks == _heapinfo[block].busy.info.size)
	/* No size change necessary.  */
	result = ptr;
      else
	{
	  /* Won't fit, so allocate a new region that will.
	     Free the old region first in case there is sufficient
	     adjacent free space to grow without moving.  */
	  blocks = _heapinfo[block].busy.info.size;
	  /* Prevent free from actually returning memory to the system.  */
	  oldlimit = _heaplimit;
	  _heaplimit = 0;
	  _free_internal_nolock (ptr);
	  result = _malloc_internal_nolock (size);
	  PROTECT_MALLOC_STATE (0);
	  if (_heaplimit == 0)
	    _heaplimit = oldlimit;
	  if (result == NULL)
	    {
	      /* Now we're really in trouble.  We have to unfree
		 the thing we just freed.  Unfortunately it might
		 have been coalesced with its neighbors.  */
	      if (_heapindex == block)
		(void) _malloc_internal_nolock (blocks * BLOCKSIZE);
	      else
		{
		  void *previous
		    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
		  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
		  _free_internal_nolock (previous);
		}
	      goto out;
	    }
	  if (ptr != result)
	    memmove (result, ptr, blocks * BLOCKSIZE);
	}
      break;

    default:
      /* Old size is a fragment; type is logarithm
	 to base two of the fragment size.  */
      if (size > (size_t) (1 << (type - 1)) &&
	  size <= (size_t) (1 << type))
	/* The new size is the same kind of fragment.  */
	result = ptr;
      else
	{
	  /* The new size is different; allocate a new space,
	     and copy the lesser of the new size and the old.  */
	  result = _malloc_internal_nolock (size);
	  if (result == NULL)
	    goto out;
	  memcpy (result, ptr, min (size, (size_t) 1 << type));
	  _free_internal_nolock (ptr);
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}
void *
_realloc_internal (void *ptr, size_t size)
{
  void *result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}

void *
realloc (void *ptr, size_t size)
{
  void *(*hook) (void *, size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  hook = grealloc_hook;
  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
void *
calloc (size_t nmemb, size_t size)
{
  void *result;
  size_t bytes = nmemb * size;

  if (size != 0 && bytes / size != nmemb)
    {
      errno = ENOMEM;
      return NULL;
    }

  result = malloc (bytes);
  if (result)
    return memset (result, 0, bytes);
  return result;
}
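
/* The overflow test above works because unsigned multiplication wraps
   modulo SIZE_MAX + 1.  Illustrative check (not original code): on a
   64-bit host, nmemb == 1 << 33 and size == 1 << 33 wrap around to
   bytes == 0, and bytes / size != nmemb exposes the overflow, so
   calloc fails with ENOMEM instead of returning an undersized
   buffer.  */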
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with the GNU C Library.  If not, see <http://www.gnu.org/licenses/>.  */

/* uClibc defines __GNU_LIBRARY__, but it is not completely
   compatible.  */
#if !defined (__GNU_LIBRARY__) || defined (__UCLIBC__)
#define __sbrk	sbrk
#else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

extern void *__sbrk (ptrdiff_t increment);
#endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */

/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
static void *
gdefault_morecore (ptrdiff_t increment)
{
  void *result;
#ifdef HYBRID_MALLOC
  if (!DUMPED)
    {
      return bss_sbrk (increment);
    }
#endif
  result = (void *) __sbrk (increment);
  if (result == (void *) -1)
    return NULL;
  return result;
}

void *(*__morecore) (ptrdiff_t) = gdefault_morecore;
/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.  */

void *
aligned_alloc (size_t alignment, size_t size)
{
  void *result;
  size_t adj, lastadj;

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  if (- size < alignment)
    {
      errno = ENOMEM;
      return NULL;
    }
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = alignment - (uintptr_t) result % alignment;
  if (adj == alignment)
    adj = 0;

  if (adj != alignment - 1)
    {
      do
	{
	  /* Reallocate the block with only as much excess as it
	     needs.  */
	  free (result);
	  result = malloc (size + adj);
	  if (result == NULL)	/* Impossible unless interrupted.  */
	    return NULL;

	  lastadj = adj;
	  adj = alignment - (uintptr_t) result % alignment;
	  if (adj == alignment)
	    adj = 0;
	  /* It's conceivable we might have been so unlucky as to get
	     a different block with weaker alignment.  If so, this
	     block is too short to contain SIZE after alignment
	     correction.  So we must try again and get another block,
	     slightly larger.  */
	} while (adj > lastadj);
    }
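
  /* Example of the padding logic (illustrative, not original code):
     for alignment == 64, a first malloc result ending in ...0x28
     gives adj == 64 - 0x28 % 64 == 24, so the block is re-requested
     with 24 spare bytes and the returned pointer is advanced by ADJ
     to the next 64-byte boundary.  The loop guards against the rare
     case where the second block is aligned even worse than the
     first.  */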
  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  l = malloc (sizeof *l);
	  if (l != NULL)
	    {
	      l->next = _aligned_blocks;
	      _aligned_blocks = l;
	    }
	}
      if (l != NULL)
	{
	  l->exact = result;
	  result = l->aligned = (char *) result + adj;
	}
      UNLOCK_ALIGNED_BLOCKS ();
      if (l == NULL)
	{
	  free (result);
	  result = NULL;
	}
    }

  return result;
}
/* Note that memalign and posix_memalign are not used in Emacs.  */
#ifndef HYBRID_MALLOC
/* An obsolete alias for aligned_alloc, for any old libraries that use
   this alias.  */

void *
memalign (size_t alignment, size_t size)
{
  return aligned_alloc (alignment, size);
}

/* If HYBRID_MALLOC is defined, we may want to use the system
   posix_memalign below.  */
int
posix_memalign (void **memptr, size_t alignment, size_t size)
{
  void *mem;

  if (alignment == 0
      || alignment % sizeof (void *) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = aligned_alloc (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;

  return 0;
}
#endif
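
/* Note on the validity test above (illustrative, not original code):
   (alignment & (alignment - 1)) == 0 holds exactly for powers of two,
   e.g. 64 & 63 == 0 but 48 & 47 == 32; POSIX additionally requires
   the alignment to be a nonzero multiple of sizeof (void *), which
   the first two conditions enforce.  */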
/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef HYBRID_MALLOC

# ifndef HAVE_MALLOC_H
/* Allocate SIZE bytes on a page boundary.  */
extern void *valloc (size_t);
# endif

# if defined _SC_PAGESIZE || !defined HAVE_GETPAGESIZE
#  include "getpagesize.h"
# elif !defined getpagesize
extern int getpagesize (void);
# endif

static size_t pagesize;

void *
valloc (size_t size)
{
  if (pagesize == 0)
    pagesize = getpagesize ();

  return aligned_alloc (pagesize, size);
}
#endif /* HYBRID_MALLOC */
#undef malloc
#undef realloc
#undef calloc
#undef aligned_alloc
#undef free

#ifdef HYBRID_MALLOC
/* Declare system malloc and friends.  */
extern void *malloc (size_t size);
extern void *realloc (void *ptr, size_t size);
extern void *calloc (size_t nmemb, size_t size);
extern void free (void *ptr);
#ifdef HAVE_ALIGNED_ALLOC
extern void *aligned_alloc (size_t alignment, size_t size);
#elif defined HAVE_POSIX_MEMALIGN
extern int posix_memalign (void **memptr, size_t alignment, size_t size);
#endif

/* See the comments near the beginning of this file for explanations
   of the following functions.  */

void *
hybrid_malloc (size_t size)
{
  if (DUMPED)
    return malloc (size);
  return gmalloc (size);
}

void *
hybrid_calloc (size_t nmemb, size_t size)
{
  if (DUMPED)
    return calloc (nmemb, size);
  return gcalloc (nmemb, size);
}

void
hybrid_free (void *ptr)
{
  if (!DUMPED)
    gfree (ptr);
  else if (!ALLOCATED_BEFORE_DUMPING (ptr))
    free (ptr);
  /* Otherwise the dumped emacs is trying to free something allocated
     before dumping; do nothing.  */
  return;
}

#if defined HAVE_ALIGNED_ALLOC || defined HAVE_POSIX_MEMALIGN
void *
hybrid_aligned_alloc (size_t alignment, size_t size)
{
  if (!DUMPED)
    return galigned_alloc (alignment, size);
  /* The following is copied from alloc.c  */
#ifdef HAVE_ALIGNED_ALLOC
  return aligned_alloc (alignment, size);
#else  /* HAVE_POSIX_MEMALIGN */
  void *p;
  return posix_memalign (&p, alignment, size) == 0 ? p : 0;
#endif
}
#endif

void *
hybrid_realloc (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, oldsize;

  if (!DUMPED)
    return grealloc (ptr, size);
  if (!ALLOCATED_BEFORE_DUMPING (ptr))
    return realloc (ptr, size);

  /* The dumped emacs is trying to realloc storage allocated before
     dumping.  We just malloc new space and copy the data.  */
  if (size == 0 || ptr == NULL)
    return malloc (size);
  block = ((char *) ptr - _heapbase) / BLOCKSIZE + 1;
  type = _heapinfo[block].busy.type;
  oldsize =
    type == 0 ? _heapinfo[block].busy.info.size * BLOCKSIZE
      : (size_t) 1 << type;
  result = malloc (size);
  if (result)
    return memcpy (result, ptr, min (oldsize, size));
  return result;
}

#else /* ! HYBRID_MALLOC */

void *
malloc (size_t size)
{
  return gmalloc (size);
}

void *
calloc (size_t nmemb, size_t size)
{
  return gcalloc (nmemb, size);
}

void
free (void *ptr)
{
  gfree (ptr);
}

void *
aligned_alloc (size_t alignment, size_t size)
{
  return galigned_alloc (alignment, size);
}

void *
realloc (void *ptr, size_t size)
{
  return grealloc (ptr, size);
}

#endif /* HYBRID_MALLOC */
#ifdef GC_MCHECK

/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#include <stdio.h>

/* Old hook values.  */
static void (*old_free_hook) (void *ptr);
static void *(*old_malloc_hook) (size_t size);
static void *(*old_realloc_hook) (void *ptr, size_t size);

/* Function to call when something awful happens.  */
static void (*abortfunc) (enum mcheck_status);

/* Arbitrary magical numbers.  */
#define MAGICWORD	(SIZE_MAX / 11 ^ SIZE_MAX / 13 << 3)
#define MAGICFREE	(SIZE_MAX / 17 ^ SIZE_MAX / 19 << 4)
#define MAGICBYTE	((char) 0xd7)
#define MALLOCFLOOD	((char) 0x93)
#define FREEFLOOD	((char) 0x95)

struct hdr
  {
    size_t size;	/* Exact size requested by user.  */
    size_t magic;	/* Magic number to check header integrity.  */
  };
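
/* Layout sketch (illustrative, not original code): mallochook below
   allocates sizeof (struct hdr) + SIZE + 1 bytes, so a user block
   looks like

     [ size | magic ][ SIZE user bytes ... ][ MAGICBYTE ]

   The user pointer is hdr + 1; checkhdr verifies that magic is still
   MAGICWORD (a clobbered header yields MCHECK_HEAD, a freed one
   MCHECK_FREE) and that the trailing byte is still MAGICBYTE
   (otherwise MCHECK_TAIL).  */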
static enum mcheck_status
checkhdr (const struct hdr *hdr)
{
  enum mcheck_status status;
  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
	status = MCHECK_TAIL;
      else
	status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}
static void
freehook (void *ptr)
{
  struct hdr *hdr;

  if (ptr)
    {
      struct alignlist *l;

      /* If the block was allocated by aligned_alloc, its real pointer
	 to free is recorded in _aligned_blocks; find that.  */
      PROTECT_MALLOC_STATE (0);
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == ptr)
	  {
	    l->aligned = NULL;	/* Mark the slot in the list as free.  */
	    ptr = l->exact;
	    break;
	  }
      UNLOCK_ALIGNED_BLOCKS ();
      PROTECT_MALLOC_STATE (1);

      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      memset (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  gfree_hook = old_free_hook;
  free (hdr);
  gfree_hook = freehook;
}
static void *
mallochook (size_t size)
{
  struct hdr *hdr;

  gmalloc_hook = old_malloc_hook;
  hdr = malloc (sizeof *hdr + size + 1);
  gmalloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  return memset (hdr + 1, MALLOCFLOOD, size);
}

static void *
reallochook (void *ptr, size_t size)
{
  struct hdr *hdr = NULL;
  size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
	memset ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  gfree_hook = old_free_hook;
  gmalloc_hook = old_malloc_hook;
  grealloc_hook = old_realloc_hook;
  hdr = realloc (hdr, sizeof *hdr + size + 1);
  gfree_hook = freehook;
  gmalloc_hook = mallochook;
  grealloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    memset ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return hdr + 1;
}
static void
mabort (enum mcheck_status status)
{
  const char *msg;
  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
# ifdef emacs
  emacs_abort ();
# else
  abort ();
# endif
#endif
}
static int mcheck_used = 0;

int
mcheck (void (*func) (enum mcheck_status))
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = gfree_hook;
      gfree_hook = freehook;
      old_malloc_hook = gmalloc_hook;
      gmalloc_hook = mallochook;
      old_realloc_hook = grealloc_hook;
      grealloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}

enum mcheck_status
mprobe (void *ptr)
{
  return mcheck_used ? checkhdr (ptr) : MCHECK_DISABLED;
}

#endif /* GC_MCHECK */