/* Declarations for `malloc' and friends.
   Copyright (C) 1990-1993, 1995-1996, 1999, 2002-2007, 2013-2016 Free
   Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#include <config.h>

#if defined HAVE_PTHREAD && !defined HYBRID_MALLOC
#define USE_PTHREAD
#endif

#include <string.h>
#include <limits.h>
#include <stdint.h>

#ifdef HYBRID_GET_CURRENT_DIR_NAME
#undef get_current_dir_name
#endif

#include <unistd.h>

#ifdef USE_PTHREAD
#include <pthread.h>
#endif

#ifdef WINDOWSNT
#include <w32heap.h>	/* for sbrk */
#endif

#ifdef emacs
extern void emacs_abort (void);
#endif
/* If HYBRID_MALLOC is defined, then temacs will use malloc,
   realloc... as defined in this file (and renamed gmalloc,
   grealloc... via the macros that follow).  The dumped emacs,
   however, will use the system malloc, realloc....  In other source
   files, malloc, realloc... are renamed hybrid_malloc,
   hybrid_realloc... via macros in conf_post.h.  hybrid_malloc and
   friends are wrapper functions defined later in this file.
   aligned_alloc is defined as a macro only in alloc.c.

   As of this writing (August 2014), Cygwin is the only platform on
   which HYBRID_MALLOC is defined.  Any other platform that wants to
   define it will have to define the macros DUMPED and
   ALLOCATED_BEFORE_DUMPING, defined below for Cygwin.  */
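
/* Illustrative sketch, not compiled: under the hybrid scheme, each
   hybrid_* wrapper boils down to a runtime DUMPED check choosing
   between the gmalloc heap (pre-dump) and the system heap (post-dump).
   The real wrappers appear near the end of this file.

     void *
     hybrid_malloc (size_t size)
     {
       return DUMPED ? malloc (size) : gmalloc (size);
     }
*/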
#undef malloc
#undef realloc
#undef calloc
#undef free
#define malloc gmalloc
#define realloc grealloc
#define calloc gcalloc
#define aligned_alloc galigned_alloc
#define free gfree
#ifdef CYGWIN
extern void *bss_sbrk (ptrdiff_t size);
extern int bss_sbrk_did_unexec;
extern char bss_sbrk_buffer[];
extern void *bss_sbrk_buffer_end;
#define DUMPED bss_sbrk_did_unexec
#define ALLOCATED_BEFORE_DUMPING(P) \
  ((P) < bss_sbrk_buffer_end && (P) >= (void *) bss_sbrk_buffer)
#endif
#ifdef __cplusplus
extern "C"
{
#endif

#include <stddef.h>
/* Allocate SIZE bytes of memory.  */
extern void *malloc (size_t size) ATTRIBUTE_MALLOC_SIZE ((1));
/* Re-allocate the previously allocated block
   in ptr, making the new block SIZE bytes long.  */
extern void *realloc (void *ptr, size_t size) ATTRIBUTE_ALLOC_SIZE ((2));
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern void *calloc (size_t nmemb, size_t size) ATTRIBUTE_MALLOC_SIZE ((1,2));
/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
extern void free (void *ptr);
/* Allocate SIZE bytes aligned to a multiple of ALIGNMENT bytes.  */
#ifdef MSDOS
extern void *aligned_alloc (size_t, size_t);
extern void *memalign (size_t, size_t);
extern int posix_memalign (void **, size_t, size_t);
#endif
#ifdef USE_PTHREAD
/* Set up mutexes and make malloc etc. thread-safe.  */
extern void malloc_enable_thread (void);
#endif

#ifdef emacs
extern void emacs_abort (void);
#endif
/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT		(CHAR_BIT * sizeof (int))
#define BLOCKLOG	(INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE	(1 << BLOCKLOG)
#define BLOCKIFY(SIZE)	(((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
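
/* A minimal illustration, kept out of the build, of the block
   arithmetic above, assuming a host where INT_BIT > 16, so
   BLOCKLOG == 12 and BLOCKSIZE == 4096.  Requests of BLOCKSIZE / 2
   bytes or less are instead rounded up to a power of two and served
   as a fragment of a block.  */
#if 0
#include <assert.h>
static void
blockify_example (void)
{
  assert (BLOCKIFY (1) == 1);		 /* One whole block.  */
  assert (BLOCKIFY (BLOCKSIZE) == 1);	 /* Exactly one block.  */
  assert (BLOCKIFY (BLOCKSIZE + 1) == 2);
  assert (BLOCKIFY (10000) == 3);	 /* 10000 bytes -> 3 blocks.  */
}
#endif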

/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP	(INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS	8

/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
	/* Zero for a large (multiblock) object, or positive giving the
	   logarithm to the base two of the fragment size.  */
	int type;
	union
	  {
	    struct
	      {
		size_t nfree;	/* Free frags in a fragmented block.  */
		size_t first;	/* First free fragment of the block.  */
	      } frag;
	    /* For a large object, in its first block, this has the number
	       of blocks in the object.  In the other blocks, this has a
	       negative number which says how far back the first block is.  */
	    ptrdiff_t size;
	  } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
	size_t size;	/* Size (in blocks) of a free cluster.  */
	size_t next;	/* Index of next free cluster.  */
	size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;

/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A)	(((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B)	((void *) (((B) - 1) * BLOCKSIZE + _heapbase))
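
/* A minimal illustration, kept out of the build, of the BLOCK/ADDRESS
   mapping: block numbers are 1-based indices into _heapinfo, so the
   two macros invert each other for any block-aligned address.  */
#if 0
static void
block_mapping_example (void)
{
  size_t b = BLOCK (_heapbase);	 /* First heap block is number 1.  */
  void *p = ADDRESS (b);	 /* ... and maps back to _heapbase.  */
  /* For any address A inside the heap, ADDRESS (BLOCK (A)) rounds A
     down to the start of its block.  */
}
#endif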

/* Current search index for the heap table.  */
extern size_t _heapindex;

/* Limit of valid info table indices.  */
extern size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];

/* List of blocks allocated with aligned_alloc and friends.  */
struct alignlist
  {
    struct alignlist *next;
    void *aligned;	/* The address that aligned_alloc returned.  */
    void *exact;	/* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;

/* Instrumentation.  */
extern size_t _chunks_used;
extern size_t _bytes_used;
extern size_t _chunks_free;
extern size_t _bytes_free;

/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern void *_malloc_internal (size_t);
extern void *_realloc_internal (void *, size_t);
extern void _free_internal (void *);
extern void *_malloc_internal_nolock (size_t);
extern void *_realloc_internal_nolock (void *, size_t);
extern void _free_internal_nolock (void *);

#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;
#define LOCK()					\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_lock (&_malloc_mutex);	\
  } while (0)
#define UNLOCK()				\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_unlock (&_malloc_mutex);	\
  } while (0)
#define LOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_aligned_blocks_mutex);	\
  } while (0)
#define UNLOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_aligned_blocks_mutex);	\
  } while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif

/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object.  */
extern void *malloc_find_object_address (void *ptr);

/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern void *(*__morecore) (ptrdiff_t size);

/* Default value of `__morecore'.  */
extern void *__default_morecore (ptrdiff_t size);

/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) (void);

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
extern size_t __malloc_extra_blocks;

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize (void);

/* Hooks for debugging versions.  */
extern void (*__malloc_initialize_hook) (void);
extern void (*__free_hook) (void *ptr);
extern void *(*__malloc_hook) (size_t size);
extern void *(*__realloc_hook) (void *ptr, size_t size);
extern void *(*__memalign_hook) (size_t size, size_t alignment);

/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck (void (*abortfunc) (enum mcheck_status));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe (void *ptr);
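
/* A minimal usage sketch, kept out of the build: mcheck must be
   installed before the first allocation, after which mprobe can be
   used to spot-check any live block.  */
#if 0
static void
mcheck_example (void)
{
  mcheck (NULL);		/* Install hooks; NULL => default abort.  */
  char *p = malloc (32);
  p[10] = 'x';			/* In-bounds write: block stays consistent.  */
  if (mprobe (p) != MCHECK_OK)
    abort ();
  free (p);
}
#endif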

/* Activate a standard collection of tracing hooks.  */
extern void mtrace (void);
extern void muntrace (void);

/* Statistics available to the user.  */
struct mstats
  {
    size_t bytes_total;	/* Total size of the heap.  */
    size_t chunks_used;	/* Chunks allocated by the user.  */
    size_t bytes_used;	/* Byte total of user-allocated chunks.  */
    size_t chunks_free;	/* Chunks in the free list.  */
    size_t bytes_free;	/* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats (void);
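
/* A minimal usage sketch, kept out of the build, of the statistics
   interface above.  */
#if 0
#include <stdio.h>
static void
mstats_example (void)
{
  struct mstats s = mstats ();
  fprintf (stderr, "heap: %zu bytes, %zu used / %zu free\n",
	   s.bytes_total, s.bytes_used, s.bytes_free);
}
#endif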

/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings (void *start, void (*warnfun) (const char *));

#ifdef __cplusplus
}
#endif

/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#include <errno.h>

void *(*__morecore) (ptrdiff_t size) = __default_morecore;

/* Debugging hook for `malloc'.  */
void *(*__malloc_hook) (size_t size);

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries.  */
static size_t heapsize;

/* Search index in the info table.  */
size_t _heapindex;

/* Limit of valid info table indices.  */
size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
size_t _chunks_used;
size_t _bytes_used;
size_t _chunks_free;
size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

size_t __malloc_extra_blocks;

void (*__malloc_initialize_hook) (void);
void (*__after_morecore_hook) (void);

#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static size_t last_state_size;
static malloc_info *last_heapinfo;

void
protect_malloc_state (int protect_p)
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo   = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
		    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
	abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif

/* Aligned allocation.  */
static void *
align (size_t size)
{
  void *result;
  ptrdiff_t adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows the
     ptrdiff_t type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if (PTRDIFF_MAX < size)
    result = 0;
  else
    result = (*__morecore) (size);
  adj = (uintptr_t) result % BLOCKSIZE;
  if (adj != 0)
    {
      adj = BLOCKSIZE - adj;
      (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}

/* Get SIZE bytes, if we can get them starting at END.
   Return the address of the space we got.
   If we cannot get space at END, fail and return 0.  */
static void *
get_contiguous_space (ptrdiff_t size, void *position)
{
  void *before;
  void *after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}

/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void
register_heapinfo (void)
{
  size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_object_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}

#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;

static void
malloc_atfork_handler_prepare (void)
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread (void)
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
		  malloc_atfork_handler_parent,
		  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif	/* USE_PTHREAD */

static void
malloc_initialize_1 (void)
{
#ifdef GC_MCHECK
  mcheck (NULL);
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}

/* Set everything up and remember that we have.
   main will call malloc which calls this function.  That is before any threads
   or signal handlers have been set up, so we don't need thread protection.  */
int
__malloc_initialize (void)
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}

static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static void *
morecore_nolock (size_t size)
{
  void *result;
  malloc_info *newinfo, *oldinfo;
  size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
	 added blocks in the table itself, as we hope to place them in
	 existing free space, which is already covered by part of the
	 existing table.  */
      newsize = heapsize;
      do
	newsize *= 2;
      while ((size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
	 from realloc in the case of growing a large block, because the
	 block being grown is momentarily marked as free.  In this case
	 _heaplimit is zero so we know not to reuse space for internal
	 allocation.  */
      if (_heaplimit != 0)
	{
	  /* First try to allocate the new info table in core we already
	     have, in the usual way using realloc.  If realloc cannot
	     extend it in place or relocate it to existing sufficient core,
	     we will get called again, and the code above will notice the
	     `morecore_recursing' flag and return null.  */
	  int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
	  morecore_recursing = 1;
	  newinfo = _realloc_internal_nolock (_heapinfo,
					      newsize * sizeof (malloc_info));
	  morecore_recursing = 0;
	  if (newinfo == NULL)
	    errno = save;
	  else
	    {
	      /* We found some space in core, and realloc has put the old
		 table's blocks on the free list.  Now zero the new part
		 of the table and install the new table location.  */
	      memset (&newinfo[heapsize], 0,
		      (newsize - heapsize) * sizeof (malloc_info));
	      _heapinfo = newinfo;
	      heapsize = newsize;
	      goto got_heap;
	    }
	}

      /* Allocate new space for the malloc info table.  */
      while (1)
	{
	  newinfo = align (newsize * sizeof (malloc_info));

	  /* Did it fail?  */
	  if (newinfo == NULL)
	    {
	      (*__morecore) (-size);
	      return NULL;
	    }

	  /* Is it big enough to record status for its own space?
	     If so, we win.  */
	  if ((size_t) BLOCK ((char *) newinfo
			      + newsize * sizeof (malloc_info))
	      < newsize)
	    break;

	  /* Must try again.  First give back most of what we just got.  */
	  (*__morecore) (- newsize * sizeof (malloc_info));
	  newsize *= 2;
	}

      /* Copy the old table to the beginning of the new,
	 and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
	      (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
	 it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}

/* Allocate memory from the heap.  */
void *
_malloc_internal_nolock (size_t size)
{
  void *result;
  size_t block, blocks, lastblocks, start;
  register size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size.  */
      register size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;

      /* Look in the fragment lists for a
	 free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters.  */
	  result = next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    _heapinfo[block].busy.info.frag.first =
	      (uintptr_t) next->next % BLOCKSIZE >> log;

	  /* Update the statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
	  result = _malloc_internal_nolock (BLOCKSIZE);
	  PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
	  result = _malloc_internal_nolock (BLOCKSIZE);
#else
	  result = malloc (BLOCKSIZE);
#endif
	  if (result == NULL)
	    {
	      PROTECT_MALLOC_STATE (1);
	      goto out;
	    }

	  /* Link all fragments but the first into the free list.  */
	  next = (struct list *) ((char *) result + (1 << log));
	  next->next = NULL;
	  next->prev = &_fraghead[log];
	  _fraghead[log].next = next;

	  for (i = 2; i < (size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      next->next->prev = next;
	    }

	  /* Initialize the nfree and first counters for this block.  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Get a little extra.  */
	      size_t wantblocks = blocks + __malloc_extra_blocks;
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      /* Check to see if the new core will be contiguous with the
		 final free block; if so we don't need to get as much.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  /* We can't do this if we will have to make the heap info
		     table bigger to accommodate the new space.  */
		  block + wantblocks <= heapsize &&
		  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
					ADDRESS (block + lastblocks)))
		{
		  /* We got it contiguously.  Which block we are extending
		     (the `final free block' referred to above) might have
		     changed, if it got combined with a freed info table.  */
		  block = _heapinfo[0].free.prev;
		  _heapinfo[block].free.size += (wantblocks - lastblocks);
		  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
		  _heaplimit += wantblocks - lastblocks;
		  continue;
		}
	      result = morecore_nolock (wantblocks * BLOCKSIZE);
	      if (result == NULL)
		goto out;
	      block = BLOCK (result);
	      /* Put the new block at the end of the free list.  */
	      _heapinfo[block].free.size = wantblocks;
	      _heapinfo[block].free.prev = _heapinfo[0].free.prev;
	      _heapinfo[block].free.next = 0;
	      _heapinfo[0].free.prev = block;
	      _heapinfo[_heapinfo[block].free.prev].free.next = block;
	      ++_chunks_free;
	      /* Now loop to use some of that block for this allocation.  */
	    }
	}

      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list.  */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list.  */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
	 first with a negative number so you can find the first block by
	 adding that adjustment.  */
      while (--blocks > 0)
	_heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

void *
_malloc_internal (size_t size)
{
  void *result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}

void *
malloc (size_t size)
{
  void *(*hook) (size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of __malloc_hook to an automatic variable in case
     __malloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not the right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to __malloc_hook) for now.  */
  hook = __malloc_hook;
  return (hook != NULL ? *hook : _malloc_internal) (size);
}

#ifndef _LIBC

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

extern void *_malloc (size_t);
extern void _free (void *);
extern void *_realloc (void *, size_t);

void *
_malloc (size_t size)
{
  return malloc (size);
}

void
_free (void *ptr)
{
  free (ptr);
}

void *
_realloc (void *ptr, size_t size)
{
  return realloc (ptr, size);
}

#endif

/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

/* Debugging hook for free.  */
void (*__free_hook) (void *__ptr);

/* List of blocks allocated by aligned_alloc.  */
struct alignlist *_aligned_blocks = NULL;

/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (void *ptr)
{
  int type;
  size_t block, blocks;
  register size_t i;
  struct list *prev, *next;
  void *curbrk;
  const size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

  PROTECT_MALLOC_STATE (0);

  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}

      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

	  size_t info_block = BLOCK (_heapinfo);
	  size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  size_t prev_block = _heapinfo[block].free.prev;
	  size_t prev_blocks = _heapinfo[prev_block].free.size;
	  size_t next_block = _heapinfo[block].free.next;
	  size_t next_blocks = _heapinfo[next_block].free.size;

	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold))))
	    {
	      malloc_info *newinfo;
	      size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal_nolock (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      newinfo = _malloc_internal_nolock (info_blocks * BLOCKSIZE);
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }

	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      (*__morecore) (-bytes);
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
	  _free_internal_nolock (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block.  */
	  next = ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block.  */
	  prev = ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first =
	    (uintptr_t) ptr % BLOCKSIZE >> type;
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
}

/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (void *ptr)
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}

/* Return memory to the heap.  */

void
free (void *ptr)
{
  void (*hook) (void *) = __free_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}

/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (void *ptr)
{
  free (ptr);
}
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef min
#define min(a, b) ((a) < (b) ? (a) : (b))
#endif

/* Debugging hook for realloc.  */
void *(*__realloc_hook) (void *ptr, size_t size);

/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.  */
void *
_realloc_internal_nolock (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
	{
	  result = _malloc_internal_nolock (size);
	  if (result != NULL)
	    {
	      memcpy (result, ptr, size);
	      _free_internal_nolock (ptr);
	      goto out;
	    }
	}

      /* The new size is a large allocation as well;
	 see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
	{
	  /* The new size is smaller; return
	     excess memory to the free list.  */
	  _heapinfo[block + blocks].busy.type = 0;
	  _heapinfo[block + blocks].busy.info.size
	    = _heapinfo[block].busy.info.size - blocks;
	  _heapinfo[block].busy.info.size = blocks;
	  /* We have just created a new chunk by splitting a chunk in two.
	     Now we will free this chunk; increment the statistics counter
	     so it doesn't become wrong when _free_internal decrements it.  */
	  ++_chunks_used;
	  _free_internal_nolock (ADDRESS (block + blocks));
	  result = ptr;
	}
      else if (blocks == _heapinfo[block].busy.info.size)
	/* No size change necessary.  */
	result = ptr;
      else
	{
	  /* Won't fit, so allocate a new region that will.
	     Free the old region first in case there is sufficient
	     adjacent free space to grow without moving.  */
	  blocks = _heapinfo[block].busy.info.size;
	  /* Prevent free from actually returning memory to the system.  */
	  oldlimit = _heaplimit;
	  _heaplimit = 0;
	  _free_internal_nolock (ptr);
	  result = _malloc_internal_nolock (size);
	  PROTECT_MALLOC_STATE (0);
	  if (_heaplimit == 0)
	    _heaplimit = oldlimit;
	  if (result == NULL)
	    {
	      /* Now we're really in trouble.  We have to unfree
		 the thing we just freed.  Unfortunately it might
		 have been coalesced with its neighbors.  */
	      if (_heapindex == block)
		(void) _malloc_internal_nolock (blocks * BLOCKSIZE);
	      else
		{
		  void *previous
		    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
		  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
		  _free_internal_nolock (previous);
		}
	      goto out;
	    }
	  if (ptr != result)
	    memmove (result, ptr, blocks * BLOCKSIZE);
	}
      break;

    default:
      /* Old size is a fragment; type is logarithm
	 to base two of the fragment size.  */
      if (size > (size_t) (1 << (type - 1)) &&
	  size <= (size_t) (1 << type))
	/* The new size is the same kind of fragment.  */
	result = ptr;
      else
	{
	  /* The new size is different; allocate a new space,
	     and copy the lesser of the new size and the old.  */
	  result = _malloc_internal_nolock (size);
	  if (result == NULL)
	    goto out;
	  memcpy (result, ptr, min (size, (size_t) 1 << type));
	  _free_internal_nolock (ptr);
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

void *
_realloc_internal (void *ptr, size_t size)
{
  void *result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}

void *
realloc (void *ptr, size_t size)
{
  void *(*hook) (void *, size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  hook = __realloc_hook;
  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
void *
calloc (size_t nmemb, size_t size)
{
  void *result;
  size_t bytes = nmemb * size;

  if (size != 0 && bytes / size != nmemb)
    {
      errno = ENOMEM;
      return NULL;
    }

  result = malloc (bytes);
  if (result)
    return memset (result, 0, bytes);
  return result;
}
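
/* A minimal illustration, kept out of the build, of why the division
   test above detects multiplication overflow: when nmemb * size wraps
   modulo SIZE_MAX + 1, dividing the wrapped product by size cannot
   give nmemb back.  */
#if 0
static void
calloc_overflow_example (void)
{
  /* With a 32-bit size_t, 65537 * 65537 wraps to 131073, and
     131073 / 65537 == 1 != 65537, so the test above catches the
     overflow and calloc fails with ENOMEM instead of returning an
     undersized block.  */
  void *p = calloc (65537, 65537);	/* => NULL on such a host.  */
}
#endif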
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with the GNU C Library.  If not, see <http://www.gnu.org/licenses/>.  */

/* uClibc defines __GNU_LIBRARY__, but it is not completely
   compatible.  */
#if !defined (__GNU_LIBRARY__) || defined (__UCLIBC__)
#define	__sbrk	sbrk
#else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

extern void *__sbrk (ptrdiff_t increment);
#endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */

/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
void *
__default_morecore (ptrdiff_t increment)
{
  void *result;
#if defined (CYGWIN)
  if (!DUMPED)
    {
      return bss_sbrk (increment);
    }
#endif
  result = (void *) __sbrk (increment);
  if (result == (void *) -1)
    return NULL;
  return result;
}
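
/* A minimal illustration, kept out of the build, of the sbrk-style
   __morecore contract assumed throughout this file: a zero increment
   just reports the current break, positive grows it, negative shrinks
   it, and the return value is the break prior to the adjustment.  */
#if 0
static void
morecore_example (void)
{
  void *brk0 = (*__morecore) (0);	/* Query current end of core.  */
  void *p = (*__morecore) (4096);	/* Grow; returns old break (== brk0).  */
  (*__morecore) (-4096);		/* Give the space back.  */
}
#endif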
/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.  */

void *(*__memalign_hook) (size_t size, size_t alignment);

void *
aligned_alloc (size_t alignment, size_t size)
{
  void *result;
  size_t adj, lastadj;
  void *(*hook) (size_t, size_t) = __memalign_hook;

  if (hook)
    return (*hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  if (- size < alignment)
    {
      errno = ENOMEM;
      return NULL;
    }
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = alignment - (uintptr_t) result % alignment;
  if (adj == alignment)
    adj = 0;

  if (adj != alignment - 1)
    {
      do
	{
	  /* Reallocate the block with only as much excess as it
	     needs.  */
	  free (result);
	  result = malloc (size + adj);
	  if (result == NULL)	/* Impossible unless interrupted.  */
	    return NULL;

	  lastadj = adj;
	  adj = alignment - (uintptr_t) result % alignment;
	  if (adj == alignment)
	    adj = 0;
	  /* It's conceivable we might have been so unlucky as to get
	     a different block with weaker alignment.  If so, this
	     block is too short to contain SIZE after alignment
	     correction.  So we must try again and get another block,
	     slightly larger.  */
	} while (adj > lastadj);
    }

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  l = malloc (sizeof *l);
	  if (l != NULL)
	    {
	      l->next = _aligned_blocks;
	      _aligned_blocks = l;
	    }
	}
      if (l != NULL)
	{
	  l->exact = result;
	  result = l->aligned = (char *) result + adj;
	}
      UNLOCK_ALIGNED_BLOCKS ();
      if (l == NULL)
	{
	  free (result);
	  result = NULL;
	}
    }

  return result;
}

/* An obsolete alias for aligned_alloc, for any old libraries that use
   this alias.  */

void *
memalign (size_t alignment, size_t size)
{
  return aligned_alloc (alignment, size);
}

/* If HYBRID_MALLOC is defined, we may want to use the system
   posix_memalign below.  */
#ifndef HYBRID_MALLOC
int
posix_memalign (void **memptr, size_t alignment, size_t size)
{
  void *mem;

  if (alignment == 0
      || alignment % sizeof (void *) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = aligned_alloc (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;

  return 0;
}
#endif
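
/* A minimal usage sketch, kept out of the build: posix_memalign
   reports failure through its return value rather than errno.  */
#if 0
static void *
posix_memalign_example (void)
{
  void *p = NULL;
  /* 64 is a power of two and a multiple of sizeof (void *), so it
     passes the alignment checks above.  */
  if (posix_memalign (&p, 64, 1000) != 0)
    return NULL;
  /* ... use p, then free (p) as usual ...  */
  return p;
}
#endif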

/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

/* Allocate SIZE bytes on a page boundary.  */
extern void *valloc (size_t);

#if defined _SC_PAGESIZE || !defined HAVE_GETPAGESIZE
# include "getpagesize.h"
#elif !defined getpagesize
extern int getpagesize (void);
#endif

static size_t pagesize;

void *
valloc (size_t size)
{
  if (pagesize == 0)
    pagesize = getpagesize ();

  return aligned_alloc (pagesize, size);
}

#undef malloc
#undef realloc
#undef calloc
#undef aligned_alloc
#undef free

#ifdef HYBRID_MALLOC
/* Declare system malloc and friends.  */
extern void *malloc (size_t size);
extern void *realloc (void *ptr, size_t size);
extern void *calloc (size_t nmemb, size_t size);
extern void free (void *ptr);
#ifdef HAVE_ALIGNED_ALLOC
extern void *aligned_alloc (size_t alignment, size_t size);
#elif defined HAVE_POSIX_MEMALIGN
extern int posix_memalign (void **memptr, size_t alignment, size_t size);
#endif

/* See the comments near the beginning of this file for explanations
   of the following functions.  */

void *
hybrid_malloc (size_t size)
{
  if (DUMPED)
    return malloc (size);
  return gmalloc (size);
}

void *
hybrid_calloc (size_t nmemb, size_t size)
{
  if (DUMPED)
    return calloc (nmemb, size);
  return gcalloc (nmemb, size);
}

void
hybrid_free (void *ptr)
{
  if (!DUMPED)
    gfree (ptr);
  else if (!ALLOCATED_BEFORE_DUMPING (ptr))
    free (ptr);
  /* Otherwise the dumped emacs is trying to free something allocated
     before dumping; do nothing.  */
  return;
}

#if defined HAVE_ALIGNED_ALLOC || defined HAVE_POSIX_MEMALIGN
void *
hybrid_aligned_alloc (size_t alignment, size_t size)
{
  if (!DUMPED)
    return galigned_alloc (alignment, size);
  /* The following is copied from alloc.c  */
#ifdef HAVE_ALIGNED_ALLOC
  return aligned_alloc (alignment, size);
#else  /* HAVE_POSIX_MEMALIGN */
  void *p;
  return posix_memalign (&p, alignment, size) == 0 ? p : 0;
#endif
}
#endif

void *
hybrid_realloc (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, oldsize;

  if (!DUMPED)
    return grealloc (ptr, size);
  if (!ALLOCATED_BEFORE_DUMPING (ptr))
    return realloc (ptr, size);

  /* The dumped emacs is trying to realloc storage allocated before
     dumping.  We just malloc new space and copy the data.  */
  if (size == 0 || ptr == NULL)
    return malloc (size);
  block = ((char *) ptr - _heapbase) / BLOCKSIZE + 1;
  type = _heapinfo[block].busy.type;
  oldsize =
    type == 0 ? _heapinfo[block].busy.info.size * BLOCKSIZE
      : (size_t) 1 << type;
  result = malloc (size);
  if (result)
    return memcpy (result, ptr, min (oldsize, size));
  return result;
}

#ifdef HYBRID_GET_CURRENT_DIR_NAME
/* Defined in sysdep.c.  */
char *gget_current_dir_name (void);

char *
hybrid_get_current_dir_name (void)
{
  if (DUMPED)
    return get_current_dir_name ();
  return gget_current_dir_name ();
}
#endif

#else	/* ! HYBRID_MALLOC */

void *
malloc (size_t size)
{
  return gmalloc (size);
}

void *
calloc (size_t nmemb, size_t size)
{
  return gcalloc (nmemb, size);
}

void
free (void *ptr)
{
  gfree (ptr);
}

void *
aligned_alloc (size_t alignment, size_t size)
{
  return galigned_alloc (alignment, size);
}

void *
realloc (void *ptr, size_t size)
{
  return grealloc (ptr, size);
}

#endif	/* HYBRID_MALLOC */

#ifdef GC_MCHECK

/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#include <stdio.h>

/* Old hook values.  */
static void (*old_free_hook) (void *ptr);
static void *(*old_malloc_hook) (size_t size);
static void *(*old_realloc_hook) (void *ptr, size_t size);

/* Function to call when something awful happens.  */
static void (*abortfunc) (enum mcheck_status);

/* Arbitrary magical numbers.  */
#define MAGICWORD	(SIZE_MAX / 11 ^ SIZE_MAX / 13 << 3)
#define MAGICFREE	(SIZE_MAX / 17 ^ SIZE_MAX / 19 << 4)
#define MAGICBYTE	((char) 0xd7)
#define MALLOCFLOOD	((char) 0x93)
#define FREEFLOOD	((char) 0x95)

struct hdr
  {
    size_t size;	/* Exact size requested by user.  */
    size_t magic;	/* Magic number to check header integrity.  */
  };
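
/* A sketch, not compiled, of the guarded block layout the mcheck hooks
   below maintain: a struct hdr before the user data and one magic byte
   after it, so both underruns and overruns change something that
   checkhdr can detect.

     [ hdr.size | hdr.magic | user data (size bytes) | MAGICBYTE ]
				^-- pointer returned to the user

   mallochook therefore allocates sizeof (struct hdr) + size + 1.  */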

static enum mcheck_status
checkhdr (const struct hdr *hdr)
{
  enum mcheck_status status;
  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
	status = MCHECK_TAIL;
      else
	status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}

static void
freehook (void *ptr)
{
  struct hdr *hdr;

  if (ptr)
    {
      struct alignlist *l;

      /* If the block was allocated by aligned_alloc, its real pointer
	 to free is recorded in _aligned_blocks; find that.  */
      PROTECT_MALLOC_STATE (0);
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == ptr)
	  {
	    l->aligned = NULL;	/* Mark the slot in the list as free.  */
	    ptr = l->exact;
	    break;
	  }
      UNLOCK_ALIGNED_BLOCKS ();
      PROTECT_MALLOC_STATE (1);

      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      memset (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}

static void *
mallochook (size_t size)
{
  struct hdr *hdr;

  __malloc_hook = old_malloc_hook;
  hdr = malloc (sizeof *hdr + size + 1);
  __malloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  return memset (hdr + 1, MALLOCFLOOD, size);
}

static void *
reallochook (void *ptr, size_t size)
{
  struct hdr *hdr = NULL;
  size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
	memset ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = realloc (hdr, sizeof *hdr + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    memset ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return hdr + 1;
}

static void
mabort (enum mcheck_status status)
{
  const char *msg;
  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
# ifdef emacs
  emacs_abort ();
# else
  abort ();
# endif
#endif
}

static int mcheck_used = 0;

int
mcheck (void (*func) (enum mcheck_status))
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = __free_hook;
      __free_hook = freehook;
      old_malloc_hook = __malloc_hook;
      __malloc_hook = mallochook;
      old_realloc_hook = __realloc_hook;
      __realloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}

enum mcheck_status
mprobe (void *ptr)
{
  return mcheck_used ? checkhdr (ptr) : MCHECK_DISABLED;
}

#endif /* GC_MCHECK */