lisp/strokes.el: Fix typos.
[emacs.git] / src / gmalloc.c
blobf8d0cfdc30a47cacf18f85d208711f8bdf6a069c
1 /* Declarations for `malloc' and friends.
2 Copyright (C) 1990-1993, 1995-1996, 1999, 2002-2007, 2013-2014 Free
3 Software Foundation, Inc.
4 Written May 1989 by Mike Haertel.
6 This library is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License as
8 published by the Free Software Foundation; either version 2 of the
9 License, or (at your option) any later version.
11 This library is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
16 You should have received a copy of the GNU General Public
17 License along with this library. If not, see <http://www.gnu.org/licenses/>.
19 The author may be reached (Email) at the address mike@ai.mit.edu,
20 or (US mail) as Mike Haertel c/o Free Software Foundation. */
22 #include <config.h>
24 #ifdef HAVE_PTHREAD
25 #define USE_PTHREAD
26 #endif
28 #include <string.h>
29 #include <limits.h>
30 #include <stdint.h>
31 #include <unistd.h>
33 #ifdef USE_PTHREAD
34 #include <pthread.h>
35 #endif
37 #ifdef WINDOWSNT
38 #include <w32heap.h> /* for sbrk */
39 #endif
41 #ifdef __cplusplus
42 extern "C"
44 #endif
46 #include <stddef.h>
49 /* Allocate SIZE bytes of memory. */
50 extern void *malloc (size_t size);
51 /* Re-allocate the previously allocated block
52 in ptr, making the new block SIZE bytes long. */
53 extern void *realloc (void *ptr, size_t size);
54 /* Allocate NMEMB elements of SIZE bytes each, all initialized to 0. */
55 extern void *calloc (size_t nmemb, size_t size);
56 /* Free a block allocated by `malloc', `realloc' or `calloc'. */
57 extern void free (void *ptr);
59 /* Allocate SIZE bytes allocated to ALIGNMENT bytes. */
60 #ifdef MSDOS
61 extern void *aligned_alloc (size_t, size_t);
62 extern void *memalign (size_t, size_t);
63 extern int posix_memalign (void **, size_t, size_t);
64 #endif
66 #ifdef USE_PTHREAD
67 /* Set up mutexes and make malloc etc. thread-safe. */
68 extern void malloc_enable_thread (void);
69 #endif
71 #ifdef emacs
72 extern void emacs_abort (void);
73 #endif
75 /* The allocator divides the heap into blocks of fixed size; large
76 requests receive one or more whole blocks, and small requests
77 receive a fragment of a block. Fragment sizes are powers of two,
78 and all fragments of a block are the same size. When all the
79 fragments in a block have been freed, the block itself is freed. */
80 #define INT_BIT (CHAR_BIT * sizeof (int))
81 #define BLOCKLOG (INT_BIT > 16 ? 12 : 9)
82 #define BLOCKSIZE (1 << BLOCKLOG)
83 #define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
85 /* Determine the amount of memory spanned by the initial heap table
86 (not an absolute limit). */
87 #define HEAP (INT_BIT > 16 ? 4194304 : 65536)
89 /* Number of contiguous free blocks allowed to build up at the end of
90 memory before they will be returned to the system. */
91 #define FINAL_FREE_BLOCKS 8
93 /* Data structure giving per-block information. */
/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
	/* Zero for a large (multiblock) object, or positive giving the
	   logarithm to the base two of the fragment size.  */
	int type;
	union
	  {
	    struct
	      {
		size_t nfree;	/* Free frags in a fragmented block.  */
		size_t first;	/* First free fragment of the block.  */
	      } frag;
	    /* For a large object, in its first block, this has the number
	       of blocks in the object.  In the other blocks, this has a
	       negative number which says how far back the first block is.  */
	    ptrdiff_t size;
	  } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
	size_t size;	/* Size (in blocks) of a free cluster.  */
	size_t next;	/* Index of next free cluster.  */
	size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;
125 /* Pointer to first block of the heap. */
126 extern char *_heapbase;
128 /* Table indexed by block number giving per-block information. */
129 extern malloc_info *_heapinfo;
131 /* Address to block number and vice versa. */
132 #define BLOCK(A) (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
133 #define ADDRESS(B) ((void *) (((B) - 1) * BLOCKSIZE + _heapbase))
135 /* Current search index for the heap table. */
136 extern size_t _heapindex;
138 /* Limit of valid info table indices. */
139 extern size_t _heaplimit;
/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];
/* List of blocks allocated with aligned_alloc and friends.
   Each entry maps the aligned address handed to the user back to the
   exact address that malloc returned, so free can find it.  */
struct alignlist
  {
    struct alignlist *next;
    void *aligned;	/* The address that aligned_alloc returned.  */
    void *exact;	/* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;
160 /* Instrumentation. */
161 extern size_t _chunks_used;
162 extern size_t _bytes_used;
163 extern size_t _chunks_free;
164 extern size_t _bytes_free;
166 /* Internal versions of `malloc', `realloc', and `free'
167 used when these functions need to call each other.
168 They are the same but don't call the hooks. */
169 extern void *_malloc_internal (size_t);
170 extern void *_realloc_internal (void *, size_t);
171 extern void _free_internal (void *);
172 extern void *_malloc_internal_nolock (size_t);
173 extern void *_realloc_internal_nolock (void *, size_t);
174 extern void _free_internal_nolock (void *);
#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;

/* The locks are no-ops until malloc_enable_thread has run, so that
   single-threaded startup never touches pthread primitives.  */
#define LOCK()					\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_lock (&_malloc_mutex);	\
  } while (0)
#define UNLOCK()				\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_unlock (&_malloc_mutex);	\
  } while (0)
#define LOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_aligned_blocks_mutex);	\
  } while (0)
#define UNLOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_aligned_blocks_mutex);	\
  } while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif
206 /* Given an address in the middle of a malloc'd object,
207 return the address of the beginning of the object. */
208 extern void *malloc_find_object_address (void *ptr);
210 /* Underlying allocation function; successive calls should
211 return contiguous pieces of memory. */
212 extern void *(*__morecore) (ptrdiff_t size);
214 /* Default value of `__morecore'. */
215 extern void *__default_morecore (ptrdiff_t size);
217 /* If not NULL, this function is called after each time
218 `__morecore' is called to increase the data size. */
219 extern void (*__after_morecore_hook) (void);
221 /* Number of extra blocks to get each time we ask for more core.
222 This reduces the frequency of calling `(*__morecore)'. */
223 extern size_t __malloc_extra_blocks;
225 /* Nonzero if `malloc' has been called and done its initialization. */
226 extern int __malloc_initialized;
227 /* Function called to initialize malloc data structures. */
228 extern int __malloc_initialize (void);
230 /* Hooks for debugging versions. */
231 extern void (*__malloc_initialize_hook) (void);
232 extern void (*__free_hook) (void *ptr);
233 extern void *(*__malloc_hook) (size_t size);
234 extern void *(*__realloc_hook) (void *ptr, size_t size);
235 extern void *(*__memalign_hook) (size_t size, size_t alignment);
/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };
248 /* Activate a standard collection of debugging hooks. This must be called
249 before `malloc' is ever called. ABORTFUNC is called with an error code
250 (see enum above) when an inconsistency is detected. If ABORTFUNC is
251 null, the standard function prints on stderr and then calls `abort'. */
252 extern int mcheck (void (*abortfunc) (enum mcheck_status));
254 /* Check for aberrations in a particular malloc'd block. You must have
255 called `mcheck' already. These are the same checks that `mcheck' does
256 when you free or reallocate a block. */
257 extern enum mcheck_status mprobe (void *ptr);
259 /* Activate a standard collection of tracing hooks. */
260 extern void mtrace (void);
261 extern void muntrace (void);
/* Statistics available to the user.  */
struct mstats
  {
    size_t bytes_total;	/* Total size of the heap.  */
    size_t chunks_used;	/* Chunks allocated by the user.  */
    size_t bytes_used;	/* Byte total of user-allocated chunks.  */
    size_t chunks_free;	/* Chunks in the free list.  */
    size_t bytes_free;	/* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats (void);
276 /* Call WARNFUN with a warning message when memory usage is high. */
277 extern void memory_warnings (void *start, void (*warnfun) (const char *));
279 #ifdef __cplusplus
281 #endif
283 /* Memory allocator `malloc'.
284 Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
285 Written May 1989 by Mike Haertel.
287 This library is free software; you can redistribute it and/or
288 modify it under the terms of the GNU General Public License as
289 published by the Free Software Foundation; either version 2 of the
290 License, or (at your option) any later version.
292 This library is distributed in the hope that it will be useful,
293 but WITHOUT ANY WARRANTY; without even the implied warranty of
294 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
295 General Public License for more details.
297 You should have received a copy of the GNU General Public
298 License along with this library. If not, see <http://www.gnu.org/licenses/>.
300 The author may be reached (Email) at the address mike@ai.mit.edu,
301 or (US mail) as Mike Haertel c/o Free Software Foundation. */
303 #include <errno.h>
305 /* On Cygwin there are two heaps. temacs uses the static heap
306 (defined in sheap.c and managed with bss_sbrk), and the dumped
307 emacs uses the Cygwin heap (managed with sbrk). When emacs starts
308 on Cygwin, it reinitializes malloc, and we save the old info for
309 use by free and realloc if they're called with a pointer into the
310 static heap.
312 Currently (2011-08-16) the Cygwin build doesn't use ralloc.c; if
313 this is changed in the future, we'll have to similarly deal with
314 reinitializing ralloc. */
315 #ifdef CYGWIN
316 extern void *bss_sbrk (ptrdiff_t size);
317 extern int bss_sbrk_did_unexec;
318 char *bss_sbrk_heapbase; /* _heapbase for static heap */
319 malloc_info *bss_sbrk_heapinfo; /* _heapinfo for static heap */
320 #endif
321 void *(*__morecore) (ptrdiff_t size) = __default_morecore;
323 /* Debugging hook for `malloc'. */
324 void *(*__malloc_hook) (size_t size);
326 /* Pointer to the base of the first block. */
327 char *_heapbase;
329 /* Block information table. Allocated with align/__free (not malloc/free). */
330 malloc_info *_heapinfo;
332 /* Number of info entries. */
333 static size_t heapsize;
335 /* Search index in the info table. */
336 size_t _heapindex;
338 /* Limit of valid info table indices. */
339 size_t _heaplimit;
341 /* Free lists for each fragment size. */
342 struct list _fraghead[BLOCKLOG];
344 /* Instrumentation. */
345 size_t _chunks_used;
346 size_t _bytes_used;
347 size_t _chunks_free;
348 size_t _bytes_free;
350 /* Are you experienced? */
351 int __malloc_initialized;
353 size_t __malloc_extra_blocks;
355 void (*__malloc_initialize_hook) (void);
356 void (*__after_morecore_hook) (void);
#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;		/* Is the state currently read-only?  */
static size_t last_state_size;		/* Size of the last mprotect'd region.  */
static malloc_info *last_heapinfo;	/* _heapinfo at the time of protection.  */

/* Make the malloc bookkeeping state read-only (PROTECT_P nonzero) or
   read-write (PROTECT_P zero), to catch stray writes into _heapinfo.  */
void
protect_malloc_state (int protect_p)
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
		    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
	abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif
405 /* Aligned allocation. */
406 static void *
407 align (size_t size)
409 void *result;
410 ptrdiff_t adj;
412 /* align accepts an unsigned argument, but __morecore accepts a
413 signed one. This could lead to trouble if SIZE overflows the
414 ptrdiff_t type accepted by __morecore. We just punt in that
415 case, since they are requesting a ludicrous amount anyway. */
416 if (PTRDIFF_MAX < size)
417 result = 0;
418 else
419 result = (*__morecore) (size);
420 adj = (uintptr_t) result % BLOCKSIZE;
421 if (adj != 0)
423 adj = BLOCKSIZE - adj;
424 (*__morecore) (adj);
425 result = (char *) result + adj;
428 if (__after_morecore_hook)
429 (*__after_morecore_hook) ();
431 return result;
434 /* Get SIZE bytes, if we can get them starting at END.
435 Return the address of the space we got.
436 If we cannot get space at END, fail and return 0. */
437 static void *
438 get_contiguous_space (ptrdiff_t size, void *position)
440 void *before;
441 void *after;
443 before = (*__morecore) (0);
444 /* If we can tell in advance that the break is at the wrong place,
445 fail now. */
446 if (before != position)
447 return 0;
449 /* Allocate SIZE bytes and get the address of them. */
450 after = (*__morecore) (size);
451 if (!after)
452 return 0;
454 /* It was not contiguous--reject it. */
455 if (after != position)
457 (*__morecore) (- size);
458 return 0;
461 return after;
465 /* This is called when `_heapinfo' and `heapsize' have just
466 been set to describe a new info table. Set up the table
467 to describe itself and account for it in the statistics. */
468 static void
469 register_heapinfo (void)
471 size_t block, blocks;
473 block = BLOCK (_heapinfo);
474 blocks = BLOCKIFY (heapsize * sizeof (malloc_info));
476 /* Account for the _heapinfo block itself in the statistics. */
477 _bytes_used += blocks * BLOCKSIZE;
478 ++_chunks_used;
480 /* Describe the heapinfo block itself in the heapinfo. */
481 _heapinfo[block].busy.type = 0;
482 _heapinfo[block].busy.info.size = blocks;
483 /* Leave back-pointers for malloc_find_address. */
484 while (--blocks > 0)
485 _heapinfo[block + blocks].busy.info.size = -blocks;
#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;

/* fork handlers: hold both malloc locks across fork so the child
   inherits a consistent heap, then release them on both sides.  */
static void
malloc_atfork_handler_prepare (void)
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread (void)
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
		  malloc_atfork_handler_parent,
		  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif	/* USE_PTHREAD */
534 static void
535 malloc_initialize_1 (void)
537 #ifdef GC_MCHECK
538 mcheck (NULL);
539 #endif
541 #ifdef CYGWIN
542 if (bss_sbrk_did_unexec)
543 /* we're reinitializing the dumped emacs */
545 bss_sbrk_heapbase = _heapbase;
546 bss_sbrk_heapinfo = _heapinfo;
547 memset (_fraghead, 0, BLOCKLOG * sizeof (struct list));
549 #endif
551 if (__malloc_initialize_hook)
552 (*__malloc_initialize_hook) ();
554 heapsize = HEAP / BLOCKSIZE;
555 _heapinfo = align (heapsize * sizeof (malloc_info));
556 if (_heapinfo == NULL)
557 return;
558 memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
559 _heapinfo[0].free.size = 0;
560 _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
561 _heapindex = 0;
562 _heapbase = (char *) _heapinfo;
563 _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));
565 register_heapinfo ();
567 __malloc_initialized = 1;
568 PROTECT_MALLOC_STATE (1);
569 return;
572 /* Set everything up and remember that we have.
573 main will call malloc which calls this function. That is before any threads
574 or signal handlers has been set up, so we don't need thread protection. */
576 __malloc_initialize (void)
578 if (__malloc_initialized)
579 return 0;
581 malloc_initialize_1 ();
583 return __malloc_initialized;
586 static int morecore_recursing;
588 /* Get neatly aligned memory, initializing or
589 growing the heap info table as necessary. */
590 static void *
591 morecore_nolock (size_t size)
593 void *result;
594 malloc_info *newinfo, *oldinfo;
595 size_t newsize;
597 if (morecore_recursing)
598 /* Avoid recursion. The caller will know how to handle a null return. */
599 return NULL;
601 result = align (size);
602 if (result == NULL)
603 return NULL;
605 PROTECT_MALLOC_STATE (0);
607 /* Check if we need to grow the info table. */
608 if ((size_t) BLOCK ((char *) result + size) > heapsize)
610 /* Calculate the new _heapinfo table size. We do not account for the
611 added blocks in the table itself, as we hope to place them in
612 existing free space, which is already covered by part of the
613 existing table. */
614 newsize = heapsize;
616 newsize *= 2;
617 while ((size_t) BLOCK ((char *) result + size) > newsize);
619 /* We must not reuse existing core for the new info table when called
620 from realloc in the case of growing a large block, because the
621 block being grown is momentarily marked as free. In this case
622 _heaplimit is zero so we know not to reuse space for internal
623 allocation. */
624 if (_heaplimit != 0)
626 /* First try to allocate the new info table in core we already
627 have, in the usual way using realloc. If realloc cannot
628 extend it in place or relocate it to existing sufficient core,
629 we will get called again, and the code above will notice the
630 `morecore_recursing' flag and return null. */
631 int save = errno; /* Don't want to clobber errno with ENOMEM. */
632 morecore_recursing = 1;
633 newinfo = _realloc_internal_nolock (_heapinfo,
634 newsize * sizeof (malloc_info));
635 morecore_recursing = 0;
636 if (newinfo == NULL)
637 errno = save;
638 else
640 /* We found some space in core, and realloc has put the old
641 table's blocks on the free list. Now zero the new part
642 of the table and install the new table location. */
643 memset (&newinfo[heapsize], 0,
644 (newsize - heapsize) * sizeof (malloc_info));
645 _heapinfo = newinfo;
646 heapsize = newsize;
647 goto got_heap;
651 /* Allocate new space for the malloc info table. */
652 while (1)
654 newinfo = align (newsize * sizeof (malloc_info));
656 /* Did it fail? */
657 if (newinfo == NULL)
659 (*__morecore) (-size);
660 return NULL;
663 /* Is it big enough to record status for its own space?
664 If so, we win. */
665 if ((size_t) BLOCK ((char *) newinfo
666 + newsize * sizeof (malloc_info))
667 < newsize)
668 break;
670 /* Must try again. First give back most of what we just got. */
671 (*__morecore) (- newsize * sizeof (malloc_info));
672 newsize *= 2;
675 /* Copy the old table to the beginning of the new,
676 and zero the rest of the new table. */
677 memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
678 memset (&newinfo[heapsize], 0,
679 (newsize - heapsize) * sizeof (malloc_info));
680 oldinfo = _heapinfo;
681 _heapinfo = newinfo;
682 heapsize = newsize;
684 register_heapinfo ();
686 /* Reset _heaplimit so _free_internal never decides
687 it can relocate or resize the info table. */
688 _heaplimit = 0;
689 _free_internal_nolock (oldinfo);
690 PROTECT_MALLOC_STATE (0);
692 /* The new heap limit includes the new table just allocated. */
693 _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
694 return result;
697 got_heap:
698 _heaplimit = BLOCK ((char *) result + size);
699 return result;
702 /* Allocate memory from the heap. */
703 void *
704 _malloc_internal_nolock (size_t size)
706 void *result;
707 size_t block, blocks, lastblocks, start;
708 register size_t i;
709 struct list *next;
711 /* ANSI C allows `malloc (0)' to either return NULL, or to return a
712 valid address you can realloc and free (though not dereference).
714 It turns out that some extant code (sunrpc, at least Ultrix's version)
715 expects `malloc (0)' to return non-NULL and breaks otherwise.
716 Be compatible. */
718 #if 0
719 if (size == 0)
720 return NULL;
721 #endif
723 PROTECT_MALLOC_STATE (0);
725 if (size < sizeof (struct list))
726 size = sizeof (struct list);
728 /* Determine the allocation policy based on the request size. */
729 if (size <= BLOCKSIZE / 2)
731 /* Small allocation to receive a fragment of a block.
732 Determine the logarithm to base two of the fragment size. */
733 register size_t log = 1;
734 --size;
735 while ((size /= 2) != 0)
736 ++log;
738 /* Look in the fragment lists for a
739 free fragment of the desired size. */
740 next = _fraghead[log].next;
741 if (next != NULL)
743 /* There are free fragments of this size.
744 Pop a fragment out of the fragment list and return it.
745 Update the block's nfree and first counters. */
746 result = next;
747 next->prev->next = next->next;
748 if (next->next != NULL)
749 next->next->prev = next->prev;
750 block = BLOCK (result);
751 if (--_heapinfo[block].busy.info.frag.nfree != 0)
752 _heapinfo[block].busy.info.frag.first =
753 (uintptr_t) next->next % BLOCKSIZE >> log;
755 /* Update the statistics. */
756 ++_chunks_used;
757 _bytes_used += 1 << log;
758 --_chunks_free;
759 _bytes_free -= 1 << log;
761 else
763 /* No free fragments of the desired size, so get a new block
764 and break it into fragments, returning the first. */
765 #ifdef GC_MALLOC_CHECK
766 result = _malloc_internal_nolock (BLOCKSIZE);
767 PROTECT_MALLOC_STATE (0);
768 #elif defined (USE_PTHREAD)
769 result = _malloc_internal_nolock (BLOCKSIZE);
770 #else
771 result = malloc (BLOCKSIZE);
772 #endif
773 if (result == NULL)
775 PROTECT_MALLOC_STATE (1);
776 goto out;
779 /* Link all fragments but the first into the free list. */
780 next = (struct list *) ((char *) result + (1 << log));
781 next->next = NULL;
782 next->prev = &_fraghead[log];
783 _fraghead[log].next = next;
785 for (i = 2; i < (size_t) (BLOCKSIZE >> log); ++i)
787 next = (struct list *) ((char *) result + (i << log));
788 next->next = _fraghead[log].next;
789 next->prev = &_fraghead[log];
790 next->prev->next = next;
791 next->next->prev = next;
794 /* Initialize the nfree and first counters for this block. */
795 block = BLOCK (result);
796 _heapinfo[block].busy.type = log;
797 _heapinfo[block].busy.info.frag.nfree = i - 1;
798 _heapinfo[block].busy.info.frag.first = i - 1;
800 _chunks_free += (BLOCKSIZE >> log) - 1;
801 _bytes_free += BLOCKSIZE - (1 << log);
802 _bytes_used -= BLOCKSIZE - (1 << log);
805 else
807 /* Large allocation to receive one or more blocks.
808 Search the free list in a circle starting at the last place visited.
809 If we loop completely around without finding a large enough
810 space we will have to get more memory from the system. */
811 blocks = BLOCKIFY (size);
812 start = block = _heapindex;
813 while (_heapinfo[block].free.size < blocks)
815 block = _heapinfo[block].free.next;
816 if (block == start)
818 /* Need to get more from the system. Get a little extra. */
819 size_t wantblocks = blocks + __malloc_extra_blocks;
820 block = _heapinfo[0].free.prev;
821 lastblocks = _heapinfo[block].free.size;
822 /* Check to see if the new core will be contiguous with the
823 final free block; if so we don't need to get as much. */
824 if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
825 /* We can't do this if we will have to make the heap info
826 table bigger to accommodate the new space. */
827 block + wantblocks <= heapsize &&
828 get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
829 ADDRESS (block + lastblocks)))
831 /* We got it contiguously. Which block we are extending
832 (the `final free block' referred to above) might have
833 changed, if it got combined with a freed info table. */
834 block = _heapinfo[0].free.prev;
835 _heapinfo[block].free.size += (wantblocks - lastblocks);
836 _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
837 _heaplimit += wantblocks - lastblocks;
838 continue;
840 result = morecore_nolock (wantblocks * BLOCKSIZE);
841 if (result == NULL)
842 goto out;
843 block = BLOCK (result);
844 /* Put the new block at the end of the free list. */
845 _heapinfo[block].free.size = wantblocks;
846 _heapinfo[block].free.prev = _heapinfo[0].free.prev;
847 _heapinfo[block].free.next = 0;
848 _heapinfo[0].free.prev = block;
849 _heapinfo[_heapinfo[block].free.prev].free.next = block;
850 ++_chunks_free;
851 /* Now loop to use some of that block for this allocation. */
855 /* At this point we have found a suitable free list entry.
856 Figure out how to remove what we need from the list. */
857 result = ADDRESS (block);
858 if (_heapinfo[block].free.size > blocks)
860 /* The block we found has a bit left over,
861 so relink the tail end back into the free list. */
862 _heapinfo[block + blocks].free.size
863 = _heapinfo[block].free.size - blocks;
864 _heapinfo[block + blocks].free.next
865 = _heapinfo[block].free.next;
866 _heapinfo[block + blocks].free.prev
867 = _heapinfo[block].free.prev;
868 _heapinfo[_heapinfo[block].free.prev].free.next
869 = _heapinfo[_heapinfo[block].free.next].free.prev
870 = _heapindex = block + blocks;
872 else
874 /* The block exactly matches our requirements,
875 so just remove it from the list. */
876 _heapinfo[_heapinfo[block].free.next].free.prev
877 = _heapinfo[block].free.prev;
878 _heapinfo[_heapinfo[block].free.prev].free.next
879 = _heapindex = _heapinfo[block].free.next;
880 --_chunks_free;
883 _heapinfo[block].busy.type = 0;
884 _heapinfo[block].busy.info.size = blocks;
885 ++_chunks_used;
886 _bytes_used += blocks * BLOCKSIZE;
887 _bytes_free -= blocks * BLOCKSIZE;
889 /* Mark all the blocks of the object just allocated except for the
890 first with a negative number so you can find the first block by
891 adding that adjustment. */
892 while (--blocks > 0)
893 _heapinfo[block + blocks].busy.info.size = -blocks;
896 PROTECT_MALLOC_STATE (1);
897 out:
898 return result;
/* Locked wrapper around _malloc_internal_nolock.  */
void *
_malloc_internal (size_t size)
{
  void *result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}
913 void *
914 malloc (size_t size)
916 void *(*hook) (size_t);
918 if (!__malloc_initialized && !__malloc_initialize ())
919 return NULL;
921 /* Copy the value of __malloc_hook to an automatic variable in case
922 __malloc_hook is modified in another thread between its
923 NULL-check and the use.
925 Note: Strictly speaking, this is not a right solution. We should
926 use mutexes to access non-read-only variables that are shared
927 among multiple threads. We just leave it for compatibility with
928 glibc malloc (i.e., assignments to __malloc_hook) for now. */
929 hook = __malloc_hook;
930 return (hook != NULL ? *hook : _malloc_internal) (size);
#ifndef _LIBC

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

extern void *_malloc (size_t);
extern void _free (void *);
extern void *_realloc (void *, size_t);

void *
_malloc (size_t size)
{
  return malloc (size);
}

void
_free (void *ptr)
{
  free (ptr);
}

void *
_realloc (void *ptr, size_t size)
{
  return realloc (ptr, size);
}

#endif
961 /* Free a block of memory allocated by `malloc'.
962 Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
963 Written May 1989 by Mike Haertel.
965 This library is free software; you can redistribute it and/or
966 modify it under the terms of the GNU General Public License as
967 published by the Free Software Foundation; either version 2 of the
968 License, or (at your option) any later version.
970 This library is distributed in the hope that it will be useful,
971 but WITHOUT ANY WARRANTY; without even the implied warranty of
972 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
973 General Public License for more details.
975 You should have received a copy of the GNU General Public
976 License along with this library. If not, see <http://www.gnu.org/licenses/>.
978 The author may be reached (Email) at the address mike@ai.mit.edu,
979 or (US mail) as Mike Haertel c/o Free Software Foundation. */
982 /* Debugging hook for free. */
983 void (*__free_hook) (void *__ptr);
985 /* List of blocks allocated by aligned_alloc. */
986 struct alignlist *_aligned_blocks = NULL;
/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex: the caller must already
   hold the malloc lock (or be effectively single-threaded).

   PTR may be NULL (no-op).  If PTR was produced by aligned_alloc it is
   first translated back to the exact pointer recorded in
   _aligned_blocks.  Multi-block allocations (type 0) are coalesced
   with adjacent free clusters and, when enough free space accumulates
   at the end of the heap, returned to the system via __morecore.
   Fragments (type > 0, fragment size 1 << type) are threaded back
   onto _fraghead[type], and a fully-free fragment block is converted
   back into a single free block.  */
void
_free_internal_nolock (void *ptr)
{
  int type;
  size_t block, blocks;
  register size_t i;
  struct list *prev, *next;
  void *curbrk;
  const size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

#ifdef CYGWIN
  if ((char *) ptr < _heapbase)
    /* We're being asked to free something in the static heap. */
    return;
#endif

  PROTECT_MALLOC_STATE (0);

  /* If PTR is an aligned_alloc result, recover the exact pointer.  */
  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
        l->aligned = NULL;      /* Mark the slot in the list as free.  */
        ptr = l->exact;
        break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
         Start searching at the last block referenced; this may benefit
         programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
        while (i > block)
          i = _heapinfo[i].free.prev;
      else
        {
          do
            i = _heapinfo[i].free.next;
          while (i > 0 && i < block);
          i = _heapinfo[i].free.prev;
        }

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
        {
          /* Coalesce this block with its predecessor.  */
          _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
          block = i;
        }
      else
        {
          /* Really link this block back into the free list.  */
          _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
          _heapinfo[block].free.next = _heapinfo[i].free.next;
          _heapinfo[block].free.prev = i;
          _heapinfo[i].free.next = block;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          ++_chunks_free;
        }

      /* Now that the block is linked in, see if we can coalesce it
         with its successor (by deleting its successor from the list
         and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
        {
          _heapinfo[block].free.size
            += _heapinfo[_heapinfo[block].free.next].free.size;
          _heapinfo[block].free.next
            = _heapinfo[_heapinfo[block].free.next].free.next;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          --_chunks_free;
        }

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
        {
          /* The end of the malloc heap is at the end of accessible core.
             It's possible that moving _heapinfo will allow us to
             return some space to the system.  */

          size_t info_block = BLOCK (_heapinfo);
          size_t info_blocks = _heapinfo[info_block].busy.info.size;
          size_t prev_block = _heapinfo[block].free.prev;
          size_t prev_blocks = _heapinfo[prev_block].free.size;
          size_t next_block = _heapinfo[block].free.next;
          size_t next_blocks = _heapinfo[next_block].free.size;

          if (/* Win if this block being freed is last in core, the info table
                 is just before it, the previous free block is just before the
                 info table, and the two free blocks together form a useful
                 amount to return to the system.  */
              (block + blocks == _heaplimit &&
               info_block + info_blocks == block &&
               prev_block != 0 && prev_block + prev_blocks == info_block &&
               blocks + prev_blocks >= lesscore_threshold) ||
              /* Nope, not the case.  We can also win if this block being
                 freed is just before the info table, and the table extends
                 to the end of core or is followed only by a free block,
                 and the total free space is worth returning to the system.  */
              (block + blocks == info_block &&
               ((info_block + info_blocks == _heaplimit &&
                 blocks >= lesscore_threshold) ||
                (info_block + info_blocks == next_block &&
                 next_block + next_blocks == _heaplimit &&
                 blocks + next_blocks >= lesscore_threshold))))
            {
              malloc_info *newinfo;
              size_t oldlimit = _heaplimit;

              /* Free the old info table, clearing _heaplimit to avoid
                 recursion into this code.  We don't want to return the
                 table's blocks to the system before we have copied them to
                 the new location.  */
              _heaplimit = 0;
              _free_internal_nolock (_heapinfo);
              _heaplimit = oldlimit;

              /* Tell malloc to search from the beginning of the heap for
                 free blocks, so it doesn't reuse the ones just freed.  */
              _heapindex = 0;

              /* Allocate new space for the info table and move its data.  */
              newinfo = _malloc_internal_nolock (info_blocks * BLOCKSIZE);
              PROTECT_MALLOC_STATE (0);
              memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
              _heapinfo = newinfo;

              /* We should now have coalesced the free block with the
                 blocks freed from the old info table.  Examine the entire
                 trailing free block to decide below whether to return some
                 to the system.  */
              block = _heapinfo[0].free.prev;
              blocks = _heapinfo[block].free.size;
            }

          /* Now see if we can return stuff to the system.  */
          if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
            {
              register size_t bytes = blocks * BLOCKSIZE;
              _heaplimit -= blocks;
              (*__morecore) (-bytes);
              /* Unlink the returned cluster from the free list.  */
              _heapinfo[_heapinfo[block].free.prev].free.next
                = _heapinfo[block].free.next;
              _heapinfo[_heapinfo[block].free.next].free.prev
                = _heapinfo[block].free.prev;
              block = _heapinfo[block].free.prev;
              --_chunks_free;
              _bytes_free -= bytes;
            }
        }

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
                              (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
        {
          /* If all fragments of this block are free, remove them
             from the fragment list and free the whole block.  */
          next = prev;
          for (i = 1; i < (size_t) (BLOCKSIZE >> type); ++i)
            next = next->next;
          prev->prev->next = next;
          if (next != NULL)
            next->prev = prev->prev;
          _heapinfo[block].busy.type = 0;
          _heapinfo[block].busy.info.size = 1;

          /* Keep the statistics accurate.  */
          ++_chunks_used;
          _bytes_used += BLOCKSIZE;
          _chunks_free -= BLOCKSIZE >> type;
          _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
          _free_internal_nolock (ADDRESS (block));
#else
          free (ADDRESS (block));
#endif
        }
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
        {
          /* If some fragments of this block are free, link this
             fragment into the fragment list after the first free
             fragment of this block.  */
          next = ptr;
          next->next = prev->next;
          next->prev = prev;
          prev->next = next;
          if (next->next != NULL)
            next->next->prev = next;
          ++_heapinfo[block].busy.info.frag.nfree;
        }
      else
        {
          /* No fragments of this block are free, so link this
             fragment into the fragment list and announce that
             it is the first free fragment of this block.  */
          prev = ptr;
          _heapinfo[block].busy.info.frag.nfree = 1;
          _heapinfo[block].busy.info.frag.first =
            (uintptr_t) ptr % BLOCKSIZE >> type;
          prev->next = _fraghead[type].next;
          prev->prev = &_fraghead[type];
          prev->prev->next = prev;
          if (prev->next != NULL)
            prev->next->prev = prev;
        }
      break;
    }

  PROTECT_MALLOC_STATE (1);
}
/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.
   Thread-safe wrapper: takes the malloc mutex around
   _free_internal_nolock.  */
void
_free_internal (void *ptr)
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}
1250 /* Return memory to the heap. */
1252 void
1253 free (void *ptr)
1255 void (*hook) (void *) = __free_hook;
1257 if (hook != NULL)
1258 (*hook) (ptr);
1259 else
1260 _free_internal (ptr);
/* Define the `cfree' alias for `free'.  Uses a linker-level weak
   alias when the platform supports it, otherwise a trivial wrapper
   function.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (void *ptr)
{
  free (ptr);
}
#endif
1273 /* Change the size of a block allocated by `malloc'.
1274 Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
1275 Written May 1989 by Mike Haertel.
1277 This library is free software; you can redistribute it and/or
1278 modify it under the terms of the GNU General Public License as
1279 published by the Free Software Foundation; either version 2 of the
1280 License, or (at your option) any later version.
1282 This library is distributed in the hope that it will be useful,
1283 but WITHOUT ANY WARRANTY; without even the implied warranty of
1284 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1285 General Public License for more details.
1287 You should have received a copy of the GNU General Public
1288 License along with this library. If not, see <http://www.gnu.org/licenses/>.
1290 The author may be reached (Email) at the address mike@ai.mit.edu,
1291 or (US mail) as Mike Haertel c/o Free Software Foundation. */
/* Return the smaller of A and B.  Note: arguments may be evaluated
   twice, so avoid side effects in them.  */
#ifndef min
#define min(A, B) ((A) < (B) ? (A) : (B))
#endif
/* On Cygwin the dumped emacs may try to realloc storage allocated in
   the static heap.  We just malloc space in the new heap and copy the
   data.  */
#ifdef CYGWIN
void *
special_realloc (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, oldsize;

  /* Look up PTR's block descriptor in the static (pre-dump) heap.  */
  block = ((char *) ptr - bss_sbrk_heapbase) / BLOCKSIZE + 1;
  type = bss_sbrk_heapinfo[block].busy.type;
  /* Type 0 is a multi-block allocation; any other type means the
     block holds fragments of 2**type bytes each.  */
  oldsize =
    type == 0 ? bss_sbrk_heapinfo[block].busy.info.size * BLOCKSIZE
    : (size_t) 1 << type;
  result = _malloc_internal_nolock (size);
  if (result)
    return memcpy (result, ptr, min (oldsize, size));
  return result;
}
#endif
/* Debugging hook for realloc.  When non-null (e.g. installed by
   mcheck's reallochook), `realloc' calls it instead of
   _realloc_internal.  */
void *(*__realloc_hook) (void *ptr, size_t size);
/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.

   Caller must hold the malloc lock; _realloc_internal is the locking
   wrapper.  SIZE == 0 frees PTR and returns a minimal allocation;
   PTR == NULL degenerates to a plain allocation.  */
void *
_realloc_internal_nolock (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

#ifdef CYGWIN
  if ((char *) ptr < _heapbase)
    /* ptr points into the static heap */
    return special_realloc (ptr, size);
#endif

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
        {
          result = _malloc_internal_nolock (size);
          if (result != NULL)
            {
              memcpy (result, ptr, size);
              _free_internal_nolock (ptr);
              goto out;
            }
          /* On allocation failure fall through and try to satisfy the
             request in place below.  */
        }

      /* The new size is a large allocation as well;
         see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
        {
          /* The new size is smaller; return
             excess memory to the free list.  */
          _heapinfo[block + blocks].busy.type = 0;
          _heapinfo[block + blocks].busy.info.size
            = _heapinfo[block].busy.info.size - blocks;
          _heapinfo[block].busy.info.size = blocks;
          /* We have just created a new chunk by splitting a chunk in two.
             Now we will free this chunk; increment the statistics counter
             so it doesn't become wrong when _free_internal decrements it.  */
          ++_chunks_used;
          _free_internal_nolock (ADDRESS (block + blocks));
          result = ptr;
        }
      else if (blocks == _heapinfo[block].busy.info.size)
        /* No size change necessary.  */
        result = ptr;
      else
        {
          /* Won't fit, so allocate a new region that will.
             Free the old region first in case there is sufficient
             adjacent free space to grow without moving.  */
          blocks = _heapinfo[block].busy.info.size;
          /* Prevent free from actually returning memory to the system.  */
          oldlimit = _heaplimit;
          _heaplimit = 0;
          _free_internal_nolock (ptr);
          result = _malloc_internal_nolock (size);
          PROTECT_MALLOC_STATE (0);
          if (_heaplimit == 0)
            _heaplimit = oldlimit;
          if (result == NULL)
            {
              /* Now we're really in trouble.  We have to unfree
                 the thing we just freed.  Unfortunately it might
                 have been coalesced with its neighbors.  */
              if (_heapindex == block)
                (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
              else
                {
                  void *previous
                    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
                  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
                  _free_internal_nolock (previous);
                }
              goto out;
            }
          if (ptr != result)
            memmove (result, ptr, blocks * BLOCKSIZE);
        }
      break;

    default:
      /* Old size is a fragment; type is logarithm
         to base two of the fragment size.  */
      if (size > (size_t) (1 << (type - 1)) &&
          size <= (size_t) (1 << type))
        /* The new size is the same kind of fragment.  */
        result = ptr;
      else
        {
          /* The new size is different; allocate a new space,
             and copy the lesser of the new size and the old.  */
          result = _malloc_internal_nolock (size);
          if (result == NULL)
            goto out;
          memcpy (result, ptr, min (size, (size_t) 1 << type));
          _free_internal_nolock (ptr);
        }
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}
/* Thread-safe wrapper around _realloc_internal_nolock: hold the
   malloc mutex for the duration of the resize.  */
void *
_realloc_internal (void *ptr, size_t size)
{
  void *result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}
1463 void *
1464 realloc (void *ptr, size_t size)
1466 void *(*hook) (void *, size_t);
1468 if (!__malloc_initialized && !__malloc_initialize ())
1469 return NULL;
1471 hook = __realloc_hook;
1472 return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
1474 /* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.
1476 This library is free software; you can redistribute it and/or
1477 modify it under the terms of the GNU General Public License as
1478 published by the Free Software Foundation; either version 2 of the
1479 License, or (at your option) any later version.
1481 This library is distributed in the hope that it will be useful,
1482 but WITHOUT ANY WARRANTY; without even the implied warranty of
1483 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1484 General Public License for more details.
1486 You should have received a copy of the GNU General Public
1487 License along with this library. If not, see <http://www.gnu.org/licenses/>.
1489 The author may be reached (Email) at the address mike@ai.mit.edu,
1490 or (US mail) as Mike Haertel c/o Free Software Foundation. */
/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  Returns NULL (with
   errno set to ENOMEM) if NMEMB * SIZE would overflow size_t.  */
void *
calloc (size_t nmemb, size_t size)
{
  size_t bytes = nmemb * size;
  void *result;

  /* Reject requests whose total byte count wrapped around.  */
  if (size != 0 && bytes / size != nmemb)
    {
      errno = ENOMEM;
      return NULL;
    }

  result = malloc (bytes);
  return result != NULL ? memset (result, 0, bytes) : NULL;
}
1511 /* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
1512 This file is part of the GNU C Library.
1514 The GNU C Library is free software; you can redistribute it and/or modify
1515 it under the terms of the GNU General Public License as published by
1516 the Free Software Foundation; either version 2, or (at your option)
1517 any later version.
1519 The GNU C Library is distributed in the hope that it will be useful,
1520 but WITHOUT ANY WARRANTY; without even the implied warranty of
1521 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1522 GNU General Public License for more details.
1524 You should have received a copy of the GNU General Public License
1525 along with the GNU C Library. If not, see <http://www.gnu.org/licenses/>. */
1527 /* uClibc defines __GNU_LIBRARY__, but it is not completely
1528 compatible. */
1529 #if !defined (__GNU_LIBRARY__) || defined (__UCLIBC__)
1530 #define __sbrk sbrk
1531 #else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
1532 /* It is best not to declare this and cast its result on foreign operating
1533 systems with potentially hostile include files. */
1535 extern void *__sbrk (ptrdiff_t increment);
1536 #endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
void *
__default_morecore (ptrdiff_t increment)
{
#if defined (CYGWIN)
  /* Before unexec, serve requests from the static bss-based heap.  */
  if (!bss_sbrk_did_unexec)
    return bss_sbrk (increment);
#endif
  void *result = (void *) __sbrk (increment);
  /* sbrk signals failure with (void *) -1; translate to NULL.  */
  return result == (void *) -1 ? NULL : result;
}
1556 /* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.
1558 This library is free software; you can redistribute it and/or
1559 modify it under the terms of the GNU General Public License as
1560 published by the Free Software Foundation; either version 2 of the
1561 License, or (at your option) any later version.
1563 This library is distributed in the hope that it will be useful,
1564 but WITHOUT ANY WARRANTY; without even the implied warranty of
1565 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1566 General Public License for more details.
1568 You should have received a copy of the GNU General Public
1569 License along with this library. If not, see <http://www.gnu.org/licenses/>. */
/* Debugging hook for aligned allocation.  When set, aligned_alloc
   delegates to it.  NOTE(review): the parameter names here say
   (size, alignment), but aligned_alloc actually invokes the hook as
   (*hook) (alignment, size) -- the call order is authoritative.  */
void *(*__memalign_hook) (size_t size, size_t alignment);
/* Allocate SIZE bytes whose address is a multiple of ALIGNMENT.
   If __memalign_hook is installed, delegate to it.  Otherwise
   over-allocate by up to ALIGNMENT - 1 bytes, compute the padding
   needed, and -- when padding is nonzero -- record the (exact,
   aligned) pointer pair in _aligned_blocks so `free' can recover the
   pointer that was really allocated.  */
void *
aligned_alloc (size_t alignment, size_t size)
{
  void *result;
  size_t adj, lastadj;
  void *(*hook) (size_t, size_t) = __memalign_hook;

  if (hook)
    return (*hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  if (- size < alignment)
    {
      /* SIZE + ALIGNMENT - 1 would overflow size_t.  */
      errno = ENOMEM;
      return NULL;
    }
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = (uintptr_t) result % alignment;

  do
    {
      /* Reallocate the block with only as much excess as it needs.  */
      free (result);
      result = malloc (size + alignment - adj);
      if (result == NULL)       /* Impossible unless interrupted.  */
        return NULL;

      lastadj = adj;
      adj = (uintptr_t) result % alignment;
      /* It's conceivable we might have been so unlucky as to get a
         different block with weaker alignment.  If so, this block is too
         short to contain SIZE after alignment correction.  So we must
         try again and get another block, slightly larger.  */
    } while (adj < lastadj);

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
         can identify the pointer it is passed, which will be in the middle
         of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
        if (l->aligned == NULL)
          /* This slot is free.  Use it.  */
          break;
      if (l == NULL)
        {
          l = malloc (sizeof *l);
          if (l != NULL)
            {
              l->next = _aligned_blocks;
              _aligned_blocks = l;
            }
        }
      if (l != NULL)
        {
          l->exact = result;
          result = l->aligned = (char *) result + alignment - adj;
        }
      UNLOCK_ALIGNED_BLOCKS ();
      if (l == NULL)
        {
          /* Could not record the block; release it and fail.  */
          free (result);
          result = NULL;
        }
    }

  return result;
}
/* An obsolete alias for aligned_alloc, kept so that old libraries
   which still call memalign continue to work.  */
void *
memalign (size_t alignment, size_t size)
{
  return aligned_alloc (alignment, size);
}
/* POSIX-style aligned allocation: on success store an
   ALIGNMENT-aligned block of SIZE bytes in *MEMPTR and return 0.
   Returns EINVAL for an invalid alignment, ENOMEM on exhaustion;
   *MEMPTR is untouched on failure.  */
int
posix_memalign (void **memptr, size_t alignment, size_t size)
{
  void *mem;

  /* POSIX requires ALIGNMENT to be a nonzero power of two that is
     also a multiple of sizeof (void *).  */
  if (alignment == 0
      || alignment % sizeof (void *) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = aligned_alloc (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;
  return 0;
}
1679 /* Allocate memory on a page boundary.
1680 Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.
1682 This library is free software; you can redistribute it and/or
1683 modify it under the terms of the GNU General Public License as
1684 published by the Free Software Foundation; either version 2 of the
1685 License, or (at your option) any later version.
1687 This library is distributed in the hope that it will be useful,
1688 but WITHOUT ANY WARRANTY; without even the implied warranty of
1689 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1690 General Public License for more details.
1692 You should have received a copy of the GNU General Public
1693 License along with this library. If not, see <http://www.gnu.org/licenses/>.
1695 The author may be reached (Email) at the address mike@ai.mit.edu,
1696 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1698 /* Allocate SIZE bytes on a page boundary. */
1699 extern void *valloc (size_t);
1701 #if defined _SC_PAGESIZE || !defined HAVE_GETPAGESIZE
1702 # include "getpagesize.h"
1703 #elif !defined getpagesize
1704 extern int getpagesize (void);
1705 #endif
/* Cached page size, filled in lazily on the first valloc call.  */
static size_t pagesize;

/* Allocate SIZE bytes aligned on a page boundary.  */
void *
valloc (size_t size)
{
  if (pagesize == 0)
    pagesize = getpagesize ();

  return aligned_alloc (pagesize, size);
}
1718 #ifdef GC_MCHECK
1720 /* Standard debugging hooks for `malloc'.
1721 Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
1722 Written May 1989 by Mike Haertel.
1724 This library is free software; you can redistribute it and/or
1725 modify it under the terms of the GNU General Public License as
1726 published by the Free Software Foundation; either version 2 of the
1727 License, or (at your option) any later version.
1729 This library is distributed in the hope that it will be useful,
1730 but WITHOUT ANY WARRANTY; without even the implied warranty of
1731 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1732 General Public License for more details.
1734 You should have received a copy of the GNU General Public
1735 License along with this library. If not, see <http://www.gnu.org/licenses/>.
1737 The author may be reached (Email) at the address mike@ai.mit.edu,
1738 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1740 #include <stdio.h>
/* Old hook values, saved when mcheck installs its checking hooks so
   that freehook/mallochook/reallochook can temporarily restore them
   and call the real allocator without recursing into themselves.  */
static void (*old_free_hook) (void *ptr);
static void *(*old_malloc_hook) (size_t size);
static void *(*old_realloc_hook) (void *ptr, size_t size);

/* Function to call when something awful happens.  Set by mcheck();
   defaults to mabort.  */
static void (*abortfunc) (enum mcheck_status);
/* Arbitrary magical numbers.  MAGICWORD marks a live header and
   MAGICFREE an already-freed one (so a double free is detected as
   MCHECK_FREE); MAGICBYTE is the trailer byte written just past the
   user's data to catch overruns.  MALLOCFLOOD and FREEFLOOD are the
   fill patterns for freshly allocated and freshly freed memory.  */
#define MAGICWORD (SIZE_MAX / 11 ^ SIZE_MAX / 13 << 3)
#define MAGICFREE (SIZE_MAX / 17 ^ SIZE_MAX / 19 << 4)
#define MAGICBYTE ((char) 0xd7)
#define MALLOCFLOOD ((char) 0x93)
#define FREEFLOOD ((char) 0x95)

/* Bookkeeping header placed immediately before every block handed out
   while mcheck is active; the pointer given to the user is hdr + 1.  */
struct hdr
{
  size_t size;		/* Exact size requested by user.  */
  size_t magic;		/* Magic number to check header integrity.  */
};
1763 static enum mcheck_status
1764 checkhdr (const struct hdr *hdr)
1766 enum mcheck_status status;
1767 switch (hdr->magic)
1769 default:
1770 status = MCHECK_HEAD;
1771 break;
1772 case MAGICFREE:
1773 status = MCHECK_FREE;
1774 break;
1775 case MAGICWORD:
1776 if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
1777 status = MCHECK_TAIL;
1778 else
1779 status = MCHECK_OK;
1780 break;
1782 if (status != MCHECK_OK)
1783 (*abortfunc) (status);
1784 return status;
/* Debugging replacement for `free': validate the block's header and
   trailer, flood the user bytes with FREEFLOOD, then release the real
   block (header included) with the original free hook restored.  */
static void
freehook (void *ptr)
{
  struct hdr *hdr;

  if (ptr)
    {
      struct alignlist *l;

      /* If the block was allocated by aligned_alloc, its real pointer
         to free is recorded in _aligned_blocks; find that.  */
      PROTECT_MALLOC_STATE (0);
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
        if (l->aligned == ptr)
          {
            l->aligned = NULL;  /* Mark the slot in the list as free.  */
            ptr = l->exact;
            break;
          }
      UNLOCK_ALIGNED_BLOCKS ();
      PROTECT_MALLOC_STATE (1);

      /* The header sits immediately before the user pointer.  */
      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;   /* So a second free reports MCHECK_FREE.  */
      memset (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  /* Temporarily restore the original hook so the real free runs
     without re-entering this function.  */
  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}
1823 static void *
1824 mallochook (size_t size)
1826 struct hdr *hdr;
1828 __malloc_hook = old_malloc_hook;
1829 hdr = malloc (sizeof *hdr + size + 1);
1830 __malloc_hook = mallochook;
1831 if (hdr == NULL)
1832 return NULL;
1834 hdr->size = size;
1835 hdr->magic = MAGICWORD;
1836 ((char *) &hdr[1])[size] = MAGICBYTE;
1837 return memset (hdr + 1, MALLOCFLOOD, size);
1840 static void *
1841 reallochook (void *ptr, size_t size)
1843 struct hdr *hdr = NULL;
1844 size_t osize = 0;
1846 if (ptr)
1848 hdr = ((struct hdr *) ptr) - 1;
1849 osize = hdr->size;
1851 checkhdr (hdr);
1852 if (size < osize)
1853 memset ((char *) ptr + size, FREEFLOOD, osize - size);
1856 __free_hook = old_free_hook;
1857 __malloc_hook = old_malloc_hook;
1858 __realloc_hook = old_realloc_hook;
1859 hdr = realloc (hdr, sizeof *hdr + size + 1);
1860 __free_hook = freehook;
1861 __malloc_hook = mallochook;
1862 __realloc_hook = reallochook;
1863 if (hdr == NULL)
1864 return NULL;
1866 hdr->size = size;
1867 hdr->magic = MAGICWORD;
1868 ((char *) &hdr[1])[size] = MAGICBYTE;
1869 if (size > osize)
1870 memset ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
1871 return hdr + 1;
1874 static void
1875 mabort (enum mcheck_status status)
1877 const char *msg;
1878 switch (status)
1880 case MCHECK_OK:
1881 msg = "memory is consistent, library is buggy";
1882 break;
1883 case MCHECK_HEAD:
1884 msg = "memory clobbered before allocated block";
1885 break;
1886 case MCHECK_TAIL:
1887 msg = "memory clobbered past end of allocated block";
1888 break;
1889 case MCHECK_FREE:
1890 msg = "block freed twice";
1891 break;
1892 default:
1893 msg = "bogus mcheck_status, library is buggy";
1894 break;
1896 #ifdef __GNU_LIBRARY__
1897 __libc_fatal (msg);
1898 #else
1899 fprintf (stderr, "mcheck: %s\n", msg);
1900 fflush (stderr);
1901 # ifdef emacs
1902 emacs_abort ();
1903 # else
1904 abort ();
1905 # endif
1906 #endif
1909 static int mcheck_used = 0;
1912 mcheck (void (*func) (enum mcheck_status))
1914 abortfunc = (func != NULL) ? func : &mabort;
1916 /* These hooks may not be safely inserted if malloc is already in use. */
1917 if (!__malloc_initialized && !mcheck_used)
1919 old_free_hook = __free_hook;
1920 __free_hook = freehook;
1921 old_malloc_hook = __malloc_hook;
1922 __malloc_hook = mallochook;
1923 old_realloc_hook = __realloc_hook;
1924 __realloc_hook = reallochook;
1925 mcheck_used = 1;
1928 return mcheck_used ? 0 : -1;
1931 enum mcheck_status
1932 mprobe (void *ptr)
1934 return mcheck_used ? checkhdr (ptr) : MCHECK_DISABLED;
1937 #endif /* GC_MCHECK */