/* This file is no longer automatically generated from libc.  */

#define _MALLOC_INTERNAL

/* The malloc headers and source files from the C library follow here.  */

/* Declarations for `malloc' and friends.
   Copyright (C) 1990, 1991, 1992, 1993, 1995, 1996, 1999, 2002, 2003, 2004,
   2005, 2006, 2007 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifndef _MALLOC_H

#define _MALLOC_H 1

#ifdef _MALLOC_INTERNAL

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_GTK_AND_PTHREAD
#define USE_PTHREAD
#endif

#if ((defined __cplusplus || (defined (__STDC__) && __STDC__) \
      || defined STDC_HEADERS || defined PROTOTYPES) \
     && ! defined (BROKEN_PROTOTYPES))
#undef PP
#define PP(args) args
#undef __ptr_t
#define __ptr_t void *
#else /* Not C++ or ANSI C.  */
#undef PP
#define PP(args) ()
#undef __ptr_t
#define __ptr_t char *
#endif /* C++ or ANSI C.  */

#if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
#include <string.h>
#else
#ifndef memset
#define memset(s, zero, n) bzero ((s), (n))
#endif
#ifndef memcpy
#define memcpy(d, s, n) bcopy ((s), (d), (n))
#endif
#endif

#ifdef HAVE_LIMITS_H
#include <limits.h>
#endif
#ifndef CHAR_BIT
#define CHAR_BIT 8
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#ifdef USE_PTHREAD
#include <pthread.h>
#endif

#endif /* _MALLOC_INTERNAL.  */
#ifdef __cplusplus
extern "C"
{
#endif

#ifdef STDC_HEADERS
#include <stddef.h>
#define __malloc_size_t size_t
#define __malloc_ptrdiff_t ptrdiff_t
#else
#ifdef __GNUC__
#include <stddef.h>
#ifdef __SIZE_TYPE__
#define __malloc_size_t __SIZE_TYPE__
#endif
#endif
#ifndef __malloc_size_t
#define __malloc_size_t unsigned int
#endif
#define __malloc_ptrdiff_t int
#endif

#ifndef NULL
#define NULL 0
#endif

#ifndef FREE_RETURN_TYPE
#define FREE_RETURN_TYPE void
#endif
/* Allocate SIZE bytes of memory.  */
extern __ptr_t malloc PP ((__malloc_size_t __size));
/* Re-allocate the previously allocated block
   in __ptr_t, making the new block SIZE bytes long.  */
extern __ptr_t realloc PP ((__ptr_t __ptr, __malloc_size_t __size));
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern __ptr_t calloc PP ((__malloc_size_t __nmemb, __malloc_size_t __size));
/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
extern FREE_RETURN_TYPE free PP ((__ptr_t __ptr));

/* Allocate SIZE bytes aligned to ALIGNMENT bytes.  */
#if ! (defined (_MALLOC_INTERNAL) && __DJGPP__ - 0 == 1) /* Avoid conflict.  */
extern __ptr_t memalign PP ((__malloc_size_t __alignment,
			     __malloc_size_t __size));
extern int posix_memalign PP ((__ptr_t *, __malloc_size_t,
			       __malloc_size_t size));
#endif

/* Allocate SIZE bytes on a page boundary.  */
#if ! (defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC))
extern __ptr_t valloc PP ((__malloc_size_t __size));
#endif

#ifdef USE_PTHREAD
/* Set up mutexes and make malloc etc. thread-safe.  */
extern void malloc_enable_thread PP ((void));
#endif
#ifdef _MALLOC_INTERNAL

/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT (CHAR_BIT * sizeof(int))
#define BLOCKLOG (INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE (1 << BLOCKLOG)
#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
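
/* Worked example of the arithmetic above: on a host where int is 32
   bits, INT_BIT is 32, so BLOCKLOG is 12 and BLOCKSIZE is 4096 bytes;
   BLOCKIFY (10000) rounds up to 3 blocks.  On a 16-bit host BLOCKSIZE
   would be 512 instead.  */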
/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP (INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS 8
/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
	/* Zero for a large (multiblock) object, or positive giving the
	   logarithm to the base two of the fragment size.  */
	int type;
	union
	  {
	    struct
	      {
		__malloc_size_t nfree; /* Free frags in a fragmented block.  */
		__malloc_size_t first; /* First free fragment of the block.  */
	      } frag;
	    /* For a large object, in its first block, this has the number
	       of blocks in the object.  In the other blocks, this has a
	       negative number which says how far back the first block is.  */
	    __malloc_ptrdiff_t size;
	  } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
	__malloc_size_t size;	/* Size (in blocks) of a free cluster.  */
	__malloc_size_t next;	/* Index of next free cluster.  */
	__malloc_size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;
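
/* Illustrative example of the encoding above: a three-block object
   whose first block has index B is described by
     _heapinfo[B].busy.info.size     ==  3
     _heapinfo[B + 1].busy.info.size == -1
     _heapinfo[B + 2].busy.info.size == -2
   so adding the (negative) size field of any interior block to its
   own index recovers the first block of the object.  */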
/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A) (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B) ((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
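
/* BLOCK and ADDRESS are inverses, and block numbers start at 1, so
   BLOCK (_heapbase) == 1 and ADDRESS (1) == _heapbase.  Index 0 of
   _heapinfo is reserved as the anchor of the free-list ring.  */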
/* Current search index for the heap table.  */
extern __malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
extern __malloc_size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];
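
/* Each _fraghead[LOG] anchors the list of free (1 << LOG)-byte
   fragments; for example a 24-byte request is served from
   _fraghead[5] (32-byte fragments), since fragment sizes round up
   to a power of two.  */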
/* List of blocks allocated with `memalign' (or `valloc').  */
struct alignlist
  {
    struct alignlist *next;
    __ptr_t aligned;		/* The address that memalign returned.  */
    __ptr_t exact;		/* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;

/* Instrumentation.  */
extern __malloc_size_t _chunks_used;
extern __malloc_size_t _bytes_used;
extern __malloc_size_t _chunks_free;
extern __malloc_size_t _bytes_free;

/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern __ptr_t _malloc_internal PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal PP ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal PP ((__ptr_t __ptr));
extern __ptr_t _malloc_internal_nolock PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal_nolock PP ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal_nolock PP ((__ptr_t __ptr));
#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;
#define LOCK()					\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_lock (&_malloc_mutex);	\
  } while (0)
#define UNLOCK()				\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_unlock (&_malloc_mutex);	\
  } while (0)
#define LOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_aligned_blocks_mutex);	\
  } while (0)
#define UNLOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_aligned_blocks_mutex);	\
  } while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif

#endif /* _MALLOC_INTERNAL.  */
/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object.  */
extern __ptr_t malloc_find_object_address PP ((__ptr_t __ptr));

/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern __ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size));

/* Default value of `__morecore'.  */
extern __ptr_t __default_morecore PP ((__malloc_ptrdiff_t __size));

/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) PP ((void));

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
extern __malloc_size_t __malloc_extra_blocks;

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize PP ((void));

/* Hooks for debugging versions.  */
extern void (*__malloc_initialize_hook) PP ((void));
extern void (*__free_hook) PP ((__ptr_t __ptr));
extern __ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));
extern __ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
extern __ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
				       __malloc_size_t __alignment));
/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };
/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck PP ((void (*__abortfunc) PP ((enum mcheck_status))));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe PP ((__ptr_t __ptr));
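
/* A minimal usage sketch (illustrative only): install the checking
   hooks before the first allocation, then probe any live block:

     mcheck (NULL);		-- NULL selects the default abort handler
     p = malloc (40);
     if (mprobe (p) != MCHECK_OK)
       abort ();
*/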
/* Activate a standard collection of tracing hooks.  */
extern void mtrace PP ((void));
extern void muntrace PP ((void));

/* Statistics available to the user.  */
struct mstats
  {
    __malloc_size_t bytes_total; /* Total size of the heap.  */
    __malloc_size_t chunks_used; /* Chunks allocated by the user.  */
    __malloc_size_t bytes_used;	/* Byte total of user-allocated chunks.  */
    __malloc_size_t chunks_free; /* Chunks in the free list.  */
    __malloc_size_t bytes_free;	/* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats PP ((void));
/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings PP ((__ptr_t __start,
				 void (*__warnfun) PP ((const char *))));

/* Relocating allocator.  */

/* Allocate SIZE bytes, and store the address in *HANDLEPTR.  */
extern __ptr_t r_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));

/* Free the storage allocated in HANDLEPTR.  */
extern void r_alloc_free PP ((__ptr_t *__handleptr));

/* Adjust the block at HANDLEPTR to be SIZE bytes long.  */
extern __ptr_t r_re_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));

#ifdef __cplusplus
}
#endif

#endif /* malloc.h  */
/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif
#include <errno.h>
/* How to really get more memory.  */
#if defined(CYGWIN)
extern __ptr_t bss_sbrk PP ((ptrdiff_t __size));
extern int bss_sbrk_did_unexec;
#endif
__ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size)) = __default_morecore;

/* Debugging hook for `malloc'.  */
__ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries.  */
static __malloc_size_t heapsize;

/* Search index in the info table.  */
__malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
__malloc_size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
__malloc_size_t _chunks_used;
__malloc_size_t _bytes_used;
__malloc_size_t _chunks_free;
__malloc_size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

__malloc_size_t __malloc_extra_blocks;

void (*__malloc_initialize_hook) PP ((void));
void (*__after_morecore_hook) PP ((void));
#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static __malloc_size_t last_state_size;
static malloc_info *last_heapinfo;

void
protect_malloc_state (protect_p)
     int protect_p;
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
		    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
	abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state(PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif
/* Aligned allocation.  */
static __ptr_t align PP ((__malloc_size_t));
static __ptr_t
align (size)
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows a
     signed int type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if ((__malloc_ptrdiff_t)size < 0)
    result = 0;
  else
    result = (*__morecore) (size);
  adj = (unsigned long int) ((unsigned long int) ((char *) result -
						  (char *) NULL)) % BLOCKSIZE;
  if (adj != 0)
    {
      __ptr_t new;
      adj = BLOCKSIZE - adj;
      new = (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}
/* Get SIZE bytes, if we can get them starting at END.
   Return the address of the space we got.
   If we cannot get space at END, fail and return 0.  */
static __ptr_t get_contiguous_space PP ((__malloc_ptrdiff_t, __ptr_t));
static __ptr_t
get_contiguous_space (size, position)
     __malloc_ptrdiff_t size;
     __ptr_t position;
{
  __ptr_t before;
  __ptr_t after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}
/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void register_heapinfo PP ((void));
#ifdef __GNUC__
__inline__
#endif
static void
register_heapinfo ()
{
  __malloc_size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}
#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;

static void
malloc_atfork_handler_prepare ()
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent ()
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child ()
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread ()
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
		  malloc_atfork_handler_parent,
		  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif
static void
malloc_initialize_1 ()
{
#ifdef GC_MCHECK
  mcheck (NULL);
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}
/* Set everything up and remember that we have.
   main will call malloc which calls this function.  That is before any threads
   or signal handlers have been set up, so we don't need thread protection.  */
int
__malloc_initialize ()
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}
static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static __ptr_t morecore_nolock PP ((__malloc_size_t));
static __ptr_t
morecore_nolock (size)
     __malloc_size_t size;
{
  __ptr_t result;
  malloc_info *newinfo, *oldinfo;
  __malloc_size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
	 added blocks in the table itself, as we hope to place them in
	 existing free space, which is already covered by part of the
	 existing table.  */
      newsize = heapsize;
      do
	newsize *= 2;
      while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
	 from realloc in the case of growing a large block, because the
	 block being grown is momentarily marked as free.  In this case
	 _heaplimit is zero so we know not to reuse space for internal
	 allocation.  */
      if (_heaplimit != 0)
	{
	  /* First try to allocate the new info table in core we already
	     have, in the usual way using realloc.  If realloc cannot
	     extend it in place or relocate it to existing sufficient core,
	     we will get called again, and the code above will notice the
	     `morecore_recursing' flag and return null.  */
	  int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
	  morecore_recursing = 1;
	  newinfo = (malloc_info *) _realloc_internal_nolock
	    (_heapinfo, newsize * sizeof (malloc_info));
	  morecore_recursing = 0;
	  if (newinfo == NULL)
	    errno = save;
	  else
	    {
	      /* We found some space in core, and realloc has put the old
		 table's blocks on the free list.  Now zero the new part
		 of the table and install the new table location.  */
	      memset (&newinfo[heapsize], 0,
		      (newsize - heapsize) * sizeof (malloc_info));
	      _heapinfo = newinfo;
	      heapsize = newsize;
	      goto got_heap;
	    }
	}

      /* Allocate new space for the malloc info table.  */
      while (1)
	{
	  newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));

	  /* Did it fail?  */
	  if (newinfo == NULL)
	    {
	      (*__morecore) (-size);
	      return NULL;
	    }

	  /* Is it big enough to record status for its own space?
	     If so, we win.  */
	  if ((__malloc_size_t) BLOCK ((char *) newinfo
				       + newsize * sizeof (malloc_info))
	      < newsize)
	    break;

	  /* Must try again.  First give back most of what we just got.  */
	  (*__morecore) (- newsize * sizeof (malloc_info));
	  newsize *= 2;
	}

      /* Copy the old table to the beginning of the new,
	 and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
	      (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
	 it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}
/* Allocate memory from the heap.  */
__ptr_t
_malloc_internal_nolock (size)
     __malloc_size_t size;
{
  __ptr_t result;
  __malloc_size_t block, blocks, lastblocks, start;
  register __malloc_size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

#ifdef SUNOS_LOCALTIME_BUG
  if (size < 16)
    size = 16;
#endif

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size.  */
      register __malloc_size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;
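
      /* Example: a request for 100 bytes gives log == 7 after this
	 loop (99 halves to zero in seven steps), so it is served from
	 a 128-byte fragment; requests of 65..128 bytes all share that
	 fragment size.  */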
      /* Look in the fragment lists for a
	 free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters.  */
	  result = (__ptr_t) next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    _heapinfo[block].busy.info.frag.first = (unsigned long int)
	      ((unsigned long int) ((char *) next->next - (char *) NULL)
	       % BLOCKSIZE) >> log;

	  /* Update the statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
	  result = _malloc_internal_nolock (BLOCKSIZE);
	  PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
	  result = _malloc_internal_nolock (BLOCKSIZE);
#else
	  result = malloc (BLOCKSIZE);
#endif
	  if (result == NULL)
	    {
	      PROTECT_MALLOC_STATE (1);
	      goto out;
	    }

	  /* Link all fragments but the first into the free list.  */
	  next = (struct list *) ((char *) result + (1 << log));
	  next->next = NULL;
	  next->prev = &_fraghead[log];
	  _fraghead[log].next = next;

	  for (i = 2; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      next->next->prev = next;
	    }

	  /* Initialize the nfree and first counters for this block.  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Get a little extra.  */
	      __malloc_size_t wantblocks = blocks + __malloc_extra_blocks;
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      /* Check to see if the new core will be contiguous with the
		 final free block; if so we don't need to get as much.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  /* We can't do this if we will have to make the heap info
		     table bigger to accommodate the new space.  */
		  block + wantblocks <= heapsize &&
		  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
					ADDRESS (block + lastblocks)))
		{
		  /* We got it contiguously.  Which block we are extending
		     (the `final free block' referred to above) might have
		     changed, if it got combined with a freed info table.  */
		  block = _heapinfo[0].free.prev;
		  _heapinfo[block].free.size += (wantblocks - lastblocks);
		  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
		  _heaplimit += wantblocks - lastblocks;
		  continue;
		}
	      result = morecore_nolock (wantblocks * BLOCKSIZE);
	      if (result == NULL)
		goto out;
	      block = BLOCK (result);
	      /* Put the new block at the end of the free list.  */
	      _heapinfo[block].free.size = wantblocks;
	      _heapinfo[block].free.prev = _heapinfo[0].free.prev;
	      _heapinfo[block].free.next = 0;
	      _heapinfo[0].free.prev = block;
	      _heapinfo[_heapinfo[block].free.prev].free.next = block;
	      ++_chunks_free;
	      /* Now loop to use some of that block for this allocation.  */
	    }
	}

      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list.  */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list.  */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
	 first with a negative number so you can find the first block by
	 adding that adjustment.  */
      while (--blocks > 0)
	_heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}
__ptr_t
_malloc_internal (size)
     __malloc_size_t size;
{
  __ptr_t result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}
__ptr_t
malloc (size)
     __malloc_size_t size;
{
  __ptr_t (*hook) (__malloc_size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of __malloc_hook to an automatic variable in case
     __malloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not the right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to __malloc_hook) for now.  */
  hook = __malloc_hook;
  return (hook != NULL ? *hook : _malloc_internal) (size);
}
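
/* Illustrative sketch, not part of the allocator: one way a client
   could interpose a tracing hook via __malloc_hook.  The names
   `trace_malloc' and `install_trace_malloc' are hypothetical, and the
   sketch assumes <stdio.h>.  Kept under #if 0 so it is not compiled.  */
#if 0
static __ptr_t
trace_malloc (__malloc_size_t size)
{
  __ptr_t p;

  __malloc_hook = NULL;		/* Unhook so the call below does not recurse.  */
  p = malloc (size);
  fprintf (stderr, "malloc (%lu) = %p\n", (unsigned long) size, p);
  __malloc_hook = trace_malloc;	/* Rehook.  */
  return p;
}

static void
install_trace_malloc (void)
{
  __malloc_hook = trace_malloc;
}
#endif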
#ifndef _LIBC

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

__ptr_t
_malloc (size)
     __malloc_size_t size;
{
  return malloc (size);
}

void
_free (ptr)
     __ptr_t ptr;
{
  free (ptr);
}

__ptr_t
_realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  return realloc (ptr, size);
}

#endif
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* Cope with systems lacking `memmove'.  */
#ifndef memmove
#if (defined (MEMMOVE_MISSING) || \
     !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))
#ifdef emacs
#undef __malloc_safe_bcopy
#define __malloc_safe_bcopy safe_bcopy
#endif
/* This function is defined in realloc.c.  */
extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))
#endif
#endif
/* Debugging hook for free.  */
void (*__free_hook) PP ((__ptr_t __ptr));

/* List of blocks allocated by memalign.  */
struct alignlist *_aligned_blocks = NULL;
/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (ptr)
     __ptr_t ptr;
{
  int type;
  __malloc_size_t block, blocks;
  register __malloc_size_t i;
  struct list *prev, *next;
  __ptr_t curbrk;
  const __malloc_size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

  PROTECT_MALLOC_STATE (0);

  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}

      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}
      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

	  __malloc_size_t info_block = BLOCK (_heapinfo);
	  __malloc_size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  __malloc_size_t prev_block = _heapinfo[block].free.prev;
	  __malloc_size_t prev_blocks = _heapinfo[prev_block].free.size;
	  __malloc_size_t next_block = _heapinfo[block].free.next;
	  __malloc_size_t next_blocks = _heapinfo[next_block].free.size;

	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold)))
	      )
	    {
	      malloc_info *newinfo;
	      __malloc_size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal_nolock (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      newinfo = (malloc_info *) _malloc_internal_nolock (info_blocks
								 * BLOCKSIZE);
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }

	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register __malloc_size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      (*__morecore) (-bytes);
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;
    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
	  _free_internal_nolock (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block.  */
	  next = (struct list *) ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block.  */
	  prev = (struct list *) ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first = (unsigned long int)
	    ((unsigned long int) ((char *) ptr - (char *) NULL)
	     % BLOCKSIZE >> type);
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
}
/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (ptr)
     __ptr_t ptr;
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}

/* Return memory to the heap.  */

FREE_RETURN_TYPE
free (ptr)
     __ptr_t ptr;
{
  void (*hook) (__ptr_t) = __free_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}

/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (ptr)
     __ptr_t ptr;
{
  free (ptr);
}
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif
/* Cope with systems lacking `memmove'.  */
#if (defined (MEMMOVE_MISSING) || \
     !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))

#ifdef emacs
#undef __malloc_safe_bcopy
#define __malloc_safe_bcopy safe_bcopy
#else

/* Snarfed directly from Emacs src/dispnew.c:
   XXX Should use system bcopy if it handles overlap.  */

/* Like bcopy except never gets confused by overlap.  */

void
__malloc_safe_bcopy (afrom, ato, size)
     __ptr_t afrom;
     __ptr_t ato;
     __malloc_size_t size;
{
  char *from = afrom, *to = ato;

  if (size <= 0 || from == to)
    return;

  /* If the source and destination don't overlap, then bcopy can
     handle it.  If they do overlap, but the destination is lower in
     memory than the source, we'll assume bcopy can handle that.  */
  if (to < from || from + size <= to)
    bcopy (from, to, size);

  /* Otherwise, we'll copy from the end.  */
  else
    {
      register char *endf = from + size;
      register char *endt = to + size;

      /* If TO - FROM is large, then we should break the copy into
	 nonoverlapping chunks of TO - FROM bytes each.  However, if
	 TO - FROM is small, then the bcopy function call overhead
	 makes this not worth it.  The crossover point could be about
	 anywhere.  Since I don't think the obvious copy loop is too
	 bad, I'm trying to err in its favor.  */
      if (to - from < 64)
	{
	  do
	    *--endt = *--endf;
	  while (endf != from);
	}
      else
	{
	  for (;;)
	    {
	      endt -= (to - from);
	      endf -= (to - from);

	      if (endt < to)
		break;

	      bcopy (endf, endt, to - from);
	    }

	  /* If SIZE wasn't a multiple of TO - FROM, there will be a
	     little left over.  The amount left over is
	     (endt + (to - from)) - to, which is endt - from.  */
	  bcopy (from, to, endt - from);
	}
    }
}
#endif /* emacs */

#ifndef memmove
extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))
#endif

#endif
#define min(A, B) ((A) < (B) ? (A) : (B))

/* Debugging hook for realloc.  */
__ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));

/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.  */
__ptr_t
_realloc_internal_nolock (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;
  int type;
  __malloc_size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
	{
	  result = _malloc_internal_nolock (size);
	  if (result != NULL)
	    {
	      memcpy (result, ptr, size);
	      _free_internal_nolock (ptr);
	      goto out;
	    }
	}

      /* The new size is a large allocation as well;
	 see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
	{
	  /* The new size is smaller; return
	     excess memory to the free list.  */
	  _heapinfo[block + blocks].busy.type = 0;
	  _heapinfo[block + blocks].busy.info.size
	    = _heapinfo[block].busy.info.size - blocks;
	  _heapinfo[block].busy.info.size = blocks;
	  /* We have just created a new chunk by splitting a chunk in two.
	     Now we will free this chunk; increment the statistics counter
	     so it doesn't become wrong when _free_internal decrements it.  */
	  ++_chunks_used;
	  _free_internal_nolock (ADDRESS (block + blocks));
	  result = ptr;
	}
      else if (blocks == _heapinfo[block].busy.info.size)
	/* No size change necessary.  */
	result = ptr;
      else
	{
	  /* Won't fit, so allocate a new region that will.
	     Free the old region first in case there is sufficient
	     adjacent free space to grow without moving.  */
	  blocks = _heapinfo[block].busy.info.size;
	  /* Prevent free from actually returning memory to the system.  */
	  oldlimit = _heaplimit;
	  _heaplimit = 0;
	  _free_internal_nolock (ptr);
	  result = _malloc_internal_nolock (size);
	  PROTECT_MALLOC_STATE (0);
	  if (_heaplimit == 0)
	    _heaplimit = oldlimit;
	  if (result == NULL)
	    {
	      /* Now we're really in trouble.  We have to unfree
		 the thing we just freed.  Unfortunately it might
		 have been coalesced with its neighbors.  */
	      if (_heapindex == block)
		(void) _malloc_internal_nolock (blocks * BLOCKSIZE);
	      else
		{
		  __ptr_t previous
		    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
		  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
		  _free_internal_nolock (previous);
		}
	      goto out;
	    }
	  if (ptr != result)
	    memmove (result, ptr, blocks * BLOCKSIZE);
	}
      break;

    default:
      /* Old size is a fragment; type is logarithm
	 to base two of the fragment size.  */
      if (size > (__malloc_size_t) (1 << (type - 1)) &&
	  size <= (__malloc_size_t) (1 << type))
	/* The new size is the same kind of fragment.  */
	result = ptr;
      else
	{
	  /* The new size is different; allocate a new space,
	     and copy the lesser of the new size and the old.  */
	  result = _malloc_internal_nolock (size);
	  if (result == NULL)
	    goto out;
	  memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
	  _free_internal_nolock (ptr);
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}
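
/* For example, reallocating a 100-byte fragment (type 7, i.e. a
   128-byte fragment) to 120 bytes takes the "same kind of fragment"
   path above and returns the original pointer unchanged, since both
   sizes round up to 128 bytes.  */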
__ptr_t
_realloc_internal (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}

__ptr_t
realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t (*hook) (__ptr_t, __malloc_size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  hook = __realloc_hook;
  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif
/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
__ptr_t
calloc (nmemb, size)
     register __malloc_size_t nmemb;
     register __malloc_size_t size;
{
  register __ptr_t result = malloc (nmemb * size);

  if (result != NULL)
    (void) memset (result, 0, nmemb * size);

  return result;
}
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with the GNU C Library; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
   MA 02110-1301, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

#ifndef __GNU_LIBRARY__
#define __sbrk sbrk
#endif

#ifdef __GNU_LIBRARY__
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

#include <stddef.h>
extern __ptr_t __sbrk PP ((ptrdiff_t increment));
#endif

#ifndef NULL
#define NULL 0
#endif
/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
__ptr_t
__default_morecore (increment)
     __malloc_ptrdiff_t increment;
{
  __ptr_t result;
#if defined(CYGWIN)
  if (!bss_sbrk_did_unexec)
    {
      return bss_sbrk (increment);
    }
#endif
  result = (__ptr_t) __sbrk (increment);
  if (result == (__ptr_t) -1)
    return NULL;
  return result;
}
/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif
#if __DJGPP__ - 0 == 1

/* There is some problem with memalign in DJGPP v1 and we are supposed
   to omit it.  No one told me why, they just told me to do it.  */

#else

__ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
				__malloc_size_t __alignment));
__ptr_t
memalign (alignment, size)
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj, lastadj;
  __ptr_t (*hook) (__malloc_size_t, __malloc_size_t) = __memalign_hook;

  if (hook)
    return (*hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;

  do
    {
      /* Reallocate the block with only as much excess as it needs.  */
      free (result);
      result = malloc (adj + size);
      if (result == NULL)	/* Impossible unless interrupted.  */
	return NULL;

      lastadj = adj;
      adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;
      /* It's conceivable we might have been so unlucky as to get a
	 different block with weaker alignment.  If so, this block is too
	 short to contain SIZE after alignment correction.  So we must
	 try again and get another block, slightly larger.  */
    } while (adj > lastadj);

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  l = (struct alignlist *) malloc (sizeof (struct alignlist));
	  if (l != NULL)
	    {
	      l->next = _aligned_blocks;
	      _aligned_blocks = l;
	    }
	}
      if (l != NULL)
	{
	  l->exact = result;
	  result = l->aligned = (char *) result + alignment - adj;
	}
      UNLOCK_ALIGNED_BLOCKS ();
      if (l == NULL)
	{
	  free (result);
	  result = NULL;
	}
    }

  return result;
}
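
/* Worked example of the padding arithmetic above (hypothetical
   addresses, illustration only).  Suppose ALIGNMENT is 16 and the
   final malloc returns 0x5008.  Then

     adj = 0x5008 % 16 = 8

   and the pointer handed back is

     result + alignment - adj = 0x5008 + 16 - 8 = 0x5010,

   which is 16-byte aligned.  The pair (exact = 0x5008,
   aligned = 0x5010) is recorded in `_aligned_blocks' so that `free'
   can later recover the block malloc really returned.  */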
#ifndef ENOMEM
#define ENOMEM 12
#endif

#ifndef EINVAL
#define EINVAL 22
#endif
int
posix_memalign (memptr, alignment, size)
     __ptr_t *memptr;
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t mem;

  if (alignment == 0
      || alignment % sizeof (__ptr_t) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = memalign (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;

  return 0;
}
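
/* Usage sketch (illustrative only, not part of the original source):

     __ptr_t p;
     int err = posix_memalign (&p, 64, 1024);
     if (err == 0)
       free (p);

   64 is accepted because it is a power of two and a multiple of
   sizeof (__ptr_t); an alignment of, say, 24 would fail the
   power-of-two test and yield EINVAL.  */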

#endif /* Not DJGPP v1 */
/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#if defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC)

/* Emacs defines GMALLOC_INHIBIT_VALLOC to avoid this definition
   on MSDOS, where it conflicts with a system header file.  */

#define ELIDE_VALLOC

#endif

#ifndef ELIDE_VALLOC

#if defined (__GNU_LIBRARY__) || defined (_LIBC)
#include <stddef.h>
#include <sys/cdefs.h>
#if defined (__GLIBC__) && __GLIBC__ >= 2
/* __getpagesize is already declared in <unistd.h> with return type int */
#else
extern size_t __getpagesize PP ((void));
#endif
#else
#include "getpagesize.h"
#define __getpagesize() getpagesize()
#endif

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif
static __malloc_size_t pagesize;

__ptr_t
valloc (size)
     __malloc_size_t size;
{
  if (pagesize == 0)
    pagesize = __getpagesize ();

  return memalign (pagesize, size);
}
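
/* Usage sketch (illustrative only, not part of the original source):
   on a machine whose page size is 4096 bytes,

     __ptr_t buf = valloc (100);

   behaves like memalign (4096, 100), so BUF starts on a page boundary
   and is suitable for page-granular operations such as mprotect.  */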

#endif /* Not ELIDE_VALLOC.  */
#ifdef GC_MCHECK

/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifdef emacs
#include <stdio.h>
#else
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#include <stdio.h>
#endif
#endif
/* Old hook values.  */
static void (*old_free_hook) __P ((__ptr_t ptr));
static __ptr_t (*old_malloc_hook) __P ((__malloc_size_t size));
static __ptr_t (*old_realloc_hook) __P ((__ptr_t ptr, __malloc_size_t size));

/* Function to call when something awful happens.  */
static void (*abortfunc) __P ((enum mcheck_status));

/* Arbitrary magical numbers.  */
#define MAGICWORD	0xfedabeeb
#define MAGICFREE	0xd8675309
#define MAGICBYTE	((char) 0xd7)
#define MALLOCFLOOD	((char) 0x93)
#define FREEFLOOD	((char) 0x95)
struct hdr
  {
    __malloc_size_t size;	/* Exact size requested by user.  */
    unsigned long int magic;	/* Magic number to check header integrity.  */
  };
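
/* Sketch of the block layout these hooks maintain (added for
   illustration, not part of the original source).  For a user request
   of SIZE bytes, mallochook below allocates
   sizeof (struct hdr) + SIZE + 1 bytes:

     +------------------------+-----------------------+-----------+
     | struct hdr             | user data (SIZE)      | MAGICBYTE |
     | size, magic=MAGICWORD  | flooded, MALLOCFLOOD  |  1 byte   |
     +------------------------+-----------------------+-----------+
     ^hdr                     ^pointer returned to the user

   Scribbling before the block corrupts the header magic (reported as
   MCHECK_HEAD), writing past the end clobbers MAGICBYTE (MCHECK_TAIL),
   and freeing twice finds MAGICFREE (MCHECK_FREE).  */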
#if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
#define flood memset
#else
static void flood __P ((__ptr_t, int, __malloc_size_t));
static void
flood (ptr, val, size)
     __ptr_t ptr;
     int val;
     __malloc_size_t size;
{
  char *cp = ptr;
  while (size--)
    *cp++ = val;
}
#endif
static enum mcheck_status checkhdr __P ((const struct hdr *));
static enum mcheck_status
checkhdr (hdr)
     const struct hdr *hdr;
{
  enum mcheck_status status;
  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
	status = MCHECK_TAIL;
      else
	status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}
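
/* Example of a failure checkhdr catches (hypothetical, illustration
   only), assuming the hooks below are installed:

     char *p = malloc (10);
     p[10] = 0;

   The store to p[10] is a one-byte overrun that clobbers MAGICBYTE,
   so the next free (p) runs checkhdr on the header and reports
   MCHECK_TAIL through `abortfunc'.  */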
static void freehook __P ((__ptr_t));
static void
freehook (ptr)
     __ptr_t ptr;
{
  struct hdr *hdr;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      flood (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}
static __ptr_t mallochook __P ((__malloc_size_t));
static __ptr_t
mallochook (size)
     __malloc_size_t size;
{
  struct hdr *hdr;

  __malloc_hook = old_malloc_hook;
  hdr = (struct hdr *) malloc (sizeof (struct hdr) + size + 1);
  __malloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  flood ((__ptr_t) (hdr + 1), MALLOCFLOOD, size);
  return (__ptr_t) (hdr + 1);
}
static __ptr_t reallochook __P ((__ptr_t, __malloc_size_t));
static __ptr_t
reallochook (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  struct hdr *hdr = NULL;
  __malloc_size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
	flood ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = (struct hdr *) realloc ((__ptr_t) hdr, sizeof (struct hdr) + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    flood ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return (__ptr_t) (hdr + 1);
}
static void
mabort (status)
     enum mcheck_status status;
{
  const char *msg;
  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
  abort ();
#endif
}
static int mcheck_used = 0;

int
mcheck (func)
     void (*func) __P ((enum mcheck_status));
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = __free_hook;
      __free_hook = freehook;
      old_malloc_hook = __malloc_hook;
      __malloc_hook = mallochook;
      old_realloc_hook = __realloc_hook;
      __realloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}
enum mcheck_status
mprobe (__ptr_t ptr)
{
  return mcheck_used ? checkhdr (ptr) : MCHECK_DISABLED;
}
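
/* Usage sketch (illustrative only, not part of the original source).
   mcheck must run before the first allocation, since the hooks cannot
   be installed once malloc is already in use; passing NULL selects the
   default handler, `mabort' above.

     #include <stdlib.h>

     int
     main (void)
     {
       char *p;
       if (mcheck (NULL) != 0)
         return 1;
       p = malloc (16);
       p[3] = 'x';
       free (p);
       return 0;
     }

   Every free and realloc then re-verifies the header word and the
   trailing MAGICBYTE, so heap corruption is reported near the point
   where it happened.  */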
#endif /* GC_MCHECK */

/* arch-tag: 93dce5c0-f49a-41b5-86b1-f91c4169c02e
   (do not change this comment) */