/* This file is no longer automatically generated from libc.  */

#define _MALLOC_INTERNAL

/* The malloc headers and source files from the C library follow here.  */

/* Declarations for `malloc' and friends.
   Copyright (C) 1990, 1991, 1992, 1993, 1995, 1996, 1999, 2002, 2003, 2004,
   2005, 2006, 2007 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifndef _MALLOC_H

#define _MALLOC_H 1

#ifdef _MALLOC_INTERNAL

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_GTK_AND_PTHREAD
#define USE_PTHREAD
#endif

#if ((defined __cplusplus || (defined (__STDC__) && __STDC__) \
      || defined STDC_HEADERS || defined PROTOTYPES) \
     && ! defined (BROKEN_PROTOTYPES))
#undef PP
#define PP(args) args
#undef __ptr_t
#define __ptr_t void *
#else /* Not C++ or ANSI C.  */
#undef PP
#define PP(args) ()
#undef __ptr_t
#define __ptr_t char *
#endif /* C++ or ANSI C.  */
#if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
#include <string.h>
#else
#ifndef memset
#define memset(s, zero, n) bzero ((s), (n))
#endif
#ifndef memcpy
#define memcpy(d, s, n) bcopy ((s), (d), (n))
#endif
#endif

#ifdef HAVE_LIMITS_H
#include <limits.h>
#endif
#ifndef CHAR_BIT
#define CHAR_BIT 8
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#ifdef USE_PTHREAD
#include <pthread.h>
#endif

#endif /* _MALLOC_INTERNAL.  */
#ifdef __cplusplus
extern "C"
{
#endif

#ifdef STDC_HEADERS
#include <stddef.h>
#define __malloc_size_t size_t
#define __malloc_ptrdiff_t ptrdiff_t
#else
#ifdef __GNUC__
#include <stddef.h>
#ifdef __SIZE_TYPE__
#define __malloc_size_t __SIZE_TYPE__
#endif
#endif
#ifndef __malloc_size_t
#define __malloc_size_t unsigned int
#endif
#define __malloc_ptrdiff_t int
#endif

#ifndef NULL
#define NULL 0
#endif

#ifndef FREE_RETURN_TYPE
#define FREE_RETURN_TYPE void
#endif
/* Allocate SIZE bytes of memory.  */
extern __ptr_t malloc PP ((__malloc_size_t __size));
/* Re-allocate the previously allocated block
   in __ptr_t, making the new block SIZE bytes long.  */
extern __ptr_t realloc PP ((__ptr_t __ptr, __malloc_size_t __size));
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern __ptr_t calloc PP ((__malloc_size_t __nmemb, __malloc_size_t __size));
/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
extern FREE_RETURN_TYPE free PP ((__ptr_t __ptr));
/* Allocate SIZE bytes aligned to a multiple of ALIGNMENT bytes.  */
#if ! (defined (_MALLOC_INTERNAL) && __DJGPP__ - 0 == 1) /* Avoid conflict.  */
extern __ptr_t memalign PP ((__malloc_size_t __alignment,
			     __malloc_size_t __size));
#endif

/* Allocate SIZE bytes on a page boundary.  */
#if ! (defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC))
extern __ptr_t valloc PP ((__malloc_size_t __size));
#endif
#ifdef USE_PTHREAD
/* Set up mutexes and make malloc etc. thread-safe.  */
extern void malloc_enable_thread PP ((void));
#endif
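
/* Illustrative sketch (not part of the original source): a host program
   is expected to call `malloc_enable_thread' once, early, before it
   creates any threads, since later pthread activity may itself allocate.
   Disabled with #if 0 so it does not affect compilation.  */
#if 0
int
main (int argc, char **argv)
{
  malloc_enable_thread ();   /* Arms LOCK/UNLOCK for the rest of the run.  */
  /* ... it is now safe to call malloc/free from multiple threads.  */
  return 0;
}
#endif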
#ifdef _MALLOC_INTERNAL

/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT		(CHAR_BIT * sizeof(int))
#define BLOCKLOG	(INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE	(1 << BLOCKLOG)
#define BLOCKIFY(SIZE)	(((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
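
/* Worked example (an illustration, not part of the original source):
   on a host where int is 32 bits, INT_BIT is 32, so BLOCKLOG is 12 and
   BLOCKSIZE is 4096.  A request over BLOCKSIZE / 2 is "large" and takes
   whole blocks; a smaller request is rounded up to a power-of-two
   fragment.  */
#if 0
#include <assert.h>
static void
blocking_example (void)
{
  assert (BLOCKSIZE == 4096);	   /* assumes INT_BIT > 16 */
  assert (BLOCKIFY (1) == 1);	   /* minimum of one block */
  assert (BLOCKIFY (4096) == 1);   /* exact fit */
  assert (BLOCKIFY (10000) == 3);  /* rounds up */
  /* A 100-byte request becomes a 128-byte fragment (log = 7),
     thirty-two of which fit in one 4096-byte block.  */
}
#endif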
/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP		(INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS	8

/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
	/* Zero for a large (multiblock) object, or positive giving the
	   logarithm to the base two of the fragment size.  */
	int type;
	union
	  {
	    struct
	      {
		__malloc_size_t nfree; /* Free frags in a fragmented block.  */
		__malloc_size_t first; /* First free fragment of the block.  */
	      } frag;
	    /* For a large object, in its first block, this has the number
	       of blocks in the object.  In the other blocks, this has a
	       negative number which says how far back the first block is.  */
	    __malloc_ptrdiff_t size;
	  } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
	__malloc_size_t size;	/* Size (in blocks) of a free cluster.  */
	__malloc_size_t next;	/* Index of next free cluster.  */
	__malloc_size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;
/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A)	(((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B)	((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
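
/* Illustrative sketch (not in the original source): BLOCK and ADDRESS
   are inverses.  Block numbers are 1-based so that entry 0 of _heapinfo
   can serve as the anchor of the circular free list.  */
#if 0
static void
block_address_example (__ptr_t p)
{
  __malloc_size_t b = BLOCK (p);   /* heap address -> table index */
  __ptr_t q = ADDRESS (b);         /* table index -> block start  */
  /* q is p rounded down to the boundary of its containing block.  */
}
#endif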
/* Current search index for the heap table.  */
extern __malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
extern __malloc_size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];

/* List of blocks allocated with `memalign' (or `valloc').  */
struct alignlist
  {
    struct alignlist *next;
    __ptr_t aligned;		/* The address that memalign returned.  */
    __ptr_t exact;		/* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;
/* Instrumentation.  */
extern __malloc_size_t _chunks_used;
extern __malloc_size_t _bytes_used;
extern __malloc_size_t _chunks_free;
extern __malloc_size_t _bytes_free;

/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern __ptr_t _malloc_internal PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal PP ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal PP ((__ptr_t __ptr));
extern __ptr_t _malloc_internal_nolock PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal_nolock PP ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal_nolock PP ((__ptr_t __ptr));
#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;
#define LOCK()					\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_lock (&_malloc_mutex);	\
  } while (0)
#define UNLOCK()				\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_unlock (&_malloc_mutex);	\
  } while (0)
#define LOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_aligned_blocks_mutex);	\
  } while (0)
#define UNLOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_aligned_blocks_mutex);	\
  } while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif
#endif /* _MALLOC_INTERNAL.  */

/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object.  */
extern __ptr_t malloc_find_object_address PP ((__ptr_t __ptr));

/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern __ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size));

/* Default value of `__morecore'.  */
extern __ptr_t __default_morecore PP ((__malloc_ptrdiff_t __size));

/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) PP ((void));

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
extern __malloc_size_t __malloc_extra_blocks;

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize PP ((void));

/* Hooks for debugging versions.  */
extern void (*__malloc_initialize_hook) PP ((void));
extern void (*__free_hook) PP ((__ptr_t __ptr));
extern __ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));
extern __ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
extern __ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
				       __malloc_size_t __alignment));
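
/* Illustrative sketch (not in the original source): the hook variables
   follow the glibc convention.  A debugging build might count
   allocations like this; `counting_malloc' and `n_mallocs' are
   hypothetical names, and like the hooks themselves this sketch is not
   thread-safe.  */
#if 0
static __malloc_size_t n_mallocs;

static __ptr_t
counting_malloc (__malloc_size_t size)
{
  __ptr_t p;
  ++n_mallocs;
  __malloc_hook = NULL;			/* fall through to _malloc_internal */
  p = malloc (size);
  __malloc_hook = counting_malloc;	/* re-arm the hook */
  return p;
}
/* Installed with:  __malloc_hook = counting_malloc;  */
#endif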
/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };
/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck PP ((void (*__abortfunc) PP ((enum mcheck_status))));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe PP ((__ptr_t __ptr));

/* Activate a standard collection of tracing hooks.  */
extern void mtrace PP ((void));
extern void muntrace PP ((void));
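
/* Illustrative sketch (not in the original source): mcheck must run
   before the first allocation; mprobe can then vet any live block.  */
#if 0
static void
mcheck_example (void)
{
  char *p;
  mcheck (NULL);	/* NULL: report on stderr and abort on error */
  p = malloc (32);
  p[32] = 'x';		/* clobber the byte just past the block */
  if (mprobe (p) == MCHECK_TAIL)
    /* detected: memory after the block was clobbered */ ;
}
#endif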
/* Statistics available to the user.  */
struct mstats
  {
    __malloc_size_t bytes_total; /* Total size of the heap.  */
    __malloc_size_t chunks_used; /* Chunks allocated by the user.  */
    __malloc_size_t bytes_used;	/* Byte total of user-allocated chunks.  */
    __malloc_size_t chunks_free; /* Chunks in the free list.  */
    __malloc_size_t bytes_free;	/* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats PP ((void));
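
/* Illustrative sketch (not in the original source): a caller can audit
   the allocator's bookkeeping at any time.  */
#if 0
#include <stdio.h>
static void
print_mstats (void)
{
  struct mstats s = mstats ();
  fprintf (stderr, "heap %lu bytes; %lu used in %lu chunks; %lu free\n",
	   (unsigned long) s.bytes_total, (unsigned long) s.bytes_used,
	   (unsigned long) s.chunks_used, (unsigned long) s.bytes_free);
}
#endif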
/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings PP ((__ptr_t __start,
				 void (*__warnfun) PP ((const char *))));

/* Relocating allocator.  */

/* Allocate SIZE bytes, and store the address in *HANDLEPTR.  */
extern __ptr_t r_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));

/* Free the storage allocated in HANDLEPTR.  */
extern void r_alloc_free PP ((__ptr_t *__handleptr));

/* Adjust the block at HANDLEPTR to be SIZE bytes long.  */
extern __ptr_t r_re_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));
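
/* Illustrative sketch (not in the original source): the relocating
   allocator (implemented elsewhere, in ralloc.c) hands storage back
   through a handle so the block may move; callers must always go
   through the handle rather than caching the raw pointer.  */
#if 0
static void
r_alloc_example (void)
{
  __ptr_t handle = NULL;
  r_alloc (&handle, 1024);	/* handle now points at 1024 bytes */
  r_re_alloc (&handle, 4096);	/* may relocate; handle is updated */
  r_alloc_free (&handle);
}
#endif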
#ifdef __cplusplus
}
#endif

#endif /* malloc.h  */
/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif
#include <errno.h>

/* How to really get more memory.  */
#if defined(CYGWIN)
extern __ptr_t bss_sbrk PP ((ptrdiff_t __size));
extern int bss_sbrk_did_unexec;
#endif
__ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size)) = __default_morecore;

/* Debugging hook for `malloc'.  */
__ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries.  */
static __malloc_size_t heapsize;

/* Search index in the info table.  */
__malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
__malloc_size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
__malloc_size_t _chunks_used;
__malloc_size_t _bytes_used;
__malloc_size_t _chunks_free;
__malloc_size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

__malloc_size_t __malloc_extra_blocks;

void (*__malloc_initialize_hook) PP ((void));
void (*__after_morecore_hook) PP ((void));
#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static __malloc_size_t last_state_size;
static malloc_info *last_heapinfo;

void
protect_malloc_state (protect_p)
     int protect_p;
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
		    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
	abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state(PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif
/* Aligned allocation.  */
static __ptr_t align PP ((__malloc_size_t));
static __ptr_t
align (size)
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows a
     signed int type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if ((__malloc_ptrdiff_t)size < 0)
    result = 0;
  else
    result = (*__morecore) (size);
  adj = (unsigned long int) ((unsigned long int) ((char *) result -
						  (char *) NULL)) % BLOCKSIZE;
  if (adj != 0)
    {
      /* Advance the break so RESULT becomes block-aligned; the extra
	 bytes obtained from __morecore are simply skipped over.  */
      __ptr_t new;
      adj = BLOCKSIZE - adj;
      new = (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}
/* Get SIZE bytes, if we can get them starting at END.
   Return the address of the space we got.
   If we cannot get space at END, fail and return 0.  */
static __ptr_t get_contiguous_space PP ((__malloc_ptrdiff_t, __ptr_t));
static __ptr_t
get_contiguous_space (size, position)
     __malloc_ptrdiff_t size;
     __ptr_t position;
{
  __ptr_t before;
  __ptr_t after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}
/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void register_heapinfo PP ((void));
#ifdef __GNUC__
__inline__
#endif
static void
register_heapinfo ()
{
  __malloc_size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}
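
/* Illustrative sketch (not in the original source): the negative
   back-pointers written above let code map an interior block of a
   large object back to its first block in O(1), which is what
   `malloc_find_object_address' relies on.  `first_block_of' is a
   hypothetical helper.  */
#if 0
static __malloc_size_t
first_block_of (__malloc_size_t b)
{
  if (_heapinfo[b].busy.type == 0 && _heapinfo[b].busy.info.size < 0)
    return b + _heapinfo[b].busy.info.size;   /* size holds -distance */
  return b;
}
#endif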
#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;

static void
malloc_atfork_handler_prepare ()
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent ()
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child ()
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread ()
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
		  malloc_atfork_handler_parent,
		  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif
static void
malloc_initialize_1 ()
{
#ifdef GC_MCHECK
  mcheck (NULL);
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}

/* Set everything up and remember that we have.
   main will call malloc which calls this function.  That is before any
   threads or signal handlers have been set up, so we don't need thread
   protection.  */
int
__malloc_initialize ()
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}
static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static __ptr_t morecore_nolock PP ((__malloc_size_t));
static __ptr_t
morecore_nolock (size)
     __malloc_size_t size;
{
  __ptr_t result;
  malloc_info *newinfo, *oldinfo;
  __malloc_size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
	 added blocks in the table itself, as we hope to place them in
	 existing free space, which is already covered by part of the
	 existing table.  */
      newsize = heapsize;
      do
	newsize *= 2;
      while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
	 from realloc in the case of growing a large block, because the
	 block being grown is momentarily marked as free.  In this case
	 _heaplimit is zero so we know not to reuse space for internal
	 allocation.  */
      if (_heaplimit != 0)
	{
	  /* First try to allocate the new info table in core we already
	     have, in the usual way using realloc.  If realloc cannot
	     extend it in place or relocate it to existing sufficient core,
	     we will get called again, and the code above will notice the
	     `morecore_recursing' flag and return null.  */
	  int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
	  morecore_recursing = 1;
	  newinfo = (malloc_info *) _realloc_internal_nolock
	    (_heapinfo, newsize * sizeof (malloc_info));
	  morecore_recursing = 0;
	  if (newinfo == NULL)
	    errno = save;
	  else
	    {
	      /* We found some space in core, and realloc has put the old
		 table's blocks on the free list.  Now zero the new part
		 of the table and install the new table location.  */
	      memset (&newinfo[heapsize], 0,
		      (newsize - heapsize) * sizeof (malloc_info));
	      _heapinfo = newinfo;
	      heapsize = newsize;
	      goto got_heap;
	    }
	}

      /* Allocate new space for the malloc info table.  */
      while (1)
	{
	  newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));

	  /* Did it fail?  */
	  if (newinfo == NULL)
	    {
	      (*__morecore) (-size);
	      return NULL;
	    }

	  /* Is it big enough to record status for its own space?
	     If so, we win.  */
	  if ((__malloc_size_t) BLOCK ((char *) newinfo
				       + newsize * sizeof (malloc_info))
	      < newsize)
	    break;

	  /* Must try again.  First give back most of what we just got.  */
	  (*__morecore) (- newsize * sizeof (malloc_info));
	  newsize *= 2;
	}

      /* Copy the old table to the beginning of the new,
	 and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
	      (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
	 it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}
/* Allocate memory from the heap.  */
__ptr_t
_malloc_internal_nolock (size)
     __malloc_size_t size;
{
  __ptr_t result;
  __malloc_size_t block, blocks, lastblocks, start;
  register __malloc_size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

#ifdef SUNOS_LOCALTIME_BUG
  if (size < 16)
    size = 16;
#endif

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size.  */
      register __malloc_size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;

      /* Look in the fragment lists for a
	 free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters.  */
	  result = (__ptr_t) next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    _heapinfo[block].busy.info.frag.first = (unsigned long int)
	      ((unsigned long int) ((char *) next->next - (char *) NULL)
	       % BLOCKSIZE) >> log;

	  /* Update the statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
	  result = _malloc_internal_nolock (BLOCKSIZE);
	  PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
	  result = _malloc_internal_nolock (BLOCKSIZE);
#else
	  result = malloc (BLOCKSIZE);
#endif
	  if (result == NULL)
	    {
	      PROTECT_MALLOC_STATE (1);
	      goto out;
	    }

	  /* Link all fragments but the first into the free list.  */
	  next = (struct list *) ((char *) result + (1 << log));
	  next->next = NULL;
	  next->prev = &_fraghead[log];
	  _fraghead[log].next = next;

	  for (i = 2; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      next->next->prev = next;
	    }

	  /* Initialize the nfree and first counters for this block.  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Get a little extra.  */
	      __malloc_size_t wantblocks = blocks + __malloc_extra_blocks;
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      /* Check to see if the new core will be contiguous with the
		 final free block; if so we don't need to get as much.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  /* We can't do this if we will have to make the heap info
		     table bigger to accommodate the new space.  */
		  block + wantblocks <= heapsize &&
		  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
					ADDRESS (block + lastblocks)))
		{
		  /* We got it contiguously.  Which block we are extending
		     (the `final free block' referred to above) might have
		     changed, if it got combined with a freed info table.  */
		  block = _heapinfo[0].free.prev;
		  _heapinfo[block].free.size += (wantblocks - lastblocks);
		  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
		  _heaplimit += wantblocks - lastblocks;
		  continue;
		}
	      result = morecore_nolock (wantblocks * BLOCKSIZE);
	      if (result == NULL)
		goto out;
	      block = BLOCK (result);
	      /* Put the new block at the end of the free list.  */
	      _heapinfo[block].free.size = wantblocks;
	      _heapinfo[block].free.prev = _heapinfo[0].free.prev;
	      _heapinfo[block].free.next = 0;
	      _heapinfo[0].free.prev = block;
	      _heapinfo[_heapinfo[block].free.prev].free.next = block;
	      ++_chunks_free;
	      /* Now loop to use some of that block for this allocation.  */
	    }
	}

      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list.  */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list.  */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
	 first with a negative number so you can find the first block by
	 adding that adjustment.  */
      while (--blocks > 0)
	_heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}
__ptr_t
_malloc_internal (size)
     __malloc_size_t size;
{
  __ptr_t result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}
__ptr_t
malloc (size)
     __malloc_size_t size;
{
  __ptr_t (*hook) (__malloc_size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of __malloc_hook to an automatic variable in case
     __malloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not the right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to __malloc_hook) for now.  */
  hook = __malloc_hook;
  return (hook != NULL ? *hook : _malloc_internal) (size);
}
#ifndef _LIBC

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

__ptr_t
_malloc (size)
     __malloc_size_t size;
{
  return malloc (size);
}

void
_free (ptr)
     __ptr_t ptr;
{
  free (ptr);
}

__ptr_t
_realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  return realloc (ptr, size);
}

#endif
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* Cope with systems lacking `memmove'.  */
#ifndef memmove
#if (defined (MEMMOVE_MISSING) || \
     !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))
#ifdef emacs
#undef __malloc_safe_bcopy
#define __malloc_safe_bcopy safe_bcopy
#endif
/* This function is defined in realloc.c.  */
extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))
#endif
#endif

/* Debugging hook for free.  */
void (*__free_hook) PP ((__ptr_t __ptr));

/* List of blocks allocated by memalign.  */
struct alignlist *_aligned_blocks = NULL;
/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (ptr)
     __ptr_t ptr;
{
  int type;
  __malloc_size_t block, blocks;
  register __malloc_size_t i;
  struct list *prev, *next;
  __ptr_t curbrk;
  const __malloc_size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

  PROTECT_MALLOC_STATE (0);

  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}

      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

	  __malloc_size_t info_block = BLOCK (_heapinfo);
	  __malloc_size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  __malloc_size_t prev_block = _heapinfo[block].free.prev;
	  __malloc_size_t prev_blocks = _heapinfo[prev_block].free.size;
	  __malloc_size_t next_block = _heapinfo[block].free.next;
	  __malloc_size_t next_blocks = _heapinfo[next_block].free.size;

	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold))))
	    {
	      malloc_info *newinfo;
	      __malloc_size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal_nolock (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      newinfo = (malloc_info *) _malloc_internal_nolock (info_blocks
								 * BLOCKSIZE);
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }

	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register __malloc_size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      (*__morecore) (-bytes);
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
	  _free_internal_nolock (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block.  */
	  next = (struct list *) ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block.  */
	  prev = (struct list *) ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first = (unsigned long int)
	    ((unsigned long int) ((char *) ptr - (char *) NULL)
	     % BLOCKSIZE >> type);
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
}
/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (ptr)
     __ptr_t ptr;
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}

/* Return memory to the heap.  */

FREE_RETURN_TYPE
free (ptr)
     __ptr_t ptr;
{
  void (*hook) (__ptr_t) = __free_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}
/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (ptr)
     __ptr_t ptr;
{
  free (ptr);
}
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* Cope with systems lacking `memmove'.  */
#if (defined (MEMMOVE_MISSING) || \
     !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))

#ifdef emacs
#undef __malloc_safe_bcopy
#define __malloc_safe_bcopy safe_bcopy
#else

/* Snarfed directly from Emacs src/dispnew.c:
   XXX Should use system bcopy if it handles overlap.  */

/* Like bcopy except never gets confused by overlap.  */

void
__malloc_safe_bcopy (afrom, ato, size)
     __ptr_t afrom;
     __ptr_t ato;
     __malloc_size_t size;
{
  char *from = afrom, *to = ato;

  if (size <= 0 || from == to)
    return;

  /* If the source and destination don't overlap, then bcopy can
     handle it.  If they do overlap, but the destination is lower in
     memory than the source, we'll assume bcopy can handle that.  */
  if (to < from || from + size <= to)
    bcopy (from, to, size);

  /* Otherwise, we'll copy from the end.  */
  else
    {
      register char *endf = from + size;
      register char *endt = to + size;

      /* If TO - FROM is large, then we should break the copy into
	 nonoverlapping chunks of TO - FROM bytes each.  However, if
	 TO - FROM is small, then the bcopy function call overhead
	 makes this not worth it.  The crossover point could be about
	 anywhere.  Since I don't think the obvious copy loop is too
	 bad, I'm trying to err in its favor.  */
      if (to - from < 64)
	{
	  do
	    *--endt = *--endf;
	  while (endf != from);
	}
      else
	{
	  for (;;)
	    {
	      endt -= (to - from);
	      endf -= (to - from);

	      if (endt < to)
		break;

	      bcopy (endf, endt, to - from);
	    }

	  /* If SIZE wasn't a multiple of TO - FROM, there will be a
	     little left over.  The amount left over is
	     (endt + (to - from)) - to, which is endt - from.  */
	  bcopy (from, to, endt - from);
	}
    }
}
#endif /* emacs */

#ifndef memmove
extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))
#endif

#endif
#define min(A, B) ((A) < (B) ? (A) : (B))

/* Debugging hook for realloc.  */
__ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));

/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.  */
__ptr_t
_realloc_internal_nolock (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;
  int type;
  __malloc_size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
	{
	  result = _malloc_internal_nolock (size);
	  if (result != NULL)
	    {
	      memcpy (result, ptr, size);
	      _free_internal_nolock (ptr);
	      goto out;
	    }
	}

      /* The new size is a large allocation as well;
	 see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
	{
	  /* The new size is smaller; return
	     excess memory to the free list.  */
	  _heapinfo[block + blocks].busy.type = 0;
	  _heapinfo[block + blocks].busy.info.size
	    = _heapinfo[block].busy.info.size - blocks;
	  _heapinfo[block].busy.info.size = blocks;
	  /* We have just created a new chunk by splitting a chunk in two.
	     Now we will free this chunk; increment the statistics counter
	     so it doesn't become wrong when _free_internal decrements it.  */
	  ++_chunks_used;
	  _free_internal_nolock (ADDRESS (block + blocks));
	  result = ptr;
	}
      else if (blocks == _heapinfo[block].busy.info.size)
	/* No size change necessary.  */
	result = ptr;
      else
	{
	  /* Won't fit, so allocate a new region that will.
	     Free the old region first in case there is sufficient
	     adjacent free space to grow without moving.  */
	  blocks = _heapinfo[block].busy.info.size;
	  /* Prevent free from actually returning memory to the system.  */
	  oldlimit = _heaplimit;
	  _heaplimit = 0;
	  _free_internal_nolock (ptr);
	  result = _malloc_internal_nolock (size);
	  PROTECT_MALLOC_STATE (0);
	  if (_heaplimit == 0)
	    _heaplimit = oldlimit;
	  if (result == NULL)
	    {
	      /* Now we're really in trouble.  We have to unfree
		 the thing we just freed.  Unfortunately it might
		 have been coalesced with its neighbors.  */
	      if (_heapindex == block)
		(void) _malloc_internal_nolock (blocks * BLOCKSIZE);
	      else
		{
		  __ptr_t previous
		    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
		  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
		  _free_internal_nolock (previous);
		}
	      goto out;
	    }
	  if (ptr != result)
	    memmove (result, ptr, blocks * BLOCKSIZE);
	}
      break;

    default:
      /* Old size is a fragment; type is logarithm
	 to base two of the fragment size.  */
      if (size > (__malloc_size_t) (1 << (type - 1)) &&
	  size <= (__malloc_size_t) (1 << type))
	/* The new size is the same kind of fragment.  */
	result = ptr;
      else
	{
	  /* The new size is different; allocate a new space,
	     and copy the lesser of the new size and the old.  */
	  result = _malloc_internal_nolock (size);
	  if (result == NULL)
	    goto out;
	  memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
	  _free_internal_nolock (ptr);
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}
__ptr_t
_realloc_internal (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}

__ptr_t
realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t (*hook) (__ptr_t, __malloc_size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  hook = __realloc_hook;
  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
__ptr_t
calloc (nmemb, size)
     register __malloc_size_t nmemb;
     register __malloc_size_t size;
{
  register __ptr_t result = malloc (nmemb * size);

  if (result != NULL)
    (void) memset (result, 0, nmemb * size);

  return result;
}
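
/* A note on this calloc (an observation, not original text): the
   multiplication `nmemb * size' can overflow __malloc_size_t, in which
   case a too-small block is silently returned.  A defensive variant
   would check first; `checked_calloc' below is a hypothetical sketch.  */
#if 0
__ptr_t
checked_calloc (__malloc_size_t nmemb, __malloc_size_t size)
{
  if (size != 0 && nmemb > (__malloc_size_t) -1 / size)
    return NULL;		/* nmemb * size would overflow */
  return calloc (nmemb, size);
}
#endif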
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with the GNU C Library; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
   MA 02110-1301, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

#ifndef __GNU_LIBRARY__
#define __sbrk sbrk
#endif

#ifdef __GNU_LIBRARY__
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

#include <stddef.h>
extern __ptr_t __sbrk PP ((ptrdiff_t increment));
#endif

#ifndef NULL
#define NULL 0
#endif

/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
__ptr_t
__default_morecore (increment)
     __malloc_ptrdiff_t increment;
{
  __ptr_t result;
#if defined(CYGWIN)
  if (!bss_sbrk_did_unexec)
    {
      return bss_sbrk (increment);
    }
#endif
  result = (__ptr_t) __sbrk (increment);
  if (result == (__ptr_t) -1)
    return NULL;
  return result;
}
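
/* Illustrative sketch (not in the original source): because __morecore
   is a variable, a host can substitute its own core allocator.  A toy
   replacement drawing from a fixed static arena, with sbrk-like
   semantics (return the previous break), might look like this;
   `arena', `arena_used', and `arena_morecore' are hypothetical.  */
#if 0
static char arena[16 * 1024 * 1024];
static __malloc_size_t arena_used;

static __ptr_t
arena_morecore (__malloc_ptrdiff_t increment)
{
  char *old_break = arena + arena_used;
  if (increment < 0
      ? arena_used < (__malloc_size_t) -increment
      : sizeof arena - arena_used < (__malloc_size_t) increment)
    return NULL;
  arena_used += increment;
  return old_break;		/* like sbrk: the previous break */
}
/* Installed with:  __morecore = arena_morecore;  */
#endif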
/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

#if __DJGPP__ - 0 == 1

/* There is some problem with memalign in DJGPP v1 and we are supposed
   to omit it.  No one told me why, they just told me to do it.  */

#else

__ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
				__malloc_size_t __alignment));
1785 __ptr_t
1786 memalign (alignment, size)
1787 __malloc_size_t alignment;
1788 __malloc_size_t size;
1790 __ptr_t result;
1791 unsigned long int adj, lastadj;
1792 __ptr_t (*hook) (__malloc_size_t, __malloc_size_t) = __memalign_hook;
1794 if (hook)
1795 return (*hook) (alignment, size);
1797 /* Allocate a block with enough extra space to pad the block with up to
1798 (ALIGNMENT - 1) bytes if necessary. */
1799 result = malloc (size + alignment - 1);
1800 if (result == NULL)
1801 return NULL;
1803 /* Figure out how much we will need to pad this particular block
1804 to achieve the required alignment. */
1805 adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;
1809 /* Reallocate the block with only as much excess as it needs. */
1810 free (result);
1811 result = malloc (adj + size);
1812 if (result == NULL) /* Impossible unless interrupted. */
1813 return NULL;
1815 lastadj = adj;
1816 adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;
1817 /* It's conceivable we might have been so unlucky as to get a
1818 different block with weaker alignment. If so, this block is too
1819 short to contain SIZE after alignment correction. So we must
1820 try again and get another block, slightly larger. */
1821 } while (adj > lastadj);
  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
         can identify the pointer it is passed, which will be in the middle
         of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
        if (l->aligned == NULL)
          /* This slot is free.  Use it.  */
          break;
      if (l == NULL)
        {
          l = (struct alignlist *) malloc (sizeof (struct alignlist));
          if (l != NULL)
            {
              l->next = _aligned_blocks;
              _aligned_blocks = l;
            }
        }
      if (l != NULL)
        {
          l->exact = result;
          result = l->aligned = (char *) result + alignment - adj;
        }
      UNLOCK_ALIGNED_BLOCKS ();
      if (l == NULL)
        {
          free (result);
          result = NULL;
        }
    }

  return result;
}

#endif /* Not DJGPP v1 */
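/* Editor's usage sketch (not in the original file): obtain a block
   whose address is a multiple of 64, then release it.  `free' works on
   the adjusted pointer because the _aligned_blocks list above records
   the mapping back to the address malloc actually returned.  */
#if 0
static void
example_aligned_alloc (void)
{
  char *buf = (char *) memalign (64, 1024);
  if (buf != NULL)
    {
      /* ((unsigned long int) buf % 64) == 0 holds here.  */
      free (buf);
    }
}
#endif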
/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

The author may be reached (Email) at the address mike@ai.mit.edu,
or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#if defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC)

/* Emacs defines GMALLOC_INHIBIT_VALLOC to avoid this definition
   on MSDOS, where it conflicts with a system header file.  */

#define ELIDE_VALLOC

#endif

#ifndef ELIDE_VALLOC

#if defined (__GNU_LIBRARY__) || defined (_LIBC)
#include <stddef.h>
#include <sys/cdefs.h>
#if defined (__GLIBC__) && __GLIBC__ >= 2
/* __getpagesize is already declared in <unistd.h> with return type int.  */
#else
extern size_t __getpagesize PP ((void));
#endif
#else
#include "getpagesize.h"
#define __getpagesize() getpagesize()
#endif

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif
static __malloc_size_t pagesize;

__ptr_t
valloc (size)
     __malloc_size_t size;
{
  if (pagesize == 0)
    pagesize = __getpagesize ();

  return memalign (pagesize, size);
}

#endif /* Not ELIDE_VALLOC.  */
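/* Editor's sketch (hypothetical usage, not from the original source):
   since valloc is just memalign with the page size, a page-aligned
   buffer for raw I/O could be obtained like this; the size is
   arbitrary.  */
#if 0
static void
example_page_aligned (void)
{
  __ptr_t page_buf = valloc (2 * 4096);  /* Start address is page-aligned.  */
  if (page_buf != NULL)
    free (page_buf);
}
#endif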
#ifdef GC_MCHECK

/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

The author may be reached (Email) at the address mike@ai.mit.edu,
or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifdef emacs
#include <stdio.h>
#else
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#include <stdio.h>
#endif
#endif
/* Old hook values.  */
static void (*old_free_hook) __P ((__ptr_t ptr));
static __ptr_t (*old_malloc_hook) __P ((__malloc_size_t size));
static __ptr_t (*old_realloc_hook) __P ((__ptr_t ptr, __malloc_size_t size));

/* Function to call when something awful happens.  */
static void (*abortfunc) __P ((enum mcheck_status));

/* Arbitrary magical numbers.  */
#define MAGICWORD       0xfedabeeb
#define MAGICFREE       0xd8675309
#define MAGICBYTE       ((char) 0xd7)
#define MALLOCFLOOD     ((char) 0x93)
#define FREEFLOOD       ((char) 0x95)
struct hdr
  {
    __malloc_size_t size;       /* Exact size requested by user.  */
    unsigned long int magic;    /* Magic number to check header integrity.  */
  };
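/* Editor's note (illustrative, not in the original): with the hooks
   below installed, every allocation is laid out as

     [ struct hdr | SIZE user bytes | MAGICBYTE ]

   `magic' guards the front of the block and the trailing MAGICBYTE
   guards the end, so checkhdr can detect clobbering on either side.  */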
#if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
#define flood memset
#else
static void flood __P ((__ptr_t, int, __malloc_size_t));
static void
flood (ptr, val, size)
     __ptr_t ptr;
     int val;
     __malloc_size_t size;
{
  char *cp = ptr;
  while (size--)
    *cp++ = val;
}
#endif
static enum mcheck_status checkhdr __P ((const struct hdr *));
static enum mcheck_status
checkhdr (hdr)
     const struct hdr *hdr;
{
  enum mcheck_status status;
  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
        status = MCHECK_TAIL;
      else
        status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}
static void freehook __P ((__ptr_t));
static void
freehook (ptr)
     __ptr_t ptr;
{
  struct hdr *hdr;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      flood (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}
static __ptr_t mallochook __P ((__malloc_size_t));
static __ptr_t
mallochook (size)
     __malloc_size_t size;
{
  struct hdr *hdr;

  __malloc_hook = old_malloc_hook;
  hdr = (struct hdr *) malloc (sizeof (struct hdr) + size + 1);
  __malloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  flood ((__ptr_t) (hdr + 1), MALLOCFLOOD, size);
  return (__ptr_t) (hdr + 1);
}
static __ptr_t reallochook __P ((__ptr_t, __malloc_size_t));
static __ptr_t
reallochook (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  struct hdr *hdr = NULL;
  __malloc_size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
        flood ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = (struct hdr *) realloc ((__ptr_t) hdr, sizeof (struct hdr) + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    flood ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return (__ptr_t) (hdr + 1);
}
static void
mabort (status)
     enum mcheck_status status;
{
  const char *msg;
  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
  abort ();
#endif
}
static int mcheck_used = 0;

int
mcheck (func)
     void (*func) __P ((enum mcheck_status));
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = __free_hook;
      __free_hook = freehook;
      old_malloc_hook = __malloc_hook;
      __malloc_hook = mallochook;
      old_realloc_hook = __realloc_hook;
      __realloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}
enum mcheck_status
mprobe (__ptr_t ptr)
{
  /* PTR is the user pointer, but checkhdr wants the header, which sits
     immediately before the user block (see mallochook); step back over
     it before checking.  */
  return mcheck_used ? checkhdr (((struct hdr *) ptr) - 1) : MCHECK_DISABLED;
}
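/* Editor's usage sketch (hypothetical, not part of the original):
   install the consistency hooks before the first allocation, then
   probe a live block.  mcheck returns 0 on success; passing NULL
   selects the default mabort handler above.  */
#if 0
static void
example_mcheck (void)
{
  if (mcheck (NULL) == 0)
    {
      char *p = (char *) malloc (32);
      if (p != NULL && mprobe (p) == MCHECK_OK)
        free (p);               /* freehook floods and re-checks the block.  */
    }
}
#endif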
#endif /* GC_MCHECK */
/* arch-tag: 93dce5c0-f49a-41b5-86b1-f91c4169c02e
   (do not change this comment) */