Misc doc fixes for Dired.
[emacs.git] / src / gmalloc.c
blob0df050e127a19aac562731e2146efbe8d8d4efd9
1 /* Declarations for `malloc' and friends.
2 Copyright (C) 1990, 1991, 1992, 1993, 1995, 1996, 1999, 2002, 2003, 2004,
3 2005, 2006, 2007 Free Software Foundation, Inc.
4 Written May 1989 by Mike Haertel.
6 This library is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License as
8 published by the Free Software Foundation; either version 2 of the
9 License, or (at your option) any later version.
11 This library is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
16 You should have received a copy of the GNU General Public
17 License along with this library; see the file COPYING. If
18 not, write to the Free Software Foundation, Inc., 51 Franklin Street,
19 Fifth Floor, Boston, MA 02110-1301, USA.
21 The author may be reached (Email) at the address mike@ai.mit.edu,
22 or (US mail) as Mike Haertel c/o Free Software Foundation. */
24 #ifdef HAVE_CONFIG_H
25 #include <config.h>
26 #endif
28 #ifdef HAVE_PTHREAD
29 #define USE_PTHREAD
30 #endif
32 #include <string.h>
33 #include <limits.h>
34 #include <stdint.h>
35 #include <unistd.h>
37 #ifdef USE_PTHREAD
38 #include <pthread.h>
39 #endif
41 #ifdef __cplusplus
42 extern "C"
44 #endif
46 #include <stddef.h>
49 /* Allocate SIZE bytes of memory. */
50 extern void *malloc (size_t size);
51 /* Re-allocate the previously allocated block
52 in ptr, making the new block SIZE bytes long. */
53 extern void *realloc (void *ptr, size_t size);
54 /* Allocate NMEMB elements of SIZE bytes each, all initialized to 0. */
55 extern void *calloc (size_t nmemb, size_t size);
56 /* Free a block allocated by `malloc', `realloc' or `calloc'. */
57 extern void free (void *ptr);
59 /* Allocate SIZE bytes allocated to ALIGNMENT bytes. */
60 #ifdef MSDOS
61 extern void *memalign (size_t, size_t);
62 extern int posix_memalign (void **, size_t, size_t);
63 #endif
65 #ifdef USE_PTHREAD
66 /* Set up mutexes and make malloc etc. thread-safe. */
67 extern void malloc_enable_thread (void);
68 #endif
70 /* The allocator divides the heap into blocks of fixed size; large
71 requests receive one or more whole blocks, and small requests
72 receive a fragment of a block. Fragment sizes are powers of two,
73 and all fragments of a block are the same size. When all the
74 fragments in a block have been freed, the block itself is freed. */
75 #define INT_BIT (CHAR_BIT * sizeof (int))
76 #define BLOCKLOG (INT_BIT > 16 ? 12 : 9)
77 #define BLOCKSIZE (1 << BLOCKLOG)
78 #define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
80 /* Determine the amount of memory spanned by the initial heap table
81 (not an absolute limit). */
82 #define HEAP (INT_BIT > 16 ? 4194304 : 65536)
84 /* Number of contiguous free blocks allowed to build up at the end of
85 memory before they will be returned to the system. */
86 #define FINAL_FREE_BLOCKS 8
/* Data structure giving per-block information.

   A block is either "busy" (allocated) or "free"; the two overlaid
   structs below record the state for each case.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
	/* Zero for a large (multiblock) object, or positive giving the
	   logarithm to the base two of the fragment size.  */
	int type;
	union
	  {
	    struct
	      {
		size_t nfree;	/* Free frags in a fragmented block.  */
		size_t first;	/* First free fragment of the block.  */
	      } frag;
	    /* For a large object, in its first block, this has the number
	       of blocks in the object.  In the other blocks, this has a
	       negative number which says how far back the first block is.  */
	    ptrdiff_t size;
	  } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
	size_t size;	/* Size (in blocks) of a free cluster.  */
	size_t next;	/* Index of next free cluster.  */
	size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;
120 /* Pointer to first block of the heap. */
121 extern char *_heapbase;
123 /* Table indexed by block number giving per-block information. */
124 extern malloc_info *_heapinfo;
126 /* Address to block number and vice versa. */
127 #define BLOCK(A) (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
128 #define ADDRESS(B) ((void *) (((B) - 1) * BLOCKSIZE + _heapbase))
130 /* Current search index for the heap table. */
131 extern size_t _heapindex;
133 /* Limit of valid info table indices. */
134 extern size_t _heaplimit;
/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };
143 /* Free list headers for each fragment size. */
144 extern struct list _fraghead[];
/* List of blocks allocated with `memalign' (or `valloc').  Maps the
   aligned address handed to the user back to the exact address malloc
   returned, so free can release the right block.  */
struct alignlist
  {
    struct alignlist *next;
    void *aligned;		/* The address that memaligned returned.  */
    void *exact;		/* The address that malloc returned.  */
  };
153 extern struct alignlist *_aligned_blocks;
155 /* Instrumentation. */
156 extern size_t _chunks_used;
157 extern size_t _bytes_used;
158 extern size_t _chunks_free;
159 extern size_t _bytes_free;
161 /* Internal versions of `malloc', `realloc', and `free'
162 used when these functions need to call each other.
163 They are the same but don't call the hooks. */
164 extern void *_malloc_internal (size_t);
165 extern void *_realloc_internal (void *, size_t);
166 extern void _free_internal (void *);
167 extern void *_malloc_internal_nolock (size_t);
168 extern void *_realloc_internal_nolock (void *, size_t);
169 extern void _free_internal_nolock (void *);
171 #ifdef USE_PTHREAD
172 extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
173 extern int _malloc_thread_enabled_p;
174 #define LOCK() \
175 do { \
176 if (_malloc_thread_enabled_p) \
177 pthread_mutex_lock (&_malloc_mutex); \
178 } while (0)
179 #define UNLOCK() \
180 do { \
181 if (_malloc_thread_enabled_p) \
182 pthread_mutex_unlock (&_malloc_mutex); \
183 } while (0)
184 #define LOCK_ALIGNED_BLOCKS() \
185 do { \
186 if (_malloc_thread_enabled_p) \
187 pthread_mutex_lock (&_aligned_blocks_mutex); \
188 } while (0)
189 #define UNLOCK_ALIGNED_BLOCKS() \
190 do { \
191 if (_malloc_thread_enabled_p) \
192 pthread_mutex_unlock (&_aligned_blocks_mutex); \
193 } while (0)
194 #else
195 #define LOCK()
196 #define UNLOCK()
197 #define LOCK_ALIGNED_BLOCKS()
198 #define UNLOCK_ALIGNED_BLOCKS()
199 #endif
201 /* Given an address in the middle of a malloc'd object,
202 return the address of the beginning of the object. */
203 extern void *malloc_find_object_address (void *ptr);
205 /* Underlying allocation function; successive calls should
206 return contiguous pieces of memory. */
207 extern void *(*__morecore) (ptrdiff_t size);
209 /* Default value of `__morecore'. */
210 extern void *__default_morecore (ptrdiff_t size);
212 /* If not NULL, this function is called after each time
213 `__morecore' is called to increase the data size. */
214 extern void (*__after_morecore_hook) (void);
216 /* Number of extra blocks to get each time we ask for more core.
217 This reduces the frequency of calling `(*__morecore)'. */
218 extern size_t __malloc_extra_blocks;
220 /* Nonzero if `malloc' has been called and done its initialization. */
221 extern int __malloc_initialized;
222 /* Function called to initialize malloc data structures. */
223 extern int __malloc_initialize (void);
225 /* Hooks for debugging versions. */
226 extern void (*__malloc_initialize_hook) (void);
227 extern void (*__free_hook) (void *ptr);
228 extern void *(*__malloc_hook) (size_t size);
229 extern void *(*__realloc_hook) (void *ptr, size_t size);
230 extern void *(*__memalign_hook) (size_t size, size_t alignment);
/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };
243 /* Activate a standard collection of debugging hooks. This must be called
244 before `malloc' is ever called. ABORTFUNC is called with an error code
245 (see enum above) when an inconsistency is detected. If ABORTFUNC is
246 null, the standard function prints on stderr and then calls `abort'. */
247 extern int mcheck (void (*abortfunc) (enum mcheck_status));
249 /* Check for aberrations in a particular malloc'd block. You must have
250 called `mcheck' already. These are the same checks that `mcheck' does
251 when you free or reallocate a block. */
252 extern enum mcheck_status mprobe (void *ptr);
254 /* Activate a standard collection of tracing hooks. */
255 extern void mtrace (void);
256 extern void muntrace (void);
/* Statistics available to the user.  */
struct mstats
  {
    size_t bytes_total;	/* Total size of the heap.  */
    size_t chunks_used;	/* Chunks allocated by the user.  */
    size_t bytes_used;	/* Byte total of user-allocated chunks.  */
    size_t chunks_free;	/* Chunks in the free list.  */
    size_t bytes_free;	/* Byte total of chunks in the free list.  */
  };
268 /* Pick up the current statistics. */
269 extern struct mstats mstats (void);
271 /* Call WARNFUN with a warning message when memory usage is high. */
272 extern void memory_warnings (void *start, void (*warnfun) (const char *));
274 #ifdef __cplusplus
276 #endif
278 /* Memory allocator `malloc'.
279 Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
280 Written May 1989 by Mike Haertel.
282 This library is free software; you can redistribute it and/or
283 modify it under the terms of the GNU General Public License as
284 published by the Free Software Foundation; either version 2 of the
285 License, or (at your option) any later version.
287 This library is distributed in the hope that it will be useful,
288 but WITHOUT ANY WARRANTY; without even the implied warranty of
289 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
290 General Public License for more details.
292 You should have received a copy of the GNU General Public
293 License along with this library; see the file COPYING. If
294 not, write to the Free Software Foundation, Inc., 51 Franklin Street,
295 Fifth Floor, Boston, MA 02110-1301, USA.
297 The author may be reached (Email) at the address mike@ai.mit.edu,
298 or (US mail) as Mike Haertel c/o Free Software Foundation. */
300 #include <errno.h>
302 /* On Cygwin there are two heaps. temacs uses the static heap
303 (defined in sheap.c and managed with bss_sbrk), and the dumped
304 emacs uses the Cygwin heap (managed with sbrk). When emacs starts
305 on Cygwin, it reinitializes malloc, and we save the old info for
306 use by free and realloc if they're called with a pointer into the
307 static heap.
309 Currently (2011-08-16) the Cygwin build doesn't use ralloc.c; if
310 this is changed in the future, we'll have to similarly deal with
311 reinitializing ralloc. */
312 #ifdef CYGWIN
313 extern void *bss_sbrk (ptrdiff_t size);
314 extern int bss_sbrk_did_unexec;
315 char *bss_sbrk_heapbase; /* _heapbase for static heap */
316 malloc_info *bss_sbrk_heapinfo; /* _heapinfo for static heap */
317 #endif
318 void *(*__morecore) (ptrdiff_t size) = __default_morecore;
320 /* Debugging hook for `malloc'. */
321 void *(*__malloc_hook) (size_t size);
323 /* Pointer to the base of the first block. */
324 char *_heapbase;
326 /* Block information table. Allocated with align/__free (not malloc/free). */
327 malloc_info *_heapinfo;
329 /* Number of info entries. */
330 static size_t heapsize;
332 /* Search index in the info table. */
333 size_t _heapindex;
335 /* Limit of valid info table indices. */
336 size_t _heaplimit;
338 /* Free lists for each fragment size. */
339 struct list _fraghead[BLOCKLOG];
341 /* Instrumentation. */
342 size_t _chunks_used;
343 size_t _bytes_used;
344 size_t _chunks_free;
345 size_t _bytes_free;
347 /* Are you experienced? */
348 int __malloc_initialized;
350 size_t __malloc_extra_blocks;
352 void (*__malloc_initialize_hook) (void);
353 void (*__after_morecore_hook) (void);
355 #if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE
357 /* Some code for hunting a bug writing into _heapinfo.
359 Call this macro with argument PROT non-zero to protect internal
360 malloc state against writing to it, call it with a zero argument to
361 make it readable and writable.
363 Note that this only works if BLOCKSIZE == page size, which is
364 the case on the i386. */
366 #include <sys/types.h>
367 #include <sys/mman.h>
369 static int state_protected_p;
370 static size_t last_state_size;
371 static malloc_info *last_heapinfo;
373 void
374 protect_malloc_state (int protect_p)
376 /* If _heapinfo has been relocated, make sure its old location
377 isn't left read-only; it will be reused by malloc. */
378 if (_heapinfo != last_heapinfo
379 && last_heapinfo
380 && state_protected_p)
381 mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);
383 last_state_size = _heaplimit * sizeof *_heapinfo;
384 last_heapinfo = _heapinfo;
386 if (protect_p != state_protected_p)
388 state_protected_p = protect_p;
389 if (mprotect (_heapinfo, last_state_size,
390 protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
391 abort ();
395 #define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)
397 #else
398 #define PROTECT_MALLOC_STATE(PROT) /* empty */
399 #endif
402 /* Aligned allocation. */
403 static void *
404 align (size_t size)
406 void *result;
407 ptrdiff_t adj;
409 /* align accepts an unsigned argument, but __morecore accepts a
410 signed one. This could lead to trouble if SIZE overflows the
411 ptrdiff_t type accepted by __morecore. We just punt in that
412 case, since they are requesting a ludicrous amount anyway. */
413 if (PTRDIFF_MAX < size)
414 result = 0;
415 else
416 result = (*__morecore) (size);
417 adj = (uintptr_t) result % BLOCKSIZE;
418 if (adj != 0)
420 adj = BLOCKSIZE - adj;
421 (*__morecore) (adj);
422 result = (char *) result + adj;
425 if (__after_morecore_hook)
426 (*__after_morecore_hook) ();
428 return result;
431 /* Get SIZE bytes, if we can get them starting at END.
432 Return the address of the space we got.
433 If we cannot get space at END, fail and return 0. */
434 static void *
435 get_contiguous_space (ptrdiff_t size, void *position)
437 void *before;
438 void *after;
440 before = (*__morecore) (0);
441 /* If we can tell in advance that the break is at the wrong place,
442 fail now. */
443 if (before != position)
444 return 0;
446 /* Allocate SIZE bytes and get the address of them. */
447 after = (*__morecore) (size);
448 if (!after)
449 return 0;
451 /* It was not contiguous--reject it. */
452 if (after != position)
454 (*__morecore) (- size);
455 return 0;
458 return after;
462 /* This is called when `_heapinfo' and `heapsize' have just
463 been set to describe a new info table. Set up the table
464 to describe itself and account for it in the statistics. */
465 static inline void
466 register_heapinfo (void)
468 size_t block, blocks;
470 block = BLOCK (_heapinfo);
471 blocks = BLOCKIFY (heapsize * sizeof (malloc_info));
473 /* Account for the _heapinfo block itself in the statistics. */
474 _bytes_used += blocks * BLOCKSIZE;
475 ++_chunks_used;
477 /* Describe the heapinfo block itself in the heapinfo. */
478 _heapinfo[block].busy.type = 0;
479 _heapinfo[block].busy.info.size = blocks;
480 /* Leave back-pointers for malloc_find_address. */
481 while (--blocks > 0)
482 _heapinfo[block + blocks].busy.info.size = -blocks;
485 #ifdef USE_PTHREAD
486 pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
487 pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
488 int _malloc_thread_enabled_p;
/* pthread_atfork prepare handler: take both malloc locks so the
   child inherits them in a consistent state.  */
static void
malloc_atfork_handler_prepare (void)
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}
/* pthread_atfork parent handler: release the locks taken in prepare,
   in reverse order.  */
static void
malloc_atfork_handler_parent (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}
/* pthread_atfork child handler: release the locks inherited from the
   parent so the child's allocator is usable.  */
static void
malloc_atfork_handler_child (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}
511 /* Set up mutexes and make malloc etc. thread-safe. */
512 void
513 malloc_enable_thread (void)
515 if (_malloc_thread_enabled_p)
516 return;
518 /* Some pthread implementations call malloc for statically
519 initialized mutexes when they are used first. To avoid such a
520 situation, we initialize mutexes here while their use is
521 disabled in malloc etc. */
522 pthread_mutex_init (&_malloc_mutex, NULL);
523 pthread_mutex_init (&_aligned_blocks_mutex, NULL);
524 pthread_atfork (malloc_atfork_handler_prepare,
525 malloc_atfork_handler_parent,
526 malloc_atfork_handler_child);
527 _malloc_thread_enabled_p = 1;
529 #endif
531 static void
532 malloc_initialize_1 (void)
534 #ifdef GC_MCHECK
535 mcheck (NULL);
536 #endif
538 #ifdef CYGWIN
539 if (bss_sbrk_did_unexec)
540 /* we're reinitializing the dumped emacs */
542 bss_sbrk_heapbase = _heapbase;
543 bss_sbrk_heapinfo = _heapinfo;
544 memset (_fraghead, 0, BLOCKLOG * sizeof (struct list));
546 #endif
548 if (__malloc_initialize_hook)
549 (*__malloc_initialize_hook) ();
551 heapsize = HEAP / BLOCKSIZE;
552 _heapinfo = align (heapsize * sizeof (malloc_info));
553 if (_heapinfo == NULL)
554 return;
555 memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
556 _heapinfo[0].free.size = 0;
557 _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
558 _heapindex = 0;
559 _heapbase = (char *) _heapinfo;
560 _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));
562 register_heapinfo ();
564 __malloc_initialized = 1;
565 PROTECT_MALLOC_STATE (1);
566 return;
569 /* Set everything up and remember that we have.
570 main will call malloc which calls this function. That is before any threads
571 or signal handlers has been set up, so we don't need thread protection. */
573 __malloc_initialize (void)
575 if (__malloc_initialized)
576 return 0;
578 malloc_initialize_1 ();
580 return __malloc_initialized;
583 static int morecore_recursing;
585 /* Get neatly aligned memory, initializing or
586 growing the heap info table as necessary. */
587 static void *
588 morecore_nolock (size_t size)
590 void *result;
591 malloc_info *newinfo, *oldinfo;
592 size_t newsize;
594 if (morecore_recursing)
595 /* Avoid recursion. The caller will know how to handle a null return. */
596 return NULL;
598 result = align (size);
599 if (result == NULL)
600 return NULL;
602 PROTECT_MALLOC_STATE (0);
604 /* Check if we need to grow the info table. */
605 if ((size_t) BLOCK ((char *) result + size) > heapsize)
607 /* Calculate the new _heapinfo table size. We do not account for the
608 added blocks in the table itself, as we hope to place them in
609 existing free space, which is already covered by part of the
610 existing table. */
611 newsize = heapsize;
613 newsize *= 2;
614 while ((size_t) BLOCK ((char *) result + size) > newsize);
616 /* We must not reuse existing core for the new info table when called
617 from realloc in the case of growing a large block, because the
618 block being grown is momentarily marked as free. In this case
619 _heaplimit is zero so we know not to reuse space for internal
620 allocation. */
621 if (_heaplimit != 0)
623 /* First try to allocate the new info table in core we already
624 have, in the usual way using realloc. If realloc cannot
625 extend it in place or relocate it to existing sufficient core,
626 we will get called again, and the code above will notice the
627 `morecore_recursing' flag and return null. */
628 int save = errno; /* Don't want to clobber errno with ENOMEM. */
629 morecore_recursing = 1;
630 newinfo = _realloc_internal_nolock (_heapinfo,
631 newsize * sizeof (malloc_info));
632 morecore_recursing = 0;
633 if (newinfo == NULL)
634 errno = save;
635 else
637 /* We found some space in core, and realloc has put the old
638 table's blocks on the free list. Now zero the new part
639 of the table and install the new table location. */
640 memset (&newinfo[heapsize], 0,
641 (newsize - heapsize) * sizeof (malloc_info));
642 _heapinfo = newinfo;
643 heapsize = newsize;
644 goto got_heap;
648 /* Allocate new space for the malloc info table. */
649 while (1)
651 newinfo = align (newsize * sizeof (malloc_info));
653 /* Did it fail? */
654 if (newinfo == NULL)
656 (*__morecore) (-size);
657 return NULL;
660 /* Is it big enough to record status for its own space?
661 If so, we win. */
662 if ((size_t) BLOCK ((char *) newinfo
663 + newsize * sizeof (malloc_info))
664 < newsize)
665 break;
667 /* Must try again. First give back most of what we just got. */
668 (*__morecore) (- newsize * sizeof (malloc_info));
669 newsize *= 2;
672 /* Copy the old table to the beginning of the new,
673 and zero the rest of the new table. */
674 memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
675 memset (&newinfo[heapsize], 0,
676 (newsize - heapsize) * sizeof (malloc_info));
677 oldinfo = _heapinfo;
678 _heapinfo = newinfo;
679 heapsize = newsize;
681 register_heapinfo ();
683 /* Reset _heaplimit so _free_internal never decides
684 it can relocate or resize the info table. */
685 _heaplimit = 0;
686 _free_internal_nolock (oldinfo);
687 PROTECT_MALLOC_STATE (0);
689 /* The new heap limit includes the new table just allocated. */
690 _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
691 return result;
694 got_heap:
695 _heaplimit = BLOCK ((char *) result + size);
696 return result;
699 /* Allocate memory from the heap. */
700 void *
701 _malloc_internal_nolock (size_t size)
703 void *result;
704 size_t block, blocks, lastblocks, start;
705 register size_t i;
706 struct list *next;
708 /* ANSI C allows `malloc (0)' to either return NULL, or to return a
709 valid address you can realloc and free (though not dereference).
711 It turns out that some extant code (sunrpc, at least Ultrix's version)
712 expects `malloc (0)' to return non-NULL and breaks otherwise.
713 Be compatible. */
715 #if 0
716 if (size == 0)
717 return NULL;
718 #endif
720 PROTECT_MALLOC_STATE (0);
722 if (size < sizeof (struct list))
723 size = sizeof (struct list);
725 /* Determine the allocation policy based on the request size. */
726 if (size <= BLOCKSIZE / 2)
728 /* Small allocation to receive a fragment of a block.
729 Determine the logarithm to base two of the fragment size. */
730 register size_t log = 1;
731 --size;
732 while ((size /= 2) != 0)
733 ++log;
735 /* Look in the fragment lists for a
736 free fragment of the desired size. */
737 next = _fraghead[log].next;
738 if (next != NULL)
740 /* There are free fragments of this size.
741 Pop a fragment out of the fragment list and return it.
742 Update the block's nfree and first counters. */
743 result = next;
744 next->prev->next = next->next;
745 if (next->next != NULL)
746 next->next->prev = next->prev;
747 block = BLOCK (result);
748 if (--_heapinfo[block].busy.info.frag.nfree != 0)
749 _heapinfo[block].busy.info.frag.first =
750 (uintptr_t) next->next % BLOCKSIZE >> log;
752 /* Update the statistics. */
753 ++_chunks_used;
754 _bytes_used += 1 << log;
755 --_chunks_free;
756 _bytes_free -= 1 << log;
758 else
760 /* No free fragments of the desired size, so get a new block
761 and break it into fragments, returning the first. */
762 #ifdef GC_MALLOC_CHECK
763 result = _malloc_internal_nolock (BLOCKSIZE);
764 PROTECT_MALLOC_STATE (0);
765 #elif defined (USE_PTHREAD)
766 result = _malloc_internal_nolock (BLOCKSIZE);
767 #else
768 result = malloc (BLOCKSIZE);
769 #endif
770 if (result == NULL)
772 PROTECT_MALLOC_STATE (1);
773 goto out;
776 /* Link all fragments but the first into the free list. */
777 next = (struct list *) ((char *) result + (1 << log));
778 next->next = NULL;
779 next->prev = &_fraghead[log];
780 _fraghead[log].next = next;
782 for (i = 2; i < (size_t) (BLOCKSIZE >> log); ++i)
784 next = (struct list *) ((char *) result + (i << log));
785 next->next = _fraghead[log].next;
786 next->prev = &_fraghead[log];
787 next->prev->next = next;
788 next->next->prev = next;
791 /* Initialize the nfree and first counters for this block. */
792 block = BLOCK (result);
793 _heapinfo[block].busy.type = log;
794 _heapinfo[block].busy.info.frag.nfree = i - 1;
795 _heapinfo[block].busy.info.frag.first = i - 1;
797 _chunks_free += (BLOCKSIZE >> log) - 1;
798 _bytes_free += BLOCKSIZE - (1 << log);
799 _bytes_used -= BLOCKSIZE - (1 << log);
802 else
804 /* Large allocation to receive one or more blocks.
805 Search the free list in a circle starting at the last place visited.
806 If we loop completely around without finding a large enough
807 space we will have to get more memory from the system. */
808 blocks = BLOCKIFY (size);
809 start = block = _heapindex;
810 while (_heapinfo[block].free.size < blocks)
812 block = _heapinfo[block].free.next;
813 if (block == start)
815 /* Need to get more from the system. Get a little extra. */
816 size_t wantblocks = blocks + __malloc_extra_blocks;
817 block = _heapinfo[0].free.prev;
818 lastblocks = _heapinfo[block].free.size;
819 /* Check to see if the new core will be contiguous with the
820 final free block; if so we don't need to get as much. */
821 if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
822 /* We can't do this if we will have to make the heap info
823 table bigger to accommodate the new space. */
824 block + wantblocks <= heapsize &&
825 get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
826 ADDRESS (block + lastblocks)))
828 /* We got it contiguously. Which block we are extending
829 (the `final free block' referred to above) might have
830 changed, if it got combined with a freed info table. */
831 block = _heapinfo[0].free.prev;
832 _heapinfo[block].free.size += (wantblocks - lastblocks);
833 _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
834 _heaplimit += wantblocks - lastblocks;
835 continue;
837 result = morecore_nolock (wantblocks * BLOCKSIZE);
838 if (result == NULL)
839 goto out;
840 block = BLOCK (result);
841 /* Put the new block at the end of the free list. */
842 _heapinfo[block].free.size = wantblocks;
843 _heapinfo[block].free.prev = _heapinfo[0].free.prev;
844 _heapinfo[block].free.next = 0;
845 _heapinfo[0].free.prev = block;
846 _heapinfo[_heapinfo[block].free.prev].free.next = block;
847 ++_chunks_free;
848 /* Now loop to use some of that block for this allocation. */
852 /* At this point we have found a suitable free list entry.
853 Figure out how to remove what we need from the list. */
854 result = ADDRESS (block);
855 if (_heapinfo[block].free.size > blocks)
857 /* The block we found has a bit left over,
858 so relink the tail end back into the free list. */
859 _heapinfo[block + blocks].free.size
860 = _heapinfo[block].free.size - blocks;
861 _heapinfo[block + blocks].free.next
862 = _heapinfo[block].free.next;
863 _heapinfo[block + blocks].free.prev
864 = _heapinfo[block].free.prev;
865 _heapinfo[_heapinfo[block].free.prev].free.next
866 = _heapinfo[_heapinfo[block].free.next].free.prev
867 = _heapindex = block + blocks;
869 else
871 /* The block exactly matches our requirements,
872 so just remove it from the list. */
873 _heapinfo[_heapinfo[block].free.next].free.prev
874 = _heapinfo[block].free.prev;
875 _heapinfo[_heapinfo[block].free.prev].free.next
876 = _heapindex = _heapinfo[block].free.next;
877 --_chunks_free;
880 _heapinfo[block].busy.type = 0;
881 _heapinfo[block].busy.info.size = blocks;
882 ++_chunks_used;
883 _bytes_used += blocks * BLOCKSIZE;
884 _bytes_free -= blocks * BLOCKSIZE;
886 /* Mark all the blocks of the object just allocated except for the
887 first with a negative number so you can find the first block by
888 adding that adjustment. */
889 while (--blocks > 0)
890 _heapinfo[block + blocks].busy.info.size = -blocks;
893 PROTECT_MALLOC_STATE (1);
894 out:
895 return result;
/* Allocate SIZE bytes: thread-safe wrapper that takes the malloc lock
   around the unlocked internal allocator.  */
void *
_malloc_internal (size_t size)
{
  void *result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}
910 void *
911 malloc (size_t size)
913 void *(*hook) (size_t);
915 if (!__malloc_initialized && !__malloc_initialize ())
916 return NULL;
918 /* Copy the value of __malloc_hook to an automatic variable in case
919 __malloc_hook is modified in another thread between its
920 NULL-check and the use.
922 Note: Strictly speaking, this is not a right solution. We should
923 use mutexes to access non-read-only variables that are shared
924 among multiple threads. We just leave it for compatibility with
925 glibc malloc (i.e., assignments to __malloc_hook) for now. */
926 hook = __malloc_hook;
927 return (hook != NULL ? *hook : _malloc_internal) (size);
930 #ifndef _LIBC
932 /* On some ANSI C systems, some libc functions call _malloc, _free
933 and _realloc. Make them use the GNU functions. */
935 extern void *_malloc (size_t);
936 extern void _free (void *);
937 extern void *_realloc (void *, size_t);
/* Compatibility alias: some libc functions call _malloc directly.  */
void *
_malloc (size_t size)
{
  return malloc (size);
}
/* Compatibility alias: some libc functions call _free directly.  */
void
_free (void *ptr)
{
  free (ptr);
}
/* Compatibility alias: some libc functions call _realloc directly.  */
void *
_realloc (void *ptr, size_t size)
{
  return realloc (ptr, size);
}
957 #endif
958 /* Free a block of memory allocated by `malloc'.
959 Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
960 Written May 1989 by Mike Haertel.
962 This library is free software; you can redistribute it and/or
963 modify it under the terms of the GNU General Public License as
964 published by the Free Software Foundation; either version 2 of the
965 License, or (at your option) any later version.
967 This library is distributed in the hope that it will be useful,
968 but WITHOUT ANY WARRANTY; without even the implied warranty of
969 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
970 General Public License for more details.
972 You should have received a copy of the GNU General Public
973 License along with this library; see the file COPYING. If
974 not, write to the Free Software Foundation, Inc., 51 Franklin Street,
975 Fifth Floor, Boston, MA 02110-1301, USA.
977 The author may be reached (Email) at the address mike@ai.mit.edu,
978 or (US mail) as Mike Haertel c/o Free Software Foundation. */
981 /* Debugging hook for free. */
982 void (*__free_hook) (void *__ptr);
984 /* List of blocks allocated by memalign. */
985 struct alignlist *_aligned_blocks = NULL;
/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex; the caller must already
   hold the malloc lock (or be single-threaded).

   PTR may be NULL, in which case this is a no-op.  If PTR came from
   `memalign', translate it back to the exact block malloc returned.
   Type 0 entries in _heapinfo are runs of whole BLOCKSIZE blocks;
   other types are fragments of size 1 << type.  */
void
_free_internal_nolock (void *ptr)
{
  int type;
  size_t block, blocks;
  register size_t i;
  struct list *prev, *next;
  void *curbrk;
  const size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

#ifdef CYGWIN
  if ((char *) ptr < _heapbase)
    /* We're being asked to free something in the static heap. */
    return;
#endif

  PROTECT_MALLOC_STATE (0);

  /* If PTR is an aligned pointer recorded by `memalign', recover the
     exact pointer that was actually allocated.  */
  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
        l->aligned = NULL;      /* Mark the slot in the list as free.  */
        ptr = l->exact;
        break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Whole-block allocation.  Get as many statistics as early as we
         can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
         Start searching at the last block referenced; this may benefit
         programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
        while (i > block)
          i = _heapinfo[i].free.prev;
      else
        {
          do
            i = _heapinfo[i].free.next;
          while (i > 0 && i < block);
          i = _heapinfo[i].free.prev;
        }

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
        {
          /* Coalesce this block with its predecessor.  */
          _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
          block = i;
        }
      else
        {
          /* Really link this block back into the free list.  */
          _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
          _heapinfo[block].free.next = _heapinfo[i].free.next;
          _heapinfo[block].free.prev = i;
          _heapinfo[i].free.next = block;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          ++_chunks_free;
        }

      /* Now that the block is linked in, see if we can coalesce it
         with its successor (by deleting its successor from the list
         and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
        {
          _heapinfo[block].free.size
            += _heapinfo[_heapinfo[block].free.next].free.size;
          _heapinfo[block].free.next
            = _heapinfo[_heapinfo[block].free.next].free.next;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          --_chunks_free;
        }

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
        {
          /* The end of the malloc heap is at the end of accessible core.
             It's possible that moving _heapinfo will allow us to
             return some space to the system.  */

          size_t info_block = BLOCK (_heapinfo);
          size_t info_blocks = _heapinfo[info_block].busy.info.size;
          size_t prev_block = _heapinfo[block].free.prev;
          size_t prev_blocks = _heapinfo[prev_block].free.size;
          size_t next_block = _heapinfo[block].free.next;
          size_t next_blocks = _heapinfo[next_block].free.size;

          if (/* Win if this block being freed is last in core, the info table
                 is just before it, the previous free block is just before the
                 info table, and the two free blocks together form a useful
                 amount to return to the system.  */
              (block + blocks == _heaplimit &&
               info_block + info_blocks == block &&
               prev_block != 0 && prev_block + prev_blocks == info_block &&
               blocks + prev_blocks >= lesscore_threshold) ||
              /* Nope, not the case.  We can also win if this block being
                 freed is just before the info table, and the table extends
                 to the end of core or is followed only by a free block,
                 and the total free space is worth returning to the system.  */
              (block + blocks == info_block &&
               ((info_block + info_blocks == _heaplimit &&
                 blocks >= lesscore_threshold) ||
                (info_block + info_blocks == next_block &&
                 next_block + next_blocks == _heaplimit &&
                 blocks + next_blocks >= lesscore_threshold))))
            {
              malloc_info *newinfo;
              size_t oldlimit = _heaplimit;

              /* Free the old info table, clearing _heaplimit to avoid
                 recursion into this code.  We don't want to return the
                 table's blocks to the system before we have copied them to
                 the new location.  */
              _heaplimit = 0;
              _free_internal_nolock (_heapinfo);
              _heaplimit = oldlimit;

              /* Tell malloc to search from the beginning of the heap for
                 free blocks, so it doesn't reuse the ones just freed.  */
              _heapindex = 0;

              /* Allocate new space for the info table and move its data.  */
              newinfo = _malloc_internal_nolock (info_blocks * BLOCKSIZE);
              PROTECT_MALLOC_STATE (0);
              memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
              _heapinfo = newinfo;

              /* We should now have coalesced the free block with the
                 blocks freed from the old info table.  Examine the entire
                 trailing free block to decide below whether to return some
                 to the system.  */
              block = _heapinfo[0].free.prev;
              blocks = _heapinfo[block].free.size;
            }

          /* Now see if we can return stuff to the system.  */
          if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
            {
              register size_t bytes = blocks * BLOCKSIZE;
              _heaplimit -= blocks;
              (*__morecore) (-bytes);
              /* Unlink the returned run from the free list.  */
              _heapinfo[_heapinfo[block].free.prev].free.next
                = _heapinfo[block].free.next;
              _heapinfo[_heapinfo[block].free.next].free.prev
                = _heapinfo[block].free.prev;
              block = _heapinfo[block].free.prev;
              --_chunks_free;
              _bytes_free -= bytes;
            }
        }

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Fragment of size 1 << type.  Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
                              (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
        {
          /* If all fragments of this block are free, remove them
             from the fragment list and free the whole block.  */
          next = prev;
          for (i = 1; i < (size_t) (BLOCKSIZE >> type); ++i)
            next = next->next;
          prev->prev->next = next;
          if (next != NULL)
            next->prev = prev->prev;
          _heapinfo[block].busy.type = 0;
          _heapinfo[block].busy.info.size = 1;

          /* Keep the statistics accurate.  */
          ++_chunks_used;
          _bytes_used += BLOCKSIZE;
          _chunks_free -= BLOCKSIZE >> type;
          _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
          _free_internal_nolock (ADDRESS (block));
#else
          free (ADDRESS (block));
#endif
        }
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
        {
          /* If some fragments of this block are free, link this
             fragment into the fragment list after the first free
             fragment of this block.  */
          next = ptr;
          next->next = prev->next;
          next->prev = prev;
          prev->next = next;
          if (next->next != NULL)
            next->next->prev = next;
          ++_heapinfo[block].busy.info.frag.nfree;
        }
      else
        {
          /* No fragments of this block are free, so link this
             fragment into the fragment list and announce that
             it is the first free fragment of this block.  */
          prev = ptr;
          _heapinfo[block].busy.info.frag.nfree = 1;
          _heapinfo[block].busy.info.frag.first =
            (uintptr_t) ptr % BLOCKSIZE >> type;
          prev->next = _fraghead[type].next;
          prev->prev = &_fraghead[type];
          prev->prev->next = prev;
          if (prev->next != NULL)
            prev->next->prev = prev;
        }
      break;
    }

  PROTECT_MALLOC_STATE (1);
}
/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.
   Takes the malloc lock around the real work in
   `_free_internal_nolock'.  */
void
_free_internal (void *ptr)
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}
1249 /* Return memory to the heap. */
1251 void
1252 free (void *ptr)
1254 void (*hook) (void *) = __free_hook;
1256 if (hook != NULL)
1257 (*hook) (ptr);
1258 else
1259 _free_internal (ptr);
/* Define the `cfree' alias for `free'.  `cfree' is a legacy name some
   programs still call; provide it as a weak alias where the toolchain
   supports that, otherwise as a real wrapper function.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (void *ptr)
{
  free (ptr);
}
#endif
1272 /* Change the size of a block allocated by `malloc'.
1273 Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
1274 Written May 1989 by Mike Haertel.
1276 This library is free software; you can redistribute it and/or
1277 modify it under the terms of the GNU General Public License as
1278 published by the Free Software Foundation; either version 2 of the
1279 License, or (at your option) any later version.
1281 This library is distributed in the hope that it will be useful,
1282 but WITHOUT ANY WARRANTY; without even the implied warranty of
1283 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1284 General Public License for more details.
1286 You should have received a copy of the GNU General Public
1287 License along with this library; see the file COPYING. If
1288 not, write to the Free Software Foundation, Inc., 51 Franklin Street,
1289 Fifth Floor, Boston, MA 02110-1301, USA.
1291 The author may be reached (Email) at the address mike@ai.mit.edu,
1292 or (US mail) as Mike Haertel c/o Free Software Foundation. */
/* Return the lesser of A and B.  Both arguments may be evaluated
   twice, so they must be free of side effects.  */
#define min(A, B) ((A) < (B) ? (A) : (B))
/* On Cygwin the dumped emacs may try to realloc storage allocated in
   the static heap.  We just malloc space in the new heap and copy the
   data.  */
#ifdef CYGWIN
void *
special_realloc (void *ptr, size_t size)
{
  size_t blockno, old_bytes;
  int frag_type;
  void *copy;

  /* Locate PTR's descriptor in the static-heap info table.  */
  blockno = ((char *) ptr - bss_sbrk_heapbase) / BLOCKSIZE + 1;
  frag_type = bss_sbrk_heapinfo[blockno].busy.type;

  /* Type 0 means whole blocks; otherwise the old object was a
     fragment of size 1 << type.  */
  if (frag_type == 0)
    old_bytes = bss_sbrk_heapinfo[blockno].busy.info.size * BLOCKSIZE;
  else
    old_bytes = (size_t) 1 << frag_type;

  copy = _malloc_internal_nolock (size);
  if (copy != NULL)
    memcpy (copy, ptr, min (old_bytes, size));
  return copy;
}
#endif
/* Debugging hook for realloc: when non-null, `realloc' calls it
   instead of `_realloc_internal'.  */
void *(*__realloc_hook) (void *ptr, size_t size);
/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.

   Caller must hold the malloc lock.  SIZE == 0 frees PTR and returns
   a minimal allocation; PTR == NULL degenerates to malloc.  */
void *
_realloc_internal_nolock (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

#ifdef CYGWIN
  if ((char *) ptr < _heapbase)
    /* ptr points into the static heap */
    return special_realloc (ptr, size);
#endif

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
        {
          result = _malloc_internal_nolock (size);
          if (result != NULL)
            {
              memcpy (result, ptr, size);
              _free_internal_nolock (ptr);
              goto out;
            }
        }

      /* The new size is a large allocation as well;
         see if we can hold it in place. */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
        {
          /* The new size is smaller; return
             excess memory to the free list. */
          _heapinfo[block + blocks].busy.type = 0;
          _heapinfo[block + blocks].busy.info.size
            = _heapinfo[block].busy.info.size - blocks;
          _heapinfo[block].busy.info.size = blocks;
          /* We have just created a new chunk by splitting a chunk in two.
             Now we will free this chunk; increment the statistics counter
             so it doesn't become wrong when _free_internal decrements it. */
          ++_chunks_used;
          _free_internal_nolock (ADDRESS (block + blocks));
          result = ptr;
        }
      else if (blocks == _heapinfo[block].busy.info.size)
        /* No size change necessary.  */
        result = ptr;
      else
        {
          /* Won't fit, so allocate a new region that will.
             Free the old region first in case there is sufficient
             adjacent free space to grow without moving. */
          blocks = _heapinfo[block].busy.info.size;
          /* Prevent free from actually returning memory to the system.  */
          oldlimit = _heaplimit;
          _heaplimit = 0;
          _free_internal_nolock (ptr);
          result = _malloc_internal_nolock (size);
          PROTECT_MALLOC_STATE (0);
          if (_heaplimit == 0)
            _heaplimit = oldlimit;
          if (result == NULL)
            {
              /* Now we're really in trouble.  We have to unfree
                 the thing we just freed.  Unfortunately it might
                 have been coalesced with its neighbors.  */
              if (_heapindex == block)
                (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
              else
                {
                  void *previous
                    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
                  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
                  _free_internal_nolock (previous);
                }
              goto out;
            }
          if (ptr != result)
            memmove (result, ptr, blocks * BLOCKSIZE);
        }
      break;

    default:
      /* Old size is a fragment; type is logarithm
         to base two of the fragment size.  */
      if (size > (size_t) (1 << (type - 1)) &&
          size <= (size_t) (1 << type))
        /* The new size is the same kind of fragment.  */
        result = ptr;
      else
        {
          /* The new size is different; allocate a new space,
             and copy the lesser of the new size and the old. */
          result = _malloc_internal_nolock (size);
          if (result == NULL)
            goto out;
          memcpy (result, ptr, min (size, (size_t) 1 << type));
          _free_internal_nolock (ptr);
        }
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}
/* Resize the region at PTR to SIZE bytes, taking the malloc lock
   around the real work in `_realloc_internal_nolock'.  */
void *
_realloc_internal (void *ptr, size_t size)
{
  void *result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}
1462 void *
1463 realloc (void *ptr, size_t size)
1465 void *(*hook) (void *, size_t);
1467 if (!__malloc_initialized && !__malloc_initialize ())
1468 return NULL;
1470 hook = __realloc_hook;
1471 return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
1473 /* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.
1475 This library is free software; you can redistribute it and/or
1476 modify it under the terms of the GNU General Public License as
1477 published by the Free Software Foundation; either version 2 of the
1478 License, or (at your option) any later version.
1480 This library is distributed in the hope that it will be useful,
1481 but WITHOUT ANY WARRANTY; without even the implied warranty of
1482 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1483 General Public License for more details.
1485 You should have received a copy of the GNU General Public
1486 License along with this library; see the file COPYING. If
1487 not, write to the Free Software Foundation, Inc., 51 Franklin Street,
1488 Fifth Floor, Boston, MA 02110-1301, USA.
1490 The author may be reached (Email) at the address mike@ai.mit.edu,
1491 or (US mail) as Mike Haertel c/o Free Software Foundation. */
/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.
   Returns NULL if NMEMB * SIZE would overflow `size_t' (the original
   code let the product wrap around, silently allocating a block far
   smaller than requested) or if the underlying `malloc' fails.  */
void *
calloc (size_t nmemb, size_t size)
{
  size_t bytes;
  void *result;

  /* Guard against multiplication overflow before computing the total.  */
  if (size != 0 && nmemb > (size_t) -1 / size)
    return NULL;

  bytes = nmemb * size;
  result = malloc (bytes);
  if (result != NULL)
    (void) memset (result, 0, bytes);

  return result;
}
1505 /* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
1506 This file is part of the GNU C Library.
1508 The GNU C Library is free software; you can redistribute it and/or modify
1509 it under the terms of the GNU General Public License as published by
1510 the Free Software Foundation; either version 2, or (at your option)
1511 any later version.
1513 The GNU C Library is distributed in the hope that it will be useful,
1514 but WITHOUT ANY WARRANTY; without even the implied warranty of
1515 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1516 GNU General Public License for more details.
1518 You should have received a copy of the GNU General Public License
1519 along with the GNU C Library; see the file COPYING. If not, write to
1520 the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
1521 MA 02110-1301, USA. */
/* uClibc defines __GNU_LIBRARY__, but it is not completely
   compatible.  */
#if !defined (__GNU_LIBRARY__) || defined (__UCLIBC__)
/* Fall back on the public `sbrk' entry point.  */
#define __sbrk sbrk
#else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

extern void *__sbrk (ptrdiff_t increment);
#endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
void *
__default_morecore (ptrdiff_t increment)
{
  void *new_break;

#if defined (CYGWIN)
  /* Before unexec, allocations come from the static bss heap.  */
  if (!bss_sbrk_did_unexec)
    return bss_sbrk (increment);
#endif

  /* sbrk signals failure with (void *) -1; map that to NULL.  */
  new_break = (void *) __sbrk (increment);
  return new_break == (void *) -1 ? NULL : new_break;
}
1552 /* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.
1554 This library is free software; you can redistribute it and/or
1555 modify it under the terms of the GNU General Public License as
1556 published by the Free Software Foundation; either version 2 of the
1557 License, or (at your option) any later version.
1559 This library is distributed in the hope that it will be useful,
1560 but WITHOUT ANY WARRANTY; without even the implied warranty of
1561 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1562 General Public License for more details.
1564 You should have received a copy of the GNU General Public
1565 License along with this library; see the file COPYING. If
1566 not, write to the Free Software Foundation, Inc., 51 Franklin Street,
1567 Fifth Floor, Boston, MA 02110-1301, USA. */
/* Debugging hook for memalign: when non-null, `memalign' delegates to
   it.  NOTE(review): the parameter names in this declaration read
   (size, alignment), but `memalign' invokes the hook as
   (*hook) (alignment, size) — the names appear swapped; confirm the
   intended order against any hook implementations.  */
void *(*__memalign_hook) (size_t size, size_t alignment);
/* Allocate SIZE bytes aligned to a multiple of ALIGNMENT.  The pointer
   returned may lie inside a larger malloc'd block; in that case the
   pair (exact, aligned) is recorded in `_aligned_blocks' so that
   `free' can map the aligned pointer back to the real block.  */
void *
memalign (size_t alignment, size_t size)
{
  void *result;
  size_t adj, lastadj;
  void *(*hook) (size_t, size_t) = __memalign_hook;

  if (hook)
    return (*hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = (uintptr_t) result % alignment;

  do
    {
      /* Reallocate the block with only as much excess as it needs.
         NOTE(review): the free-then-malloc sequence presumably relies
         on the allocator handing back the same (or an equally aligned)
         region most of the time — confirm this retry strategy still
         terminates quickly under fragmentation.  */
      free (result);
      result = malloc (adj + size);
      if (result == NULL)	/* Impossible unless interrupted.  */
	return NULL;

      lastadj = adj;
      adj = (uintptr_t) result % alignment;
      /* It's conceivable we might have been so unlucky as to get a
	 different block with weaker alignment.  If so, this block is too
	 short to contain SIZE after alignment correction.  So we must
	 try again and get another block, slightly larger.  */
    } while (adj > lastadj);

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      /* Reuse an empty slot in the list if one exists.  */
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  l = malloc (sizeof (struct alignlist));
	  if (l != NULL)
	    {
	      l->next = _aligned_blocks;
	      _aligned_blocks = l;
	    }
	}
      if (l != NULL)
	{
	  l->exact = result;
	  result = l->aligned = (char *) result + alignment - adj;
	}
      UNLOCK_ALIGNED_BLOCKS ();
      /* Could not record the mapping; give the block back and fail.  */
      if (l == NULL)
	{
	  free (result);
	  result = NULL;
	}
    }

  return result;
}
/* Fallback error codes for systems whose headers do not define them;
   the values are the traditional Unix numbers.  */
#ifndef ENOMEM
#define ENOMEM 12
#endif

#ifndef EINVAL
#define EINVAL 22
#endif
/* POSIX entry point: allocate SIZE bytes aligned to ALIGNMENT and
   store the result in *MEMPTR.  Returns 0 on success, EINVAL if
   ALIGNMENT is not a power of two multiple of sizeof (void *), or
   ENOMEM if the allocation fails.  *MEMPTR is untouched on error.  */
int
posix_memalign (void **memptr, size_t alignment, size_t size)
{
  void *block;

  /* ALIGNMENT must be a nonzero power of two and a multiple of
     sizeof (void *).  Each requirement is checked separately.  */
  if (alignment == 0)
    return EINVAL;
  if (alignment % sizeof (void *) != 0)
    return EINVAL;
  if ((alignment & (alignment - 1)) != 0)
    return EINVAL;

  block = memalign (alignment, size);
  if (block == NULL)
    return ENOMEM;

  *memptr = block;
  return 0;
}
1671 /* Allocate memory on a page boundary.
1672 Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.
1674 This library is free software; you can redistribute it and/or
1675 modify it under the terms of the GNU General Public License as
1676 published by the Free Software Foundation; either version 2 of the
1677 License, or (at your option) any later version.
1679 This library is distributed in the hope that it will be useful,
1680 but WITHOUT ANY WARRANTY; without even the implied warranty of
1681 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1682 General Public License for more details.
1684 You should have received a copy of the GNU General Public
1685 License along with this library; see the file COPYING. If
1686 not, write to the Free Software Foundation, Inc., 51 Franklin Street,
1687 Fifth Floor, Boston, MA 02110-1301, USA.
1689 The author may be reached (Email) at the address mike@ai.mit.edu,
1690 or (US mail) as Mike Haertel c/o Free Software Foundation. */
/* Emacs defines GMALLOC_INHIBIT_VALLOC to avoid this definition
   on MSDOS, where it conflicts with a system header file.  */

#ifndef GMALLOC_INHIBIT_VALLOC

/* Allocate SIZE bytes on a page boundary.  */
extern void *valloc (size_t);

#if defined _SC_PAGESIZE || !defined HAVE_GETPAGESIZE
# include "getpagesize.h"
#elif !defined getpagesize
extern int getpagesize (void);
#endif

/* Cached result of getpagesize; 0 until first use.  */
static size_t pagesize;

/* Allocate SIZE bytes aligned on a page boundary, by delegating to
   `memalign' with the system page size.  */
void *
valloc (size_t size)
{
  if (pagesize == 0)
    pagesize = getpagesize ();

  return memalign (pagesize, size);
}

#endif	/* Not GMALLOC_INHIBIT_VALLOC.  */
1719 #ifdef GC_MCHECK
1721 /* Standard debugging hooks for `malloc'.
1722 Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
1723 Written May 1989 by Mike Haertel.
1725 This library is free software; you can redistribute it and/or
1726 modify it under the terms of the GNU General Public License as
1727 published by the Free Software Foundation; either version 2 of the
1728 License, or (at your option) any later version.
1730 This library is distributed in the hope that it will be useful,
1731 but WITHOUT ANY WARRANTY; without even the implied warranty of
1732 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1733 General Public License for more details.
1735 You should have received a copy of the GNU General Public
1736 License along with this library; see the file COPYING. If
1737 not, write to the Free Software Foundation, Inc., 51 Franklin Street,
1738 Fifth Floor, Boston, MA 02110-1301, USA.
1740 The author may be reached (Email) at the address mike@ai.mit.edu,
1741 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1743 #include <stdio.h>
/* Old hook values, saved so the mcheck hooks can temporarily restore
   them while calling the real allocation functions (avoids
   re-entering the hooks recursively).  */
static void (*old_free_hook) (void *ptr);
static void *(*old_malloc_hook) (size_t size);
static void *(*old_realloc_hook) (void *ptr, size_t size);

/* Function to call when something awful happens.  */
static void (*abortfunc) (enum mcheck_status);

/* Arbitrary magical numbers.  MAGICWORD marks a live header and
   MAGICFREE a freed one; MAGICBYTE is stored just past the user data
   to detect overruns; MALLOCFLOOD and FREEFLOOD fill freshly
   allocated and freed memory respectively.  */
#define MAGICWORD (SIZE_MAX / 11 ^ SIZE_MAX / 13 << 3)
#define MAGICFREE (SIZE_MAX / 17 ^ SIZE_MAX / 19 << 4)
#define MAGICBYTE ((char) 0xd7)
#define MALLOCFLOOD ((char) 0x93)
#define FREEFLOOD ((char) 0x95)
/* Header prepended to every block handed out while mcheck is active.  */
struct hdr
{
  size_t size;		/* Exact size requested by user.  */
  size_t magic;		/* Magic number to check header integrity.  */
};
1766 static enum mcheck_status
1767 checkhdr (const struct hdr *hdr)
1769 enum mcheck_status status;
1770 switch (hdr->magic)
1772 default:
1773 status = MCHECK_HEAD;
1774 break;
1775 case MAGICFREE:
1776 status = MCHECK_FREE;
1777 break;
1778 case MAGICWORD:
1779 if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
1780 status = MCHECK_TAIL;
1781 else
1782 status = MCHECK_OK;
1783 break;
1785 if (status != MCHECK_OK)
1786 (*abortfunc) (status);
1787 return status;
/* `free' hook installed by mcheck: validate the header, mark it
   freed, flood the released memory, and free the underlying block
   (header included).  */
static void
freehook (void *ptr)
{
  struct hdr *hdr;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      /* Mark as freed so a second free of the same block is caught.  */
      hdr->magic = MAGICFREE;
      memset (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  /* Temporarily restore the old hook so the `free' below does not
     re-enter this function.  */
  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}
/* `malloc' hook installed by mcheck: allocate room for the header and
   one trailing guard byte, flood the user area, and return the
   address just past the header.  */
static void *
mallochook (size_t size)
{
  struct hdr *hdr;

  /* Call the real malloc with this hook removed to avoid recursion.  */
  __malloc_hook = old_malloc_hook;
  hdr = malloc (sizeof (struct hdr) + size + 1);
  __malloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  /* Guard byte just past the user area, checked by checkhdr.  */
  ((char *) &hdr[1])[size] = MAGICBYTE;
  memset (hdr + 1, MALLOCFLOOD, size);
  return hdr + 1;
}
/* `realloc' hook installed by mcheck: validate the old header, flood
   any shrunk-away region, resize the block (header and guard byte
   included), then rebuild the header and flood any newly grown
   region.  */
static void *
reallochook (void *ptr, size_t size)
{
  struct hdr *hdr = NULL;
  size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      /* Flood the part that is about to disappear, while it is still
         readable.  */
      if (size < osize)
	memset ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  /* Temporarily remove all three hooks so the `realloc' below uses
     the real allocator without re-entering the checkers.  */
  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = realloc (hdr, sizeof (struct hdr) + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    memset ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return hdr + 1;
}
1862 static void
1863 mabort (enum mcheck_status status)
1865 const char *msg;
1866 switch (status)
1868 case MCHECK_OK:
1869 msg = "memory is consistent, library is buggy";
1870 break;
1871 case MCHECK_HEAD:
1872 msg = "memory clobbered before allocated block";
1873 break;
1874 case MCHECK_TAIL:
1875 msg = "memory clobbered past end of allocated block";
1876 break;
1877 case MCHECK_FREE:
1878 msg = "block freed twice";
1879 break;
1880 default:
1881 msg = "bogus mcheck_status, library is buggy";
1882 break;
1884 #ifdef __GNU_LIBRARY__
1885 __libc_fatal (msg);
1886 #else
1887 fprintf (stderr, "mcheck: %s\n", msg);
1888 fflush (stderr);
1889 abort ();
1890 #endif
/* Nonzero once the mcheck hooks have been installed.  */
static int mcheck_used = 0;

/* Install the consistency-checking hooks, arranging for FUNC (or
   `mabort' when FUNC is NULL) to be called on detected corruption.
   Returns 0 if the hooks are active, -1 otherwise.  */
int
mcheck (void (*func) (enum mcheck_status))
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      /* Save the current hooks so the checkers can call through to
         them, then install the checking versions.  */
      old_free_hook = __free_hook;
      __free_hook = freehook;
      old_malloc_hook = __malloc_hook;
      __malloc_hook = mallochook;
      old_realloc_hook = __realloc_hook;
      __realloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}
/* Report the consistency status of the block at PTR, or
   MCHECK_DISABLED when mcheck is not active.
   NOTE(review): PTR is handed straight to `checkhdr', which reads it
   as a pointer to the `struct hdr' itself rather than the user
   pointer returned by malloc — confirm callers pass the header
   address, since glibc's mprobe subtracts the header first.  */
enum mcheck_status
mprobe (void *ptr)
{
  return mcheck_used ? checkhdr (ptr) : MCHECK_DISABLED;
}
1921 #endif /* GC_MCHECK */