/* Declarations for `malloc' and friends.
   Copyright (C) 1990-1993, 1995-1996, 1999, 2002-2007, 2013 Free
   Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#include <config.h>

#ifdef HAVE_PTHREAD
#define USE_PTHREAD
#endif

#include <string.h>
#include <limits.h>
#include <stdint.h>
#include <unistd.h>

#ifdef USE_PTHREAD
#include <pthread.h>
#endif

#ifdef WINDOWSNT
#include <w32heap.h>	/* for sbrk */
#endif

#ifdef __cplusplus
extern "C"
{
#endif

#include <stddef.h>

/* Allocate SIZE bytes of memory.  */
extern void *malloc (size_t size);
/* Re-allocate the previously allocated block
   in ptr, making the new block SIZE bytes long.  */
extern void *realloc (void *ptr, size_t size);
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern void *calloc (size_t nmemb, size_t size);
/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
extern void free (void *ptr);

/* Allocate SIZE bytes aligned to a multiple of ALIGNMENT bytes.  */
#ifdef MSDOS
extern void *memalign (size_t, size_t);
extern int posix_memalign (void **, size_t, size_t);
#endif

#ifdef USE_PTHREAD
/* Set up mutexes and make malloc etc. thread-safe.  */
extern void malloc_enable_thread (void);
#endif

/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT		(CHAR_BIT * sizeof (int))
#define BLOCKLOG	(INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE	(1 << BLOCKLOG)
#define BLOCKIFY(SIZE)	(((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
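
/* For example, on a host with 32-bit int, INT_BIT is 32, so BLOCKLOG
   is 12 and BLOCKSIZE is 4096 bytes.  BLOCKIFY rounds a byte count up
   to whole blocks: BLOCKIFY (1) == 1, BLOCKIFY (4096) == 1, and
   BLOCKIFY (4097) == 2.  */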

/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP		(INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS	8

/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
	/* Zero for a large (multiblock) object, or positive giving the
	   logarithm to the base two of the fragment size.  */
	int type;
	union
	  {
	    struct
	      {
		size_t nfree;	/* Free frags in a fragmented block.  */
		size_t first;	/* First free fragment of the block.  */
	      } frag;
	    /* For a large object, in its first block, this has the number
	       of blocks in the object.  In the other blocks, this has a
	       negative number which says how far back the first block is.  */
	    ptrdiff_t size;
	  } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
	size_t size;	/* Size (in blocks) of a free cluster.  */
	size_t next;	/* Index of next free cluster.  */
	size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;
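
/* For example, a three-block object whose first block has index B is
   recorded as _heapinfo[B].busy.info.size == 3,
   _heapinfo[B + 1].busy.info.size == -1, and
   _heapinfo[B + 2].busy.info.size == -2; adding the stored negative
   size to an interior block's index leads back to the first block B.  */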

/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A)	(((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B)	((void *) (((B) - 1) * BLOCKSIZE + _heapbase))
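
/* These macros are inverses on block boundaries: BLOCK (ADDRESS (B)) == B
   for any block index B, and ADDRESS (BLOCK (P)) == P for any
   block-aligned pointer P inside the heap.  Indices start at 1, which
   leaves _heapinfo[0] free to serve as the header of the free list.  */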

/* Current search index for the heap table.  */
extern size_t _heapindex;

/* Limit of valid info table indices.  */
extern size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];

/* List of blocks allocated with `memalign' (or `valloc').  */
struct alignlist
  {
    struct alignlist *next;
    void *aligned;		/* The address that memaligned returned.  */
    void *exact;		/* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;

/* Instrumentation.  */
extern size_t _chunks_used;
extern size_t _bytes_used;
extern size_t _chunks_free;
extern size_t _bytes_free;

/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern void *_malloc_internal (size_t);
extern void *_realloc_internal (void *, size_t);
extern void _free_internal (void *);
extern void *_malloc_internal_nolock (size_t);
extern void *_realloc_internal_nolock (void *, size_t);
extern void _free_internal_nolock (void *);

#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;
#define LOCK()						\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_malloc_mutex);		\
  } while (0)
#define UNLOCK()					\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_malloc_mutex);		\
  } while (0)
#define LOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_aligned_blocks_mutex);	\
  } while (0)
#define UNLOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_aligned_blocks_mutex);	\
  } while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif

/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object.  */
extern void *malloc_find_object_address (void *ptr);

/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern void *(*__morecore) (ptrdiff_t size);

/* Default value of `__morecore'.  */
extern void *__default_morecore (ptrdiff_t size);

/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) (void);

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
extern size_t __malloc_extra_blocks;

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize (void);

/* Hooks for debugging versions.  */
extern void (*__malloc_initialize_hook) (void);
extern void (*__free_hook) (void *ptr);
extern void *(*__malloc_hook) (size_t size);
extern void *(*__realloc_hook) (void *ptr, size_t size);
extern void *(*__memalign_hook) (size_t size, size_t alignment);

/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck (void (*abortfunc) (enum mcheck_status));
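
/* A minimal usage sketch (illustrative, not part of this file);
   `mcheck' must be called before the first `malloc':

     int
     main (void)
     {
       mcheck (NULL);	     // NULL installs the default abort handler
       char *p = malloc (16);
       p[16] = 0;	     // off-by-one: clobbers the trailing magic byte
       free (p);	     // the free hook detects it, reports MCHECK_TAIL
       return 0;
     }
*/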

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe (void *ptr);

/* Activate a standard collection of tracing hooks.  */
extern void mtrace (void);
extern void muntrace (void);

/* Statistics available to the user.  */
struct mstats
  {
    size_t bytes_total;	/* Total size of the heap.  */
    size_t chunks_used;	/* Chunks allocated by the user.  */
    size_t bytes_used;	/* Byte total of user-allocated chunks.  */
    size_t chunks_free;	/* Chunks in the free list.  */
    size_t bytes_free;	/* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats (void);
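
/* A small sketch of reading the statistics (illustrative only):

     struct mstats s = mstats ();
     fprintf (stderr, "heap: %lu bytes total, %lu used in %lu chunks\n",
	      (unsigned long) s.bytes_total, (unsigned long) s.bytes_used,
	      (unsigned long) s.chunks_used);
*/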

/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings (void *start, void (*warnfun) (const char *));

#ifdef __cplusplus
}
#endif

/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#include <errno.h>

/* On Cygwin there are two heaps.  temacs uses the static heap
   (defined in sheap.c and managed with bss_sbrk), and the dumped
   emacs uses the Cygwin heap (managed with sbrk).  When emacs starts
   on Cygwin, it reinitializes malloc, and we save the old info for
   use by free and realloc if they're called with a pointer into the
   static heap.

   Currently (2011-08-16) the Cygwin build doesn't use ralloc.c; if
   this is changed in the future, we'll have to similarly deal with
   reinitializing ralloc.  */
#ifdef CYGWIN
extern void *bss_sbrk (ptrdiff_t size);
extern int bss_sbrk_did_unexec;
char *bss_sbrk_heapbase;	/* _heapbase for static heap */
malloc_info *bss_sbrk_heapinfo;	/* _heapinfo for static heap */
#endif
void *(*__morecore) (ptrdiff_t size) = __default_morecore;

/* Debugging hook for `malloc'.  */
void *(*__malloc_hook) (size_t size);

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries.  */
static size_t heapsize;

/* Search index in the info table.  */
size_t _heapindex;

/* Limit of valid info table indices.  */
size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
size_t _chunks_used;
size_t _bytes_used;
size_t _chunks_free;
size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

size_t __malloc_extra_blocks;

void (*__malloc_initialize_hook) (void);
void (*__after_morecore_hook) (void);

#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static size_t last_state_size;
static malloc_info *last_heapinfo;

void
protect_malloc_state (int protect_p)
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo   = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
		    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
	abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif

/* Aligned allocation.  */
static void *
align (size_t size)
{
  void *result;
  ptrdiff_t adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows the
     ptrdiff_t type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if (PTRDIFF_MAX < size)
    result = 0;
  else
    result = (*__morecore) (size);
  adj = (uintptr_t) result % BLOCKSIZE;
  if (adj != 0)
    {
      adj = BLOCKSIZE - adj;
      (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}

/* Get SIZE bytes, if we can get them starting at POSITION.
   Return the address of the space we got.
   If we cannot get space at POSITION, fail and return 0.  */
static void *
get_contiguous_space (ptrdiff_t size, void *position)
{
  void *before;
  void *after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}

/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void
register_heapinfo (void)
{
  size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_object_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}

#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;

static void
malloc_atfork_handler_prepare (void)
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread (void)
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
		  malloc_atfork_handler_parent,
		  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif

static void
malloc_initialize_1 (void)
{
#ifdef GC_MCHECK
  mcheck (NULL);
#endif

#ifdef CYGWIN
  if (bss_sbrk_did_unexec)
    /* we're reinitializing the dumped emacs */
    {
      bss_sbrk_heapbase = _heapbase;
      bss_sbrk_heapinfo = _heapinfo;
      memset (_fraghead, 0, BLOCKLOG * sizeof (struct list));
    }
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}

/* Set everything up and remember that we have.
   main will call malloc which calls this function.  That is before any
   threads or signal handlers have been set up, so we don't need thread
   protection.  */
int
__malloc_initialize (void)
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}

static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static void *
morecore_nolock (size_t size)
{
  void *result;
  malloc_info *newinfo, *oldinfo;
  size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
	 added blocks in the table itself, as we hope to place them in
	 existing free space, which is already covered by part of the
	 existing table.  */
      newsize = heapsize;
      do
	newsize *= 2;
      while ((size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
	 from realloc in the case of growing a large block, because the
	 block being grown is momentarily marked as free.  In this case
	 _heaplimit is zero so we know not to reuse space for internal
	 allocation.  */
      if (_heaplimit != 0)
	{
	  /* First try to allocate the new info table in core we already
	     have, in the usual way using realloc.  If realloc cannot
	     extend it in place or relocate it to existing sufficient core,
	     we will get called again, and the code above will notice the
	     `morecore_recursing' flag and return null.  */
	  int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
	  morecore_recursing = 1;
	  newinfo = _realloc_internal_nolock (_heapinfo,
					      newsize * sizeof (malloc_info));
	  morecore_recursing = 0;
	  if (newinfo == NULL)
	    errno = save;
	  else
	    {
	      /* We found some space in core, and realloc has put the old
		 table's blocks on the free list.  Now zero the new part
		 of the table and install the new table location.  */
	      memset (&newinfo[heapsize], 0,
		      (newsize - heapsize) * sizeof (malloc_info));
	      _heapinfo = newinfo;
	      heapsize = newsize;
	      goto got_heap;
	    }
	}

      /* Allocate new space for the malloc info table.  */
      while (1)
	{
	  newinfo = align (newsize * sizeof (malloc_info));

	  /* Did it fail?  */
	  if (newinfo == NULL)
	    {
	      (*__morecore) (-size);
	      return NULL;
	    }

	  /* Is it big enough to record status for its own space?
	     If so, we win.  */
	  if ((size_t) BLOCK ((char *) newinfo
			      + newsize * sizeof (malloc_info))
	      < newsize)
	    break;

	  /* Must try again.  First give back most of what we just got.  */
	  (*__morecore) (- newsize * sizeof (malloc_info));
	  newsize *= 2;
	}

      /* Copy the old table to the beginning of the new,
	 and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
	      (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
	 it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}

/* Allocate memory from the heap.  */
void *
_malloc_internal_nolock (size_t size)
{
  void *result;
  size_t block, blocks, lastblocks, start;
  register size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size.  */
      register size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;
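      /* For illustration: a 100-byte request leaves size == 99 after the
	 decrement; the loop then halves 99 to 49, 24, 12, 6, 3, 1 and
	 finally 0, leaving log == 7, so the fragment size is
	 1 << 7 == 128, the smallest power of two that can hold the
	 original request.  */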

      /* Look in the fragment lists for a
	 free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters.  */
	  result = next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    _heapinfo[block].busy.info.frag.first =
	      (uintptr_t) next->next % BLOCKSIZE >> log;

	  /* Update the statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
	  result = _malloc_internal_nolock (BLOCKSIZE);
	  PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
	  result = _malloc_internal_nolock (BLOCKSIZE);
#else
	  result = malloc (BLOCKSIZE);
#endif
	  if (result == NULL)
	    {
	      PROTECT_MALLOC_STATE (1);
	      goto out;
	    }

	  /* Link all fragments but the first into the free list.  */
	  next = (struct list *) ((char *) result + (1 << log));
	  next->next = NULL;
	  next->prev = &_fraghead[log];
	  _fraghead[log].next = next;

	  for (i = 2; i < (size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      next->next->prev = next;
	    }

	  /* Initialize the nfree and first counters for this block.  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Get a little extra.  */
	      size_t wantblocks = blocks + __malloc_extra_blocks;
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      /* Check to see if the new core will be contiguous with the
		 final free block; if so we don't need to get as much.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  /* We can't do this if we will have to make the heap info
		     table bigger to accommodate the new space.  */
		  block + wantblocks <= heapsize &&
		  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
					ADDRESS (block + lastblocks)))
		{
		  /* We got it contiguously.  Which block we are extending
		     (the `final free block' referred to above) might have
		     changed, if it got combined with a freed info table.  */
		  block = _heapinfo[0].free.prev;
		  _heapinfo[block].free.size += (wantblocks - lastblocks);
		  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
		  _heaplimit += wantblocks - lastblocks;
		  continue;
		}
	      result = morecore_nolock (wantblocks * BLOCKSIZE);
	      if (result == NULL)
		goto out;
	      block = BLOCK (result);
	      /* Put the new block at the end of the free list.  */
	      _heapinfo[block].free.size = wantblocks;
	      _heapinfo[block].free.prev = _heapinfo[0].free.prev;
	      _heapinfo[block].free.next = 0;
	      _heapinfo[0].free.prev = block;
	      _heapinfo[_heapinfo[block].free.prev].free.next = block;
	      ++_chunks_free;
	      /* Now loop to use some of that block for this allocation.  */
	    }
	}

      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list.  */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list.  */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
	 first with a negative number so you can find the first block by
	 adding that adjustment.  */
      while (--blocks > 0)
	_heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

void *
_malloc_internal (size_t size)
{
  void *result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}

void *
malloc (size_t size)
{
  void *(*hook) (size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of __malloc_hook to an automatic variable in case
     __malloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not the right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to __malloc_hook) for now.  */
  hook = __malloc_hook;
  return (hook != NULL ? *hook : _malloc_internal) (size);
}

#ifndef _LIBC

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

extern void *_malloc (size_t);
extern void _free (void *);
extern void *_realloc (void *, size_t);

void *
_malloc (size_t size)
{
  return malloc (size);
}

void
_free (void *ptr)
{
  free (ptr);
}

void *
_realloc (void *ptr, size_t size)
{
  return realloc (ptr, size);
}

#endif

/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

/* Debugging hook for free.  */
void (*__free_hook) (void *__ptr);

/* List of blocks allocated by memalign.  */
struct alignlist *_aligned_blocks = NULL;

/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (void *ptr)
{
  int type;
  size_t block, blocks;
  register size_t i;
  struct list *prev, *next;
  void *curbrk;
  const size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;
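  /* Since __malloc_extra_blocks is zero unless the application raises
     it, the threshold is typically FINAL_FREE_BLOCKS == 8 blocks,
     i.e. 32 KB when BLOCKSIZE is 4096.  */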

  register struct alignlist *l;

  if (ptr == NULL)
    return;

#ifdef CYGWIN
  if ((char *) ptr < _heapbase)
    /* We're being asked to free something in the static heap.  */
    return;
#endif

  PROTECT_MALLOC_STATE (0);

  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}

      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

	  size_t info_block = BLOCK (_heapinfo);
	  size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  size_t prev_block = _heapinfo[block].free.prev;
	  size_t prev_blocks = _heapinfo[prev_block].free.size;
	  size_t next_block = _heapinfo[block].free.next;
	  size_t next_blocks = _heapinfo[next_block].free.size;

	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold))))
	    {
	      malloc_info *newinfo;
	      size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal_nolock (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      newinfo = _malloc_internal_nolock (info_blocks * BLOCKSIZE);
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }

	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      (*__morecore) (-bytes);
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
	  _free_internal_nolock (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block.  */
	  next = ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block.  */
	  prev = ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first =
	    (uintptr_t) ptr % BLOCKSIZE >> type;
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
}

/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (void *ptr)
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}

/* Return memory to the heap.  */

void
free (void *ptr)
{
  void (*hook) (void *) = __free_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}

/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (void *ptr)
{
  free (ptr);
}
#endif

/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef min
#define min(A, B) ((A) < (B) ? (A) : (B))
#endif

/* On Cygwin the dumped emacs may try to realloc storage allocated in
   the static heap.  We just malloc space in the new heap and copy the
   data.  */
#ifdef CYGWIN
void *
special_realloc (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, oldsize;

  block = ((char *) ptr - bss_sbrk_heapbase) / BLOCKSIZE + 1;
  type = bss_sbrk_heapinfo[block].busy.type;
  oldsize =
    type == 0 ? bss_sbrk_heapinfo[block].busy.info.size * BLOCKSIZE
    : (size_t) 1 << type;
  result = _malloc_internal_nolock (size);
  if (result != NULL)
    memcpy (result, ptr, min (oldsize, size));
  return result;
}
#endif

/* Debugging hook for realloc.  */
void *(*__realloc_hook) (void *ptr, size_t size);

/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.  */
void *
_realloc_internal_nolock (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

#ifdef CYGWIN
  if ((char *) ptr < _heapbase)
    /* ptr points into the static heap */
    return special_realloc (ptr, size);
#endif

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
	{
	  result = _malloc_internal_nolock (size);
	  if (result != NULL)
	    {
	      memcpy (result, ptr, size);
	      _free_internal_nolock (ptr);
	      goto out;
	    }
	}

      /* The new size is a large allocation as well;
	 see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
	{
	  /* The new size is smaller; return
	     excess memory to the free list.  */
	  _heapinfo[block + blocks].busy.type = 0;
	  _heapinfo[block + blocks].busy.info.size
	    = _heapinfo[block].busy.info.size - blocks;
	  _heapinfo[block].busy.info.size = blocks;
	  /* We have just created a new chunk by splitting a chunk in two.
	     Now we will free this chunk; increment the statistics counter
	     so it doesn't become wrong when _free_internal decrements it.  */
	  ++_chunks_used;
	  _free_internal_nolock (ADDRESS (block + blocks));
	  result = ptr;
	}
      else if (blocks == _heapinfo[block].busy.info.size)
	/* No size change necessary.  */
	result = ptr;
      else
	{
	  /* Won't fit, so allocate a new region that will.
	     Free the old region first in case there is sufficient
	     adjacent free space to grow without moving.  */
	  blocks = _heapinfo[block].busy.info.size;
	  /* Prevent free from actually returning memory to the system.  */
	  oldlimit = _heaplimit;
	  _heaplimit = 0;
	  _free_internal_nolock (ptr);
	  result = _malloc_internal_nolock (size);
	  PROTECT_MALLOC_STATE (0);
	  if (_heaplimit == 0)
	    _heaplimit = oldlimit;
	  if (result == NULL)
	    {
	      /* Now we're really in trouble.  We have to unfree
		 the thing we just freed.  Unfortunately it might
		 have been coalesced with its neighbors.  */
	      if (_heapindex == block)
		(void) _malloc_internal_nolock (blocks * BLOCKSIZE);
	      else
		{
		  void *previous
		    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
		  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
		  _free_internal_nolock (previous);
		}
	      goto out;
	    }
	  if (ptr != result)
	    memmove (result, ptr, blocks * BLOCKSIZE);
	}
      break;

    default:
      /* Old size is a fragment; type is logarithm
	 to base two of the fragment size.  */
      if (size > (size_t) (1 << (type - 1)) &&
	  size <= (size_t) (1 << type))
	/* The new size is the same kind of fragment.  */
	result = ptr;
      else
	{
	  /* The new size is different; allocate a new space,
	     and copy the lesser of the new size and the old.  */
	  result = _malloc_internal_nolock (size);
	  if (result == NULL)
	    goto out;
	  memcpy (result, ptr, min (size, (size_t) 1 << type));
	  _free_internal_nolock (ptr);
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

void *
_realloc_internal (void *ptr, size_t size)
{
  void *result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}

void *
realloc (void *ptr, size_t size)
{
  void *(*hook) (void *, size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  hook = __realloc_hook;
  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
}

/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
void *
calloc (register size_t nmemb, register size_t size)
{
  register void *result = malloc (nmemb * size);

  if (result != NULL)
    (void) memset (result, 0, nmemb * size);

  return result;
}
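
/* Note that, like the historical calloc interfaces it follows, the
   multiplication NMEMB * SIZE above is not checked for overflow;
   callers must ensure the product fits in size_t.  */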

/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with the GNU C Library.  If not, see <http://www.gnu.org/licenses/>.  */

/* uClibc defines __GNU_LIBRARY__, but it is not completely
   compatible.  */
#if !defined (__GNU_LIBRARY__) || defined (__UCLIBC__)
#define	__sbrk	sbrk
#else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

extern void *__sbrk (ptrdiff_t increment);
#endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */

/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
void *
__default_morecore (ptrdiff_t increment)
{
  void *result;
#if defined (CYGWIN)
  if (!bss_sbrk_did_unexec)
    {
      return bss_sbrk (increment);
    }
#endif
  result = (void *) __sbrk (increment);
  if (result == (void *) -1)
    return NULL;
  return result;
}

/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.  */

void *(*__memalign_hook) (size_t size, size_t alignment);

void *
memalign (size_t alignment, size_t size)
{
  void *result;
  size_t adj, lastadj;
  void *(*hook) (size_t, size_t) = __memalign_hook;

  if (hook)
    return (*hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = (uintptr_t) result % alignment;

  do
    {
      /* Reallocate the block with only as much excess as it needs.  */
      free (result);
      result = malloc (adj + size);
      if (result == NULL)	/* Impossible unless interrupted.  */
	return NULL;

      lastadj = adj;
      adj = (uintptr_t) result % alignment;
      /* It's conceivable we might have been so unlucky as to get a
	 different block with weaker alignment.  If so, this block is too
	 short to contain SIZE after alignment correction.  So we must
	 try again and get another block, slightly larger.  */
    } while (adj > lastadj);

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  l = malloc (sizeof *l);
	  if (l != NULL)
	    {
	      l->next = _aligned_blocks;
	      _aligned_blocks = l;
	    }
	}
      if (l != NULL)
	{
	  l->exact = result;
	  result = l->aligned = (char *) result + alignment - adj;
	}
      UNLOCK_ALIGNED_BLOCKS ();
      if (l == NULL)
	{
	  free (result);
	  result = NULL;
	}
    }

  return result;
}

#ifndef ENOMEM
#define ENOMEM 12
#endif

#ifndef EINVAL
#define EINVAL 22
#endif

int
posix_memalign (void **memptr, size_t alignment, size_t size)
{
  void *mem;

  if (alignment == 0
      || alignment % sizeof (void *) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = memalign (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;

  return 0;
}
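
/* An illustrative call (not part of the original source): request one
   kilobyte aligned on a 64-byte boundary, checking the POSIX-style
   result code instead of errno:

     void *buf;
     int err = posix_memalign (&buf, 64, 1024);
     if (err == 0)
       {
	 ...		// buf is 64-byte aligned here
	 free (buf);
       }
*/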

/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

/* Allocate SIZE bytes on a page boundary.  */
extern void *valloc (size_t);

#if defined _SC_PAGESIZE || !defined HAVE_GETPAGESIZE
# include "getpagesize.h"
#elif !defined getpagesize
extern int getpagesize (void);
#endif

static size_t pagesize;

void *
valloc (size_t size)
{
  if (pagesize == 0)
    pagesize = getpagesize ();

  return memalign (pagesize, size);
}

#ifdef GC_MCHECK

/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#include <stdio.h>

/* Old hook values.  */
static void (*old_free_hook) (void *ptr);
static void *(*old_malloc_hook) (size_t size);
static void *(*old_realloc_hook) (void *ptr, size_t size);

/* Function to call when something awful happens.  */
static void (*abortfunc) (enum mcheck_status);

/* Arbitrary magical numbers.  */
#define MAGICWORD	(SIZE_MAX / 11 ^ SIZE_MAX / 13 << 3)
#define MAGICFREE	(SIZE_MAX / 17 ^ SIZE_MAX / 19 << 4)
#define MAGICBYTE	((char) 0xd7)
#define MALLOCFLOOD	((char) 0x93)
#define FREEFLOOD	((char) 0x95)

struct hdr
  {
    size_t size;	/* Exact size requested by user.  */
    size_t magic;	/* Magic number to check header integrity.  */
  };

static enum mcheck_status
checkhdr (const struct hdr *hdr)
{
  enum mcheck_status status;
  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
	status = MCHECK_TAIL;
      else
	status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}

static void
freehook (void *ptr)
{
  struct hdr *hdr;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      memset (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}

static void *
mallochook (size_t size)
{
  struct hdr *hdr;

  __malloc_hook = old_malloc_hook;
  hdr = malloc (sizeof *hdr + size + 1);
  __malloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  memset (hdr + 1, MALLOCFLOOD, size);
  return hdr + 1;
}

static void *
reallochook (void *ptr, size_t size)
{
  struct hdr *hdr = NULL;
  size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
	memset ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = realloc (hdr, sizeof *hdr + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    memset ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return hdr + 1;
}

static void
mabort (enum mcheck_status status)
{
  const char *msg;
  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
  abort ();
#endif
}

static int mcheck_used = 0;

int
mcheck (void (*func) (enum mcheck_status))
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = __free_hook;
      __free_hook = freehook;
      old_malloc_hook = __malloc_hook;
      __malloc_hook = mallochook;
      old_realloc_hook = __realloc_hook;
      __realloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}

enum mcheck_status
mprobe (void *ptr)
{
  return mcheck_used ? checkhdr (ptr) : MCHECK_DISABLED;
}

#endif /* GC_MCHECK */