/* Declarations for `malloc' and friends.
   Copyright (C) 1990-1993, 1995-1996, 1999, 2002-2007, 2013-2016 Free
   Software Foundation, Inc.
		  Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#include <config.h>

#if defined HAVE_PTHREAD && !defined HYBRID_MALLOC
#define USE_PTHREAD
#endif

#include <stddef.h>
#include <string.h>
#include <limits.h>
#include <stdint.h>
#include <unistd.h>

#ifdef USE_PTHREAD
#include <pthread.h>
#endif

#ifdef emacs
# include "lisp.h"
#endif

#ifdef HAVE_MALLOC_H
# if GNUC_PREREQ (4, 2, 0)
#  pragma GCC diagnostic ignored "-Wdeprecated-declarations"
# endif
# include <malloc.h>
#endif
#ifndef __MALLOC_HOOK_VOLATILE
# define __MALLOC_HOOK_VOLATILE volatile
#endif
#ifndef HAVE_MALLOC_H
extern void (*__MALLOC_HOOK_VOLATILE __after_morecore_hook) (void);
extern void (*__MALLOC_HOOK_VOLATILE __malloc_initialize_hook) (void);
extern void *(*__morecore) (ptrdiff_t);
#endif
/* If HYBRID_MALLOC is defined, then temacs will use malloc,
   realloc... as defined in this file (and renamed gmalloc,
   grealloc... via the macros that follow).  The dumped emacs,
   however, will use the system malloc, realloc....  In other source
   files, malloc, realloc... are renamed hybrid_malloc,
   hybrid_realloc... via macros in conf_post.h.  hybrid_malloc and
   friends are wrapper functions defined later in this file.  */
#undef malloc
#undef realloc
#undef calloc
#undef aligned_alloc
#undef free
#define malloc gmalloc
#define realloc grealloc
#define calloc gcalloc
#define aligned_alloc galigned_alloc
#define free gfree
#define malloc_info gmalloc_info

#ifdef HYBRID_MALLOC
# include "sheap.h"
# define DUMPED bss_sbrk_did_unexec
static bool
ALLOCATED_BEFORE_DUMPING (char *p)
{
  return bss_sbrk_buffer <= p && p < bss_sbrk_buffer + STATIC_HEAP_SIZE;
}
#endif
#ifdef __cplusplus
extern "C"
{
#endif

#ifdef HYBRID_MALLOC
#define extern static
#endif

/* Allocate SIZE bytes of memory.  */
extern void *malloc (size_t size) ATTRIBUTE_MALLOC_SIZE ((1));
/* Re-allocate the previously allocated block
   in ptr, making the new block SIZE bytes long.  */
extern void *realloc (void *ptr, size_t size) ATTRIBUTE_ALLOC_SIZE ((2));
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern void *calloc (size_t nmemb, size_t size) ATTRIBUTE_MALLOC_SIZE ((1,2));
/* Free a block.  */
extern void free (void *ptr);

/* Allocate SIZE bytes aligned to ALIGNMENT bytes.  */
extern void *aligned_alloc (size_t, size_t);
#ifdef MSDOS
extern void *memalign (size_t, size_t);
extern int posix_memalign (void **, size_t, size_t);
#endif
/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define BLOCKLOG (INT_WIDTH > 16 ? 12 : 9)
#define BLOCKSIZE (1 << BLOCKLOG)
#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
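/* Worked example, for illustration: on hosts where INT_WIDTH > 16,
   BLOCKLOG is 12, so BLOCKSIZE is 4096 bytes and BLOCKIFY rounds a
   request up to whole blocks, e.g. BLOCKIFY (1) == 1,
   BLOCKIFY (4096) == 1, BLOCKIFY (4097) == 2.  */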
/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP (INT_WIDTH > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS 8
/* Data structure giving per-block information.  */
typedef union
{
  /* Heap information for a busy block.  */
  struct
  {
    /* Zero for a large (multiblock) object, or positive giving the
       logarithm to the base two of the fragment size.  */
    int type;
    union
    {
      struct
      {
	size_t nfree;	/* Free frags in a fragmented block.  */
	size_t first;	/* First free fragment of the block.  */
      } frag;
      /* For a large object, in its first block, this has the number
	 of blocks in the object.  In the other blocks, this has a
	 negative number which says how far back the first block is.  */
      ptrdiff_t size;
    } info;
  } busy;
  /* Heap information for a free block
     (that may be the first of a free cluster).  */
  struct
  {
    size_t size;	/* Size (in blocks) of a free cluster.  */
    size_t next;	/* Index of next free cluster.  */
    size_t prev;	/* Index of previous free cluster.  */
  } free;
} malloc_info;
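/* For illustration: a block split into 128-byte fragments has
   busy.type == 7 (the base-two logarithm of 128), and busy.info.frag
   tracks how many of its BLOCKSIZE/128 fragments are free and where
   the first free one sits.  A large busy object instead uses
   busy.info.size, and a free cluster uses the `free' member to sit
   in the doubly linked free list.  */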
/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A) (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B) ((void *) (((B) - 1) * BLOCKSIZE + _heapbase))
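/* Worked example, for illustration: block numbers are 1-based, so
   BLOCK (_heapbase) == 1 and ADDRESS (1) == _heapbase; with
   BLOCKSIZE == 4096, BLOCK (_heapbase + 4096) == 2, and
   ADDRESS (BLOCK (P)) rounds P down to the start of its block.  */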
/* Current search index for the heap table.  */
extern size_t _heapindex;

/* Limit of valid info table indices.  */
extern size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
{
  struct list *next;
  struct list *prev;
};

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];

/* List of blocks allocated with aligned_alloc and friends.  */
struct alignlist
{
  struct alignlist *next;
  void *aligned;	/* The address that aligned_alloc returned.  */
  void *exact;		/* The address that malloc returned.  */
};
extern struct alignlist *_aligned_blocks;

/* Instrumentation.  */
extern size_t _chunks_used;
extern size_t _bytes_used;
extern size_t _chunks_free;
extern size_t _bytes_free;
/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern void *_malloc_internal (size_t);
extern void *_realloc_internal (void *, size_t);
extern void _free_internal (void *);
extern void *_malloc_internal_nolock (size_t);
extern void *_realloc_internal_nolock (void *, size_t);
extern void _free_internal_nolock (void *);

#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;
#define LOCK()					\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_lock (&_malloc_mutex);	\
  } while (0)
#define UNLOCK()				\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_unlock (&_malloc_mutex);	\
  } while (0)
#define LOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_aligned_blocks_mutex);	\
  } while (0)
#define UNLOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_aligned_blocks_mutex);	\
  } while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif
/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize (void);

#ifdef GC_MCHECK

/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
{
  MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
  MCHECK_OK,		/* Block is fine.  */
  MCHECK_FREE,		/* Block freed twice.  */
  MCHECK_HEAD,		/* Memory before the block was clobbered.  */
  MCHECK_TAIL		/* Memory after the block was clobbered.  */
};

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck (void (*abortfunc) (enum mcheck_status));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe (void *ptr);

/* Activate a standard collection of tracing hooks.  */
extern void mtrace (void);
extern void muntrace (void);

/* Statistics available to the user.  */
struct mstats
{
  size_t bytes_total;	/* Total size of the heap.  */
  size_t chunks_used;	/* Chunks allocated by the user.  */
  size_t bytes_used;	/* Byte total of user-allocated chunks.  */
  size_t chunks_free;	/* Chunks in the free list.  */
  size_t bytes_free;	/* Byte total of chunks in the free list.  */
};

/* Pick up the current statistics.  */
extern struct mstats mstats (void);

#endif

#undef extern

#ifdef __cplusplus
}
#endif
/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
		  Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#include <errno.h>

/* Debugging hook for 'malloc'.  */
static void *(*__MALLOC_HOOK_VOLATILE gmalloc_hook) (size_t);

/* Replacements for traditional glibc malloc hooks, for platforms that
   do not already have these hooks.  Platforms with these hooks all
   used relaxed ref/def, so it is OK to define them here too.  */
void (*__MALLOC_HOOK_VOLATILE __malloc_initialize_hook) (void);
void (*__MALLOC_HOOK_VOLATILE __after_morecore_hook) (void);
void *(*__morecore) (ptrdiff_t);
#ifndef HYBRID_MALLOC

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Search index in the info table.  */
size_t _heapindex;

/* Limit of valid info table indices.  */
size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
size_t _chunks_used;
size_t _bytes_used;
size_t _chunks_free;
size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

#else

static struct list _fraghead[BLOCKLOG];

#endif /* HYBRID_MALLOC */
/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
#if defined DOUG_LEA_MALLOC || defined HYBRID_MALLOC || defined SYSTEM_MALLOC
static
#endif
size_t __malloc_extra_blocks;

/* Number of info entries.  */
static size_t heapsize;
#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static size_t last_state_size;
static malloc_info *last_heapinfo;

void
protect_malloc_state (int protect_p)
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
		    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
	abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif
/* Aligned allocation.  */
static void *
align (size_t size)
{
  void *result;
  ptrdiff_t adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows the
     ptrdiff_t type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if (PTRDIFF_MAX < size)
    result = 0;
  else
    result = (*__morecore) (size);
  adj = (uintptr_t) result % BLOCKSIZE;
  if (adj != 0)
    {
      adj = BLOCKSIZE - adj;
      (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}
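/* Worked example of the adjustment above, for illustration: if
   BLOCKSIZE is 4096 and __morecore returns 0x10010, then
   0x10010 % 4096 == 16, so adj becomes 4096 - 16 == 4080; the extra
   sbrk of 4080 bytes pushes the usable region to 0x11000, which is
   block-aligned.  */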
/* Get SIZE bytes, if we can get them starting at END.
   Return the address of the space we got.
   If we cannot get space at END, fail and return 0.  */
static void *
get_contiguous_space (ptrdiff_t size, void *position)
{
  void *before;
  void *after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}
/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void
register_heapinfo (void)
{
  size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}
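/* Sketch of the back-pointer loop above, for illustration: a 4-block
   table at block B ends up described as
     _heapinfo[B].busy.info.size     ==  4
     _heapinfo[B + 1].busy.info.size == -1
     _heapinfo[B + 2].busy.info.size == -2
     _heapinfo[B + 3].busy.info.size == -3
   so the first block of the object can always be recovered by adding
   the (negative) size stored in any later block; the same convention
   is used for every large object in this allocator.  */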
#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;

static void
malloc_atfork_handler_prepare (void)
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread (void)
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
		  malloc_atfork_handler_parent,
		  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif /* USE_PTHREAD */
static void
malloc_initialize_1 (void)
{
#ifdef GC_MCHECK
  mcheck (NULL);
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}
/* Set everything up and remember that we have.
   main will call malloc which calls this function.  That is before any threads
   or signal handlers have been set up, so we don't need thread protection.  */
int
__malloc_initialize (void)
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}
static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static void *
morecore_nolock (size_t size)
{
  void *result;
  malloc_info *newinfo, *oldinfo;
  size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
	 added blocks in the table itself, as we hope to place them in
	 existing free space, which is already covered by part of the
	 existing table.  */
      newsize = heapsize;
      do
	newsize *= 2;
      while ((size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
	 from realloc in the case of growing a large block, because the
	 block being grown is momentarily marked as free.  In this case
	 _heaplimit is zero so we know not to reuse space for internal
	 allocation.  */
      if (_heaplimit != 0)
	{
	  /* First try to allocate the new info table in core we already
	     have, in the usual way using realloc.  If realloc cannot
	     extend it in place or relocate it to existing sufficient core,
	     we will get called again, and the code above will notice the
	     `morecore_recursing' flag and return null.  */
	  int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
	  morecore_recursing = 1;
	  newinfo = _realloc_internal_nolock (_heapinfo,
					      newsize * sizeof (malloc_info));
	  morecore_recursing = 0;
	  if (newinfo == NULL)
	    errno = save;
	  else
	    {
	      /* We found some space in core, and realloc has put the old
		 table's blocks on the free list.  Now zero the new part
		 of the table and install the new table location.  */
	      memset (&newinfo[heapsize], 0,
		      (newsize - heapsize) * sizeof (malloc_info));
	      _heapinfo = newinfo;
	      heapsize = newsize;
	      goto got_heap;
	    }
	}

      /* Allocate new space for the malloc info table.  */
      while (1)
	{
	  newinfo = align (newsize * sizeof (malloc_info));

	  /* Did it fail?  */
	  if (newinfo == NULL)
	    {
	      (*__morecore) (-size);
	      return NULL;
	    }

	  /* Is it big enough to record status for its own space?
	     If so, we win.  */
	  if ((size_t) BLOCK ((char *) newinfo
			      + newsize * sizeof (malloc_info))
	      < newsize)
	    break;

	  /* Must try again.  First give back most of what we just got.  */
	  (*__morecore) (- newsize * sizeof (malloc_info));
	  newsize *= 2;
	}

      /* Copy the old table to the beginning of the new,
	 and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
	      (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
	 it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}
/* Allocate memory from the heap.  */
void *
_malloc_internal_nolock (size_t size)
{
  void *result;
  size_t block, blocks, lastblocks, start;
  register size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size.  */
      register size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;
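      /* Worked example, for illustration: a request of 100 bytes
	 (already >= sizeof (struct list)) gives size == 99 after the
	 decrement; halving 99 down to zero bumps log six more times,
	 so log ends up 7 and the fragment size is 1 << 7 == 128, the
	 smallest power of two that holds 100 bytes.  */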
      /* Look in the fragment lists for a
	 free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters.  */
	  result = next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    _heapinfo[block].busy.info.frag.first =
	      (uintptr_t) next->next % BLOCKSIZE >> log;

	  /* Update the statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
	  result = _malloc_internal_nolock (BLOCKSIZE);
	  PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
	  result = _malloc_internal_nolock (BLOCKSIZE);
#else
	  result = malloc (BLOCKSIZE);
#endif
	  if (result == NULL)
	    {
	      PROTECT_MALLOC_STATE (1);
	      goto out;
	    }

	  /* Link all fragments but the first into the free list.  */
	  next = (struct list *) ((char *) result + (1 << log));
	  next->next = NULL;
	  next->prev = &_fraghead[log];
	  _fraghead[log].next = next;

	  for (i = 2; i < (size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      next->next->prev = next;
	    }

	  /* Initialize the nfree and first counters for this block.  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Get a little extra.  */
	      size_t wantblocks = blocks + __malloc_extra_blocks;
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      /* Check to see if the new core will be contiguous with the
		 final free block; if so we don't need to get as much.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  /* We can't do this if we will have to make the heap info
		     table bigger to accommodate the new space.  */
		  block + wantblocks <= heapsize &&
		  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
					ADDRESS (block + lastblocks)))
		{
		  /* We got it contiguously.  Which block we are extending
		     (the `final free block' referred to above) might have
		     changed, if it got combined with a freed info table.  */
		  block = _heapinfo[0].free.prev;
		  _heapinfo[block].free.size += (wantblocks - lastblocks);
		  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
		  _heaplimit += wantblocks - lastblocks;
		  continue;
		}
	      result = morecore_nolock (wantblocks * BLOCKSIZE);
	      if (result == NULL)
		goto out;
	      block = BLOCK (result);
	      /* Put the new block at the end of the free list.  */
	      _heapinfo[block].free.size = wantblocks;
	      _heapinfo[block].free.prev = _heapinfo[0].free.prev;
	      _heapinfo[block].free.next = 0;
	      _heapinfo[0].free.prev = block;
	      _heapinfo[_heapinfo[block].free.prev].free.next = block;
	      ++_chunks_free;
	      /* Now loop to use some of that block for this allocation.  */
	    }
	}

      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list.  */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list.  */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
	 first with a negative number so you can find the first block by
	 adding that adjustment.  */
      while (--blocks > 0)
	_heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}
void *
_malloc_internal (size_t size)
{
  void *result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}

void *
malloc (size_t size)
{
  void *(*hook) (size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of gmalloc_hook to an automatic variable in case
     gmalloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not the right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to gmalloc_hook) for now.  */
  hook = gmalloc_hook;
  return (hook != NULL ? *hook : _malloc_internal) (size);
}
#if !(defined (_LIBC) || defined (HYBRID_MALLOC))

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

extern void *_malloc (size_t);
extern void _free (void *);
extern void *_realloc (void *, size_t);

void *
_malloc (size_t size)
{
  return malloc (size);
}

void
_free (void *ptr)
{
  free (ptr);
}

void *
_realloc (void *ptr, size_t size)
{
  return realloc (ptr, size);
}

#endif
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
		  Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

/* Debugging hook for free.  */
static void (*__MALLOC_HOOK_VOLATILE gfree_hook) (void *);

#ifndef HYBRID_MALLOC

/* List of blocks allocated by aligned_alloc.  */
struct alignlist *_aligned_blocks = NULL;
#endif
/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (void *ptr)
{
  int type;
  size_t block, blocks;
  register size_t i;
  struct list *prev, *next;
  void *curbrk;
  const size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

  PROTECT_MALLOC_STATE (0);

  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}

      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

	  size_t info_block = BLOCK (_heapinfo);
	  size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  size_t prev_block = _heapinfo[block].free.prev;
	  size_t prev_blocks = _heapinfo[prev_block].free.size;
	  size_t next_block = _heapinfo[block].free.next;
	  size_t next_blocks = _heapinfo[next_block].free.size;

	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold)))
	      )
	    {
	      malloc_info *newinfo;
	      size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal_nolock (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      newinfo = _malloc_internal_nolock (info_blocks * BLOCKSIZE);
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }

	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      (*__morecore) (-bytes);
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
	  _free_internal_nolock (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block.  */
	  next = ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block.  */
	  prev = ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first =
	    (uintptr_t) ptr % BLOCKSIZE >> type;
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
}
/* Return memory to the heap.
   Like 'free' but don't call a hook if there is one.  */
void
_free_internal (void *ptr)
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}

/* Return memory to the heap.  */

void
free (void *ptr)
{
  void (*hook) (void *) = gfree_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}

#ifndef HYBRID_MALLOC
/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (void *ptr)
{
  free (ptr);
}
#endif
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
		     Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef min
#define min(a, b) ((a) < (b) ? (a) : (b))
#endif

/* Debugging hook for realloc.  */
static void *(*grealloc_hook) (void *, size_t);
/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.  */
void *
_realloc_internal_nolock (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
	{
	  result = _malloc_internal_nolock (size);
	  if (result != NULL)
	    {
	      memcpy (result, ptr, size);
	      _free_internal_nolock (ptr);
	      goto out;
	    }
	}

      /* The new size is a large allocation as well;
	 see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
	{
	  /* The new size is smaller; return
	     excess memory to the free list.  */
	  _heapinfo[block + blocks].busy.type = 0;
	  _heapinfo[block + blocks].busy.info.size
	    = _heapinfo[block].busy.info.size - blocks;
	  _heapinfo[block].busy.info.size = blocks;
	  /* We have just created a new chunk by splitting a chunk in two.
	     Now we will free this chunk; increment the statistics counter
	     so it doesn't become wrong when _free_internal decrements it.  */
	  ++_chunks_used;
	  _free_internal_nolock (ADDRESS (block + blocks));
	  result = ptr;
	}
      else if (blocks == _heapinfo[block].busy.info.size)
	/* No size change necessary.  */
	result = ptr;
      else
	{
	  /* Won't fit, so allocate a new region that will.
	     Free the old region first in case there is sufficient
	     adjacent free space to grow without moving.  */
	  blocks = _heapinfo[block].busy.info.size;
	  /* Prevent free from actually returning memory to the system.  */
	  oldlimit = _heaplimit;
	  _heaplimit = 0;
	  _free_internal_nolock (ptr);
	  result = _malloc_internal_nolock (size);
	  PROTECT_MALLOC_STATE (0);
	  if (_heaplimit == 0)
	    _heaplimit = oldlimit;
	  if (result == NULL)
	    {
	      /* Now we're really in trouble.  We have to unfree
		 the thing we just freed.  Unfortunately it might
		 have been coalesced with its neighbors.  */
	      if (_heapindex == block)
		(void) _malloc_internal_nolock (blocks * BLOCKSIZE);
	      else
		{
		  void *previous
		    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
		  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
		  _free_internal_nolock (previous);
		}
	      goto out;
	    }
	  if (ptr != result)
	    memmove (result, ptr, blocks * BLOCKSIZE);
	}
      break;

    default:
      /* Old size is a fragment; type is logarithm
	 to base two of the fragment size.  */
      if (size > (size_t) (1 << (type - 1)) &&
	  size <= (size_t) (1 << type))
	/* The new size is the same kind of fragment.  */
	result = ptr;
      else
	{
	  /* The new size is different; allocate a new space,
	     and copy the lesser of the new size and the old.  */
	  result = _malloc_internal_nolock (size);
	  if (result == NULL)
	    goto out;
	  memcpy (result, ptr, min (size, (size_t) 1 << type));
	  _free_internal_nolock (ptr);
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

void *
_realloc_internal (void *ptr, size_t size)
{
  void *result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}

void *
realloc (void *ptr, size_t size)
{
  void *(*hook) (void *, size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  hook = grealloc_hook;
  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
void *
calloc (size_t nmemb, size_t size)
{
  void *result;
  size_t bytes = nmemb * size;

  if (size != 0 && bytes / size != nmemb)
    {
      errno = ENOMEM;
      return NULL;
    }

  result = malloc (bytes);
  if (result)
    return memset (result, 0, bytes);
  return result;
}
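/* The division above is an overflow check.  Worked example, for
   illustration with 32-bit size_t: nmemb == 0x10000 and size ==
   0x10001 multiply to 0x100010000, which wraps to 0x10000; then
   bytes / size == 0 != nmemb, so the bogus request is rejected with
   ENOMEM instead of silently under-allocating.  */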
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

The GNU C Library is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with the GNU C Library.  If not, see <http://www.gnu.org/licenses/>.  */

/* uClibc defines __GNU_LIBRARY__, but it is not completely
   compatible.  */
#if !defined (__GNU_LIBRARY__) || defined (__UCLIBC__)
#define __sbrk	sbrk
#else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

extern void *__sbrk (ptrdiff_t increment);
#endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
static void *
gdefault_morecore (ptrdiff_t increment)
{
  void *result;
#ifdef HYBRID_MALLOC
  if (!DUMPED)
    {
      return bss_sbrk (increment);
    }
#endif
  result = (void *) __sbrk (increment);
  if (result == (void *) -1)
    return NULL;
  return result;
}

void *(*__morecore) (ptrdiff_t) = gdefault_morecore;
/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.  */
void *
aligned_alloc (size_t alignment, size_t size)
{
  void *result;
  size_t adj, lastadj;

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  if (- size < alignment)
    {
      errno = ENOMEM;
      return NULL;
    }
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = alignment - (uintptr_t) result % alignment;
  if (adj == alignment)
    adj = 0;
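  /* For illustration: the `- size' test above rejects requests where
     size + alignment - 1 would wrap around, since the unsigned
     negation `- size' is exactly how much room is left below
     SIZE_MAX.  For alignment == 64 and a result ending in ...0x10,
     adj == 64 - 16 == 48, the padding needed to reach the next
     64-byte boundary.  */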
  if (adj != alignment - 1)
    {
      do
	{
	  /* Reallocate the block with only as much excess as it
	     needs.  */
	  free (result);
	  result = malloc (size + adj);
	  if (result == NULL)	/* Impossible unless interrupted.  */
	    return NULL;

	  lastadj = adj;
	  adj = alignment - (uintptr_t) result % alignment;
	  if (adj == alignment)
	    adj = 0;
	  /* It's conceivable we might have been so unlucky as to get
	     a different block with weaker alignment.  If so, this
	     block is too short to contain SIZE after alignment
	     correction.  So we must try again and get another block,
	     slightly larger.  */
	} while (adj > lastadj);
    }

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  l = malloc (sizeof *l);
	  if (l != NULL)
	    {
	      l->next = _aligned_blocks;
	      _aligned_blocks = l;
	    }
	}
      if (l != NULL)
	{
	  l->exact = result;
	  result = l->aligned = (char *) result + adj;
	}
      UNLOCK_ALIGNED_BLOCKS ();
      if (l == NULL)
	{
	  free (result);
	  result = NULL;
	}
    }

  return result;
}
/* Note that memalign and posix_memalign are not used in Emacs.  */
#ifndef HYBRID_MALLOC
/* An obsolete alias for aligned_alloc, for any old libraries that use
   this alias.  */

void *
memalign (size_t alignment, size_t size)
{
  return aligned_alloc (alignment, size);
}

/* If HYBRID_MALLOC is defined, we may want to use the system
   posix_memalign below.  */
int
posix_memalign (void **memptr, size_t alignment, size_t size)
{
  void *mem;

  if (alignment == 0
      || alignment % sizeof (void *) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;
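  /* For illustration: the bitwise test above relies on powers of two
     having a single set bit, e.g. 64 & 63 == 0, while 48 & 47 == 32,
     so alignment == 48 is rejected even though it is a multiple of
     sizeof (void *).  */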
  mem = aligned_alloc (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;

  return 0;
}
#endif
/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef HYBRID_MALLOC

# ifndef HAVE_MALLOC_H
/* Allocate SIZE bytes on a page boundary.  */
extern void *valloc (size_t);
# endif

# if defined _SC_PAGESIZE || !defined HAVE_GETPAGESIZE
#  include "getpagesize.h"
# elif !defined getpagesize
extern int getpagesize (void);
# endif

static size_t pagesize;

void *
valloc (size_t size)
{
  if (pagesize == 0)
    pagesize = getpagesize ();

  return aligned_alloc (pagesize, size);
}
#endif /* HYBRID_MALLOC */
#undef malloc
#undef realloc
#undef calloc
#undef aligned_alloc
#undef free

#ifdef HYBRID_MALLOC
/* Declare system malloc and friends.  */
extern void *malloc (size_t size);
extern void *realloc (void *ptr, size_t size);
extern void *calloc (size_t nmemb, size_t size);
extern void free (void *ptr);
#ifdef HAVE_ALIGNED_ALLOC
extern void *aligned_alloc (size_t alignment, size_t size);
#elif defined HAVE_POSIX_MEMALIGN
extern int posix_memalign (void **memptr, size_t alignment, size_t size);
#endif

/* See the comments near the beginning of this file for explanations
   of the following functions.  */

void *
hybrid_malloc (size_t size)
{
  if (DUMPED)
    return malloc (size);
  return gmalloc (size);
}

void *
hybrid_calloc (size_t nmemb, size_t size)
{
  if (DUMPED)
    return calloc (nmemb, size);
  return gcalloc (nmemb, size);
}

void
hybrid_free (void *ptr)
{
  if (!DUMPED)
    gfree (ptr);
  else if (!ALLOCATED_BEFORE_DUMPING (ptr))
    free (ptr);
  /* Otherwise the dumped emacs is trying to free something allocated
     before dumping; do nothing.  */
  return;
}

#if defined HAVE_ALIGNED_ALLOC || defined HAVE_POSIX_MEMALIGN
void *
hybrid_aligned_alloc (size_t alignment, size_t size)
{
  if (!DUMPED)
    return galigned_alloc (alignment, size);
  /* The following is copied from alloc.c  */
#ifdef HAVE_ALIGNED_ALLOC
  return aligned_alloc (alignment, size);
#else /* HAVE_POSIX_MEMALIGN */
  void *p;
  return posix_memalign (&p, alignment, size) == 0 ? p : 0;
#endif
}
#endif
void *
hybrid_realloc (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, oldsize;

  if (!DUMPED)
    return grealloc (ptr, size);
  if (!ALLOCATED_BEFORE_DUMPING (ptr))
    return realloc (ptr, size);

  /* The dumped emacs is trying to realloc storage allocated before
     dumping.  We just malloc new space and copy the data.  */
  if (size == 0 || ptr == NULL)
    return malloc (size);
  block = ((char *) ptr - _heapbase) / BLOCKSIZE + 1;
  type = _heapinfo[block].busy.type;
  oldsize =
    type == 0 ? _heapinfo[block].busy.info.size * BLOCKSIZE
    : (size_t) 1 << type;
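  /* For illustration, recovering the old size: a large object
     (type == 0) occupied busy.info.size whole blocks, so its usable
     size is that count times BLOCKSIZE; a fragment of type K
     occupied exactly 1 << K bytes, e.g. type 7 means a 128-byte
     fragment.  */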
  result = malloc (size);
  if (result)
    return memcpy (result, ptr, min (oldsize, size));
  return result;
}

#else /* ! HYBRID_MALLOC */

void *
malloc (size_t size)
{
  return gmalloc (size);
}

void *
calloc (size_t nmemb, size_t size)
{
  return gcalloc (nmemb, size);
}

void
free (void *ptr)
{
  gfree (ptr);
}

void *
aligned_alloc (size_t alignment, size_t size)
{
  return galigned_alloc (alignment, size);
}

void *
realloc (void *ptr, size_t size)
{
  return grealloc (ptr, size);
}

#endif /* HYBRID_MALLOC */
#ifdef GC_MCHECK

/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#include <stdio.h>

/* Old hook values.  */
static void (*old_free_hook) (void *ptr);
static void *(*old_malloc_hook) (size_t size);
static void *(*old_realloc_hook) (void *ptr, size_t size);

/* Function to call when something awful happens.  */
static void (*abortfunc) (enum mcheck_status);

/* Arbitrary magical numbers.  */
#define MAGICWORD	(SIZE_MAX / 11 ^ SIZE_MAX / 13 << 3)
#define MAGICFREE	(SIZE_MAX / 17 ^ SIZE_MAX / 19 << 4)
#define MAGICBYTE	((char) 0xd7)
#define MALLOCFLOOD	((char) 0x93)
#define FREEFLOOD	((char) 0x95)
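/* Layout used by the checking hooks below, for illustration: each
   user block is allocated as

     [struct hdr][SIZE user bytes][MAGICBYTE]

   hdr.magic holds MAGICWORD while live and MAGICFREE once freed, the
   trailing MAGICBYTE catches overruns past the requested size, and
   MALLOCFLOOD/FREEFLOOD fill the user area so reads of fresh or
   freed memory are easy to spot in a debugger.  */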
struct hdr
{
  size_t size;	/* Exact size requested by user.  */
  size_t magic;	/* Magic number to check header integrity.  */
};

static enum mcheck_status
checkhdr (const struct hdr *hdr)
{
  enum mcheck_status status;
  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
	status = MCHECK_TAIL;
      else
	status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}
static void
freehook (void *ptr)
{
  struct hdr *hdr;

  if (ptr)
    {
      struct alignlist *l;

      /* If the block was allocated by aligned_alloc, its real pointer
	 to free is recorded in _aligned_blocks; find that.  */
      PROTECT_MALLOC_STATE (0);
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == ptr)
	  {
	    l->aligned = NULL;	/* Mark the slot in the list as free.  */
	    ptr = l->exact;
	    break;
	  }
      UNLOCK_ALIGNED_BLOCKS ();
      PROTECT_MALLOC_STATE (1);

      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      memset (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  gfree_hook = old_free_hook;
  free (hdr);
  gfree_hook = freehook;
}

static void *
mallochook (size_t size)
{
  struct hdr *hdr;

  gmalloc_hook = old_malloc_hook;
  hdr = malloc (sizeof *hdr + size + 1);
  gmalloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  return memset (hdr + 1, MALLOCFLOOD, size);
}
static void *
reallochook (void *ptr, size_t size)
{
  struct hdr *hdr = NULL;
  size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
	memset ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  gfree_hook = old_free_hook;
  gmalloc_hook = old_malloc_hook;
  grealloc_hook = old_realloc_hook;
  hdr = realloc (hdr, sizeof *hdr + size + 1);
  gfree_hook = freehook;
  gmalloc_hook = mallochook;
  grealloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    memset ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return hdr + 1;
}
static void
mabort (enum mcheck_status status)
{
  const char *msg;
  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
# ifdef emacs
  emacs_abort ();
# else
  abort ();
# endif
#endif
}
static int mcheck_used = 0;

int
mcheck (void (*func) (enum mcheck_status))
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = gfree_hook;
      gfree_hook = freehook;
      old_malloc_hook = gmalloc_hook;
      gmalloc_hook = mallochook;
      old_realloc_hook = grealloc_hook;
      grealloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}

enum mcheck_status
mprobe (void *ptr)
{
  return mcheck_used ? checkhdr (ptr) : MCHECK_DISABLED;
}

#endif /* GC_MCHECK */