/* Declarations for `malloc' and friends.
   Copyright (C) 1990-1993, 1995-1996, 1999, 2002-2007, 2013-2018 Free
   Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <https://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#include <config.h>

#if defined HAVE_PTHREAD && !defined HYBRID_MALLOC
#define USE_PTHREAD
#endif

#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <stdint.h>
#include <unistd.h>

#ifdef USE_PTHREAD
#include <pthread.h>
#endif

#ifdef emacs
# include "lisp.h"
#endif

#include "ptr-bounds.h"

#ifdef HAVE_MALLOC_H
# if GNUC_PREREQ (4, 2, 0)
#  pragma GCC diagnostic ignored "-Wdeprecated-declarations"
# endif
# include <malloc.h>
#endif
#ifndef __MALLOC_HOOK_VOLATILE
# define __MALLOC_HOOK_VOLATILE volatile
#endif
#ifndef HAVE_MALLOC_H
extern void (*__MALLOC_HOOK_VOLATILE __after_morecore_hook) (void);
extern void (*__MALLOC_HOOK_VOLATILE __malloc_initialize_hook) (void);
extern void *(*__morecore) (ptrdiff_t);
#endif
/* If HYBRID_MALLOC is defined, then temacs will use malloc,
   realloc... as defined in this file (and renamed gmalloc,
   grealloc... via the macros that follow).  The dumped emacs,
   however, will use the system malloc, realloc....  In other source
   files, malloc, realloc... are renamed hybrid_malloc,
   hybrid_realloc... via macros in conf_post.h.  hybrid_malloc and
   friends are wrapper functions defined later in this file.  */
#undef malloc
#undef realloc
#undef calloc
#undef aligned_alloc
#undef free
#define malloc gmalloc
#define realloc grealloc
#define calloc gcalloc
#define aligned_alloc galigned_alloc
#define free gfree
#define malloc_info gmalloc_info
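/* Illustrative note (not in the original source): after the defines
   above, the definitions of `malloc', `realloc', etc. in the rest of
   this file compile under the names `gmalloc', `grealloc', and so
   on.  That renaming is what lets a HYBRID_MALLOC build link this
   allocator alongside the system one without symbol clashes.  */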
#ifdef HYBRID_MALLOC
# include "sheap.h"
# define DUMPED bss_sbrk_did_unexec
#endif

#ifdef __cplusplus
extern "C"
{
#endif

#ifdef HYBRID_MALLOC
#define extern static
#endif
/* Allocate SIZE bytes of memory.  */
extern void *malloc (size_t size) ATTRIBUTE_MALLOC_SIZE ((1));
/* Re-allocate the previously allocated block
   in ptr, making the new block SIZE bytes long.  */
extern void *realloc (void *ptr, size_t size) ATTRIBUTE_ALLOC_SIZE ((2));
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern void *calloc (size_t nmemb, size_t size) ATTRIBUTE_MALLOC_SIZE ((1,2));
/* Free a block.  */
extern void free (void *ptr);

/* Allocate SIZE bytes aligned to ALIGNMENT bytes.  */
extern void *aligned_alloc (size_t, size_t);
#ifdef MSDOS
extern void *memalign (size_t, size_t);
extern int posix_memalign (void **, size_t, size_t);
#endif
/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define BLOCKLOG (INT_WIDTH > 16 ? 12 : 9)
#define BLOCKSIZE (1 << BLOCKLOG)
#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
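/* A minimal sketch (not part of the original file; `example_blockify'
   is a hypothetical name) of how these macros behave, assuming
   INT_WIDTH > 16 so that BLOCKSIZE is 4096.  BLOCKIFY rounds a byte
   count up to a whole number of blocks.  */
#if 0
static void
example_blockify (void)
{
  size_t a = BLOCKIFY (1);      /* == 1: one byte still costs a block.  */
  size_t b = BLOCKIFY (4096);   /* == 1: an exact fit.  */
  size_t c = BLOCKIFY (4097);   /* == 2: one extra byte spills over.  */
}
#endif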
/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP (INT_WIDTH > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS 8
/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
        /* Zero for a block that is not one of ours (typically,
           allocated by system malloc), positive for the log base 2 of
           the fragment size of a fragmented block, -1 for the first
           block of a multiblock object, and unspecified for later
           blocks of that object.  Type-0 blocks can be present
           because the system malloc can be invoked by library
           functions in an undumped Emacs.  */
        int type;
        union
          {
            struct
              {
                size_t nfree;   /* Free frags in a fragmented block.  */
                size_t first;   /* First free fragment of the block.  */
              } frag;
            /* For a large object, in its first block, this has the number
               of blocks in the object.  */
            ptrdiff_t size;
          } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
        size_t size;    /* Size (in blocks) of a free cluster.  */
        size_t next;    /* Index of next free cluster.  */
        size_t prev;    /* Index of previous free cluster.  */
      } free;
  } malloc_info;
/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A) ((size_t) ((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B) ((void *) (((B) - 1) * BLOCKSIZE + _heapbase))
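/* Illustrative sketch (not in the original source; the function name
   is hypothetical): BLOCK and ADDRESS are inverses for block-aligned
   addresses.  Block numbers start at 1, which leaves entry 0 of
   _heapinfo free to serve as the anchor of the free list.  */
#if 0
static void
example_block_address (void)
{
  size_t b = BLOCK (_heapbase);   /* The first heap block is number 1.  */
  void *a = ADDRESS (b);          /* Maps back to _heapbase.  */
  /* For any valid block number B, BLOCK (ADDRESS (B)) == B.  */
}
#endif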
/* Current search index for the heap table.  */
extern size_t _heapindex;

/* Limit of valid info table indices.  */
extern size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];

/* List of blocks allocated with aligned_alloc and friends.  */
struct alignlist
  {
    struct alignlist *next;
    void *aligned;              /* The address that aligned_alloc returned.  */
    void *exact;                /* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;
/* Instrumentation.  */
extern size_t _chunks_used;
extern size_t _bytes_used;
extern size_t _chunks_free;
extern size_t _bytes_free;

/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks
   and don't bound the resulting pointers.  */
extern void *_malloc_internal (size_t);
extern void *_realloc_internal (void *, size_t);
extern void _free_internal (void *);
extern void *_malloc_internal_nolock (size_t);
extern void *_realloc_internal_nolock (void *, size_t);
extern void _free_internal_nolock (void *);
#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;
#define LOCK()						\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_malloc_mutex);		\
  } while (0)
#define UNLOCK()					\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_malloc_mutex);		\
  } while (0)
#define LOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_aligned_blocks_mutex);	\
  } while (0)
#define UNLOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_aligned_blocks_mutex);	\
  } while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif
/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize (void);
#ifdef GC_MCHECK

/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,       /* Consistency checking is not turned on.  */
    MCHECK_OK,                  /* Block is fine.  */
    MCHECK_FREE,                /* Block freed twice.  */
    MCHECK_HEAD,                /* Memory before the block was clobbered.  */
    MCHECK_TAIL                 /* Memory after the block was clobbered.  */
  };

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck (void (*abortfunc) (enum mcheck_status));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe (void *ptr);

/* Activate a standard collection of tracing hooks.  */
extern void mtrace (void);
extern void muntrace (void);

/* Statistics available to the user.  */
struct mstats
  {
    size_t bytes_total; /* Total size of the heap.  */
    size_t chunks_used; /* Chunks allocated by the user.  */
    size_t bytes_used;  /* Byte total of user-allocated chunks.  */
    size_t chunks_free; /* Chunks in the free list.  */
    size_t bytes_free;  /* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats (void);

#endif
#undef extern

#ifdef __cplusplus
}
#endif
/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <https://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#include <errno.h>

/* Debugging hook for 'malloc'.  */
static void *(*__MALLOC_HOOK_VOLATILE gmalloc_hook) (size_t);

/* Replacements for traditional glibc malloc hooks, for platforms that
   do not already have these hooks.  Platforms with these hooks all
   used relaxed ref/def, so it is OK to define them here too.  */
void (*__MALLOC_HOOK_VOLATILE __malloc_initialize_hook) (void);
void (*__MALLOC_HOOK_VOLATILE __after_morecore_hook) (void);
void *(*__morecore) (ptrdiff_t);
#ifndef HYBRID_MALLOC

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Search index in the info table.  */
size_t _heapindex;

/* Limit of valid info table indices.  */
size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
size_t _chunks_used;
size_t _bytes_used;
size_t _chunks_free;
size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

#else

static struct list _fraghead[BLOCKLOG];

#endif /* HYBRID_MALLOC */
/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
#if defined DOUG_LEA_MALLOC || defined HYBRID_MALLOC || defined SYSTEM_MALLOC
static
#endif
size_t __malloc_extra_blocks;

/* Number of info entries.  */
static size_t heapsize;
#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static size_t last_state_size;
static malloc_info *last_heapinfo;

void
protect_malloc_state (int protect_p)
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo   = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
                    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
        abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)      /* empty */
#endif
/* Aligned allocation.  */
static void *
align (size_t size)
{
  void *result;
  ptrdiff_t adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows the
     ptrdiff_t type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if (PTRDIFF_MAX < size)
    result = 0;
  else
    result = (*__morecore) (size);
  adj = (uintptr_t) result % BLOCKSIZE;
  if (adj != 0)
    {
      adj = BLOCKSIZE - adj;
      (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}
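/* Worked example (not in the original source) of the adjustment
   above, assuming BLOCKSIZE is 4096: if __morecore returns core at
   address 0x1010, then adj = 0x1010 % 4096 = 16, so align asks
   __morecore for another 4096 - 16 = 4080 bytes and returns 0x2000,
   the next block boundary.  */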
/* Get SIZE bytes, if we can get them starting at END.
   Return the address of the space we got.
   If we cannot get space at END, fail and return 0.  */
static void *
get_contiguous_space (ptrdiff_t size, void *position)
{
  void *before;
  void *after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}
/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void
register_heapinfo (void)
{
  size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = -1;
  _heapinfo[block].busy.info.size = blocks;
}
#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;

static void
malloc_atfork_handler_prepare (void)
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread (void)
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
                  malloc_atfork_handler_parent,
                  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif /* USE_PTHREAD */
static void
malloc_initialize_1 (void)
{
#ifdef GC_MCHECK
  mcheck (NULL);
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) ptr_bounds_init (_heapinfo);
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}
/* Set everything up and remember that we have.
   main will call malloc which calls this function.  That is before any threads
   or signal handlers have been set up, so we don't need thread protection.  */
int
__malloc_initialize (void)
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}
static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static void *
morecore_nolock (size_t size)
{
  void *result;
  malloc_info *newinfo, *oldinfo;
  size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if (heapsize < BLOCK ((char *) result + size))
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
         added blocks in the table itself, as we hope to place them in
         existing free space, which is already covered by part of the
         existing table.  */
      newsize = heapsize;
      do
        newsize *= 2;
      while (newsize < BLOCK ((char *) result + size));

      /* We must not reuse existing core for the new info table when called
         from realloc in the case of growing a large block, because the
         block being grown is momentarily marked as free.  In this case
         _heaplimit is zero so we know not to reuse space for internal
         allocation.  */
      if (_heaplimit != 0)
        {
          /* First try to allocate the new info table in core we already
             have, in the usual way using realloc.  If realloc cannot
             extend it in place or relocate it to existing sufficient core,
             we will get called again, and the code above will notice the
             `morecore_recursing' flag and return null.  */
          int save = errno;     /* Don't want to clobber errno with ENOMEM.  */
          morecore_recursing = 1;
          newinfo = _realloc_internal_nolock (_heapinfo,
                                              newsize * sizeof (malloc_info));
          morecore_recursing = 0;
          if (newinfo == NULL)
            errno = save;
          else
            {
              /* We found some space in core, and realloc has put the old
                 table's blocks on the free list.  Now zero the new part
                 of the table and install the new table location.  */
              memset (&newinfo[heapsize], 0,
                      (newsize - heapsize) * sizeof (malloc_info));
              _heapinfo = newinfo;
              heapsize = newsize;
              goto got_heap;
            }
        }

      /* Allocate new space for the malloc info table.  */
      while (1)
        {
          newinfo = align (newsize * sizeof (malloc_info));

          /* Did it fail?  */
          if (newinfo == NULL)
            {
              (*__morecore) (-size);
              return NULL;
            }

          /* Is it big enough to record status for its own space?
             If so, we win.  */
          if (BLOCK ((char *) newinfo + newsize * sizeof (malloc_info))
              < newsize)
            break;

          /* Must try again.  First give back most of what we just got.  */
          (*__morecore) (- newsize * sizeof (malloc_info));
          newsize *= 2;
        }

      /* Copy the old table to the beginning of the new,
         and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
              (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
         it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}
/* Allocate memory from the heap.  */
void *
_malloc_internal_nolock (size_t size)
{
  void *result;
  size_t block, blocks, lastblocks, start;
  register size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
         Determine the logarithm to base two of the fragment size.  */
      register size_t log = 1;
      --size;
      while ((size /= 2) != 0)
        ++log;

      /* Look in the fragment lists for a
         free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
        {
          /* There are free fragments of this size.
             Pop a fragment out of the fragment list and return it.
             Update the block's nfree and first counters.  */
          result = next;
          next->prev->next = next->next;
          if (next->next != NULL)
            next->next->prev = next->prev;
          block = BLOCK (result);
          if (--_heapinfo[block].busy.info.frag.nfree != 0)
            _heapinfo[block].busy.info.frag.first =
              (uintptr_t) next->next % BLOCKSIZE >> log;

          /* Update the statistics.  */
          ++_chunks_used;
          _bytes_used += 1 << log;
          --_chunks_free;
          _bytes_free -= 1 << log;
        }
      else
        {
          /* No free fragments of the desired size, so get a new block
             and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
          result = _malloc_internal_nolock (BLOCKSIZE);
          PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
          result = _malloc_internal_nolock (BLOCKSIZE);
#else
          result = malloc (BLOCKSIZE);
#endif
          if (result == NULL)
            {
              PROTECT_MALLOC_STATE (1);
              goto out;
            }

          /* Link all fragments but the first into the free list.  */
          next = (struct list *) ((char *) result + (1 << log));
          next->next = NULL;
          next->prev = &_fraghead[log];
          _fraghead[log].next = next;

          for (i = 2; i < (size_t) (BLOCKSIZE >> log); ++i)
            {
              next = (struct list *) ((char *) result + (i << log));
              next->next = _fraghead[log].next;
              next->prev = &_fraghead[log];
              next->prev->next = next;
              next->next->prev = next;
            }

          /* Initialize the nfree and first counters for this block.  */
          block = BLOCK (result);
          _heapinfo[block].busy.type = log;
          _heapinfo[block].busy.info.frag.nfree = i - 1;
          _heapinfo[block].busy.info.frag.first = i - 1;

          _chunks_free += (BLOCKSIZE >> log) - 1;
          _bytes_free += BLOCKSIZE - (1 << log);
          _bytes_used -= BLOCKSIZE - (1 << log);
        }
    }
  else
    {
      /* Large allocation to receive one or more blocks.
         Search the free list in a circle starting at the last place visited.
         If we loop completely around without finding a large enough
         space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
        {
          block = _heapinfo[block].free.next;
          if (block == start)
            {
              /* Need to get more from the system.  Get a little extra.  */
              size_t wantblocks = blocks + __malloc_extra_blocks;
              block = _heapinfo[0].free.prev;
              lastblocks = _heapinfo[block].free.size;
              /* Check to see if the new core will be contiguous with the
                 final free block; if so we don't need to get as much.  */
              if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
                  /* We can't do this if we will have to make the heap info
                     table bigger to accommodate the new space.  */
                  block + wantblocks <= heapsize &&
                  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
                                        ADDRESS (block + lastblocks)))
                {
                  /* We got it contiguously.  Which block we are extending
                     (the `final free block' referred to above) might have
                     changed, if it got combined with a freed info table.  */
                  block = _heapinfo[0].free.prev;
                  _heapinfo[block].free.size += (wantblocks - lastblocks);
                  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
                  _heaplimit += wantblocks - lastblocks;
                  continue;
                }
              result = morecore_nolock (wantblocks * BLOCKSIZE);
              if (result == NULL)
                goto out;
              block = BLOCK (result);
              /* Put the new block at the end of the free list.  */
              _heapinfo[block].free.size = wantblocks;
              _heapinfo[block].free.prev = _heapinfo[0].free.prev;
              _heapinfo[block].free.next = 0;
              _heapinfo[0].free.prev = block;
              _heapinfo[_heapinfo[block].free.prev].free.next = block;
              ++_chunks_free;
              /* Now loop to use some of that block for this allocation.  */
            }
        }

      /* At this point we have found a suitable free list entry.
         Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
        {
          /* The block we found has a bit left over,
             so relink the tail end back into the free list.  */
          _heapinfo[block + blocks].free.size
            = _heapinfo[block].free.size - blocks;
          _heapinfo[block + blocks].free.next
            = _heapinfo[block].free.next;
          _heapinfo[block + blocks].free.prev
            = _heapinfo[block].free.prev;
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapindex = block + blocks;
        }
      else
        {
          /* The block exactly matches our requirements,
             so just remove it from the list.  */
          _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapinfo[block].free.prev;
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapindex = _heapinfo[block].free.next;
          --_chunks_free;
        }

      _heapinfo[block].busy.type = -1;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}
void *
_malloc_internal (size_t size)
{
  void *result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}
void *
malloc (size_t size)
{
  void *(*hook) (size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of gmalloc_hook to an automatic variable in case
     gmalloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not the right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to gmalloc_hook) for now.  */
  hook = gmalloc_hook;
  void *result = (hook ? hook : _malloc_internal) (size);
  return ptr_bounds_clip (result, size);
}
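/* Illustrative sketch (not in the original source; `my_hook' is a
   hypothetical name) of the hook protocol used above: a debugging
   hook temporarily disarms itself so that its own call to malloc
   does not recurse into the hook.  The mcheck hooks at the end of
   this file follow this same pattern.  */
#if 0
static void *
my_hook (size_t size)
{
  gmalloc_hook = NULL;          /* Disable the hook while we allocate.  */
  void *p = malloc (size);
  gmalloc_hook = my_hook;       /* Re-arm the hook.  */
  return p;
}
#endif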
#if !(defined (_LIBC) || defined (HYBRID_MALLOC))

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

extern void *_malloc (size_t);
extern void _free (void *);
extern void *_realloc (void *, size_t);

void *
_malloc (size_t size)
{
  return malloc (size);
}

void
_free (void *ptr)
{
  free (ptr);
}

void *
_realloc (void *ptr, size_t size)
{
  return realloc (ptr, size);
}

#endif
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <https://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
/* Debugging hook for free.  */
static void (*__MALLOC_HOOK_VOLATILE gfree_hook) (void *);

#ifndef HYBRID_MALLOC

/* List of blocks allocated by aligned_alloc.  */
struct alignlist *_aligned_blocks = NULL;
#endif
/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (void *ptr)
{
  int type;
  size_t block, blocks;
  register size_t i;
  struct list *prev, *next;
  void *curbrk;
  const size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;
  ptr = ptr_bounds_init (ptr);

  PROTECT_MALLOC_STATE (0);

  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
        l->aligned = NULL;      /* Mark the slot in the list as free.  */
        ptr = l->exact;
        break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case -1:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
         Start searching at the last block referenced; this may benefit
         programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
        while (i > block)
          i = _heapinfo[i].free.prev;
      else
        {
          do
            i = _heapinfo[i].free.next;
          while (i > 0 && i < block);
          i = _heapinfo[i].free.prev;
        }

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
        {
          /* Coalesce this block with its predecessor.  */
          _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
          block = i;
        }
      else
        {
          /* Really link this block back into the free list.  */
          _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
          _heapinfo[block].free.next = _heapinfo[i].free.next;
          _heapinfo[block].free.prev = i;
          _heapinfo[i].free.next = block;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          ++_chunks_free;
        }

      /* Now that the block is linked in, see if we can coalesce it
         with its successor (by deleting its successor from the list
         and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
        {
          _heapinfo[block].free.size
            += _heapinfo[_heapinfo[block].free.next].free.size;
          _heapinfo[block].free.next
            = _heapinfo[_heapinfo[block].free.next].free.next;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          --_chunks_free;
        }

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
        {
          /* The end of the malloc heap is at the end of accessible core.
             It's possible that moving _heapinfo will allow us to
             return some space to the system.  */

          size_t info_block = BLOCK (_heapinfo);
          size_t info_blocks = _heapinfo[info_block].busy.info.size;
          size_t prev_block = _heapinfo[block].free.prev;
          size_t prev_blocks = _heapinfo[prev_block].free.size;
          size_t next_block = _heapinfo[block].free.next;
          size_t next_blocks = _heapinfo[next_block].free.size;

          if (/* Win if this block being freed is last in core, the info table
                 is just before it, the previous free block is just before the
                 info table, and the two free blocks together form a useful
                 amount to return to the system.  */
              (block + blocks == _heaplimit &&
               info_block + info_blocks == block &&
               prev_block != 0 && prev_block + prev_blocks == info_block &&
               blocks + prev_blocks >= lesscore_threshold) ||
              /* Nope, not the case.  We can also win if this block being
                 freed is just before the info table, and the table extends
                 to the end of core or is followed only by a free block,
                 and the total free space is worth returning to the system.  */
              (block + blocks == info_block &&
               ((info_block + info_blocks == _heaplimit &&
                 blocks >= lesscore_threshold) ||
                (info_block + info_blocks == next_block &&
                 next_block + next_blocks == _heaplimit &&
                 blocks + next_blocks >= lesscore_threshold))))
            {
              malloc_info *newinfo;
              size_t oldlimit = _heaplimit;

              /* Free the old info table, clearing _heaplimit to avoid
                 recursion into this code.  We don't want to return the
                 table's blocks to the system before we have copied them to
                 the new location.  */
              _heaplimit = 0;
              _free_internal_nolock (_heapinfo);
              _heaplimit = oldlimit;

              /* Tell malloc to search from the beginning of the heap for
                 free blocks, so it doesn't reuse the ones just freed.  */
              _heapindex = 0;

              /* Allocate new space for the info table and move its data.  */
              newinfo = _malloc_internal_nolock (info_blocks * BLOCKSIZE);
              PROTECT_MALLOC_STATE (0);
              memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
              _heapinfo = newinfo;

              /* We should now have coalesced the free block with the
                 blocks freed from the old info table.  Examine the entire
                 trailing free block to decide below whether to return some
                 to the system.  */
              block = _heapinfo[0].free.prev;
              blocks = _heapinfo[block].free.size;
            }

          /* Now see if we can return stuff to the system.  */
          if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
            {
              register size_t bytes = blocks * BLOCKSIZE;
              _heaplimit -= blocks;
              (*__morecore) (-bytes);
              _heapinfo[_heapinfo[block].free.prev].free.next
                = _heapinfo[block].free.next;
              _heapinfo[_heapinfo[block].free.next].free.prev
                = _heapinfo[block].free.prev;
              block = _heapinfo[block].free.prev;
              --_chunks_free;
              _bytes_free -= bytes;
            }
        }

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
                              (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
        {
          /* If all fragments of this block are free, remove them
             from the fragment list and free the whole block.  */
          next = prev;
          for (i = 1; i < (size_t) (BLOCKSIZE >> type); ++i)
            next = next->next;
          prev->prev->next = next;
          if (next != NULL)
            next->prev = prev->prev;
          _heapinfo[block].busy.type = -1;
          _heapinfo[block].busy.info.size = 1;

          /* Keep the statistics accurate.  */
          ++_chunks_used;
          _bytes_used += BLOCKSIZE;
          _chunks_free -= BLOCKSIZE >> type;
          _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
          _free_internal_nolock (ADDRESS (block));
#else
          free (ADDRESS (block));
#endif
        }
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
        {
          /* If some fragments of this block are free, link this
             fragment into the fragment list after the first free
             fragment of this block.  */
          next = ptr;
          next->next = prev->next;
          next->prev = prev;
          prev->next = next;
          if (next->next != NULL)
            next->next->prev = next;
          ++_heapinfo[block].busy.info.frag.nfree;
        }
      else
        {
          /* No fragments of this block are free, so link this
             fragment into the fragment list and announce that
             it is the first free fragment of this block.  */
          prev = ptr;
          _heapinfo[block].busy.info.frag.nfree = 1;
          _heapinfo[block].busy.info.frag.first =
            (uintptr_t) ptr % BLOCKSIZE >> type;
          prev->next = _fraghead[type].next;
          prev->prev = &_fraghead[type];
          prev->prev->next = prev;
          if (prev->next != NULL)
            prev->next->prev = prev;
        }
      break;
    }

  PROTECT_MALLOC_STATE (1);
}
/* Return memory to the heap.
   Like 'free' but don't call a hook if there is one.  */
void
_free_internal (void *ptr)
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}

/* Return memory to the heap.  */

void
free (void *ptr)
{
  void (*hook) (void *) = gfree_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}
#ifndef HYBRID_MALLOC
/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (void *ptr)
{
  free (ptr);
}
#endif
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <https://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifndef min
#define min(a, b) ((a) < (b) ? (a) : (b))
#endif

/* Debugging hook for realloc.  */
static void *(*grealloc_hook) (void *, size_t);
/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.  */
void *
_realloc_internal_nolock (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

  ptr = ptr_bounds_init (ptr);
  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case -1:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
        {
          result = _malloc_internal_nolock (size);
          if (result != NULL)
            {
              memcpy (result, ptr, size);
              _free_internal_nolock (ptr);
              goto out;
            }
        }

      /* The new size is a large allocation as well;
         see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
        {
          /* The new size is smaller; return
             excess memory to the free list.  */
          _heapinfo[block + blocks].busy.type = -1;
          _heapinfo[block + blocks].busy.info.size
            = _heapinfo[block].busy.info.size - blocks;
          _heapinfo[block].busy.info.size = blocks;
          /* We have just created a new chunk by splitting a chunk in two.
             Now we will free this chunk; increment the statistics counter
             so it doesn't become wrong when _free_internal decrements it.  */
          ++_chunks_used;
          _free_internal_nolock (ADDRESS (block + blocks));
          result = ptr;
        }
      else if (blocks == _heapinfo[block].busy.info.size)
        /* No size change necessary.  */
        result = ptr;
      else
        {
          /* Won't fit, so allocate a new region that will.
             Free the old region first in case there is sufficient
             adjacent free space to grow without moving.  */
          blocks = _heapinfo[block].busy.info.size;
          /* Prevent free from actually returning memory to the system.  */
          oldlimit = _heaplimit;
          _heaplimit = 0;
          _free_internal_nolock (ptr);
          result = _malloc_internal_nolock (size);
          PROTECT_MALLOC_STATE (0);
          if (_heaplimit == 0)
            _heaplimit = oldlimit;
          if (result == NULL)
            {
              /* Now we're really in trouble.  We have to unfree
                 the thing we just freed.  Unfortunately it might
                 have been coalesced with its neighbors.  */
              if (_heapindex == block)
                (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
              else
                {
                  void *previous
                    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
                  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
                  _free_internal_nolock (previous);
                }
              goto out;
            }
          if (ptr != result)
            memmove (result, ptr, blocks * BLOCKSIZE);
        }
      break;

    default:
      /* Old size is a fragment; type is logarithm
         to base two of the fragment size.  */
      if (size > (size_t) (1 << (type - 1)) &&
          size <= (size_t) (1 << type))
        /* The new size is the same kind of fragment.  */
        result = ptr;
      else
        {
          /* The new size is different; allocate a new space,
             and copy the lesser of the new size and the old.  */
          result = _malloc_internal_nolock (size);
          if (result == NULL)
            goto out;
          memcpy (result, ptr, min (size, (size_t) 1 << type));
          _free_internal_nolock (ptr);
        }
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}
void *
_realloc_internal (void *ptr, size_t size)
{
  void *result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}

void *
realloc (void *ptr, size_t size)
{
  void *(*hook) (void *, size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  hook = grealloc_hook;
  void *result = (hook ? hook : _realloc_internal) (ptr, size);
  return ptr_bounds_clip (result, size);
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <https://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
void *
calloc (size_t nmemb, size_t size)
{
  void *result;
  size_t bytes = nmemb * size;

  if (size != 0 && bytes / size != nmemb)
    {
      errno = ENOMEM;
      return NULL;
    }

  result = malloc (bytes);
  if (result)
    return memset (result, 0, bytes);
  return result;
}
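/* Worked example (not in the original source) of the overflow test
   above: with a 32-bit size_t, nmemb = 65536 and size = 65537 wrap
   to bytes = 65536, and bytes / size is then 0, which differs from
   nmemb, so the request is correctly rejected with ENOMEM instead of
   returning a block far smaller than the caller computed.  */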
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with the GNU C Library.  If not, see <https://www.gnu.org/licenses/>.  */
/* uClibc defines __GNU_LIBRARY__, but it is not completely
   compatible.  */
#if !defined (__GNU_LIBRARY__) || defined (__UCLIBC__)
#define __sbrk sbrk
#else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

extern void *__sbrk (ptrdiff_t increment);
#endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */

/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
static void *
gdefault_morecore (ptrdiff_t increment)
{
#ifdef HYBRID_MALLOC
  if (!DUMPED)
    {
      return bss_sbrk (increment);
    }
#endif
#ifdef HAVE_SBRK
  void *result = (void *) __sbrk (increment);
  if (result != (void *) -1)
    return result;
#endif
  return NULL;
}

void *(*__morecore) (ptrdiff_t) = gdefault_morecore;
/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <https://www.gnu.org/licenses/>.  */
void *
aligned_alloc (size_t alignment, size_t size)
{
  void *result;
  size_t adj, lastadj;

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  if (- size < alignment)
    {
      errno = ENOMEM;
      return NULL;
    }
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = alignment - (uintptr_t) result % alignment;
  if (adj == alignment)
    adj = 0;

  if (adj != alignment - 1)
    {
      do
        {
          /* Reallocate the block with only as much excess as it
             needs.  */
          free (result);
          result = malloc (size + adj);
          if (result == NULL)   /* Impossible unless interrupted.  */
            return NULL;

          lastadj = adj;
          adj = alignment - (uintptr_t) result % alignment;
          if (adj == alignment)
            adj = 0;
          /* It's conceivable we might have been so unlucky as to get
             a different block with weaker alignment.  If so, this
             block is too short to contain SIZE after alignment
             correction.  So we must try again and get another block,
             slightly larger.  */
        }
      while (adj > lastadj);
    }

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
         can identify the pointer it is passed, which will be in the middle
         of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
        if (l->aligned == NULL)
          /* This slot is free.  Use it.  */
          break;
      if (l == NULL)
        {
          l = malloc (sizeof *l);
          if (l != NULL)
            {
              l->next = _aligned_blocks;
              _aligned_blocks = l;
            }
        }
      if (l != NULL)
        {
          l->exact = result;
          result = l->aligned = (char *) result + adj;
          result = ptr_bounds_clip (result, size);
        }
      UNLOCK_ALIGNED_BLOCKS ();
      if (l == NULL)
        {
          free (result);
          result = NULL;
        }
    }

  return result;
}
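/* Usage sketch (not part of the original file; the function name is
   hypothetical): a block from aligned_alloc is released with plain
   free, which recognizes the adjusted pointer through
   _aligned_blocks and frees the underlying `exact' pointer.  */
#if 0
static void
example_aligned_alloc (void)
{
  void *p = aligned_alloc (64, 1000);   /* 64-byte-aligned block.  */
  if (p)
    free (p);                           /* Looks p up in _aligned_blocks.  */
}
#endif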
/* Note that memalign and posix_memalign are not used in Emacs.  */
#ifndef HYBRID_MALLOC
/* An obsolete alias for aligned_alloc, for any old libraries that use
   this alias.  */

void *
memalign (size_t alignment, size_t size)
{
  return aligned_alloc (alignment, size);
}

/* If HYBRID_MALLOC is defined, we may want to use the system
   posix_memalign below.  */
int
posix_memalign (void **memptr, size_t alignment, size_t size)
{
  void *mem;

  if (alignment == 0
      || alignment % sizeof (void *) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = aligned_alloc (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;

  return 0;
}
#endif
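/* Worked example (not in the original source) of the power-of-two
   test above: alignment = 24 gives 24 & 23 == 16, which is nonzero,
   so posix_memalign returns EINVAL; alignment = 32 gives 32 & 31 ==
   0 and is accepted.  */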
/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <https://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifndef HYBRID_MALLOC

# ifndef HAVE_MALLOC_H
/* Allocate SIZE bytes on a page boundary.  */
extern void *valloc (size_t);
# endif

# if defined _SC_PAGESIZE || !defined HAVE_GETPAGESIZE
#  include "getpagesize.h"
# elif !defined getpagesize
extern int getpagesize (void);
# endif

static size_t pagesize;

void *
valloc (size_t size)
{
  if (pagesize == 0)
    pagesize = getpagesize ();

  return aligned_alloc (pagesize, size);
}
#endif /* HYBRID_MALLOC */
#undef malloc
#undef realloc
#undef calloc
#undef aligned_alloc
#undef free

#ifdef HYBRID_MALLOC
/* Declare system malloc and friends.  */
extern void *malloc (size_t size);
extern void *realloc (void *ptr, size_t size);
extern void *calloc (size_t nmemb, size_t size);
extern void free (void *ptr);
#ifdef HAVE_ALIGNED_ALLOC
extern void *aligned_alloc (size_t alignment, size_t size);
#elif defined HAVE_POSIX_MEMALIGN
extern int posix_memalign (void **memptr, size_t alignment, size_t size);
#endif
/* Assuming PTR was allocated via the hybrid malloc, return true if
   PTR was allocated via gmalloc, not the system malloc.  Also, return
   true if _heaplimit is zero; this can happen temporarily when
   gmalloc calls itself for internal use, and in that case PTR is
   already known to be allocated via gmalloc.  */

static bool
allocated_via_gmalloc (void *ptr)
{
  size_t block = BLOCK (ptr);
  size_t blockmax = _heaplimit - 1;
  return block <= blockmax && _heapinfo[block].busy.type != 0;
}
/* See the comments near the beginning of this file for explanations
   of the following functions.  */

void *
hybrid_malloc (size_t size)
{
  if (DUMPED)
    return malloc (size);
  return gmalloc (size);
}

void *
hybrid_calloc (size_t nmemb, size_t size)
{
  if (DUMPED)
    return calloc (nmemb, size);
  return gcalloc (nmemb, size);
}

void
hybrid_free (void *ptr)
{
  if (allocated_via_gmalloc (ptr))
    gfree (ptr);
  else
    free (ptr);
}

#if defined HAVE_ALIGNED_ALLOC || defined HAVE_POSIX_MEMALIGN
void *
hybrid_aligned_alloc (size_t alignment, size_t size)
{
  if (!DUMPED)
    return galigned_alloc (alignment, size);
  /* The following is copied from alloc.c  */
#ifdef HAVE_ALIGNED_ALLOC
  return aligned_alloc (alignment, size);
#else  /* HAVE_POSIX_MEMALIGN */
  void *p;
  return posix_memalign (&p, alignment, size) == 0 ? p : 0;
#endif
}
#endif
void *
hybrid_realloc (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, oldsize;

  if (!ptr)
    return hybrid_malloc (size);
  if (!allocated_via_gmalloc (ptr))
    return realloc (ptr, size);
  if (!DUMPED)
    return grealloc (ptr, size);

  /* The dumped emacs is trying to realloc storage allocated before
     dumping via gmalloc.  Allocate new space and copy the data.  Do
     not bother with gfree (ptr), as that would just waste time.  */
  block = BLOCK (ptr);
  type = _heapinfo[block].busy.type;
  oldsize =
    type < 0 ? _heapinfo[block].busy.info.size * BLOCKSIZE
    : (size_t) 1 << type;
  result = malloc (size);
  if (result)
    return memcpy (result, ptr, min (oldsize, size));
  return result;
}
#else /* ! HYBRID_MALLOC */

void *
malloc (size_t size)
{
  return gmalloc (size);
}

void *
calloc (size_t nmemb, size_t size)
{
  return gcalloc (nmemb, size);
}

void
free (void *ptr)
{
  gfree (ptr);
}

void *
aligned_alloc (size_t alignment, size_t size)
{
  return galigned_alloc (alignment, size);
}

void *
realloc (void *ptr, size_t size)
{
  return grealloc (ptr, size);
}

#endif /* HYBRID_MALLOC */
#ifdef GC_MCHECK

/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <https://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#include <stdio.h>

/* Old hook values.  */
static void (*old_free_hook) (void *ptr);
static void *(*old_malloc_hook) (size_t size);
static void *(*old_realloc_hook) (void *ptr, size_t size);

/* Function to call when something awful happens.  */
static void (*abortfunc) (enum mcheck_status);

/* Arbitrary magical numbers.  */
#define MAGICWORD	(SIZE_MAX / 11 ^ SIZE_MAX / 13 << 3)
#define MAGICFREE	(SIZE_MAX / 17 ^ SIZE_MAX / 19 << 4)
#define MAGICBYTE	((char) 0xd7)
#define MALLOCFLOOD	((char) 0x93)
#define FREEFLOOD	((char) 0x95)

struct hdr
  {
    size_t size;	/* Exact size requested by user.  */
    size_t magic;	/* Magic number to check header integrity.  */
  };
static enum mcheck_status
checkhdr (const struct hdr *hdr)
{
  enum mcheck_status status;
  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
        status = MCHECK_TAIL;
      else
        status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}
static void
freehook (void *ptr)
{
  struct hdr *hdr;

  if (ptr)
    {
      struct alignlist *l;

      /* If the block was allocated by aligned_alloc, its real pointer
         to free is recorded in _aligned_blocks; find that.  */
      PROTECT_MALLOC_STATE (0);
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
        if (l->aligned == ptr)
          {
            l->aligned = NULL;  /* Mark the slot in the list as free.  */
            ptr = l->exact;
            break;
          }
      UNLOCK_ALIGNED_BLOCKS ();
      PROTECT_MALLOC_STATE (1);

      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      memset (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  gfree_hook = old_free_hook;
  free (hdr);
  gfree_hook = freehook;
}
static void *
mallochook (size_t size)
{
  struct hdr *hdr;

  gmalloc_hook = old_malloc_hook;
  hdr = malloc (sizeof *hdr + size + 1);
  gmalloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  return memset (hdr + 1, MALLOCFLOOD, size);
}
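/* Layout sketch (not in the original source) of a block returned by
   mallochook:

     [struct hdr: size, magic][size bytes of user data][MAGICBYTE]

   checkhdr later verifies both the magic word in the header and the
   trailing MAGICBYTE, catching clobbering before and after the
   block.  */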
static void *
reallochook (void *ptr, size_t size)
{
  struct hdr *hdr = NULL;
  size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
        memset ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  gfree_hook = old_free_hook;
  gmalloc_hook = old_malloc_hook;
  grealloc_hook = old_realloc_hook;
  hdr = realloc (hdr, sizeof *hdr + size + 1);
  gfree_hook = freehook;
  gmalloc_hook = mallochook;
  grealloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    memset ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return hdr + 1;
}
static void
mabort (enum mcheck_status status)
{
  const char *msg;
  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
# ifdef emacs
  emacs_abort ();
# else
  abort ();
# endif
#endif
}
static int mcheck_used = 0;

int
mcheck (void (*func) (enum mcheck_status))
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = gfree_hook;
      gfree_hook = freehook;
      old_malloc_hook = gmalloc_hook;
      gmalloc_hook = mallochook;
      old_realloc_hook = grealloc_hook;
      grealloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}

enum mcheck_status
mprobe (void *ptr)
{
  return mcheck_used ? checkhdr (ptr) : MCHECK_DISABLED;
}

#endif /* GC_MCHECK */