/* Declarations for `malloc' and friends.
   Copyright (C) 1990-1993, 1995-1996, 1999, 2002-2007, 2013-2018 Free
   Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <https://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#include <config.h>

#if defined HAVE_PTHREAD && !defined HYBRID_MALLOC
#define USE_PTHREAD
#endif

#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <stdint.h>
#include <unistd.h>

#ifdef USE_PTHREAD
#include <pthread.h>
#endif

#ifdef emacs
# include "lisp.h"
#endif

#ifdef HAVE_MALLOC_H
# if GNUC_PREREQ (4, 2, 0)
#  pragma GCC diagnostic ignored "-Wdeprecated-declarations"
# endif
# include <malloc.h>
#endif
#ifndef __MALLOC_HOOK_VOLATILE
# define __MALLOC_HOOK_VOLATILE volatile
#endif
#ifndef HAVE_MALLOC_H
extern void (*__MALLOC_HOOK_VOLATILE __after_morecore_hook) (void);
extern void (*__MALLOC_HOOK_VOLATILE __malloc_initialize_hook) (void);
extern void *(*__morecore) (ptrdiff_t);
#endif
/* If HYBRID_MALLOC is defined, then temacs will use malloc,
   realloc... as defined in this file (and renamed gmalloc,
   grealloc... via the macros that follow).  The dumped emacs,
   however, will use the system malloc, realloc....  In other source
   files, malloc, realloc... are renamed hybrid_malloc,
   hybrid_realloc... via macros in conf_post.h.  hybrid_malloc and
   friends are wrapper functions defined later in this file.  */
#undef malloc
#undef realloc
#undef calloc
#undef aligned_alloc
#undef free
#define malloc gmalloc
#define realloc grealloc
#define calloc gcalloc
#define aligned_alloc galigned_alloc
#define free gfree
#define malloc_info gmalloc_info

#ifdef HYBRID_MALLOC
# include "sheap.h"
# define DUMPED bss_sbrk_did_unexec
#endif

#ifdef __cplusplus
extern "C"
{
#endif

#ifdef HYBRID_MALLOC
#define extern static
#endif
/* Allocate SIZE bytes of memory.  */
extern void *malloc (size_t size) ATTRIBUTE_MALLOC_SIZE ((1));
/* Re-allocate the previously allocated block
   in ptr, making the new block SIZE bytes long.  */
extern void *realloc (void *ptr, size_t size) ATTRIBUTE_ALLOC_SIZE ((2));
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern void *calloc (size_t nmemb, size_t size) ATTRIBUTE_MALLOC_SIZE ((1,2));
/* Free a block.  */
extern void free (void *ptr);

/* Allocate SIZE bytes aligned to ALIGNMENT bytes.  */
extern void *aligned_alloc (size_t, size_t);
#ifdef MSDOS
extern void *memalign (size_t, size_t);
extern int posix_memalign (void **, size_t, size_t);
#endif
/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define BLOCKLOG (INT_WIDTH > 16 ? 12 : 9)
#define BLOCKSIZE (1 << BLOCKLOG)
#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
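
/* Worked example (an editorial sketch, not part of the allocator):
   with INT_WIDTH > 16, BLOCKLOG is 12 and BLOCKSIZE is 4096, so
   BLOCKIFY (5000) == 2 whole blocks, while a request of at most
   BLOCKSIZE / 2 bytes is served as a power-of-two fragment sharing
   one block with other small objects.  */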

/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP (INT_WIDTH > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS 8

/* Data structure giving per-block information.  */
typedef union
{
  /* Heap information for a busy block.  */
  struct
  {
    /* Zero for a block that is not one of ours (typically,
       allocated by system malloc), positive for the log base 2 of
       the fragment size of a fragmented block, -1 for the first
       block of a multiblock object, and unspecified for later
       blocks of that object.  Type-0 blocks can be present
       because the system malloc can be invoked by library
       functions in an undumped Emacs.  */
    int type;
    union
    {
      struct
      {
	size_t nfree;	/* Free frags in a fragmented block.  */
	size_t first;	/* First free fragment of the block.  */
      } frag;
      /* For a large object, in its first block, this has the number
	 of blocks in the object.  */
      ptrdiff_t size;
    } info;
  } busy;
  /* Heap information for a free block
     (that may be the first of a free cluster).  */
  struct
  {
    size_t size;	/* Size (in blocks) of a free cluster.  */
    size_t next;	/* Index of next free cluster.  */
    size_t prev;	/* Index of previous free cluster.  */
  } free;
} malloc_info;

/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A) ((size_t) ((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B) ((void *) (((B) - 1) * BLOCKSIZE + _heapbase))
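
/* Round-trip sketch (editorial): for any pointer P inside the heap,
   ADDRESS (BLOCK (P)) yields the start of P's block.  Block numbers
   start at 1, which leaves _heapinfo[0] free to serve as the anchor
   of the free-cluster list used below.  */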

/* Current search index for the heap table.  */
extern size_t _heapindex;

/* Limit of valid info table indices.  */
extern size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
{
  struct list *next;
  struct list *prev;
};

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];

/* List of blocks allocated with aligned_alloc and friends.  */
struct alignlist
{
  struct alignlist *next;
  void *aligned;	/* The address that aligned_alloc returned.  */
  void *exact;		/* The address that malloc returned.  */
};
extern struct alignlist *_aligned_blocks;

/* Instrumentation.  */
extern size_t _chunks_used;
extern size_t _bytes_used;
extern size_t _chunks_free;
extern size_t _bytes_free;

/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern void *_malloc_internal (size_t);
extern void *_realloc_internal (void *, size_t);
extern void _free_internal (void *);
extern void *_malloc_internal_nolock (size_t);
extern void *_realloc_internal_nolock (void *, size_t);
extern void _free_internal_nolock (void *);

#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;
#define LOCK()						\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_malloc_mutex);		\
  } while (0)
#define UNLOCK()					\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_malloc_mutex);		\
  } while (0)
#define LOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_aligned_blocks_mutex);	\
  } while (0)
#define UNLOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_aligned_blocks_mutex);	\
  } while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize (void);

#ifdef GC_MCHECK

/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
{
  MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
  MCHECK_OK,		/* Block is fine.  */
  MCHECK_FREE,		/* Block freed twice.  */
  MCHECK_HEAD,		/* Memory before the block was clobbered.  */
  MCHECK_TAIL		/* Memory after the block was clobbered.  */
};

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck (void (*abortfunc) (enum mcheck_status));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe (void *ptr);

/* Activate a standard collection of tracing hooks.  */
extern void mtrace (void);
extern void muntrace (void);

/* Statistics available to the user.  */
struct mstats
{
  size_t bytes_total;	/* Total size of the heap.  */
  size_t chunks_used;	/* Chunks allocated by the user.  */
  size_t bytes_used;	/* Byte total of user-allocated chunks.  */
  size_t chunks_free;	/* Chunks in the free list.  */
  size_t bytes_free;	/* Byte total of chunks in the free list.  */
};

/* Pick up the current statistics.  */
extern struct mstats mstats (void);

#endif

#undef extern

#ifdef __cplusplus
}
#endif

/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <https://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#include <errno.h>

/* Debugging hook for 'malloc'.  */
static void *(*__MALLOC_HOOK_VOLATILE gmalloc_hook) (size_t);

/* Replacements for traditional glibc malloc hooks, for platforms that
   do not already have these hooks.  Platforms with these hooks all
   used relaxed ref/def, so it is OK to define them here too.  */
void (*__MALLOC_HOOK_VOLATILE __malloc_initialize_hook) (void);
void (*__MALLOC_HOOK_VOLATILE __after_morecore_hook) (void);
void *(*__morecore) (ptrdiff_t);

#ifndef HYBRID_MALLOC

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Search index in the info table.  */
size_t _heapindex;

/* Limit of valid info table indices.  */
size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
size_t _chunks_used;
size_t _bytes_used;
size_t _chunks_free;
size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

#else

static struct list _fraghead[BLOCKLOG];

#endif /* HYBRID_MALLOC */

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
#if defined DOUG_LEA_MALLOC || defined HYBRID_MALLOC || defined SYSTEM_MALLOC
static
#endif
size_t __malloc_extra_blocks;

/* Number of info entries.  */
static size_t heapsize;

#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static size_t last_state_size;
static malloc_info *last_heapinfo;

void
protect_malloc_state (int protect_p)
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
		    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
	abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif

/* Aligned allocation.  */
static void *
align (size_t size)
{
  void *result;
  ptrdiff_t adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows the
     ptrdiff_t type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if (PTRDIFF_MAX < size)
    result = 0;
  else
    result = (*__morecore) (size);
  adj = (uintptr_t) result % BLOCKSIZE;
  if (adj != 0)
    {
      adj = BLOCKSIZE - adj;
      (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}
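
/* Arithmetic sketch (editorial): if __morecore returned, say, address
   0x1f00 with BLOCKSIZE == 4096, then adj starts as 0xf00, the break
   is advanced by a further 0x100 bytes, and the value returned is
   0x2000, the next block boundary.  */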

/* Get SIZE bytes, if we can get them starting at END.
   Return the address of the space we got.
   If we cannot get space at END, fail and return 0.  */
static void *
get_contiguous_space (ptrdiff_t size, void *position)
{
  void *before;
  void *after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}

/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void
register_heapinfo (void)
{
  size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = -1;
  _heapinfo[block].busy.info.size = blocks;
}

#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;

static void
malloc_atfork_handler_prepare (void)
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread (void)
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
		  malloc_atfork_handler_parent,
		  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif /* USE_PTHREAD */

static void
malloc_initialize_1 (void)
{
#ifdef GC_MCHECK
  mcheck (NULL);
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}

/* Set everything up and remember that we have.
   main will call malloc which calls this function.  That is before any
   threads or signal handlers have been set up, so we don't need thread
   protection.  */
int
__malloc_initialize (void)
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}

static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static void *
morecore_nolock (size_t size)
{
  void *result;
  malloc_info *newinfo, *oldinfo;
  size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if (heapsize < BLOCK ((char *) result + size))
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
	 added blocks in the table itself, as we hope to place them in
	 existing free space, which is already covered by part of the
	 existing table.  */
      newsize = heapsize;
      do
	newsize *= 2;
      while (newsize < BLOCK ((char *) result + size));

      /* We must not reuse existing core for the new info table when called
	 from realloc in the case of growing a large block, because the
	 block being grown is momentarily marked as free.  In this case
	 _heaplimit is zero so we know not to reuse space for internal
	 allocation.  */
      if (_heaplimit != 0)
	{
	  /* First try to allocate the new info table in core we already
	     have, in the usual way using realloc.  If realloc cannot
	     extend it in place or relocate it to existing sufficient core,
	     we will get called again, and the code above will notice the
	     `morecore_recursing' flag and return null.  */
	  int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
	  morecore_recursing = 1;
	  newinfo = _realloc_internal_nolock (_heapinfo,
					      newsize * sizeof (malloc_info));
	  morecore_recursing = 0;
	  if (newinfo == NULL)
	    errno = save;
	  else
	    {
	      /* We found some space in core, and realloc has put the old
		 table's blocks on the free list.  Now zero the new part
		 of the table and install the new table location.  */
	      memset (&newinfo[heapsize], 0,
		      (newsize - heapsize) * sizeof (malloc_info));
	      _heapinfo = newinfo;
	      heapsize = newsize;
	      goto got_heap;
	    }
	}

      /* Allocate new space for the malloc info table.  */
      while (1)
	{
	  newinfo = align (newsize * sizeof (malloc_info));

	  /* Did it fail?  */
	  if (newinfo == NULL)
	    {
	      (*__morecore) (-size);
	      return NULL;
	    }

	  /* Is it big enough to record status for its own space?
	     If so, we win.  */
	  if (BLOCK ((char *) newinfo + newsize * sizeof (malloc_info))
	      < newsize)
	    break;

	  /* Must try again.  First give back most of what we just got.  */
	  (*__morecore) (- newsize * sizeof (malloc_info));
	  newsize *= 2;
	}

      /* Copy the old table to the beginning of the new,
	 and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
	      (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
	 it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}

/* Allocate memory from the heap.  */
void *
_malloc_internal_nolock (size_t size)
{
  void *result;
  size_t block, blocks, lastblocks, start;
  register size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size.  */
      register size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;
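      /* Worked example (editorial): a 100-byte request is decremented
	 to 99 and halved until zero, leaving log == 7, so it is served
	 from a 1 << 7 == 128-byte fragment.  */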

      /* Look in the fragment lists for a
	 free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters.  */
	  result = next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    _heapinfo[block].busy.info.frag.first =
	      (uintptr_t) next->next % BLOCKSIZE >> log;

	  /* Update the statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
	  result = _malloc_internal_nolock (BLOCKSIZE);
	  PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
	  result = _malloc_internal_nolock (BLOCKSIZE);
#else
	  result = malloc (BLOCKSIZE);
#endif
	  if (result == NULL)
	    {
	      PROTECT_MALLOC_STATE (1);
	      goto out;
	    }

	  /* Link all fragments but the first into the free list.  */
	  next = (struct list *) ((char *) result + (1 << log));
	  next->next = NULL;
	  next->prev = &_fraghead[log];
	  _fraghead[log].next = next;

	  for (i = 2; i < (size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      next->next->prev = next;
	    }

	  /* Initialize the nfree and first counters for this block.  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Get a little extra.  */
	      size_t wantblocks = blocks + __malloc_extra_blocks;
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      /* Check to see if the new core will be contiguous with the
		 final free block; if so we don't need to get as much.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  /* We can't do this if we will have to make the heap info
		     table bigger to accommodate the new space.  */
		  block + wantblocks <= heapsize &&
		  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
					ADDRESS (block + lastblocks)))
		{
		  /* We got it contiguously.  Which block we are extending
		     (the `final free block' referred to above) might have
		     changed, if it got combined with a freed info table.  */
		  block = _heapinfo[0].free.prev;
		  _heapinfo[block].free.size += (wantblocks - lastblocks);
		  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
		  _heaplimit += wantblocks - lastblocks;
		  continue;
		}
	      result = morecore_nolock (wantblocks * BLOCKSIZE);
	      if (result == NULL)
		goto out;
	      block = BLOCK (result);
	      /* Put the new block at the end of the free list.  */
	      _heapinfo[block].free.size = wantblocks;
	      _heapinfo[block].free.prev = _heapinfo[0].free.prev;
	      _heapinfo[block].free.next = 0;
	      _heapinfo[0].free.prev = block;
	      _heapinfo[_heapinfo[block].free.prev].free.next = block;
	      ++_chunks_free;
	      /* Now loop to use some of that block for this allocation.  */
	    }
	}

      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list.  */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list.  */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}

      _heapinfo[block].busy.type = -1;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

void *
_malloc_internal (size_t size)
{
  void *result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}

void *
malloc (size_t size)
{
  void *(*hook) (size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of gmalloc_hook to an automatic variable in case
     gmalloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not a correct solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to gmalloc_hook) for now.  */
  hook = gmalloc_hook;
  return (hook != NULL ? *hook : _malloc_internal) (size);
}

#if !(defined (_LIBC) || defined (HYBRID_MALLOC))

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

extern void *_malloc (size_t);
extern void _free (void *);
extern void *_realloc (void *, size_t);

void *
_malloc (size_t size)
{
  return malloc (size);
}

void
_free (void *ptr)
{
  free (ptr);
}

void *
_realloc (void *ptr, size_t size)
{
  return realloc (ptr, size);
}

#endif

/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <https://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

/* Debugging hook for free.  */
static void (*__MALLOC_HOOK_VOLATILE gfree_hook) (void *);

#ifndef HYBRID_MALLOC

/* List of blocks allocated by aligned_alloc.  */
struct alignlist *_aligned_blocks = NULL;
#endif

/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (void *ptr)
{
  int type;
  size_t block, blocks;
  register size_t i;
  struct list *prev, *next;
  void *curbrk;
  const size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

  PROTECT_MALLOC_STATE (0);

  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case -1:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}

      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

	  size_t info_block = BLOCK (_heapinfo);
	  size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  size_t prev_block = _heapinfo[block].free.prev;
	  size_t prev_blocks = _heapinfo[prev_block].free.size;
	  size_t next_block = _heapinfo[block].free.next;
	  size_t next_blocks = _heapinfo[next_block].free.size;

	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold))))
	    {
	      malloc_info *newinfo;
	      size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal_nolock (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      newinfo = _malloc_internal_nolock (info_blocks * BLOCKSIZE);
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }

	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      (*__morecore) (-bytes);
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  _heapinfo[block].busy.type = -1;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
	  _free_internal_nolock (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block.  */
	  next = ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block.  */
	  prev = ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first =
	    (uintptr_t) ptr % BLOCKSIZE >> type;
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
}
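
/* Coalescing sketch (editorial): freeing a 3-block object at index 10
   while blocks 7..9 form a free cluster of size 3 extends that cluster
   to size 6 (10 == 7 + 3) instead of linking a new entry; if the next
   cluster starts at block 13, it is then absorbed the same way, since
   7 + 6 == 13.  */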

/* Return memory to the heap.
   Like 'free' but don't call a hook if there is one.  */
void
_free_internal (void *ptr)
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}

/* Return memory to the heap.  */

void
free (void *ptr)
{
  void (*hook) (void *) = gfree_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}

#ifndef HYBRID_MALLOC
/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (void *ptr)
{
  free (ptr);
}
#endif
#endif

/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <https://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef min
#define min(a, b) ((a) < (b) ? (a) : (b))
#endif

/* Debugging hook for realloc.  */
static void *(*grealloc_hook) (void *, size_t);

/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.  */
void *
_realloc_internal_nolock (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case -1:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
	{
	  result = _malloc_internal_nolock (size);
	  if (result != NULL)
	    {
	      memcpy (result, ptr, size);
	      _free_internal_nolock (ptr);
	      goto out;
	    }
	}

      /* The new size is a large allocation as well;
	 see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
	{
	  /* The new size is smaller; return
	     excess memory to the free list.  */
	  _heapinfo[block + blocks].busy.type = -1;
	  _heapinfo[block + blocks].busy.info.size
	    = _heapinfo[block].busy.info.size - blocks;
	  _heapinfo[block].busy.info.size = blocks;
	  /* We have just created a new chunk by splitting a chunk in two.
	     Now we will free this chunk; increment the statistics counter
	     so it doesn't become wrong when _free_internal decrements it.  */
	  ++_chunks_used;
	  _free_internal_nolock (ADDRESS (block + blocks));
	  result = ptr;
	}
      else if (blocks == _heapinfo[block].busy.info.size)
	/* No size change necessary.  */
	result = ptr;
      else
	{
	  /* Won't fit, so allocate a new region that will.
	     Free the old region first in case there is sufficient
	     adjacent free space to grow without moving.  */
	  blocks = _heapinfo[block].busy.info.size;
	  /* Prevent free from actually returning memory to the system.  */
	  oldlimit = _heaplimit;
	  _heaplimit = 0;
	  _free_internal_nolock (ptr);
	  result = _malloc_internal_nolock (size);
	  PROTECT_MALLOC_STATE (0);
	  if (_heaplimit == 0)
	    _heaplimit = oldlimit;
	  if (result == NULL)
	    {
	      /* Now we're really in trouble.  We have to unfree
		 the thing we just freed.  Unfortunately it might
		 have been coalesced with its neighbors.  */
	      if (_heapindex == block)
		(void) _malloc_internal_nolock (blocks * BLOCKSIZE);
	      else
		{
		  void *previous
		    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
		  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
		  _free_internal_nolock (previous);
		}
	      goto out;
	    }
	  if (ptr != result)
	    memmove (result, ptr, blocks * BLOCKSIZE);
	}
      break;

    default:
      /* Old size is a fragment; type is logarithm
	 to base two of the fragment size.  */
      if (size > (size_t) (1 << (type - 1)) &&
	  size <= (size_t) (1 << type))
	/* The new size is the same kind of fragment.  */
	result = ptr;
      else
	{
	  /* The new size is different; allocate a new space,
	     and copy the lesser of the new size and the old.  */
	  result = _malloc_internal_nolock (size);
	  if (result == NULL)
	    goto out;
	  memcpy (result, ptr, min (size, (size_t) 1 << type));
	  _free_internal_nolock (ptr);
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

void *
_realloc_internal (void *ptr, size_t size)
{
  void *result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}

void *
realloc (void *ptr, size_t size)
{
  void *(*hook) (void *, size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  hook = grealloc_hook;
  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
}

/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <https://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
void *
calloc (size_t nmemb, size_t size)
{
  void *result;
  size_t bytes = nmemb * size;

  if (size != 0 && bytes / size != nmemb)
    {
      errno = ENOMEM;
      return NULL;
    }

  result = malloc (bytes);
  if (result)
    return memset (result, 0, bytes);
  return result;
}
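
/* Overflow sketch (editorial): a call like calloc (SIZE_MAX / 2, 3)
   would wrap nmemb * size; the division test above then yields a
   quotient different from nmemb, so the call fails with ENOMEM
   instead of returning an undersized block.  */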

/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

The GNU C Library is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with the GNU C Library.  If not, see <https://www.gnu.org/licenses/>.  */

/* uClibc defines __GNU_LIBRARY__, but it is not completely
   compatible.  */
#if !defined (__GNU_LIBRARY__) || defined (__UCLIBC__)
#define __sbrk sbrk
#else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

extern void *__sbrk (ptrdiff_t increment);
#endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */

/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
static void *
gdefault_morecore (ptrdiff_t increment)
{
#ifdef HYBRID_MALLOC
  if (!DUMPED)
    {
      return bss_sbrk (increment);
    }
#endif
#ifdef HAVE_SBRK
  void *result = (void *) __sbrk (increment);
  if (result != (void *) -1)
    return result;
#endif
  return NULL;
}

void *(*__morecore) (ptrdiff_t) = gdefault_morecore;

/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <https://www.gnu.org/licenses/>.  */

void *
aligned_alloc (size_t alignment, size_t size)
{
  void *result;
  size_t adj, lastadj;

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  if (- size < alignment)
    {
      errno = ENOMEM;
      return NULL;
    }
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = alignment - (uintptr_t) result % alignment;
  if (adj == alignment)
    adj = 0;

  if (adj != alignment - 1)
    do
      {
	/* Reallocate the block with only as much excess as it
	   needs.  */
	free (result);
	result = malloc (size + adj);
	if (result == NULL)	/* Impossible unless interrupted.  */
	  return NULL;

	lastadj = adj;
	adj = alignment - (uintptr_t) result % alignment;
	if (adj == alignment)
	  adj = 0;
	/* It's conceivable we might have been so unlucky as to get
	   a different block with weaker alignment.  If so, this
	   block is too short to contain SIZE after alignment
	   correction.  So we must try again and get another block,
	   slightly larger.  */
      }
    while (adj > lastadj);

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  l = malloc (sizeof *l);
	  if (l != NULL)
	    {
	      l->next = _aligned_blocks;
	      _aligned_blocks = l;
	    }
	}
      if (l != NULL)
	{
	  l->exact = result;
	  result = l->aligned = (char *) result + adj;
	}
      UNLOCK_ALIGNED_BLOCKS ();
      if (l == NULL)
	{
	  free (result);
	  result = NULL;
	}
    }

  return result;
}
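
/* Retry sketch (editorial): with ALIGNMENT == 64, a first block ending
   in 0x30 needs adj == 16, so the block is re-obtained with just 16
   bytes of excess; the loop above repeats only if the replacement
   block turns out to be even less aligned than the first one.  */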

/* Note that memalign and posix_memalign are not used in Emacs.  */
#ifndef HYBRID_MALLOC
/* An obsolete alias for aligned_alloc, for any old libraries that use
   this alias.  */

void *
memalign (size_t alignment, size_t size)
{
  return aligned_alloc (alignment, size);
}

/* If HYBRID_MALLOC is defined, we may want to use the system
   posix_memalign below.  */
int
posix_memalign (void **memptr, size_t alignment, size_t size)
{
  void *mem;

  if (alignment == 0
      || alignment % sizeof (void *) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = aligned_alloc (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;

  return 0;
}
#endif
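
#if 0
/* Usage sketch (editorial, never compiled): posix_memalign requires a
   power-of-two alignment that is also a multiple of sizeof (void *),
   and reports failure via its return value rather than errno.  */
static void
posix_memalign_example (void)
{
  void *p;
  if (posix_memalign (&p, 64, 1024) == 0)	/* 64-byte-aligned kilobyte.  */
    free (p);
}
#endif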

/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <https://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef HYBRID_MALLOC

# ifndef HAVE_MALLOC_H
/* Allocate SIZE bytes on a page boundary.  */
extern void *valloc (size_t);
# endif

# if defined _SC_PAGESIZE || !defined HAVE_GETPAGESIZE
#  include "getpagesize.h"
# elif !defined getpagesize
extern int getpagesize (void);
# endif

static size_t pagesize;

void *
valloc (size_t size)
{
  if (pagesize == 0)
    pagesize = getpagesize ();

  return aligned_alloc (pagesize, size);
}
#endif /* HYBRID_MALLOC */

#undef malloc
#undef realloc
#undef calloc
#undef aligned_alloc
#undef free

#ifdef HYBRID_MALLOC
/* Declare system malloc and friends.  */
extern void *malloc (size_t size);
extern void *realloc (void *ptr, size_t size);
extern void *calloc (size_t nmemb, size_t size);
extern void free (void *ptr);
#ifdef HAVE_ALIGNED_ALLOC
extern void *aligned_alloc (size_t alignment, size_t size);
#elif defined HAVE_POSIX_MEMALIGN
extern int posix_memalign (void **memptr, size_t alignment, size_t size);
#endif

/* Assuming PTR was allocated via the hybrid malloc, return true if
   PTR was allocated via gmalloc, not the system malloc.  Also, return
   true if _heaplimit is zero; this can happen temporarily when
   gmalloc calls itself for internal use, and in that case PTR is
   already known to be allocated via gmalloc.  */

static bool
allocated_via_gmalloc (void *ptr)
{
  size_t block = BLOCK (ptr);
  size_t blockmax = _heaplimit - 1;
  return block <= blockmax && _heapinfo[block].busy.type != 0;
}
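
/* Note (editorial): when _heaplimit is zero, blockmax wraps around to
   SIZE_MAX, so the test above accepts every block; that unsigned
   wraparound is what implements the "return true while gmalloc calls
   itself" case described in the comment before the function.  */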

/* See the comments near the beginning of this file for explanations
   of the following functions.  */

void *
hybrid_malloc (size_t size)
{
  if (DUMPED)
    return malloc (size);
  return gmalloc (size);
}

void *
hybrid_calloc (size_t nmemb, size_t size)
{
  if (DUMPED)
    return calloc (nmemb, size);
  return gcalloc (nmemb, size);
}

void
hybrid_free (void *ptr)
{
  if (allocated_via_gmalloc (ptr))
    gfree (ptr);
  else
    free (ptr);
}

#if defined HAVE_ALIGNED_ALLOC || defined HAVE_POSIX_MEMALIGN
void *
hybrid_aligned_alloc (size_t alignment, size_t size)
{
  if (!DUMPED)
    return galigned_alloc (alignment, size);
  /* The following is copied from alloc.c  */
#ifdef HAVE_ALIGNED_ALLOC
  return aligned_alloc (alignment, size);
#else  /* HAVE_POSIX_MEMALIGN */
  void *p;
  return posix_memalign (&p, alignment, size) == 0 ? p : 0;
#endif
}
#endif

void *
hybrid_realloc (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, oldsize;

  if (!ptr)
    return hybrid_malloc (size);
  if (!allocated_via_gmalloc (ptr))
    return realloc (ptr, size);
  if (!DUMPED)
    return grealloc (ptr, size);

  /* The dumped emacs is trying to realloc storage allocated before
     dumping via gmalloc.  Allocate new space and copy the data.  Do
     not bother with gfree (ptr), as that would just waste time.  */
  block = BLOCK (ptr);
  type = _heapinfo[block].busy.type;
  oldsize =
    type < 0 ? _heapinfo[block].busy.info.size * BLOCKSIZE
    : (size_t) 1 << type;
  result = malloc (size);
  if (result)
    return memcpy (result, ptr, min (oldsize, size));
  return result;
}

#else	/* ! HYBRID_MALLOC */

void *
malloc (size_t size)
{
  return gmalloc (size);
}

void *
calloc (size_t nmemb, size_t size)
{
  return gcalloc (nmemb, size);
}

void
free (void *ptr)
{
  gfree (ptr);
}

void *
aligned_alloc (size_t alignment, size_t size)
{
  return galigned_alloc (alignment, size);
}

void *
realloc (void *ptr, size_t size)
{
  return grealloc (ptr, size);
}

#endif	/* HYBRID_MALLOC */

#ifdef GC_MCHECK

/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <https://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#include <stdio.h>

/* Old hook values.  */
static void (*old_free_hook) (void *ptr);
static void *(*old_malloc_hook) (size_t size);
static void *(*old_realloc_hook) (void *ptr, size_t size);

/* Function to call when something awful happens.  */
static void (*abortfunc) (enum mcheck_status);

/* Arbitrary magical numbers.  */
#define MAGICWORD	(SIZE_MAX / 11 ^ SIZE_MAX / 13 << 3)
#define MAGICFREE	(SIZE_MAX / 17 ^ SIZE_MAX / 19 << 4)
#define MAGICBYTE	((char) 0xd7)
#define MALLOCFLOOD	((char) 0x93)
#define FREEFLOOD	((char) 0x95)

struct hdr
{
  size_t size;	/* Exact size requested by user.  */
  size_t magic;	/* Magic number to check header integrity.  */
};
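
/* Layout sketch (editorial): each checked allocation of SIZE bytes is
   stored as

       [struct hdr][SIZE user bytes][one trailing MAGICBYTE]

   so a clobbered header trips MCHECK_HEAD via the magic word, and an
   overrun past the user area trips MCHECK_TAIL via the trailing byte.  */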

static enum mcheck_status
checkhdr (const struct hdr *hdr)
{
  enum mcheck_status status;
  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
	status = MCHECK_TAIL;
      else
	status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}

static void
freehook (void *ptr)
{
  struct hdr *hdr;

  if (ptr)
    {
      struct alignlist *l;

      /* If the block was allocated by aligned_alloc, its real pointer
	 to free is recorded in _aligned_blocks; find that.  */
      PROTECT_MALLOC_STATE (0);
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == ptr)
	  {
	    l->aligned = NULL;	/* Mark the slot in the list as free.  */
	    ptr = l->exact;
	    break;
	  }
      UNLOCK_ALIGNED_BLOCKS ();
      PROTECT_MALLOC_STATE (1);

      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      memset (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  gfree_hook = old_free_hook;
  free (hdr);
  gfree_hook = freehook;
}

static void *
mallochook (size_t size)
{
  struct hdr *hdr;

  gmalloc_hook = old_malloc_hook;
  hdr = malloc (sizeof *hdr + size + 1);
  gmalloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  return memset (hdr + 1, MALLOCFLOOD, size);
}

static void *
reallochook (void *ptr, size_t size)
{
  struct hdr *hdr = NULL;
  size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
	memset ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  gfree_hook = old_free_hook;
  gmalloc_hook = old_malloc_hook;
  grealloc_hook = old_realloc_hook;
  hdr = realloc (hdr, sizeof *hdr + size + 1);
  gfree_hook = freehook;
  gmalloc_hook = mallochook;
  grealloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    memset ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return hdr + 1;
}

static void
mabort (enum mcheck_status status)
{
  const char *msg;
  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
# ifdef emacs
  emacs_abort ();
# else
  abort ();
# endif
#endif
}

static int mcheck_used = 0;

int
mcheck (void (*func) (enum mcheck_status))
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = gfree_hook;
      gfree_hook = freehook;
      old_malloc_hook = gmalloc_hook;
      gmalloc_hook = mallochook;
      old_realloc_hook = grealloc_hook;
      grealloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}

enum mcheck_status
mprobe (void *ptr)
{
  return mcheck_used ? checkhdr (ptr) : MCHECK_DISABLED;
}

#endif /* GC_MCHECK */