1 /* Declarations for `malloc' and friends.
2 Copyright (C) 1990-1993, 1995-1996, 1999, 2002-2007, 2013-2019 Free
3 Software Foundation, Inc.
4 Written May 1989 by Mike Haertel.
6 This library is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License as
8 published by the Free Software Foundation; either version 2 of the
9 License, or (at your option) any later version.
11 This library is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
16 You should have received a copy of the GNU General Public
17 License along with this library. If not, see <https://www.gnu.org/licenses/>.
19 The author may be reached (Email) at the address mike@ai.mit.edu,
20 or (US mail) as Mike Haertel c/o Free Software Foundation. */
24 #if defined HAVE_PTHREAD && !defined HYBRID_MALLOC
44 # if GNUC_PREREQ (4, 2, 0)
45 # pragma GCC diagnostic ignored "-Wdeprecated-declarations"
49 #ifndef __MALLOC_HOOK_VOLATILE
50 # define __MALLOC_HOOK_VOLATILE volatile
53 extern void (*__MALLOC_HOOK_VOLATILE __after_morecore_hook
) (void);
54 extern void (*__MALLOC_HOOK_VOLATILE __malloc_initialize_hook
) (void);
55 extern void *(*__morecore
) (ptrdiff_t);
58 /* If HYBRID_MALLOC is defined, then temacs will use malloc,
59 realloc... as defined in this file (and renamed gmalloc,
60 grealloc... via the macros that follow). The dumped emacs,
61 however, will use the system malloc, realloc.... In other source
62 files, malloc, realloc... are renamed hybrid_malloc,
63 hybrid_realloc... via macros in conf_post.h. hybrid_malloc and
64 friends are wrapper functions defined later in this file. */
/* Rename this file's allocator entry points to g-prefixed symbols so
   temacs can use them while the dumped Emacs links against the system
   malloc (see the HYBRID_MALLOC comment above).  */
70 #define malloc gmalloc
71 #define realloc grealloc
72 #define calloc gcalloc
73 #define aligned_alloc galigned_alloc
75 #define malloc_info gmalloc_info
79 # define DUMPED bss_sbrk_did_unexec
/* Prototypes for the allocator entry points.  The ATTRIBUTE_*_SIZE
   markers presumably wrap GCC's alloc_size attribute, naming the
   argument(s) that carry the allocation size — confirm in conf_post.h.  */
91 /* Allocate SIZE bytes of memory. */
92 extern void *malloc (size_t size
) ATTRIBUTE_MALLOC_SIZE ((1));
93 /* Re-allocate the previously allocated block
94 in ptr, making the new block SIZE bytes long. */
95 extern void *realloc (void *ptr
, size_t size
) ATTRIBUTE_ALLOC_SIZE ((2));
96 /* Allocate NMEMB elements of SIZE bytes each, all initialized to 0. */
97 extern void *calloc (size_t nmemb
, size_t size
) ATTRIBUTE_MALLOC_SIZE ((1,2));
/* Free a block previously returned by malloc, realloc or calloc.  */
99 extern void free (void *ptr
);
101 /* Allocate SIZE bytes aligned to a multiple of ALIGNMENT. */
102 extern void *aligned_alloc (size_t, size_t);
/* Legacy alias taking (alignment, size), kept for old callers.  */
104 extern void *memalign (size_t, size_t);
/* POSIX-style aligned allocation: stores the new block through the
   first argument and returns 0 on success or an errno value.  */
105 extern int posix_memalign (void **, size_t, size_t);
108 /* The allocator divides the heap into blocks of fixed size; large
109 requests receive one or more whole blocks, and small requests
110 receive a fragment of a block. Fragment sizes are powers of two,
111 and all fragments of a block are the same size. When all the
112 fragments in a block have been freed, the block itself is freed. */
/* Log base 2 of the block size: 4 KiB blocks normally, 512-byte
   blocks on hosts where int is only 16 bits wide.  */
113 #define BLOCKLOG (INT_WIDTH > 16 ? 12 : 9)
/* Bytes per heap block.  */
114 #define BLOCKSIZE (1 << BLOCKLOG)
/* Round a byte count SIZE up to a whole number of blocks.  */
115 #define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
117 /* Determine the amount of memory spanned by the initial heap table
118 (not an absolute limit). */
119 #define HEAP (INT_WIDTH > 16 ? 4194304 : 65536) /* 4 MiB, or 64 KiB when int is 16 bits.  */
121 /* Number of contiguous free blocks allowed to build up at the end of
122 memory before they will be returned to the system. */
123 #define FINAL_FREE_BLOCKS 8
125 /* Data structure giving per-block information. */
128 /* Heap information for a busy block. */
131 /* Zero for a block that is not one of ours (typically,
132 allocated by system malloc), positive for the log base 2 of
133 the fragment size of a fragmented block, -1 for the first
134 block of a multiblock object, and unspecified for later
135 blocks of that object. Type-0 blocks can be present
136 because the system malloc can be invoked by library
137 functions in an undumped Emacs. */
143 size_t nfree
; /* Free frags in a fragmented block. */
144 size_t first
; /* First free fragment of the block. */
146 /* For a large object, in its first block, this has the number
147 of blocks in the object. */
151 /* Heap information for a free block
152 (that may be the first of a free cluster). */
155 size_t size
; /* Size (in blocks) of a free cluster. */
156 size_t next
; /* Index of next free cluster. */
157 size_t prev
; /* Index of previous free cluster. */
161 /* Pointer to first block of the heap. */
162 extern char *_heapbase
;
164 /* Table indexed by block number giving per-block information. */
165 extern malloc_info
*_heapinfo
;
167 /* Address to block number and vice versa. */
/* Convert heap address A to its 1-based index into _heapinfo.
   Only meaningful for addresses at or above _heapbase.  */
168 #define BLOCK(A) ((size_t) ((char *) (A) - _heapbase) / BLOCKSIZE + 1)
/* Inverse of BLOCK: convert a 1-based block index B back to an address.  */
169 #define ADDRESS(B) ((void *) (((B) - 1) * BLOCKSIZE + _heapbase))
171 /* Current search index for the heap table. */
172 extern size_t _heapindex
;
174 /* Limit of valid info table indices. */
175 extern size_t _heaplimit
;
177 /* Doubly linked lists of free fragments. */
184 /* Free list headers for each fragment size. */
185 static struct list _fraghead
[BLOCKLOG
];
187 /* List of blocks allocated with aligned_alloc and friends. */
190 struct alignlist
*next
;
191 void *aligned
; /* The address that aligned_alloc returned. */
192 void *exact
; /* The address that malloc returned. */
194 extern struct alignlist
*_aligned_blocks
;
196 /* Instrumentation. */
/* Count of chunks currently allocated to the user.  */
197 extern size_t _chunks_used
;
/* Byte total of user-allocated chunks.  */
198 extern size_t _bytes_used
;
/* Count of chunks on the free lists.  */
199 extern size_t _chunks_free
;
/* Byte total of chunks on the free lists.  */
200 extern size_t _bytes_free
;
202 /* Internal versions of `malloc', `realloc', and `free'
203 used when these functions need to call each other.
204 They are the same but don't call the hooks. */
205 extern void *_malloc_internal (size_t);
206 extern void *_realloc_internal (void *, size_t);
207 extern void _free_internal (void *);
/* The _nolock variants additionally skip the malloc mutex; the caller
   is expected to already hold it (see the LOCK/UNLOCK macros below).  */
208 extern void *_malloc_internal_nolock (size_t);
209 extern void *_realloc_internal_nolock (void *, size_t);
210 extern void _free_internal_nolock (void *);
213 extern pthread_mutex_t _malloc_mutex
, _aligned_blocks_mutex
;
214 extern int _malloc_thread_enabled_p
;
217 if (_malloc_thread_enabled_p) \
218 pthread_mutex_lock (&_malloc_mutex); \
222 if (_malloc_thread_enabled_p) \
223 pthread_mutex_unlock (&_malloc_mutex); \
225 #define LOCK_ALIGNED_BLOCKS() \
227 if (_malloc_thread_enabled_p) \
228 pthread_mutex_lock (&_aligned_blocks_mutex); \
230 #define UNLOCK_ALIGNED_BLOCKS() \
232 if (_malloc_thread_enabled_p) \
233 pthread_mutex_unlock (&_aligned_blocks_mutex); \
/* Single-threaded build: the aligned-blocks list needs no locking,
   so these expand to nothing.  */
238 #define LOCK_ALIGNED_BLOCKS()
239 #define UNLOCK_ALIGNED_BLOCKS()
242 /* Nonzero if `malloc' has been called and done its initialization. */
243 extern int __malloc_initialized
;
244 /* Function called to initialize malloc data structures. */
245 extern int __malloc_initialize (void);
249 /* Return values for `mprobe': these are the kinds of inconsistencies that
250 `mcheck' enables detection of. */
253 MCHECK_DISABLED
= -1, /* Consistency checking is not turned on. */
254 MCHECK_OK
, /* Block is fine. */
255 MCHECK_FREE
, /* Block freed twice. */
256 MCHECK_HEAD
, /* Memory before the block was clobbered. */
257 MCHECK_TAIL
/* Memory after the block was clobbered. */
260 /* Activate a standard collection of debugging hooks. This must be called
261 before `malloc' is ever called. ABORTFUNC is called with an error code
262 (see enum above) when an inconsistency is detected. If ABORTFUNC is
263 null, the standard function prints on stderr and then calls `abort'. */
264 extern int mcheck (void (*abortfunc
) (enum mcheck_status
));
266 /* Check for aberrations in a particular malloc'd block. You must have
267 called `mcheck' already. These are the same checks that `mcheck' does
268 when you free or reallocate a block. */
269 extern enum mcheck_status
mprobe (void *ptr
);
271 /* Activate a standard collection of tracing hooks. */
272 extern void mtrace (void);
273 extern void muntrace (void);
275 /* Statistics available to the user. */
278 size_t bytes_total
; /* Total size of the heap. */
279 size_t chunks_used
; /* Chunks allocated by the user. */
280 size_t bytes_used
; /* Byte total of user-allocated chunks. */
281 size_t chunks_free
; /* Chunks in the free list. */
282 size_t bytes_free
; /* Byte total of chunks in the free list. */
285 /* Pick up the current statistics. */
286 extern struct mstats
mstats (void);
296 /* Memory allocator `malloc'.
297 Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
298 Written May 1989 by Mike Haertel.
300 This library is free software; you can redistribute it and/or
301 modify it under the terms of the GNU General Public License as
302 published by the Free Software Foundation; either version 2 of the
303 License, or (at your option) any later version.
305 This library is distributed in the hope that it will be useful,
306 but WITHOUT ANY WARRANTY; without even the implied warranty of
307 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
308 General Public License for more details.
310 You should have received a copy of the GNU General Public
311 License along with this library. If not, see <https://www.gnu.org/licenses/>.
313 The author may be reached (Email) at the address mike@ai.mit.edu,
314 or (US mail) as Mike Haertel c/o Free Software Foundation. */
318 /* Debugging hook for 'malloc'. */
319 static void *(*__MALLOC_HOOK_VOLATILE gmalloc_hook
) (size_t);
321 /* Replacements for traditional glibc malloc hooks, for platforms that
322 do not already have these hooks. Platforms with these hooks all
323 used relaxed ref/def, so it is OK to define them here too. */
324 void (*__MALLOC_HOOK_VOLATILE __malloc_initialize_hook
) (void);
325 void (*__MALLOC_HOOK_VOLATILE __after_morecore_hook
) (void);
326 void *(*__morecore
) (ptrdiff_t);
328 #ifndef HYBRID_MALLOC
330 /* Pointer to the base of the first block. */
333 /* Block information table. Allocated with align/__free (not malloc/free). */
334 malloc_info
*_heapinfo
;
336 /* Search index in the info table. */
339 /* Limit of valid info table indices. */
342 /* Instrumentation. */
348 /* Are you experienced? */
349 int __malloc_initialized
;
351 #endif /* HYBRID_MALLOC */
353 /* Number of extra blocks to get each time we ask for more core.
354 This reduces the frequency of calling `(*__morecore)'. */
355 #if defined DOUG_LEA_MALLOC || defined HYBRID_MALLOC || defined SYSTEM_MALLOC
358 size_t __malloc_extra_blocks
;
360 /* Number of info entries. */
361 static size_t heapsize
;
363 #if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE
365 /* Some code for hunting a bug writing into _heapinfo.
367 Call this macro with argument PROT non-zero to protect internal
368 malloc state against writing to it, call it with a zero argument to
369 make it readable and writable.
371 Note that this only works if BLOCKSIZE == page size, which is
372 the case on the i386. */
374 #include <sys/types.h>
375 #include <sys/mman.h>
377 static int state_protected_p
;
378 static size_t last_state_size
;
379 static malloc_info
*last_heapinfo
;
382 protect_malloc_state (int protect_p
)
384 /* If _heapinfo has been relocated, make sure its old location
385 isn't left read-only; it will be reused by malloc. */
386 if (_heapinfo
!= last_heapinfo
388 && state_protected_p
)
389 mprotect (last_heapinfo
, last_state_size
, PROT_READ
| PROT_WRITE
);
391 last_state_size
= _heaplimit
* sizeof *_heapinfo
;
392 last_heapinfo
= _heapinfo
;
394 if (protect_p
!= state_protected_p
)
396 state_protected_p
= protect_p
;
397 if (mprotect (_heapinfo
, last_state_size
,
398 protect_p
? PROT_READ
: PROT_READ
| PROT_WRITE
) != 0)
/* Debug build: toggle mprotect-based write protection of the malloc
   bookkeeping tables (see protect_malloc_state above).  */
403 #define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)
/* Normal build: no-op.  */
406 #define PROTECT_MALLOC_STATE(PROT) /* empty */
410 /* Aligned allocation. */
417 /* align accepts an unsigned argument, but __morecore accepts a
418 signed one. This could lead to trouble if SIZE overflows the
419 ptrdiff_t type accepted by __morecore. We just punt in that
420 case, since they are requesting a ludicrous amount anyway. */
421 if (PTRDIFF_MAX
< size
)
424 result
= (*__morecore
) (size
);
425 adj
= (uintptr_t) result
% BLOCKSIZE
;
428 adj
= BLOCKSIZE
- adj
;
430 result
= (char *) result
+ adj
;
433 if (__after_morecore_hook
)
434 (*__after_morecore_hook
) ();
439 /* Get SIZE bytes, if we can get them starting at END.
440 Return the address of the space we got.
441 If we cannot get space at END, fail and return 0. */
443 get_contiguous_space (ptrdiff_t size
, void *position
)
448 before
= (*__morecore
) (0);
449 /* If we can tell in advance that the break is at the wrong place,
451 if (before
!= position
)
454 /* Allocate SIZE bytes and get the address of them. */
455 after
= (*__morecore
) (size
);
459 /* It was not contiguous--reject it. */
460 if (after
!= position
)
462 (*__morecore
) (- size
);
470 /* This is called when `_heapinfo' and `heapsize' have just
471 been set to describe a new info table. Set up the table
472 to describe itself and account for it in the statistics. */
474 register_heapinfo (void)
476 size_t block
, blocks
;
478 block
= BLOCK (_heapinfo
);
479 blocks
= BLOCKIFY (heapsize
* sizeof (malloc_info
));
481 /* Account for the _heapinfo block itself in the statistics. */
482 _bytes_used
+= blocks
* BLOCKSIZE
;
485 /* Describe the heapinfo block itself in the heapinfo. */
486 _heapinfo
[block
].busy
.type
= -1;
487 _heapinfo
[block
].busy
.info
.size
= blocks
;
491 pthread_mutex_t _malloc_mutex
= PTHREAD_MUTEX_INITIALIZER
;
492 pthread_mutex_t _aligned_blocks_mutex
= PTHREAD_MUTEX_INITIALIZER
;
493 int _malloc_thread_enabled_p
;
496 malloc_atfork_handler_prepare (void)
499 LOCK_ALIGNED_BLOCKS ();
503 malloc_atfork_handler_parent (void)
505 UNLOCK_ALIGNED_BLOCKS ();
510 malloc_atfork_handler_child (void)
512 UNLOCK_ALIGNED_BLOCKS ();
516 /* Set up mutexes and make malloc etc. thread-safe. */
518 malloc_enable_thread (void)
520 if (_malloc_thread_enabled_p
)
523 /* Some pthread implementations call malloc for statically
524 initialized mutexes when they are used first. To avoid such a
525 situation, we initialize mutexes here while their use is
526 disabled in malloc etc. */
527 pthread_mutex_init (&_malloc_mutex
, NULL
);
528 pthread_mutex_init (&_aligned_blocks_mutex
, NULL
);
529 pthread_atfork (malloc_atfork_handler_prepare
,
530 malloc_atfork_handler_parent
,
531 malloc_atfork_handler_child
);
532 _malloc_thread_enabled_p
= 1;
534 #endif /* USE_PTHREAD */
537 malloc_initialize_1 (void)
543 if (__malloc_initialize_hook
)
544 (*__malloc_initialize_hook
) ();
546 heapsize
= HEAP
/ BLOCKSIZE
;
547 _heapinfo
= align (heapsize
* sizeof (malloc_info
));
548 if (_heapinfo
== NULL
)
550 memset (_heapinfo
, 0, heapsize
* sizeof (malloc_info
));
551 _heapinfo
[0].free
.size
= 0;
552 _heapinfo
[0].free
.next
= _heapinfo
[0].free
.prev
= 0;
554 _heapbase
= (char *) _heapinfo
;
555 _heaplimit
= BLOCK (_heapbase
+ heapsize
* sizeof (malloc_info
));
557 register_heapinfo ();
559 __malloc_initialized
= 1;
560 PROTECT_MALLOC_STATE (1);
564 /* Set everything up and remember that we have.
565 main will call malloc which calls this function. That is before any threads
566 or signal handlers has been set up, so we don't need thread protection. */
568 __malloc_initialize (void)
570 if (__malloc_initialized
)
573 malloc_initialize_1 ();
575 return __malloc_initialized
;
578 static int morecore_recursing
;
580 /* Get neatly aligned memory, initializing or
581 growing the heap info table as necessary. */
583 morecore_nolock (size_t size
)
586 malloc_info
*newinfo
, *oldinfo
;
589 if (morecore_recursing
)
590 /* Avoid recursion. The caller will know how to handle a null return. */
593 result
= align (size
);
597 PROTECT_MALLOC_STATE (0);
599 /* Check if we need to grow the info table. */
600 if (heapsize
< BLOCK ((char *) result
+ size
))
602 /* Calculate the new _heapinfo table size. We do not account for the
603 added blocks in the table itself, as we hope to place them in
604 existing free space, which is already covered by part of the
609 while (newsize
< BLOCK ((char *) result
+ size
));
611 /* We must not reuse existing core for the new info table when called
612 from realloc in the case of growing a large block, because the
613 block being grown is momentarily marked as free. In this case
614 _heaplimit is zero so we know not to reuse space for internal
618 /* First try to allocate the new info table in core we already
619 have, in the usual way using realloc. If realloc cannot
620 extend it in place or relocate it to existing sufficient core,
621 we will get called again, and the code above will notice the
622 `morecore_recursing' flag and return null. */
623 int save
= errno
; /* Don't want to clobber errno with ENOMEM. */
624 morecore_recursing
= 1;
625 newinfo
= _realloc_internal_nolock (_heapinfo
,
626 newsize
* sizeof (malloc_info
));
627 morecore_recursing
= 0;
632 /* We found some space in core, and realloc has put the old
633 table's blocks on the free list. Now zero the new part
634 of the table and install the new table location. */
635 memset (&newinfo
[heapsize
], 0,
636 (newsize
- heapsize
) * sizeof (malloc_info
));
643 /* Allocate new space for the malloc info table. */
646 newinfo
= align (newsize
* sizeof (malloc_info
));
651 (*__morecore
) (-size
);
655 /* Is it big enough to record status for its own space?
657 if (BLOCK ((char *) newinfo
+ newsize
* sizeof (malloc_info
))
661 /* Must try again. First give back most of what we just got. */
662 (*__morecore
) (- newsize
* sizeof (malloc_info
));
666 /* Copy the old table to the beginning of the new,
667 and zero the rest of the new table. */
668 memcpy (newinfo
, _heapinfo
, heapsize
* sizeof (malloc_info
));
669 memset (&newinfo
[heapsize
], 0,
670 (newsize
- heapsize
) * sizeof (malloc_info
));
675 register_heapinfo ();
677 /* Reset _heaplimit so _free_internal never decides
678 it can relocate or resize the info table. */
680 _free_internal_nolock (oldinfo
);
681 PROTECT_MALLOC_STATE (0);
683 /* The new heap limit includes the new table just allocated. */
684 _heaplimit
= BLOCK ((char *) newinfo
+ heapsize
* sizeof (malloc_info
));
689 _heaplimit
= BLOCK ((char *) result
+ size
);
693 /* Allocate memory from the heap. */
695 _malloc_internal_nolock (size_t size
)
698 size_t block
, blocks
, lastblocks
, start
;
702 /* ANSI C allows `malloc (0)' to either return NULL, or to return a
703 valid address you can realloc and free (though not dereference).
705 It turns out that some extant code (sunrpc, at least Ultrix's version)
706 expects `malloc (0)' to return non-NULL and breaks otherwise.
714 PROTECT_MALLOC_STATE (0);
716 if (size
< sizeof (struct list
))
717 size
= sizeof (struct list
);
719 /* Determine the allocation policy based on the request size. */
720 if (size
<= BLOCKSIZE
/ 2)
722 /* Small allocation to receive a fragment of a block.
723 Determine the logarithm to base two of the fragment size. */
724 register size_t log
= 1;
726 while ((size
/= 2) != 0)
729 /* Look in the fragment lists for a
730 free fragment of the desired size. */
731 next
= _fraghead
[log
].next
;
734 /* There are free fragments of this size.
735 Pop a fragment out of the fragment list and return it.
736 Update the block's nfree and first counters. */
738 next
->prev
->next
= next
->next
;
739 if (next
->next
!= NULL
)
740 next
->next
->prev
= next
->prev
;
741 block
= BLOCK (result
);
742 if (--_heapinfo
[block
].busy
.info
.frag
.nfree
!= 0)
743 _heapinfo
[block
].busy
.info
.frag
.first
=
744 (uintptr_t) next
->next
% BLOCKSIZE
>> log
;
746 /* Update the statistics. */
748 _bytes_used
+= 1 << log
;
750 _bytes_free
-= 1 << log
;
754 /* No free fragments of the desired size, so get a new block
755 and break it into fragments, returning the first. */
756 #ifdef GC_MALLOC_CHECK
757 result
= _malloc_internal_nolock (BLOCKSIZE
);
758 PROTECT_MALLOC_STATE (0);
759 #elif defined (USE_PTHREAD)
760 result
= _malloc_internal_nolock (BLOCKSIZE
);
762 result
= malloc (BLOCKSIZE
);
766 PROTECT_MALLOC_STATE (1);
770 /* Link all fragments but the first into the free list. */
771 next
= (struct list
*) ((char *) result
+ (1 << log
));
773 next
->prev
= &_fraghead
[log
];
774 _fraghead
[log
].next
= next
;
776 for (i
= 2; i
< (size_t) (BLOCKSIZE
>> log
); ++i
)
778 next
= (struct list
*) ((char *) result
+ (i
<< log
));
779 next
->next
= _fraghead
[log
].next
;
780 next
->prev
= &_fraghead
[log
];
781 next
->prev
->next
= next
;
782 next
->next
->prev
= next
;
785 /* Initialize the nfree and first counters for this block. */
786 block
= BLOCK (result
);
787 _heapinfo
[block
].busy
.type
= log
;
788 _heapinfo
[block
].busy
.info
.frag
.nfree
= i
- 1;
789 _heapinfo
[block
].busy
.info
.frag
.first
= i
- 1;
791 _chunks_free
+= (BLOCKSIZE
>> log
) - 1;
792 _bytes_free
+= BLOCKSIZE
- (1 << log
);
793 _bytes_used
-= BLOCKSIZE
- (1 << log
);
798 /* Large allocation to receive one or more blocks.
799 Search the free list in a circle starting at the last place visited.
800 If we loop completely around without finding a large enough
801 space we will have to get more memory from the system. */
802 blocks
= BLOCKIFY (size
);
803 start
= block
= _heapindex
;
804 while (_heapinfo
[block
].free
.size
< blocks
)
806 block
= _heapinfo
[block
].free
.next
;
809 /* Need to get more from the system. Get a little extra. */
810 size_t wantblocks
= blocks
+ __malloc_extra_blocks
;
811 block
= _heapinfo
[0].free
.prev
;
812 lastblocks
= _heapinfo
[block
].free
.size
;
813 /* Check to see if the new core will be contiguous with the
814 final free block; if so we don't need to get as much. */
815 if (_heaplimit
!= 0 && block
+ lastblocks
== _heaplimit
&&
816 /* We can't do this if we will have to make the heap info
817 table bigger to accommodate the new space. */
818 block
+ wantblocks
<= heapsize
&&
819 get_contiguous_space ((wantblocks
- lastblocks
) * BLOCKSIZE
,
820 ADDRESS (block
+ lastblocks
)))
822 /* We got it contiguously. Which block we are extending
823 (the `final free block' referred to above) might have
824 changed, if it got combined with a freed info table. */
825 block
= _heapinfo
[0].free
.prev
;
826 _heapinfo
[block
].free
.size
+= (wantblocks
- lastblocks
);
827 _bytes_free
+= (wantblocks
- lastblocks
) * BLOCKSIZE
;
828 _heaplimit
+= wantblocks
- lastblocks
;
831 result
= morecore_nolock (wantblocks
* BLOCKSIZE
);
834 block
= BLOCK (result
);
835 /* Put the new block at the end of the free list. */
836 _heapinfo
[block
].free
.size
= wantblocks
;
837 _heapinfo
[block
].free
.prev
= _heapinfo
[0].free
.prev
;
838 _heapinfo
[block
].free
.next
= 0;
839 _heapinfo
[0].free
.prev
= block
;
840 _heapinfo
[_heapinfo
[block
].free
.prev
].free
.next
= block
;
842 /* Now loop to use some of that block for this allocation. */
846 /* At this point we have found a suitable free list entry.
847 Figure out how to remove what we need from the list. */
848 result
= ADDRESS (block
);
849 if (_heapinfo
[block
].free
.size
> blocks
)
851 /* The block we found has a bit left over,
852 so relink the tail end back into the free list. */
853 _heapinfo
[block
+ blocks
].free
.size
854 = _heapinfo
[block
].free
.size
- blocks
;
855 _heapinfo
[block
+ blocks
].free
.next
856 = _heapinfo
[block
].free
.next
;
857 _heapinfo
[block
+ blocks
].free
.prev
858 = _heapinfo
[block
].free
.prev
;
859 _heapinfo
[_heapinfo
[block
].free
.prev
].free
.next
860 = _heapinfo
[_heapinfo
[block
].free
.next
].free
.prev
861 = _heapindex
= block
+ blocks
;
865 /* The block exactly matches our requirements,
866 so just remove it from the list. */
867 _heapinfo
[_heapinfo
[block
].free
.next
].free
.prev
868 = _heapinfo
[block
].free
.prev
;
869 _heapinfo
[_heapinfo
[block
].free
.prev
].free
.next
870 = _heapindex
= _heapinfo
[block
].free
.next
;
874 _heapinfo
[block
].busy
.type
= -1;
875 _heapinfo
[block
].busy
.info
.size
= blocks
;
877 _bytes_used
+= blocks
* BLOCKSIZE
;
878 _bytes_free
-= blocks
* BLOCKSIZE
;
881 PROTECT_MALLOC_STATE (1);
887 _malloc_internal (size_t size
)
892 result
= _malloc_internal_nolock (size
);
901 void *(*hook
) (size_t);
903 if (!__malloc_initialized
&& !__malloc_initialize ())
906 /* Copy the value of gmalloc_hook to an automatic variable in case
907 gmalloc_hook is modified in another thread between its
908 NULL-check and the use.
910 Note: Strictly speaking, this is not a right solution. We should
911 use mutexes to access non-read-only variables that are shared
912 among multiple threads. We just leave it for compatibility with
913 glibc malloc (i.e., assignments to gmalloc_hook) for now. */
915 return (hook
!= NULL
? *hook
: _malloc_internal
) (size
);
918 #if !(defined (_LIBC) || defined (HYBRID_MALLOC))
920 /* On some ANSI C systems, some libc functions call _malloc, _free
921 and _realloc. Make them use the GNU functions. */
923 extern void *_malloc (size_t);
924 extern void _free (void *);
925 extern void *_realloc (void *, size_t);
928 _malloc (size_t size
)
930 return malloc (size
);
940 _realloc (void *ptr
, size_t size
)
942 return realloc (ptr
, size
);
946 /* Free a block of memory allocated by `malloc'.
947 Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
948 Written May 1989 by Mike Haertel.
950 This library is free software; you can redistribute it and/or
951 modify it under the terms of the GNU General Public License as
952 published by the Free Software Foundation; either version 2 of the
953 License, or (at your option) any later version.
955 This library is distributed in the hope that it will be useful,
956 but WITHOUT ANY WARRANTY; without even the implied warranty of
957 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
958 General Public License for more details.
960 You should have received a copy of the GNU General Public
961 License along with this library. If not, see <https://www.gnu.org/licenses/>.
963 The author may be reached (Email) at the address mike@ai.mit.edu,
964 or (US mail) as Mike Haertel c/o Free Software Foundation. */
966 /* Debugging hook for free. */
967 static void (*__MALLOC_HOOK_VOLATILE gfree_hook
) (void *);
969 #ifndef HYBRID_MALLOC
971 /* List of blocks allocated by aligned_alloc. */
972 struct alignlist
*_aligned_blocks
= NULL
;
975 /* Return memory to the heap.
976 Like `_free_internal' but don't lock mutex. */
978 _free_internal_nolock (void *ptr
)
981 size_t block
, blocks
;
983 struct list
*prev
, *next
;
985 const size_t lesscore_threshold
986 /* Threshold of free space at which we will return some to the system. */
987 = FINAL_FREE_BLOCKS
+ 2 * __malloc_extra_blocks
;
989 register struct alignlist
*l
;
994 PROTECT_MALLOC_STATE (0);
996 LOCK_ALIGNED_BLOCKS ();
997 for (l
= _aligned_blocks
; l
!= NULL
; l
= l
->next
)
998 if (l
->aligned
== ptr
)
1000 l
->aligned
= NULL
; /* Mark the slot in the list as free. */
1004 UNLOCK_ALIGNED_BLOCKS ();
1006 block
= BLOCK (ptr
);
1008 type
= _heapinfo
[block
].busy
.type
;
1012 /* Get as many statistics as early as we can. */
1014 _bytes_used
-= _heapinfo
[block
].busy
.info
.size
* BLOCKSIZE
;
1015 _bytes_free
+= _heapinfo
[block
].busy
.info
.size
* BLOCKSIZE
;
1017 /* Find the free cluster previous to this one in the free list.
1018 Start searching at the last block referenced; this may benefit
1019 programs with locality of allocation. */
1023 i
= _heapinfo
[i
].free
.prev
;
1027 i
= _heapinfo
[i
].free
.next
;
1028 while (i
> 0 && i
< block
);
1029 i
= _heapinfo
[i
].free
.prev
;
1032 /* Determine how to link this block into the free list. */
1033 if (block
== i
+ _heapinfo
[i
].free
.size
)
1035 /* Coalesce this block with its predecessor. */
1036 _heapinfo
[i
].free
.size
+= _heapinfo
[block
].busy
.info
.size
;
1041 /* Really link this block back into the free list. */
1042 _heapinfo
[block
].free
.size
= _heapinfo
[block
].busy
.info
.size
;
1043 _heapinfo
[block
].free
.next
= _heapinfo
[i
].free
.next
;
1044 _heapinfo
[block
].free
.prev
= i
;
1045 _heapinfo
[i
].free
.next
= block
;
1046 _heapinfo
[_heapinfo
[block
].free
.next
].free
.prev
= block
;
1050 /* Now that the block is linked in, see if we can coalesce it
1051 with its successor (by deleting its successor from the list
1052 and adding in its size). */
1053 if (block
+ _heapinfo
[block
].free
.size
== _heapinfo
[block
].free
.next
)
1055 _heapinfo
[block
].free
.size
1056 += _heapinfo
[_heapinfo
[block
].free
.next
].free
.size
;
1057 _heapinfo
[block
].free
.next
1058 = _heapinfo
[_heapinfo
[block
].free
.next
].free
.next
;
1059 _heapinfo
[_heapinfo
[block
].free
.next
].free
.prev
= block
;
1063 /* How many trailing free blocks are there now? */
1064 blocks
= _heapinfo
[block
].free
.size
;
1066 /* Where is the current end of accessible core? */
1067 curbrk
= (*__morecore
) (0);
1069 if (_heaplimit
!= 0 && curbrk
== ADDRESS (_heaplimit
))
1071 /* The end of the malloc heap is at the end of accessible core.
1072 It's possible that moving _heapinfo will allow us to
1073 return some space to the system. */
1075 size_t info_block
= BLOCK (_heapinfo
);
1076 size_t info_blocks
= _heapinfo
[info_block
].busy
.info
.size
;
1077 size_t prev_block
= _heapinfo
[block
].free
.prev
;
1078 size_t prev_blocks
= _heapinfo
[prev_block
].free
.size
;
1079 size_t next_block
= _heapinfo
[block
].free
.next
;
1080 size_t next_blocks
= _heapinfo
[next_block
].free
.size
;
1082 if (/* Win if this block being freed is last in core, the info table
1083 is just before it, the previous free block is just before the
1084 info table, and the two free blocks together form a useful
1085 amount to return to the system. */
1086 (block
+ blocks
== _heaplimit
&&
1087 info_block
+ info_blocks
== block
&&
1088 prev_block
!= 0 && prev_block
+ prev_blocks
== info_block
&&
1089 blocks
+ prev_blocks
>= lesscore_threshold
) ||
1090 /* Nope, not the case. We can also win if this block being
1091 freed is just before the info table, and the table extends
1092 to the end of core or is followed only by a free block,
1093 and the total free space is worth returning to the system. */
1094 (block
+ blocks
== info_block
&&
1095 ((info_block
+ info_blocks
== _heaplimit
&&
1096 blocks
>= lesscore_threshold
) ||
1097 (info_block
+ info_blocks
== next_block
&&
1098 next_block
+ next_blocks
== _heaplimit
&&
1099 blocks
+ next_blocks
>= lesscore_threshold
)))
1102 malloc_info
*newinfo
;
1103 size_t oldlimit
= _heaplimit
;
1105 /* Free the old info table, clearing _heaplimit to avoid
1106 recursion into this code. We don't want to return the
1107 table's blocks to the system before we have copied them to
1108 the new location. */
1110 _free_internal_nolock (_heapinfo
);
1111 _heaplimit
= oldlimit
;
1113 /* Tell malloc to search from the beginning of the heap for
1114 free blocks, so it doesn't reuse the ones just freed. */
1117 /* Allocate new space for the info table and move its data. */
1118 newinfo
= _malloc_internal_nolock (info_blocks
* BLOCKSIZE
);
1119 PROTECT_MALLOC_STATE (0);
1120 memmove (newinfo
, _heapinfo
, info_blocks
* BLOCKSIZE
);
1121 _heapinfo
= newinfo
;
1123 /* We should now have coalesced the free block with the
1124 blocks freed from the old info table. Examine the entire
1125 trailing free block to decide below whether to return some
1127 block
= _heapinfo
[0].free
.prev
;
1128 blocks
= _heapinfo
[block
].free
.size
;
1131 /* Now see if we can return stuff to the system. */
1132 if (block
+ blocks
== _heaplimit
&& blocks
>= lesscore_threshold
)
1134 register size_t bytes
= blocks
* BLOCKSIZE
;
1135 _heaplimit
-= blocks
;
1136 (*__morecore
) (-bytes
);
1137 _heapinfo
[_heapinfo
[block
].free
.prev
].free
.next
1138 = _heapinfo
[block
].free
.next
;
1139 _heapinfo
[_heapinfo
[block
].free
.next
].free
.prev
1140 = _heapinfo
[block
].free
.prev
;
1141 block
= _heapinfo
[block
].free
.prev
;
1143 _bytes_free
-= bytes
;
1147 /* Set the next search to begin at this block. */
1152 /* Do some of the statistics. */
1154 _bytes_used
-= 1 << type
;
1156 _bytes_free
+= 1 << type
;
1158 /* Get the address of the first free fragment in this block. */
1159 prev
= (struct list
*) ((char *) ADDRESS (block
) +
1160 (_heapinfo
[block
].busy
.info
.frag
.first
<< type
));
1162 if (_heapinfo
[block
].busy
.info
.frag
.nfree
== (BLOCKSIZE
>> type
) - 1)
1164 /* If all fragments of this block are free, remove them
1165 from the fragment list and free the whole block. */
1167 for (i
= 1; i
< (size_t) (BLOCKSIZE
>> type
); ++i
)
1169 prev
->prev
->next
= next
;
1171 next
->prev
= prev
->prev
;
1172 _heapinfo
[block
].busy
.type
= -1;
1173 _heapinfo
[block
].busy
.info
.size
= 1;
1175 /* Keep the statistics accurate. */
1177 _bytes_used
+= BLOCKSIZE
;
1178 _chunks_free
-= BLOCKSIZE
>> type
;
1179 _bytes_free
-= BLOCKSIZE
;
1181 #if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
1182 _free_internal_nolock (ADDRESS (block
));
1184 free (ADDRESS (block
));
1187 else if (_heapinfo
[block
].busy
.info
.frag
.nfree
!= 0)
1189 /* If some fragments of this block are free, link this
1190 fragment into the fragment list after the first free
1191 fragment of this block. */
1193 next
->next
= prev
->next
;
1196 if (next
->next
!= NULL
)
1197 next
->next
->prev
= next
;
1198 ++_heapinfo
[block
].busy
.info
.frag
.nfree
;
1202 /* No fragments of this block are free, so link this
1203 fragment into the fragment list and announce that
1204 it is the first free fragment of this block. */
1206 _heapinfo
[block
].busy
.info
.frag
.nfree
= 1;
1207 _heapinfo
[block
].busy
.info
.frag
.first
=
1208 (uintptr_t) ptr
% BLOCKSIZE
>> type
;
1209 prev
->next
= _fraghead
[type
].next
;
1210 prev
->prev
= &_fraghead
[type
];
1211 prev
->prev
->next
= prev
;
1212 if (prev
->next
!= NULL
)
1213 prev
->next
->prev
= prev
;
1218 PROTECT_MALLOC_STATE (1);
/* Return memory to the heap.
   Like 'free' but don't call a hook if there is one.  */
void
_free_internal (void *ptr)
{
  /* Serialize against other allocator entry points, then delegate to
     the lock-free worker.  */
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}
1231 /* Return memory to the heap. */
1236 void (*hook
) (void *) = gfree_hook
;
1241 _free_internal (ptr
);
#ifndef HYBRID_MALLOC
/* Define the `cfree' alias for `free'.  */

#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (void *ptr)
{
  free (ptr);
}
#endif
#endif
1256 /* Change the size of a block allocated by `malloc'.
1257 Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
1258 Written May 1989 by Mike Haertel.
1260 This library is free software; you can redistribute it and/or
1261 modify it under the terms of the GNU General Public License as
1262 published by the Free Software Foundation; either version 2 of the
1263 License, or (at your option) any later version.
1265 This library is distributed in the hope that it will be useful,
1266 but WITHOUT ANY WARRANTY; without even the implied warranty of
1267 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1268 General Public License for more details.
1270 You should have received a copy of the GNU General Public
1271 License along with this library. If not, see <https://www.gnu.org/licenses/>.
1273 The author may be reached (Email) at the address mike@ai.mit.edu,
1274 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1277 #define min(a, b) ((a) < (b) ? (a) : (b))
1280 /* Debugging hook for realloc. */
1281 static void *(*grealloc_hook
) (void *, size_t);
1283 /* Resize the given region to the new size, returning a pointer
1284 to the (possibly moved) region. This is optimized for speed;
1285 some benchmarks seem to indicate that greater compactness is
1286 achieved by unconditionally allocating and copying to a
1287 new region. This module has incestuous knowledge of the
1288 internals of both free and malloc. */
1290 _realloc_internal_nolock (void *ptr
, size_t size
)
1294 size_t block
, blocks
, oldlimit
;
1298 _free_internal_nolock (ptr
);
1299 return _malloc_internal_nolock (0);
1301 else if (ptr
== NULL
)
1302 return _malloc_internal_nolock (size
);
1304 block
= BLOCK (ptr
);
1306 PROTECT_MALLOC_STATE (0);
1308 type
= _heapinfo
[block
].busy
.type
;
1312 /* Maybe reallocate a large block to a small fragment. */
1313 if (size
<= BLOCKSIZE
/ 2)
1315 result
= _malloc_internal_nolock (size
);
1318 memcpy (result
, ptr
, size
);
1319 _free_internal_nolock (ptr
);
1324 /* The new size is a large allocation as well;
1325 see if we can hold it in place. */
1326 blocks
= BLOCKIFY (size
);
1327 if (blocks
< _heapinfo
[block
].busy
.info
.size
)
1329 /* The new size is smaller; return
1330 excess memory to the free list. */
1331 _heapinfo
[block
+ blocks
].busy
.type
= -1;
1332 _heapinfo
[block
+ blocks
].busy
.info
.size
1333 = _heapinfo
[block
].busy
.info
.size
- blocks
;
1334 _heapinfo
[block
].busy
.info
.size
= blocks
;
1335 /* We have just created a new chunk by splitting a chunk in two.
1336 Now we will free this chunk; increment the statistics counter
1337 so it doesn't become wrong when _free_internal decrements it. */
1339 _free_internal_nolock (ADDRESS (block
+ blocks
));
1342 else if (blocks
== _heapinfo
[block
].busy
.info
.size
)
1343 /* No size change necessary. */
1347 /* Won't fit, so allocate a new region that will.
1348 Free the old region first in case there is sufficient
1349 adjacent free space to grow without moving. */
1350 blocks
= _heapinfo
[block
].busy
.info
.size
;
1351 /* Prevent free from actually returning memory to the system. */
1352 oldlimit
= _heaplimit
;
1354 _free_internal_nolock (ptr
);
1355 result
= _malloc_internal_nolock (size
);
1356 PROTECT_MALLOC_STATE (0);
1357 if (_heaplimit
== 0)
1358 _heaplimit
= oldlimit
;
1361 /* Now we're really in trouble. We have to unfree
1362 the thing we just freed. Unfortunately it might
1363 have been coalesced with its neighbors. */
1364 if (_heapindex
== block
)
1365 (void) _malloc_internal_nolock (blocks
* BLOCKSIZE
);
1369 = _malloc_internal_nolock ((block
- _heapindex
) * BLOCKSIZE
);
1370 (void) _malloc_internal_nolock (blocks
* BLOCKSIZE
);
1371 _free_internal_nolock (previous
);
1376 memmove (result
, ptr
, blocks
* BLOCKSIZE
);
1381 /* Old size is a fragment; type is logarithm
1382 to base two of the fragment size. */
1383 if (size
> (size_t) (1 << (type
- 1)) &&
1384 size
<= (size_t) (1 << type
))
1385 /* The new size is the same kind of fragment. */
1389 /* The new size is different; allocate a new space,
1390 and copy the lesser of the new size and the old. */
1391 result
= _malloc_internal_nolock (size
);
1394 memcpy (result
, ptr
, min (size
, (size_t) 1 << type
));
1395 _free_internal_nolock (ptr
);
1400 PROTECT_MALLOC_STATE (1);
/* Locking wrapper around _realloc_internal_nolock.  */
void *
_realloc_internal (void *ptr, size_t size)
{
  void *result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}
1418 realloc (void *ptr
, size_t size
)
1420 void *(*hook
) (void *, size_t);
1422 if (!__malloc_initialized
&& !__malloc_initialize ())
1425 hook
= grealloc_hook
;
1426 return (hook
!= NULL
? *hook
: _realloc_internal
) (ptr
, size
);
1428 /* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.
1430 This library is free software; you can redistribute it and/or
1431 modify it under the terms of the GNU General Public License as
1432 published by the Free Software Foundation; either version 2 of the
1433 License, or (at your option) any later version.
1435 This library is distributed in the hope that it will be useful,
1436 but WITHOUT ANY WARRANTY; without even the implied warranty of
1437 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1438 General Public License for more details.
1440 You should have received a copy of the GNU General Public
1441 License along with this library. If not, see <https://www.gnu.org/licenses/>.
1443 The author may be reached (Email) at the address mike@ai.mit.edu,
1444 or (US mail) as Mike Haertel c/o Free Software Foundation. */
/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  Returns NULL (with
   errno set to ENOMEM) if NMEMB * SIZE overflows size_t.  */
void *
calloc (size_t nmemb, size_t size)
{
  void *result;
  size_t bytes = nmemb * size;

  /* Overflow check: division undoes the multiplication only if it
     did not wrap.  */
  if (size != 0 && bytes / size != nmemb)
    {
      errno = ENOMEM;
      return NULL;
    }

  result = malloc (bytes);
  if (result)
    return memset (result, 0, bytes);
  return result;
}
1465 /* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
1466 This file is part of the GNU C Library.
1468 The GNU C Library is free software; you can redistribute it and/or modify
1469 it under the terms of the GNU General Public License as published by
1470 the Free Software Foundation; either version 2, or (at your option)
1473 The GNU C Library is distributed in the hope that it will be useful,
1474 but WITHOUT ANY WARRANTY; without even the implied warranty of
1475 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1476 GNU General Public License for more details.
1478 You should have received a copy of the GNU General Public License
1479 along with the GNU C Library. If not, see <https://www.gnu.org/licenses/>. */
1481 /* uClibc defines __GNU_LIBRARY__, but it is not completely
1483 #if !defined (__GNU_LIBRARY__) || defined (__UCLIBC__)
1485 #else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
1486 /* It is best not to declare this and cast its result on foreign operating
1487 systems with potentially hostile include files. */
1489 extern void *__sbrk (ptrdiff_t increment
);
1490 #endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
static void *
gdefault_morecore (ptrdiff_t increment)
{
#ifdef HYBRID_MALLOC
  /* Before dumping, grow the static bss arena instead of the
     process break.  */
  if (!DUMPED)
    {
      return bss_sbrk (increment);
    }
#endif
#ifdef HAVE_SBRK
  void *result = (void *) __sbrk (increment);
  if (result != (void *) -1)
    return result;
#endif
  return NULL;
}

void *(*__morecore) (ptrdiff_t) = gdefault_morecore;
1514 /* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.
1516 This library is free software; you can redistribute it and/or
1517 modify it under the terms of the GNU General Public License as
1518 published by the Free Software Foundation; either version 2 of the
1519 License, or (at your option) any later version.
1521 This library is distributed in the hope that it will be useful,
1522 but WITHOUT ANY WARRANTY; without even the implied warranty of
1523 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1524 General Public License for more details.
1526 You should have received a copy of the GNU General Public
1527 License along with this library. If not, see <https://www.gnu.org/licenses/>. */
1530 aligned_alloc (size_t alignment
, size_t size
)
1533 size_t adj
, lastadj
;
1535 /* Allocate a block with enough extra space to pad the block with up to
1536 (ALIGNMENT - 1) bytes if necessary. */
1537 if (- size
< alignment
)
1542 result
= malloc (size
+ alignment
- 1);
1546 /* Figure out how much we will need to pad this particular block
1547 to achieve the required alignment. */
1548 adj
= alignment
- (uintptr_t) result
% alignment
;
1549 if (adj
== alignment
)
1552 if (adj
!= alignment
- 1)
1556 /* Reallocate the block with only as much excess as it
1559 result
= malloc (size
+ adj
);
1560 if (result
== NULL
) /* Impossible unless interrupted. */
1564 adj
= alignment
- (uintptr_t) result
% alignment
;
1565 if (adj
== alignment
)
1567 /* It's conceivable we might have been so unlucky as to get
1568 a different block with weaker alignment. If so, this
1569 block is too short to contain SIZE after alignment
1570 correction. So we must try again and get another block,
1572 } while (adj
> lastadj
);
1577 /* Record this block in the list of aligned blocks, so that `free'
1578 can identify the pointer it is passed, which will be in the middle
1579 of an allocated block. */
1581 struct alignlist
*l
;
1582 LOCK_ALIGNED_BLOCKS ();
1583 for (l
= _aligned_blocks
; l
!= NULL
; l
= l
->next
)
1584 if (l
->aligned
== NULL
)
1585 /* This slot is free. Use it. */
1589 l
= malloc (sizeof *l
);
1592 l
->next
= _aligned_blocks
;
1593 _aligned_blocks
= l
;
1599 result
= l
->aligned
= (char *) result
+ adj
;
1601 UNLOCK_ALIGNED_BLOCKS ();
1612 /* Note that memalign and posix_memalign are not used in Emacs. */
1613 #ifndef HYBRID_MALLOC
/* An obsolete alias for aligned_alloc, for any old libraries that use
   this alias.  */
void *
memalign (size_t alignment, size_t size)
{
  return aligned_alloc (alignment, size);
}
1623 /* If HYBRID_MALLOC is defined, we may want to use the system
1624 posix_memalign below. */
/* POSIX-style aligned allocation: store the block in *MEMPTR and
   return 0, or return EINVAL/ENOMEM without touching *MEMPTR.
   ALIGNMENT must be a power of two and a multiple of sizeof (void *).  */
int
posix_memalign (void **memptr, size_t alignment, size_t size)
{
  void *mem;

  if (alignment == 0
      || alignment % sizeof (void *) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = aligned_alloc (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;

  return 0;
}
1645 /* Allocate memory on a page boundary.
1646 Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.
1648 This library is free software; you can redistribute it and/or
1649 modify it under the terms of the GNU General Public License as
1650 published by the Free Software Foundation; either version 2 of the
1651 License, or (at your option) any later version.
1653 This library is distributed in the hope that it will be useful,
1654 but WITHOUT ANY WARRANTY; without even the implied warranty of
1655 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1656 General Public License for more details.
1658 You should have received a copy of the GNU General Public
1659 License along with this library. If not, see <https://www.gnu.org/licenses/>.
1661 The author may be reached (Email) at the address mike@ai.mit.edu,
1662 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1664 #ifndef HYBRID_MALLOC
1666 # ifndef HAVE_MALLOC_H
1667 /* Allocate SIZE bytes on a page boundary. */
1668 extern void *valloc (size_t);
1671 # if defined _SC_PAGESIZE || !defined HAVE_GETPAGESIZE
1672 # include "getpagesize.h"
1673 # elif !defined getpagesize
1674 extern int getpagesize (void);
1677 static size_t pagesize
;
1680 valloc (size_t size
)
1683 pagesize
= getpagesize ();
1685 return aligned_alloc (pagesize
, size
);
1687 #endif /* HYBRID_MALLOC */
1692 #undef aligned_alloc
1695 #ifdef HYBRID_MALLOC
1696 /* Declare system malloc and friends. */
1697 extern void *malloc (size_t size
);
1698 extern void *realloc (void *ptr
, size_t size
);
1699 extern void *calloc (size_t nmemb
, size_t size
);
1700 extern void free (void *ptr
);
1701 #ifdef HAVE_ALIGNED_ALLOC
1702 extern void *aligned_alloc (size_t alignment
, size_t size
);
1703 #elif defined HAVE_POSIX_MEMALIGN
1704 extern int posix_memalign (void **memptr
, size_t alignment
, size_t size
);
1707 /* Assuming PTR was allocated via the hybrid malloc, return true if
1708 PTR was allocated via gmalloc, not the system malloc. Also, return
1709 true if _heaplimit is zero; this can happen temporarily when
1710 gmalloc calls itself for internal use, and in that case PTR is
1711 already known to be allocated via gmalloc. */
1714 allocated_via_gmalloc (void *ptr
)
1716 size_t block
= BLOCK (ptr
);
1717 size_t blockmax
= _heaplimit
- 1;
1718 return block
<= blockmax
&& _heapinfo
[block
].busy
.type
!= 0;
1721 /* See the comments near the beginning of this file for explanations
1722 of the following functions. */
1725 hybrid_malloc (size_t size
)
1728 return malloc (size
);
1729 return gmalloc (size
);
1733 hybrid_calloc (size_t nmemb
, size_t size
)
1736 return calloc (nmemb
, size
);
1737 return gcalloc (nmemb
, size
);
1741 hybrid_free (void *ptr
)
1743 if (allocated_via_gmalloc (ptr
))
#if defined HAVE_ALIGNED_ALLOC || defined HAVE_POSIX_MEMALIGN
/* Aligned allocation through whichever allocator is live.  */
void *
hybrid_aligned_alloc (size_t alignment, size_t size)
{
  if (!DUMPED)
    return galigned_alloc (alignment, size);
  /* The following is copied from alloc.c */
#ifdef HAVE_ALIGNED_ALLOC
  return aligned_alloc (alignment, size);
#else  /* HAVE_POSIX_MEMALIGN */
  void *p;
  return posix_memalign (&p, alignment, size) == 0 ? p : 0;
#endif
}
#endif
1766 hybrid_realloc (void *ptr
, size_t size
)
1770 size_t block
, oldsize
;
1773 return hybrid_malloc (size
);
1774 if (!allocated_via_gmalloc (ptr
))
1775 return realloc (ptr
, size
);
1777 return grealloc (ptr
, size
);
1779 /* The dumped emacs is trying to realloc storage allocated before
1780 dumping via gmalloc. Allocate new space and copy the data. Do
1781 not bother with gfree (ptr), as that would just waste time. */
1782 block
= BLOCK (ptr
);
1783 type
= _heapinfo
[block
].busy
.type
;
1785 type
< 0 ? _heapinfo
[block
].busy
.info
.size
* BLOCKSIZE
1786 : (size_t) 1 << type
;
1787 result
= malloc (size
);
1789 return memcpy (result
, ptr
, min (oldsize
, size
));
1793 #else /* ! HYBRID_MALLOC */
1796 malloc (size_t size
)
1798 return gmalloc (size
);
1802 calloc (size_t nmemb
, size_t size
)
1804 return gcalloc (nmemb
, size
);
1814 aligned_alloc (size_t alignment
, size_t size
)
1816 return galigned_alloc (alignment
, size
);
1820 realloc (void *ptr
, size_t size
)
1822 return grealloc (ptr
, size
);
1825 #endif /* HYBRID_MALLOC */
1829 /* Standard debugging hooks for `malloc'.
1830 Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
1831 Written May 1989 by Mike Haertel.
1833 This library is free software; you can redistribute it and/or
1834 modify it under the terms of the GNU General Public License as
1835 published by the Free Software Foundation; either version 2 of the
1836 License, or (at your option) any later version.
1838 This library is distributed in the hope that it will be useful,
1839 but WITHOUT ANY WARRANTY; without even the implied warranty of
1840 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1841 General Public License for more details.
1843 You should have received a copy of the GNU General Public
1844 License along with this library. If not, see <https://www.gnu.org/licenses/>.
1846 The author may be reached (Email) at the address mike@ai.mit.edu,
1847 or (US mail) as Mike Haertel c/o Free Software Foundation. */
/* Old hook values.  */
static void (*old_free_hook) (void *ptr);
static void *(*old_malloc_hook) (size_t size);
static void *(*old_realloc_hook) (void *ptr, size_t size);

/* Function to call when something awful happens.  */
static void (*abortfunc) (enum mcheck_status);

/* Arbitrary magical numbers.  */
#define MAGICWORD	(SIZE_MAX / 11 ^ SIZE_MAX / 13 << 3)
#define MAGICFREE	(SIZE_MAX / 17 ^ SIZE_MAX / 19 << 4)
#define MAGICBYTE	((char) 0xd7)
#define MALLOCFLOOD	((char) 0x93)
#define FREEFLOOD	((char) 0x95)

/* Header prepended to every mcheck-tracked allocation; a MAGICBYTE
   sentinel follows the user data to catch tail overruns.  */
struct hdr
{
  size_t size;	/* Exact size requested by user.  */
  size_t magic;	/* Magic number to check header integrity.  */
};
1872 static enum mcheck_status
1873 checkhdr (const struct hdr
*hdr
)
1875 enum mcheck_status status
;
1879 status
= MCHECK_HEAD
;
1882 status
= MCHECK_FREE
;
1885 if (((char *) &hdr
[1])[hdr
->size
] != MAGICBYTE
)
1886 status
= MCHECK_TAIL
;
1891 if (status
!= MCHECK_OK
)
1892 (*abortfunc
) (status
);
1897 freehook (void *ptr
)
1903 struct alignlist
*l
;
1905 /* If the block was allocated by aligned_alloc, its real pointer
1906 to free is recorded in _aligned_blocks; find that. */
1907 PROTECT_MALLOC_STATE (0);
1908 LOCK_ALIGNED_BLOCKS ();
1909 for (l
= _aligned_blocks
; l
!= NULL
; l
= l
->next
)
1910 if (l
->aligned
== ptr
)
1912 l
->aligned
= NULL
; /* Mark the slot in the list as free. */
1916 UNLOCK_ALIGNED_BLOCKS ();
1917 PROTECT_MALLOC_STATE (1);
1919 hdr
= ((struct hdr
*) ptr
) - 1;
1921 hdr
->magic
= MAGICFREE
;
1922 memset (ptr
, FREEFLOOD
, hdr
->size
);
1927 gfree_hook
= old_free_hook
;
1929 gfree_hook
= freehook
;
1933 mallochook (size_t size
)
1937 gmalloc_hook
= old_malloc_hook
;
1938 hdr
= malloc (sizeof *hdr
+ size
+ 1);
1939 gmalloc_hook
= mallochook
;
1944 hdr
->magic
= MAGICWORD
;
1945 ((char *) &hdr
[1])[size
] = MAGICBYTE
;
1946 return memset (hdr
+ 1, MALLOCFLOOD
, size
);
1950 reallochook (void *ptr
, size_t size
)
1952 struct hdr
*hdr
= NULL
;
1957 hdr
= ((struct hdr
*) ptr
) - 1;
1962 memset ((char *) ptr
+ size
, FREEFLOOD
, osize
- size
);
1965 gfree_hook
= old_free_hook
;
1966 gmalloc_hook
= old_malloc_hook
;
1967 grealloc_hook
= old_realloc_hook
;
1968 hdr
= realloc (hdr
, sizeof *hdr
+ size
+ 1);
1969 gfree_hook
= freehook
;
1970 gmalloc_hook
= mallochook
;
1971 grealloc_hook
= reallochook
;
1976 hdr
->magic
= MAGICWORD
;
1977 ((char *) &hdr
[1])[size
] = MAGICBYTE
;
1979 memset ((char *) (hdr
+ 1) + osize
, MALLOCFLOOD
, size
- osize
);
1984 mabort (enum mcheck_status status
)
1990 msg
= "memory is consistent, library is buggy";
1993 msg
= "memory clobbered before allocated block";
1996 msg
= "memory clobbered past end of allocated block";
1999 msg
= "block freed twice";
2002 msg
= "bogus mcheck_status, library is buggy";
2005 #ifdef __GNU_LIBRARY__
2008 fprintf (stderr
, "mcheck: %s\n", msg
);
2018 static int mcheck_used
= 0;
2021 mcheck (void (*func
) (enum mcheck_status
))
2023 abortfunc
= (func
!= NULL
) ? func
: &mabort
;
2025 /* These hooks may not be safely inserted if malloc is already in use. */
2026 if (!__malloc_initialized
&& !mcheck_used
)
2028 old_free_hook
= gfree_hook
;
2029 gfree_hook
= freehook
;
2030 old_malloc_hook
= gmalloc_hook
;
2031 gmalloc_hook
= mallochook
;
2032 old_realloc_hook
= grealloc_hook
;
2033 grealloc_hook
= reallochook
;
2037 return mcheck_used
? 0 : -1;
2043 return mcheck_used
? checkhdr (ptr
) : MCHECK_DISABLED
;
2046 #endif /* GC_MCHECK */