/* Declarations for `malloc' and friends.
   Copyright (C) 1990-1993, 1995-1996, 1999, 2002-2007, 2013-2017 Free
   Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#if defined HAVE_PTHREAD && !defined HYBRID_MALLOC
#define USE_PTHREAD
#endif
#ifdef HYBRID_GET_CURRENT_DIR_NAME
#undef get_current_dir_name
#endif
extern _Noreturn void emacs_abort (void) NO_INLINE;
/* If HYBRID_MALLOC is defined, then temacs will use malloc,
   realloc... as defined in this file (and renamed gmalloc,
   grealloc... via the macros that follow).  The dumped emacs,
   however, will use the system malloc, realloc....  In other source
   files, malloc, realloc... are renamed hybrid_malloc,
   hybrid_realloc... via macros in conf_post.h.  hybrid_malloc and
   friends are wrapper functions defined later in this file.
   aligned_alloc is defined as a macro only in alloc.c.

   As of this writing (August 2014), Cygwin is the only platform on
   which HYBRID_MALLOC is defined.  Any other platform that wants to
   define it will have to define the macros DUMPED and
   ALLOCATED_BEFORE_DUMPING, defined below for Cygwin.  */
#define malloc gmalloc
#define realloc grealloc
#define calloc gcalloc
#define aligned_alloc galigned_alloc
#define free gfree
#ifdef CYGWIN
extern void *bss_sbrk (ptrdiff_t size);
extern int bss_sbrk_did_unexec;
extern char bss_sbrk_buffer[];
extern void *bss_sbrk_buffer_end;
#define DUMPED bss_sbrk_did_unexec
#define ALLOCATED_BEFORE_DUMPING(P) \
  ((P) < bss_sbrk_buffer_end && (P) >= (void *) bss_sbrk_buffer)
#endif
/* Allocate SIZE bytes of memory.  */
extern void *malloc (size_t size) ATTRIBUTE_MALLOC_SIZE ((1));
/* Re-allocate the previously allocated block in PTR,
   making the new block SIZE bytes long.  */
extern void *realloc (void *ptr, size_t size) ATTRIBUTE_ALLOC_SIZE ((2));
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern void *calloc (size_t nmemb, size_t size) ATTRIBUTE_MALLOC_SIZE ((1,2));
/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
extern void free (void *ptr);

/* Allocate SIZE bytes aligned to ALIGNMENT bytes.  */
extern void *aligned_alloc (size_t, size_t);
extern void *memalign (size_t, size_t);
extern int posix_memalign (void **, size_t, size_t);
/* Set up mutexes and make malloc etc. thread-safe.  */
extern void malloc_enable_thread (void);
/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT (CHAR_BIT * sizeof (int))
#define BLOCKLOG (INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE (1 << BLOCKLOG)
#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
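
/* A worked example (illustrative only, not part of the allocator):
   with a 32-bit int, INT_BIT is 32, so BLOCKLOG is 12 and BLOCKSIZE
   is 4096.  BLOCKIFY rounds a request up to whole blocks:

       BLOCKIFY (1)    == 1   (one 4096-byte block)
       BLOCKIFY (4096) == 1
       BLOCKIFY (4097) == 2   (8192 bytes)

   Requests of BLOCKSIZE / 2 bytes or less are served instead from
   power-of-two fragments carved out of a single block.  */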
/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP (INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS 8
/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
        /* Zero for a large (multiblock) object, or positive giving the
           logarithm to the base two of the fragment size.  */
        int type;
        union
          {
            struct
              {
                size_t nfree;  /* Free frags in a fragmented block.  */
                size_t first;  /* First free fragment of the block.  */
              } frag;
            /* For a large object, in its first block, this has the number
               of blocks in the object.  In the other blocks, this has a
               negative number which says how far back the first block is.  */
            ptrdiff_t size;
          } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
        size_t size;  /* Size (in blocks) of a free cluster.  */
        size_t next;  /* Index of next free cluster.  */
        size_t prev;  /* Index of previous free cluster.  */
      } free;
  } malloc_info;
/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A) (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B) ((void *) (((B) - 1) * BLOCKSIZE + _heapbase))
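
/* A worked example of the mapping (illustrative only): block numbers
   are 1-based.  With BLOCKSIZE == 4096 and _heapbase == 0x100000, the
   address 0x103200 lies (0x103200 - 0x100000) / 4096 == 3 whole
   blocks past the base, so BLOCK (0x103200) == 4; going the other
   way, ADDRESS (4) == 0x103000, the start of that block.  */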
/* Current search index for the heap table.  */
extern size_t _heapindex;

/* Limit of valid info table indices.  */
extern size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];
/* List of blocks allocated with aligned_alloc and friends.  */
struct alignlist
  {
    struct alignlist *next;
    void *aligned;  /* The address that aligned_alloc returned.  */
    void *exact;    /* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;
/* Instrumentation.  */
extern size_t _chunks_used;
extern size_t _bytes_used;
extern size_t _chunks_free;
extern size_t _bytes_free;
/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern void *_malloc_internal (size_t);
extern void *_realloc_internal (void *, size_t);
extern void _free_internal (void *);
extern void *_malloc_internal_nolock (size_t);
extern void *_realloc_internal_nolock (void *, size_t);
extern void _free_internal_nolock (void *);
#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;
#define LOCK()						\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_malloc_mutex);		\
  } while (0)
#define UNLOCK()					\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_malloc_mutex);		\
  } while (0)
#define LOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_aligned_blocks_mutex);	\
  } while (0)
#define UNLOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_aligned_blocks_mutex);	\
  } while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif
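
/* Usage pattern (a sketch): the public entry points wrap the _nolock
   workers in the malloc lock, e.g.

     LOCK ();
     result = _malloc_internal_nolock (size);
     UNLOCK ();

   so the _nolock variants can call one another freely without
   deadlocking on an already-held mutex.  */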
/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object.  */
extern void *malloc_find_object_address (void *ptr);

/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern void *(*__morecore) (ptrdiff_t size);

/* Default value of `__morecore'.  */
extern void *__default_morecore (ptrdiff_t size);

/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) (void);

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
extern size_t __malloc_extra_blocks;

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize (void);
/* Hooks for debugging versions.  */
extern void (*__malloc_initialize_hook) (void);
extern void (*__free_hook) (void *ptr);
extern void *(*__malloc_hook) (size_t size);
extern void *(*__realloc_hook) (void *ptr, size_t size);
extern void *(*__memalign_hook) (size_t size, size_t alignment);
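
/* A minimal sketch of using these hooks (hypothetical code, not part
   of this file): count allocations by interposing on __malloc_hook.
   The hook forwards to _malloc_internal so it does not recurse into
   itself.

     static size_t n_mallocs;

     static void *
     counting_malloc_hook (size_t size)
     {
       ++n_mallocs;
       return _malloc_internal (size);
     }

     ...
     __malloc_hook = counting_malloc_hook;

   The mcheck hooks near the end of this file follow the same
   pattern.  */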
/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,  /* Consistency checking is not turned on.  */
    MCHECK_OK,             /* Block is fine.  */
    MCHECK_FREE,           /* Block freed twice.  */
    MCHECK_HEAD,           /* Memory before the block was clobbered.  */
    MCHECK_TAIL            /* Memory after the block was clobbered.  */
  };

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck (void (*abortfunc) (enum mcheck_status));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe (void *ptr);
/* Activate a standard collection of tracing hooks.  */
extern void mtrace (void);
extern void muntrace (void);
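
/* Usage sketch (hypothetical code, not part of this file): install
   the checking hooks before the first allocation, then probe blocks
   on demand.

     static void
     note_corruption (enum mcheck_status status)
     {
       fprintf (stderr, "heap check failed: %d\n", (int) status);
     }

     int
     main (void)
     {
       mcheck (note_corruption);   // must precede the first malloc
       char *p = malloc (16);
       p[16] = 'x';                // clobber the trailing guard byte
       enum mcheck_status s = mprobe (p);   // reports MCHECK_TAIL
       ...
     }
*/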
/* Statistics available to the user.  */
struct mstats
  {
    size_t bytes_total;  /* Total size of the heap.  */
    size_t chunks_used;  /* Chunks allocated by the user.  */
    size_t bytes_used;   /* Byte total of user-allocated chunks.  */
    size_t chunks_free;  /* Chunks in the free list.  */
    size_t bytes_free;   /* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats (void);

/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings (void *start, void (*warnfun) (const char *));
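
/* Usage sketch (hypothetical): register a warning callback and report
   heap occupancy using the statistics above.

     static void
     warn (const char *msg)
     {
       fprintf (stderr, "memory warning: %s\n", msg);
     }

     ...
     memory_warnings (NULL, warn);
     struct mstats st = mstats ();
     fprintf (stderr, "%zu of %zu heap bytes in use\n",
              st.bytes_used, st.bytes_total);
*/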
/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
void *(*__morecore) (ptrdiff_t size) = __default_morecore;

/* Debugging hook for `malloc'.  */
void *(*__malloc_hook) (size_t size);
/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries.  */
static size_t heapsize;

/* Search index in the info table.  */
size_t _heapindex;

/* Limit of valid info table indices.  */
size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
size_t _chunks_used;
size_t _bytes_used;
size_t _chunks_free;
size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

size_t __malloc_extra_blocks;

void (*__malloc_initialize_hook) (void);
void (*__after_morecore_hook) (void);
#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static size_t last_state_size;
static malloc_info *last_heapinfo;

void
protect_malloc_state (int protect_p)
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo   = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
		    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
	abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif
/* Aligned allocation.  */
static void *
align (size_t size)
{
  void *result;
  ptrdiff_t adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows the
     ptrdiff_t type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if (PTRDIFF_MAX < size)
    result = 0;
  else
    result = (*__morecore) (size);
  adj = (uintptr_t) result % BLOCKSIZE;
  if (adj != 0)
    {
      adj = BLOCKSIZE - adj;
      (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}
/* Get SIZE bytes, if we can get them starting at END.
   Return the address of the space we got.
   If we cannot get space at END, fail and return 0.  */
static void *
get_contiguous_space (ptrdiff_t size, void *position)
{
  void *before;
  void *after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}
/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void
register_heapinfo (void)
{
  size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}
#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;

static void
malloc_atfork_handler_prepare (void)
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread (void)
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
		  malloc_atfork_handler_parent,
		  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif /* USE_PTHREAD */
static void
malloc_initialize_1 (void)
{
  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
}
/* Set everything up and remember that we have.
   main will call malloc which calls this function.  That is before any threads
   or signal handlers have been set up, so we don't need thread protection.  */
int
__malloc_initialize (void)
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}
static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static void *
morecore_nolock (size_t size)
{
  void *result;
  malloc_info *newinfo, *oldinfo;
  size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
	 added blocks in the table itself, as we hope to place them in
	 existing free space, which is already covered by part of the
	 existing table.  */
      newsize = heapsize;
      do
	newsize *= 2;
      while ((size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
	 from realloc in the case of growing a large block, because the
	 block being grown is momentarily marked as free.  In this case
	 _heaplimit is zero so we know not to reuse space for internal
	 allocation.  */
      if (_heaplimit != 0)
	{
	  /* First try to allocate the new info table in core we already
	     have, in the usual way using realloc.  If realloc cannot
	     extend it in place or relocate it to existing sufficient core,
	     we will get called again, and the code above will notice the
	     `morecore_recursing' flag and return null.  */
	  int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
	  morecore_recursing = 1;
	  newinfo = _realloc_internal_nolock (_heapinfo,
					      newsize * sizeof (malloc_info));
	  morecore_recursing = 0;
	  if (newinfo == NULL)
	    errno = save;
	  else
	    {
	      /* We found some space in core, and realloc has put the old
		 table's blocks on the free list.  Now zero the new part
		 of the table and install the new table location.  */
	      memset (&newinfo[heapsize], 0,
		      (newsize - heapsize) * sizeof (malloc_info));
	      _heapinfo = newinfo;
	      heapsize = newsize;
	      goto got_heap;
	    }
	}

      /* Allocate new space for the malloc info table.  */
      while (1)
	{
	  newinfo = align (newsize * sizeof (malloc_info));

	  /* Did it fail?  */
	  if (newinfo == NULL)
	    {
	      (*__morecore) (-size);
	      return NULL;
	    }

	  /* Is it big enough to record status for its own space?
	     If so, we win.  */
	  if ((size_t) BLOCK ((char *) newinfo
			      + newsize * sizeof (malloc_info))
	      < newsize)
	    break;

	  /* Must try again.  First give back most of what we just got.  */
	  (*__morecore) (- newsize * sizeof (malloc_info));
	  newsize *= 2;
	}

      /* Copy the old table to the beginning of the new,
	 and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
	      (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
	 it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}
/* Allocate memory from the heap.  */
void *
_malloc_internal_nolock (size_t size)
{
  void *result;
  size_t block, blocks, lastblocks, start;
  register size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);
  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size.  */
      register size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;

      /* Look in the fragment lists for a
	 free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters.  */
	  result = next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    _heapinfo[block].busy.info.frag.first =
	      (uintptr_t) next->next % BLOCKSIZE >> log;

	  /* Update the statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
	  result = _malloc_internal_nolock (BLOCKSIZE);
	  PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
	  result = _malloc_internal_nolock (BLOCKSIZE);
#else
	  result = malloc (BLOCKSIZE);
#endif
	  if (result == NULL)
	    {
	      PROTECT_MALLOC_STATE (1);
	      goto out;
	    }

	  /* Link all fragments but the first into the free list.  */
	  next = (struct list *) ((char *) result + (1 << log));
	  next->next = NULL;
	  next->prev = &_fraghead[log];
	  _fraghead[log].next = next;

	  for (i = 2; i < (size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      next->next->prev = next;
	    }

	  /* Initialize the nfree and first counters for this block.  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Get a little extra.  */
	      size_t wantblocks = blocks + __malloc_extra_blocks;
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      /* Check to see if the new core will be contiguous with the
		 final free block; if so we don't need to get as much.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  /* We can't do this if we will have to make the heap info
		     table bigger to accommodate the new space.  */
		  block + wantblocks <= heapsize &&
		  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
					ADDRESS (block + lastblocks)))
		{
		  /* We got it contiguously.  Which block we are extending
		     (the `final free block' referred to above) might have
		     changed, if it got combined with a freed info table.  */
		  block = _heapinfo[0].free.prev;
		  _heapinfo[block].free.size += (wantblocks - lastblocks);
		  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
		  _heaplimit += wantblocks - lastblocks;
		  continue;
		}
	      result = morecore_nolock (wantblocks * BLOCKSIZE);
	      if (result == NULL)
		{
		  PROTECT_MALLOC_STATE (1);
		  goto out;
		}
	      block = BLOCK (result);
	      /* Put the new block at the end of the free list.  */
	      _heapinfo[block].free.size = wantblocks;
	      _heapinfo[block].free.prev = _heapinfo[0].free.prev;
	      _heapinfo[block].free.next = 0;
	      _heapinfo[0].free.prev = block;
	      _heapinfo[_heapinfo[block].free.prev].free.next = block;
	      ++_chunks_free;
	      /* Now loop to use some of that block for this allocation.  */
	    }
	}

      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list.  */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list.  */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
	 first with a negative number so you can find the first block by
	 adding that adjustment.  */
      while (--blocks > 0)
	_heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}
void *
_malloc_internal (size_t size)
{
  void *result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}
void *
malloc (size_t size)
{
  void *(*hook) (size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of __malloc_hook to an automatic variable in case
     __malloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not the right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to __malloc_hook) for now.  */
  hook = __malloc_hook;
  return (hook != NULL ? *hook : _malloc_internal) (size);
}
/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

extern void *_malloc (size_t);
extern void _free (void *);
extern void *_realloc (void *, size_t);

void *
_malloc (size_t size)
{
  return malloc (size);
}

void
_free (void *ptr)
{
  free (ptr);
}

void *
_realloc (void *ptr, size_t size)
{
  return realloc (ptr, size);
}
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
/* Debugging hook for free.  */
void (*__free_hook) (void *__ptr);

/* List of blocks allocated by aligned_alloc.  */
struct alignlist *_aligned_blocks = NULL;
/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (void *ptr)
{
  int type;
  size_t block, blocks;
  register size_t i;
  struct list *prev, *next;
  void *curbrk;
  const size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

  PROTECT_MALLOC_STATE (0);

  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }
  UNLOCK_ALIGNED_BLOCKS ();
  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}
      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}

      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}
      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

	  size_t info_block = BLOCK (_heapinfo);
	  size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  size_t prev_block = _heapinfo[block].free.prev;
	  size_t prev_blocks = _heapinfo[prev_block].free.size;
	  size_t next_block = _heapinfo[block].free.next;
	  size_t next_blocks = _heapinfo[next_block].free.size;

	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold))))
	    {
	      malloc_info *newinfo;
	      size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal_nolock (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      newinfo = _malloc_internal_nolock (info_blocks * BLOCKSIZE);
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }
	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      (*__morecore) (-bytes);
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;
    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;
#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
	  _free_internal_nolock (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block.  */
	  next = ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block.  */
	  prev = ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first =
	    (uintptr_t) ptr % BLOCKSIZE >> type;
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
}
/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (void *ptr)
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}

/* Return memory to the heap.  */
void
free (void *ptr)
{
  void (*hook) (void *) = __free_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}

/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (void *ptr)
{
  free (ptr);
}
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#define min(a, b) ((a) < (b) ? (a) : (b))

/* Debugging hook for realloc.  */
void *(*__realloc_hook) (void *ptr, size_t size);
/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.  */
void *
_realloc_internal_nolock (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
	{
	  result = _malloc_internal_nolock (size);
	  if (result != NULL)
	    {
	      memcpy (result, ptr, size);
	      _free_internal_nolock (ptr);
	      goto out;
	    }
	}
      /* The new size is a large allocation as well;
	 see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
	{
	  /* The new size is smaller; return
	     excess memory to the free list.  */
	  _heapinfo[block + blocks].busy.type = 0;
	  _heapinfo[block + blocks].busy.info.size
	    = _heapinfo[block].busy.info.size - blocks;
	  _heapinfo[block].busy.info.size = blocks;
	  /* We have just created a new chunk by splitting a chunk in two.
	     Now we will free this chunk; increment the statistics counter
	     so it doesn't become wrong when _free_internal decrements it.  */
	  ++_chunks_used;
	  _free_internal_nolock (ADDRESS (block + blocks));
	  result = ptr;
	}
      else if (blocks == _heapinfo[block].busy.info.size)
	/* No size change necessary.  */
	result = ptr;
      else
	{
	  /* Won't fit, so allocate a new region that will.
	     Free the old region first in case there is sufficient
	     adjacent free space to grow without moving.  */
	  blocks = _heapinfo[block].busy.info.size;
	  /* Prevent free from actually returning memory to the system.  */
	  oldlimit = _heaplimit;
	  _heaplimit = 0;
	  _free_internal_nolock (ptr);
	  result = _malloc_internal_nolock (size);
	  PROTECT_MALLOC_STATE (0);
	  if (_heaplimit == 0)
	    _heaplimit = oldlimit;
	  if (result == NULL)
	    {
	      /* Now we're really in trouble.  We have to unfree
		 the thing we just freed.  Unfortunately it might
		 have been coalesced with its neighbors.  */
	      if (_heapindex == block)
		(void) _malloc_internal_nolock (blocks * BLOCKSIZE);
	      else
		{
		  void *previous
		    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
		  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
		  _free_internal_nolock (previous);
		}
	      goto out;
	    }
	  if (ptr != result)
	    memmove (result, ptr, blocks * BLOCKSIZE);
	}
      break;
    default:
      /* Old size is a fragment; type is logarithm
	 to base two of the fragment size.  */
      if (size > (size_t) (1 << (type - 1)) &&
	  size <= (size_t) (1 << type))
	/* The new size is the same kind of fragment.  */
	result = ptr;
      else
	{
	  /* The new size is different; allocate a new space,
	     and copy the lesser of the new size and the old.  */
	  result = _malloc_internal_nolock (size);
	  if (result == NULL)
	    goto out;
	  memcpy (result, ptr, min (size, (size_t) 1 << type));
	  _free_internal_nolock (ptr);
	}
      break;
    }

 out:
  PROTECT_MALLOC_STATE (1);
  return result;
}
void *
_realloc_internal (void *ptr, size_t size)
{
  void *result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}

void *
realloc (void *ptr, size_t size)
{
  void *(*hook) (void *, size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  hook = __realloc_hook;
  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
void *
calloc (size_t nmemb, size_t size)
{
  void *result;
  size_t bytes = nmemb * size;

  if (size != 0 && bytes / size != nmemb)
    {
      errno = ENOMEM;
      return NULL;
    }

  result = malloc (bytes);
  if (result)
    return memset (result, 0, bytes);
  return result;
}
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with the GNU C Library.  If not, see <http://www.gnu.org/licenses/>.  */
/* uClibc defines __GNU_LIBRARY__, but it is not completely
   compatible.  */
#if !defined (__GNU_LIBRARY__) || defined (__UCLIBC__)
#define __sbrk	sbrk
#else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

extern void *__sbrk (ptrdiff_t increment);
#endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
void *
__default_morecore (ptrdiff_t increment)
{
  void *result;
#if defined (CYGWIN)
  if (!DUMPED)
    return bss_sbrk (increment);
#endif
  result = (void *) __sbrk (increment);
  if (result == (void *) -1)
    return NULL;
  return result;
}
1532 This library is free software; you can redistribute it and/or
1533 modify it under the terms of the GNU General Public License as
1534 published by the Free Software Foundation; either version 2 of the
1535 License, or (at your option) any later version.
1537 This library is distributed in the hope that it will be useful,
1538 but WITHOUT ANY WARRANTY; without even the implied warranty of
1539 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1540 General Public License for more details.
1542 You should have received a copy of the GNU General Public
1543 License along with this library. If not, see <http://www.gnu.org/licenses/>. */
void *(*__memalign_hook) (size_t size, size_t alignment);

void *
aligned_alloc (size_t alignment, size_t size)
{
  void *result;
  size_t adj, lastadj;
  void *(*hook) (size_t, size_t) = __memalign_hook;

  if (hook)
    return (*hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  if (- size < alignment)
    {
      errno = ENOMEM;
      return NULL;
    }
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = alignment - (uintptr_t) result % alignment;
  if (adj == alignment)
    adj = 0;

  if (adj != alignment - 1)
    do
      {
	/* Reallocate the block with only as much excess as it
	   needs.  */
	free (result);
	result = malloc (size + adj);
	if (result == NULL)	/* Impossible unless interrupted.  */
	  return NULL;

	lastadj = adj;
	adj = alignment - (uintptr_t) result % alignment;
	if (adj == alignment)
	  adj = 0;
	/* It's conceivable we might have been so unlucky as to get
	   a different block with weaker alignment.  If so, this
	   block is too short to contain SIZE after alignment
	   correction.  So we must try again and get another block,
	   slightly larger.  */
      }
    while (adj > lastadj);

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  l = malloc (sizeof *l);
	  if (l != NULL)
	    {
	      l->next = _aligned_blocks;
	      _aligned_blocks = l;
	    }
	}
      if (l != NULL)
	{
	  l->exact = result;
	  result = l->aligned = (char *) result + adj;
	}
      UNLOCK_ALIGNED_BLOCKS ();
      if (l == NULL)
	{
	  free (result);
	  result = NULL;
	}
    }

  return result;
}
/* An obsolete alias for aligned_alloc, for any old libraries that use
   this alias.  */
void *
memalign (size_t alignment, size_t size)
{
  return aligned_alloc (alignment, size);
}
/* If HYBRID_MALLOC is defined, we may want to use the system
   posix_memalign below.  */
#ifndef HYBRID_MALLOC
int
posix_memalign (void **memptr, size_t alignment, size_t size)
{
  void *mem;

  if (alignment == 0
      || alignment % sizeof (void *) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = aligned_alloc (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;

  return 0;
}
#endif
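
/* The (alignment & (alignment - 1)) test above is the usual
   power-of-two check: for alignment == 64 (binary 1000000),
   alignment - 1 is 0111111 and the AND is zero; for alignment == 48
   (binary 110000), alignment - 1 is 101111 and the AND is 100000,
   nonzero, so the call fails with EINVAL as POSIX requires.  */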
/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
/* Allocate SIZE bytes on a page boundary.  */
#ifndef HAVE_DECL_VALLOC
extern void *valloc (size_t);
#endif

#if defined _SC_PAGESIZE || !defined HAVE_GETPAGESIZE
# include "getpagesize.h"
#elif !defined getpagesize
extern int getpagesize (void);
#endif

static size_t pagesize;

void *
valloc (size_t size)
{
  if (pagesize == 0)
    pagesize = getpagesize ();

  return aligned_alloc (pagesize, size);
}
#undef malloc
#undef realloc
#undef calloc
#undef aligned_alloc
#undef free
#ifdef HYBRID_MALLOC
/* Declare system malloc and friends.  */
extern void *malloc (size_t size);
extern void *realloc (void *ptr, size_t size);
extern void *calloc (size_t nmemb, size_t size);
extern void free (void *ptr);
#ifdef HAVE_ALIGNED_ALLOC
extern void *aligned_alloc (size_t alignment, size_t size);
#elif defined HAVE_POSIX_MEMALIGN
extern int posix_memalign (void **memptr, size_t alignment, size_t size);
#endif
/* See the comments near the beginning of this file for explanations
   of the following functions.  */

void *
hybrid_malloc (size_t size)
{
  if (DUMPED)
    return malloc (size);
  return gmalloc (size);
}

void *
hybrid_calloc (size_t nmemb, size_t size)
{
  if (DUMPED)
    return calloc (nmemb, size);
  return gcalloc (nmemb, size);
}

void
hybrid_free (void *ptr)
{
  if (!DUMPED)
    gfree (ptr);
  else if (!ALLOCATED_BEFORE_DUMPING (ptr))
    free (ptr);
  /* Otherwise the dumped emacs is trying to free something allocated
     before dumping; do nothing.  */
}
#if defined HAVE_ALIGNED_ALLOC || defined HAVE_POSIX_MEMALIGN
void *
hybrid_aligned_alloc (size_t alignment, size_t size)
{
  if (!DUMPED)
    return galigned_alloc (alignment, size);
  /* The following is copied from alloc.c  */
#ifdef HAVE_ALIGNED_ALLOC
  return aligned_alloc (alignment, size);
#else  /* HAVE_POSIX_MEMALIGN */
  void *p;
  return posix_memalign (&p, alignment, size) == 0 ? p : 0;
#endif
}
#endif
void *
hybrid_realloc (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, oldsize;

  if (!DUMPED)
    return grealloc (ptr, size);
  if (!ALLOCATED_BEFORE_DUMPING (ptr))
    return realloc (ptr, size);

  /* The dumped emacs is trying to realloc storage allocated before
     dumping.  We just malloc new space and copy the data.  */
  if (size == 0 || ptr == NULL)
    return malloc (size);
  block = ((char *) ptr - _heapbase) / BLOCKSIZE + 1;
  type = _heapinfo[block].busy.type;
  oldsize =
    type == 0 ? _heapinfo[block].busy.info.size * BLOCKSIZE
      : (size_t) 1 << type;
  result = malloc (size);
  if (result)
    return memcpy (result, ptr, min (oldsize, size));
  return result;
}
#ifdef HYBRID_GET_CURRENT_DIR_NAME
/* Defined in sysdep.c.  */
char *gget_current_dir_name (void);

char *
hybrid_get_current_dir_name (void)
{
  if (DUMPED)
    return get_current_dir_name ();
  return gget_current_dir_name ();
}
#endif
#else	/* ! HYBRID_MALLOC */

void *
malloc (size_t size)
{
  return gmalloc (size);
}

void *
calloc (size_t nmemb, size_t size)
{
  return gcalloc (nmemb, size);
}

void
free (void *ptr)
{
  gfree (ptr);
}

void *
aligned_alloc (size_t alignment, size_t size)
{
  return galigned_alloc (alignment, size);
}

void *
realloc (void *ptr, size_t size)
{
  return grealloc (ptr, size);
}

#endif	/* HYBRID_MALLOC */
#ifdef GC_MCHECK

/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
/* Old hook values.  */
static void (*old_free_hook) (void *ptr);
static void *(*old_malloc_hook) (size_t size);
static void *(*old_realloc_hook) (void *ptr, size_t size);

/* Function to call when something awful happens.  */
static void (*abortfunc) (enum mcheck_status);

/* Arbitrary magical numbers.  */
#define MAGICWORD	(SIZE_MAX / 11 ^ SIZE_MAX / 13 << 3)
#define MAGICFREE	(SIZE_MAX / 17 ^ SIZE_MAX / 19 << 4)
#define MAGICBYTE	((char) 0xd7)
#define MALLOCFLOOD	((char) 0x93)
#define FREEFLOOD	((char) 0x95)

struct hdr
  {
    size_t size;	/* Exact size requested by user.  */
    size_t magic;	/* Magic number to check header integrity.  */
  };
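
/* Layout produced by mallochook below (a sketch): for a request of
   SIZE bytes the real allocation is

       [struct hdr][SIZE user bytes][1 guard byte]

   hdr.size holds SIZE; hdr.magic holds MAGICWORD while the block is
   live and MAGICFREE once freed; the user bytes are flooded with
   MALLOCFLOOD on allocation (FREEFLOOD on free); and the guard byte
   just past the user data is MAGICBYTE.  checkhdr classifies
   corruption by testing the magic word and the guard byte.  */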
static enum mcheck_status
checkhdr (const struct hdr *hdr)
{
  enum mcheck_status status;
  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
	status = MCHECK_TAIL;
      else
	status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}
static void
freehook (void *ptr)
{
  struct hdr *hdr;

  if (ptr)
    {
      struct alignlist *l;

      /* If the block was allocated by aligned_alloc, its real pointer
	 to free is recorded in _aligned_blocks; find that.  */
      PROTECT_MALLOC_STATE (0);
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == ptr)
	  {
	    l->aligned = NULL;	/* Mark the slot in the list as free.  */
	    ptr = l->exact;
	    break;
	  }
      UNLOCK_ALIGNED_BLOCKS ();
      PROTECT_MALLOC_STATE (1);

      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      memset (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}
static void *
mallochook (size_t size)
{
  struct hdr *hdr;

  __malloc_hook = old_malloc_hook;
  hdr = malloc (sizeof *hdr + size + 1);
  __malloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  return memset (hdr + 1, MALLOCFLOOD, size);
}
static void *
reallochook (void *ptr, size_t size)
{
  struct hdr *hdr = NULL;
  size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
	memset ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = realloc (hdr, sizeof *hdr + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    memset ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return hdr + 1;
}
static void
mabort (enum mcheck_status status)
{
  const char *msg;
  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
  abort ();
#endif
}
= 0;
2040 mcheck (void (*func
) (enum mcheck_status
))
2042 abortfunc
= (func
!= NULL
) ? func
: &mabort
;
2044 /* These hooks may not be safely inserted if malloc is already in use. */
2045 if (!__malloc_initialized
&& !mcheck_used
)
2047 old_free_hook
= __free_hook
;
2048 __free_hook
= freehook
;
2049 old_malloc_hook
= __malloc_hook
;
2050 __malloc_hook
= mallochook
;
2051 old_realloc_hook
= __realloc_hook
;
2052 __realloc_hook
= reallochook
;
2056 return mcheck_used
? 0 : -1;
2062 return mcheck_used
? checkhdr (ptr
) : MCHECK_DISABLED
;
2065 #endif /* GC_MCHECK */