/* Declarations for `malloc' and friends.
   Copyright (C) 1990-1993, 1995-1996, 1999, 2002-2007, 2013-2015 Free
   Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#include <config.h>

#include <errno.h>
#include <limits.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#if defined HAVE_PTHREAD && !defined HYBRID_MALLOC
#define USE_PTHREAD
#endif

#ifdef USE_PTHREAD
#include <pthread.h>
#endif

#ifdef HYBRID_GET_CURRENT_DIR_NAME
#undef get_current_dir_name
#endif

#ifdef WINDOWSNT
#include <w32heap.h>	/* for sbrk */
#endif

#ifdef emacs
extern void emacs_abort (void);
#endif
/* If HYBRID_MALLOC is defined, then temacs will use malloc,
   realloc... as defined in this file (and renamed gmalloc,
   grealloc... via the macros that follow).  The dumped emacs,
   however, will use the system malloc, realloc....  In other source
   files, malloc, realloc... are renamed hybrid_malloc,
   hybrid_realloc... via macros in conf_post.h.  hybrid_malloc and
   friends are wrapper functions defined later in this file.
   aligned_alloc is defined as a macro only in alloc.c.

   As of this writing (August 2014), Cygwin is the only platform on
   which HYBRID_MALLOC is defined.  Any other platform that wants to
   define it will have to define the macros DUMPED and
   ALLOCATED_BEFORE_DUMPING, defined below for Cygwin.  */
#ifdef HYBRID_MALLOC
#define malloc gmalloc
#define realloc grealloc
#define calloc gcalloc
#define aligned_alloc galigned_alloc
#define free gfree
#endif /* HYBRID_MALLOC */
#ifdef CYGWIN
extern void *bss_sbrk (ptrdiff_t size);
extern int bss_sbrk_did_unexec;
extern char bss_sbrk_buffer[];
extern void *bss_sbrk_buffer_end;
#define DUMPED bss_sbrk_did_unexec
#define ALLOCATED_BEFORE_DUMPING(P) \
  ((P) < bss_sbrk_buffer_end && (P) >= (void *) bss_sbrk_buffer)
#endif
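
/* Illustrative sketch (not part of the allocator): how the hybrid
   scheme classifies a pointer.  DUMPED says whether this is the
   dumped emacs; ALLOCATED_BEFORE_DUMPING says whether a pointer came
   from the static bss arena used before dumping.  The function name
   and the GMALLOC_EXAMPLES guard are invented for this example.  */
#if defined GMALLOC_EXAMPLES && defined CYGWIN
static const char *
example_classify (void *p)
{
  if (!DUMPED)
    return "gmalloc heap (temacs)";	/* before dumping: gmalloc owns it */
  if (ALLOCATED_BEFORE_DUMPING (p))
    return "bss arena (allocated before dumping; never freed)";
  return "system heap (dumped emacs)";
}
#endif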
/* Allocate SIZE bytes of memory.  */
extern void *malloc (size_t size) ATTRIBUTE_MALLOC_SIZE ((1));
/* Re-allocate the previously allocated block in PTR,
   making the new block SIZE bytes long.  */
extern void *realloc (void *ptr, size_t size) ATTRIBUTE_ALLOC_SIZE ((2));
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern void *calloc (size_t nmemb, size_t size) ATTRIBUTE_MALLOC_SIZE ((1,2));
/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
extern void free (void *ptr);

/* Allocate SIZE bytes aligned to ALIGNMENT bytes.  */
extern void *aligned_alloc (size_t, size_t);
extern void *memalign (size_t, size_t);
extern int posix_memalign (void **, size_t, size_t);
#ifdef USE_PTHREAD
/* Set up mutexes and make malloc etc. thread-safe.  */
extern void malloc_enable_thread (void);
#endif

#ifdef emacs
extern void emacs_abort (void);
#endif
/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT		(CHAR_BIT * sizeof (int))
#define BLOCKLOG	(INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE	(1 << BLOCKLOG)
#define BLOCKIFY(SIZE)	(((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
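
/* Illustrative sketch (not part of the allocator): with INT_BIT > 16,
   BLOCKLOG is 12 and BLOCKSIZE is 4096.  A 100-byte request is
   "small" (<= BLOCKSIZE / 2) and is served from a 128-byte fragment,
   the next power of two; a 5000-byte request is "large" and takes
   BLOCKIFY (5000) == 2 whole blocks.  The GMALLOC_EXAMPLES guard is
   invented so this demo is normally compiled out.  */
#ifdef GMALLOC_EXAMPLES
#include <assert.h>
static void
example_blockify (void)
{
  /* BLOCKSIZE bytes fit exactly in one block; one byte more needs two.  */
  assert (BLOCKIFY (BLOCKSIZE) == 1);
  assert (BLOCKIFY (BLOCKSIZE + 1) == 2);
}
#endif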
/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP	(INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS	8
/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
	/* Zero for a large (multiblock) object, or positive giving the
	   logarithm to the base two of the fragment size.  */
	int type;
	union
	  {
	    struct
	      {
		size_t nfree;	/* Free frags in a fragmented block.  */
		size_t first;	/* First free fragment of the block.  */
	      } frag;
	    /* For a large object, in its first block, this has the number
	       of blocks in the object.  In the other blocks, this has a
	       negative number which says how far back the first block is.  */
	    ptrdiff_t size;
	  } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
	size_t size;	/* Size (in blocks) of a free cluster.  */
	size_t next;	/* Index of next free cluster.  */
	size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;
/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A)	(((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B)	((void *) (((B) - 1) * BLOCKSIZE + _heapbase))
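
/* Illustrative sketch (not part of the allocator): BLOCK and ADDRESS
   are inverses.  Block numbers start at 1 so that index 0 of the free
   list can serve as a sentinel.  The GMALLOC_EXAMPLES guard is
   invented for this example.  */
#ifdef GMALLOC_EXAMPLES
#include <assert.h>
static void
example_block_address (void)
{
  /* Any address inside a block maps to that block's number, and
     ADDRESS maps the number back to the block's first byte.  */
  void *p = _heapbase + 5 * BLOCKSIZE + 123;	/* somewhere in block 6 */
  size_t b = BLOCK (p);
  assert (b == 6);
  assert ((char *) ADDRESS (b) == _heapbase + 5 * BLOCKSIZE);
}
#endif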
/* Current search index for the heap table.  */
extern size_t _heapindex;

/* Limit of valid info table indices.  */
extern size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];
/* List of blocks allocated with aligned_alloc and friends.  */
struct alignlist
  {
    struct alignlist *next;
    void *aligned;	/* The address that aligned_alloc returned.  */
    void *exact;	/* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;
/* Instrumentation.  */
extern size_t _chunks_used;
extern size_t _bytes_used;
extern size_t _chunks_free;
extern size_t _bytes_free;
/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern void *_malloc_internal (size_t);
extern void *_realloc_internal (void *, size_t);
extern void _free_internal (void *);
extern void *_malloc_internal_nolock (size_t);
extern void *_realloc_internal_nolock (void *, size_t);
extern void _free_internal_nolock (void *);
#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;
#define LOCK()					\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_lock (&_malloc_mutex);	\
  } while (0)
#define UNLOCK()				\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_unlock (&_malloc_mutex);	\
  } while (0)
#define LOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_aligned_blocks_mutex);	\
  } while (0)
#define UNLOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_aligned_blocks_mutex);	\
  } while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif
/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object.  */
extern void *malloc_find_object_address (void *ptr);

/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern void *(*__morecore) (ptrdiff_t size);

/* Default value of `__morecore'.  */
extern void *__default_morecore (ptrdiff_t size);
/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) (void);

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
extern size_t __malloc_extra_blocks;

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize (void);
/* Hooks for debugging versions.  */
extern void (*__malloc_initialize_hook) (void);
extern void (*__free_hook) (void *ptr);
extern void *(*__malloc_hook) (size_t size);
extern void *(*__realloc_hook) (void *ptr, size_t size);
extern void *(*__memalign_hook) (size_t size, size_t alignment);
/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };
/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck (void (*abortfunc) (enum mcheck_status));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe (void *ptr);
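
/* Illustrative sketch (not part of the library): typical use of the
   consistency-checking interface.  mcheck must run before the first
   malloc; mprobe can then be applied to any live block.  The
   GMALLOC_EXAMPLES guard is invented for this example.  */
#ifdef GMALLOC_EXAMPLES
#include <stdio.h>
static void
example_mcheck (void)
{
  if (mcheck (NULL) != 0)	/* install hooks; NULL means default abort */
    fprintf (stderr, "mcheck: malloc already in use\n");
  char *p = malloc (16);
  if (p && mprobe (p) != MCHECK_OK)
    fprintf (stderr, "block is inconsistent\n");
  free (p);
}
#endif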
/* Activate a standard collection of tracing hooks.  */
extern void mtrace (void);
extern void muntrace (void);

/* Statistics available to the user.  */
struct mstats
  {
    size_t bytes_total;	/* Total size of the heap.  */
    size_t chunks_used;	/* Chunks allocated by the user.  */
    size_t bytes_used;	/* Byte total of user-allocated chunks.  */
    size_t chunks_free;	/* Chunks in the free list.  */
    size_t bytes_free;	/* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats (void);
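
/* Illustrative sketch (not part of the library): reading the
   allocator's counters through mstats.  The GMALLOC_EXAMPLES guard is
   invented for this example.  */
#ifdef GMALLOC_EXAMPLES
#include <stdio.h>
static void
example_mstats (void)
{
  struct mstats s = mstats ();
  printf ("heap: %zu bytes total, %zu used in %zu chunks, %zu free\n",
	  s.bytes_total, s.bytes_used, s.chunks_used, s.bytes_free);
}
#endif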
/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings (void *start, void (*warnfun) (const char *));
/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
void *(*__morecore) (ptrdiff_t size) = __default_morecore;

/* Debugging hook for `malloc'.  */
void *(*__malloc_hook) (size_t size);

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries.  */
static size_t heapsize;

/* Search index in the info table.  */
size_t _heapindex;

/* Limit of valid info table indices.  */
size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
size_t _chunks_used;
size_t _bytes_used;
size_t _chunks_free;
size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

size_t __malloc_extra_blocks;

void (*__malloc_initialize_hook) (void);
void (*__after_morecore_hook) (void);
#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static size_t last_state_size;
static malloc_info *last_heapinfo;

static void
protect_malloc_state (int protect_p)
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo   = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
		    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
	abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif
/* Aligned allocation.  */
static void *
align (size_t size)
{
  void *result;
  ptrdiff_t adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows the
     ptrdiff_t type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if (PTRDIFF_MAX < size)
    result = 0;
  else
    result = (*__morecore) (size);
  adj = (uintptr_t) result % BLOCKSIZE;
  if (adj != 0)
    {
      adj = BLOCKSIZE - adj;
      (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}
/* Get SIZE bytes, if we can get them starting at POSITION.
   Return the address of the space we got.
   If we cannot get space at POSITION, fail and return 0.  */
static void *
get_contiguous_space (ptrdiff_t size, void *position)
{
  void *before;
  void *after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}
/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void
register_heapinfo (void)
{
  size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}
#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;

static void
malloc_atfork_handler_prepare (void)
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread (void)
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
		  malloc_atfork_handler_parent,
		  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif /* USE_PTHREAD */
static void
malloc_initialize_1 (void)
{
#ifdef GC_MCHECK
  mcheck (NULL);
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}
/* Set everything up and remember that we have.
   main will call malloc which calls this function.  That is before any threads
   or signal handlers have been set up, so we don't need thread protection.  */
int
__malloc_initialize (void)
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}
static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static void *
morecore_nolock (size_t size)
{
  void *result;
  malloc_info *newinfo, *oldinfo;
  size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;
  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
	 added blocks in the table itself, as we hope to place them in
	 existing free space, which is already covered by part of the
	 existing table.  */
      newsize = heapsize;
      do
	newsize *= 2;
      while ((size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
	 from realloc in the case of growing a large block, because the
	 block being grown is momentarily marked as free.  In this case
	 _heaplimit is zero so we know not to reuse space for internal
	 allocation.  */
      if (_heaplimit != 0)
	{
	  /* First try to allocate the new info table in core we already
	     have, in the usual way using realloc.  If realloc cannot
	     extend it in place or relocate it to existing sufficient core,
	     we will get called again, and the code above will notice the
	     `morecore_recursing' flag and return null.  */
	  int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
	  morecore_recursing = 1;
	  newinfo = _realloc_internal_nolock (_heapinfo,
					      newsize * sizeof (malloc_info));
	  morecore_recursing = 0;
	  if (newinfo == NULL)
	    errno = save;
	  else
	    {
	      /* We found some space in core, and realloc has put the old
		 table's blocks on the free list.  Now zero the new part
		 of the table and install the new table location.  */
	      memset (&newinfo[heapsize], 0,
		      (newsize - heapsize) * sizeof (malloc_info));
	      _heapinfo = newinfo;
	      heapsize = newsize;
	      goto got_heap;
	    }
	}
      /* Allocate new space for the malloc info table.  */
      while (1)
	{
	  newinfo = align (newsize * sizeof (malloc_info));

	  /* Did it fail?  */
	  if (newinfo == NULL)
	    {
	      (*__morecore) (-size);
	      return NULL;
	    }

	  /* Is it big enough to record status for its own space?
	     If so, we win.  */
	  if ((size_t) BLOCK ((char *) newinfo
			      + newsize * sizeof (malloc_info))
	      < newsize)
	    break;

	  /* Must try again.  First give back most of what we just got.  */
	  (*__morecore) (- newsize * sizeof (malloc_info));
	  newsize *= 2;
	}
      /* Copy the old table to the beginning of the new,
	 and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
	      (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
	 it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}
/* Allocate memory from the heap.  */
void *
_malloc_internal_nolock (size_t size)
{
  void *result;
  size_t block, blocks, lastblocks, start;
  register size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size.  */
      register size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;
      /* Look in the fragment lists for a
	 free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters.  */
	  result = next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    _heapinfo[block].busy.info.frag.first =
	      (uintptr_t) next->next % BLOCKSIZE >> log;

	  /* Update the statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
	  result = _malloc_internal_nolock (BLOCKSIZE);
	  PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
	  result = _malloc_internal_nolock (BLOCKSIZE);
#else
	  result = malloc (BLOCKSIZE);
#endif
	  if (result == NULL)
	    {
	      PROTECT_MALLOC_STATE (1);
	      goto out;
	    }

	  /* Link all fragments but the first into the free list.  */
	  next = (struct list *) ((char *) result + (1 << log));
	  next->next = NULL;
	  next->prev = &_fraghead[log];
	  _fraghead[log].next = next;

	  for (i = 2; i < (size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      next->next->prev = next;
	    }
	  ++_chunks_used;

	  /* Initialize the nfree and first counters for this block.  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Get a little extra.  */
	      size_t wantblocks = blocks + __malloc_extra_blocks;
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      /* Check to see if the new core will be contiguous with the
		 final free block; if so we don't need to get as much.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  /* We can't do this if we will have to make the heap info
		     table bigger to accommodate the new space.  */
		  block + wantblocks <= heapsize &&
		  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
					ADDRESS (block + lastblocks)))
		{
		  /* We got it contiguously.  Which block we are extending
		     (the `final free block' referred to above) might have
		     changed, if it got combined with a freed info table.  */
		  block = _heapinfo[0].free.prev;
		  _heapinfo[block].free.size += (wantblocks - lastblocks);
		  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
		  _heaplimit += wantblocks - lastblocks;
		  continue;
		}
	      result = morecore_nolock (wantblocks * BLOCKSIZE);
	      if (result == NULL)
		goto out;
	      block = BLOCK (result);
	      /* Put the new block at the end of the free list.  */
	      _heapinfo[block].free.size = wantblocks;
	      _heapinfo[block].free.prev = _heapinfo[0].free.prev;
	      _heapinfo[block].free.next = 0;
	      _heapinfo[0].free.prev = block;
	      _heapinfo[_heapinfo[block].free.prev].free.next = block;
	      ++_chunks_free;
	      /* Now loop to use some of that block for this allocation.  */
	    }
	}
      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list.  */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list.  */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
	 first with a negative number so you can find the first block by
	 adding that adjustment.  */
      while (--blocks > 0)
	_heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}
void *
_malloc_internal (size_t size)
{
  void *result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}

void *
malloc (size_t size)
{
  void *(*hook) (size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of __malloc_hook to an automatic variable in case
     __malloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not a right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to __malloc_hook) for now.  */
  hook = __malloc_hook;
  return (hook != NULL ? *hook : _malloc_internal) (size);
}
/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

extern void *_malloc (size_t);
extern void _free (void *);
extern void *_realloc (void *, size_t);

void *
_malloc (size_t size)
{
  return malloc (size);
}

void
_free (void *ptr)
{
  free (ptr);
}

void *
_realloc (void *ptr, size_t size)
{
  return realloc (ptr, size);
}
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
/* Debugging hook for free.  */
void (*__free_hook) (void *__ptr);

/* List of blocks allocated by aligned_alloc.  */
struct alignlist *_aligned_blocks = NULL;
/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (void *ptr)
{
  int type;
  size_t block, blocks;
  register size_t i;
  struct list *prev, *next;
  void *curbrk;
  const size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

  PROTECT_MALLOC_STATE (0);

  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}
      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}
      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}
      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

	  size_t info_block = BLOCK (_heapinfo);
	  size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  size_t prev_block = _heapinfo[block].free.prev;
	  size_t prev_blocks = _heapinfo[prev_block].free.size;
	  size_t next_block = _heapinfo[block].free.next;
	  size_t next_blocks = _heapinfo[next_block].free.size;
	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold))))
	    {
	      malloc_info *newinfo;
	      size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal_nolock (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      newinfo = _malloc_internal_nolock (info_blocks * BLOCKSIZE);
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }
	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      (*__morecore) (-bytes);
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}
      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;
      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
	  _free_internal_nolock (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block.  */
	  next = ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block.  */
	  prev = ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first =
	    (uintptr_t) ptr % BLOCKSIZE >> type;
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
}
/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (void *ptr)
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}

/* Return memory to the heap.  */
void
free (void *ptr)
{
  void (*hook) (void *) = __free_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}

/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (void *ptr)
{
  free (ptr);
}
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#define min(a, b) ((a) < (b) ? (a) : (b))
/* Debugging hook for realloc.  */
void *(*__realloc_hook) (void *ptr, size_t size);

/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.  */
void *
_realloc_internal_nolock (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
	{
	  result = _malloc_internal_nolock (size);
	  if (result != NULL)
	    {
	      memcpy (result, ptr, size);
	      _free_internal_nolock (ptr);
	      goto out;
	    }
	}
      /* The new size is a large allocation as well;
	 see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
	{
	  /* The new size is smaller; return
	     excess memory to the free list.  */
	  _heapinfo[block + blocks].busy.type = 0;
	  _heapinfo[block + blocks].busy.info.size
	    = _heapinfo[block].busy.info.size - blocks;
	  _heapinfo[block].busy.info.size = blocks;
	  /* We have just created a new chunk by splitting a chunk in two.
	     Now we will free this chunk; increment the statistics counter
	     so it doesn't become wrong when _free_internal decrements it.  */
	  ++_chunks_used;
	  _free_internal_nolock (ADDRESS (block + blocks));
	  result = ptr;
	}
      else if (blocks == _heapinfo[block].busy.info.size)
	/* No size change necessary.  */
	result = ptr;
      else
	{
	  /* Won't fit, so allocate a new region that will.
	     Free the old region first in case there is sufficient
	     adjacent free space to grow without moving.  */
	  blocks = _heapinfo[block].busy.info.size;
	  /* Prevent free from actually returning memory to the system.  */
	  oldlimit = _heaplimit;
	  _heaplimit = 0;
	  _free_internal_nolock (ptr);
	  result = _malloc_internal_nolock (size);
	  PROTECT_MALLOC_STATE (0);
	  if (_heaplimit == 0)
	    _heaplimit = oldlimit;
	  if (result == NULL)
	    {
	      /* Now we're really in trouble.  We have to unfree
		 the thing we just freed.  Unfortunately it might
		 have been coalesced with its neighbors.  */
	      if (_heapindex == block)
		(void) _malloc_internal_nolock (blocks * BLOCKSIZE);
	      else
		{
		  void *previous
		    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
		  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
		  _free_internal_nolock (previous);
		}
	      goto out;
	    }
	  if (ptr != result)
	    memmove (result, ptr, blocks * BLOCKSIZE);
	}
      break;
    default:
      /* Old size is a fragment; type is logarithm
	 to base two of the fragment size.  */
      if (size > (size_t) (1 << (type - 1)) &&
	  size <= (size_t) (1 << type))
	/* The new size is the same kind of fragment.  */
	result = ptr;
      else
	{
	  /* The new size is different; allocate a new space,
	     and copy the lesser of the new size and the old.  */
	  result = _malloc_internal_nolock (size);
	  if (result == NULL)
	    goto out;
	  memcpy (result, ptr, min (size, (size_t) 1 << type));
	  _free_internal_nolock (ptr);
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}
void *
_realloc_internal (void *ptr, size_t size)
{
  void *result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}

void *
realloc (void *ptr, size_t size)
{
  void *(*hook) (void *, size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  hook = __realloc_hook;
  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
void *
calloc (size_t nmemb, size_t size)
{
  void *result;
  size_t bytes = nmemb * size;

  if (size != 0 && bytes / size != nmemb)
    {
      errno = ENOMEM;
      return NULL;
    }

  result = malloc (bytes);
  if (result)
    return memset (result, 0, bytes);
  return result;
}
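
/* Illustrative note (not part of the library): the division check
   above detects multiplication overflow without needing a wider
   integer type, since bytes / size == nmemb exactly when
   nmemb * size did not wrap.  For example, with a 32-bit size_t,
   calloc (0x10000, 0x10001) computes bytes == 0x10000 (wrapped), and
   0x10000 / 0x10001 == 0 != 0x10000, so the request is rejected.  */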
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with the GNU C Library.  If not, see <http://www.gnu.org/licenses/>.  */

/* uClibc defines __GNU_LIBRARY__, but it is not completely
   compatible.  */
#if !defined (__GNU_LIBRARY__) || defined (__UCLIBC__)
#define __sbrk	sbrk
#else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */
extern void *__sbrk (ptrdiff_t increment);
#endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
void *
__default_morecore (ptrdiff_t increment)
{
  void *result;
#if defined (CYGWIN)
  if (!DUMPED)
    return bss_sbrk (increment);
#endif
  result = (void *) __sbrk (increment);
  if (result == (void *) -1)
    return NULL;
  return result;
}
/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.  */
void *(*__memalign_hook) (size_t size, size_t alignment);

void *
aligned_alloc (size_t alignment, size_t size)
{
  void *result;
  size_t adj, lastadj;
  void *(*hook) (size_t, size_t) = __memalign_hook;

  if (hook)
    return (*hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  if (- size < alignment)
    {
      errno = ENOMEM;
      return NULL;
    }
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = alignment - (uintptr_t) result % alignment;
  if (adj == alignment)
    adj = 0;

  if (adj != alignment - 1)
    do
      {
	/* Reallocate the block with only as much excess as it
	   needs.  */
	free (result);
	result = malloc (size + adj);
	if (result == NULL)	/* Impossible unless interrupted.  */
	  return NULL;

	lastadj = adj;
	adj = alignment - (uintptr_t) result % alignment;
	if (adj == alignment)
	  adj = 0;
	/* It's conceivable we might have been so unlucky as to get
	   a different block with weaker alignment.  If so, this
	   block is too short to contain SIZE after alignment
	   correction.  So we must try again and get another block,
	   slightly larger.  */
      } while (adj > lastadj);
  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  l = malloc (sizeof *l);
	  if (l != NULL)
	    {
	      l->next = _aligned_blocks;
	      _aligned_blocks = l;
	    }
	}
      if (l != NULL)
	{
	  l->exact = result;
	  result = l->aligned = (char *) result + adj;
	}
      UNLOCK_ALIGNED_BLOCKS ();
      if (l == NULL)
	{
	  free (result);
	  result = NULL;
	}
    }

  return result;
}
/* An obsolete alias for aligned_alloc, for any old libraries that use
   this alias.  */
void *
memalign (size_t alignment, size_t size)
{
  return aligned_alloc (alignment, size);
}
/* If HYBRID_MALLOC is defined, we may want to use the system
   posix_memalign below.  */
#ifndef HYBRID_MALLOC
int
posix_memalign (void **memptr, size_t alignment, size_t size)
{
  void *mem;

  if (alignment == 0
      || alignment % sizeof (void *) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = aligned_alloc (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;

  return 0;
}
#endif
/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
/* Allocate SIZE bytes on a page boundary.  */
extern void *valloc (size_t);

#if defined _SC_PAGESIZE || !defined HAVE_GETPAGESIZE
# include "getpagesize.h"
#elif !defined getpagesize
extern int getpagesize (void);
#endif

static size_t pagesize;

void *
valloc (size_t size)
{
  if (pagesize == 0)
    pagesize = getpagesize ();

  return aligned_alloc (pagesize, size);
}
#ifdef HYBRID_MALLOC
#undef malloc
#undef realloc
#undef calloc
#undef aligned_alloc
#undef free

/* Declare system malloc and friends.  */
extern void *malloc (size_t size);
extern void *realloc (void *ptr, size_t size);
extern void *calloc (size_t nmemb, size_t size);
extern void free (void *ptr);
#ifdef HAVE_ALIGNED_ALLOC
extern void *aligned_alloc (size_t alignment, size_t size);
#elif defined HAVE_POSIX_MEMALIGN
extern int posix_memalign (void **memptr, size_t alignment, size_t size);
#endif
/* See the comments near the beginning of this file for explanations
   of the following functions.  */

void *
hybrid_malloc (size_t size)
{
  if (DUMPED)
    return malloc (size);
  return gmalloc (size);
}

void *
hybrid_calloc (size_t nmemb, size_t size)
{
  if (DUMPED)
    return calloc (nmemb, size);
  return gcalloc (nmemb, size);
}

void
hybrid_free (void *ptr)
{
  if (!DUMPED)
    gfree (ptr);
  else if (!ALLOCATED_BEFORE_DUMPING (ptr))
    free (ptr);
  /* Otherwise the dumped emacs is trying to free something allocated
     before dumping; do nothing.  */
  return;
}

#if defined HAVE_ALIGNED_ALLOC || defined HAVE_POSIX_MEMALIGN
void *
hybrid_aligned_alloc (size_t alignment, size_t size)
{
  if (!DUMPED)
    return galigned_alloc (alignment, size);
  /* The following is copied from alloc.c  */
#ifdef HAVE_ALIGNED_ALLOC
  return aligned_alloc (alignment, size);
#else  /* HAVE_POSIX_MEMALIGN */
  void *p;
  return posix_memalign (&p, alignment, size) == 0 ? p : 0;
#endif
}
#endif
void *
hybrid_realloc (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, oldsize;

  if (!DUMPED)
    return grealloc (ptr, size);
  if (!ALLOCATED_BEFORE_DUMPING (ptr))
    return realloc (ptr, size);

  /* The dumped emacs is trying to realloc storage allocated before
     dumping.  We just malloc new space and copy the data.  */
  if (size == 0 || ptr == NULL)
    return malloc (size);
  block = ((char *) ptr - _heapbase) / BLOCKSIZE + 1;
  type = _heapinfo[block].busy.type;
  oldsize =
    type == 0 ? _heapinfo[block].busy.info.size * BLOCKSIZE
      : (size_t) 1 << type;
  result = malloc (size);
  if (result)
    return memcpy (result, ptr, min (oldsize, size));
  return result;
}
#ifdef HYBRID_GET_CURRENT_DIR_NAME
/* Defined in sysdep.c.  */
char *gget_current_dir_name (void);

char *
hybrid_get_current_dir_name (void)
{
  if (DUMPED)
    return get_current_dir_name ();
  return gget_current_dir_name ();
}
#endif

#endif /* HYBRID_MALLOC */
#ifdef GC_MCHECK

/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#include <stdio.h>
/* Old hook values.  */
static void (*old_free_hook) (void *ptr);
static void *(*old_malloc_hook) (size_t size);
static void *(*old_realloc_hook) (void *ptr, size_t size);

/* Function to call when something awful happens.  */
static void (*abortfunc) (enum mcheck_status);

/* Arbitrary magical numbers.  */
#define MAGICWORD	(SIZE_MAX / 11 ^ SIZE_MAX / 13 << 3)
#define MAGICFREE	(SIZE_MAX / 17 ^ SIZE_MAX / 19 << 4)
#define MAGICBYTE	((char) 0xd7)
#define MALLOCFLOOD	((char) 0x93)
#define FREEFLOOD	((char) 0x95)
struct hdr
  {
    size_t size;	/* Exact size requested by user.  */
    size_t magic;	/* Magic number to check header integrity.  */
  };
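
/* Illustrative note (not part of the library): with these hooks
   installed, each user block of SIZE bytes is embedded in a larger
   allocation laid out as

       [struct hdr: size, magic][SIZE user bytes][1 trailing MAGICBYTE]

   checkhdr below verifies the magic word in front and the MAGICBYTE
   behind, so header clobbering, double frees, and one-byte overruns
   are all detectable.  */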
static enum mcheck_status
checkhdr (const struct hdr *hdr)
{
  enum mcheck_status status;
  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
	status = MCHECK_TAIL;
      else
	status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}
static void
freehook (void *ptr)
{
  struct hdr *hdr;

  if (ptr)
    {
      struct alignlist *l;

      /* If the block was allocated by aligned_alloc, its real pointer
	 to free is recorded in _aligned_blocks; find that.  */
      PROTECT_MALLOC_STATE (0);
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == ptr)
	  {
	    l->aligned = NULL;	/* Mark the slot in the list as free.  */
	    ptr = l->exact;
	    break;
	  }
      UNLOCK_ALIGNED_BLOCKS ();
      PROTECT_MALLOC_STATE (1);

      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      memset (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}
static void *
mallochook (size_t size)
{
  struct hdr *hdr;

  __malloc_hook = old_malloc_hook;
  hdr = malloc (sizeof *hdr + size + 1);
  __malloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  return memset (hdr + 1, MALLOCFLOOD, size);
}
static void *
reallochook (void *ptr, size_t size)
{
  struct hdr *hdr = NULL;
  size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
	memset ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = realloc (hdr, sizeof *hdr + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    memset ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return hdr + 1;
}
static void
mabort (enum mcheck_status status)
{
  const char *msg;
  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
  emacs_abort ();
#endif
}
static int mcheck_used = 0;

int
mcheck (void (*func) (enum mcheck_status))
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = __free_hook;
      __free_hook = freehook;
      old_malloc_hook = __malloc_hook;
      __malloc_hook = mallochook;
      old_realloc_hook = __realloc_hook;
      __realloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}
enum mcheck_status
mprobe (void *ptr)
{
  return mcheck_used ? checkhdr (ptr) : MCHECK_DISABLED;
}

#endif /* GC_MCHECK */