/* Declarations for `malloc' and friends.
   Copyright (C) 1990-1993, 1995-1996, 1999, 2002-2007, 2013-2014 Free
   Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#include <w32heap.h>	/* for sbrk */

extern void emacs_abort (void);
/* Allocate SIZE bytes of memory.  */
extern void *malloc (size_t size);
/* Re-allocate the previously allocated block
   in ptr, making the new block SIZE bytes long.  */
extern void *realloc (void *ptr, size_t size);
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern void *calloc (size_t nmemb, size_t size);
/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
extern void free (void *ptr);
/* Allocate SIZE bytes aligned to ALIGNMENT bytes.  */
extern void *aligned_alloc (size_t, size_t);
extern void *memalign (size_t, size_t);
extern int posix_memalign (void **, size_t, size_t);
/* Set up mutexes and make malloc etc. thread-safe.  */
extern void malloc_enable_thread (void);
/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT		(CHAR_BIT * sizeof (int))
#define BLOCKLOG	(INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE	(1 << BLOCKLOG)
#define BLOCKIFY(SIZE)	(((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
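/* Worked example (an illustration, assuming int is at least 32 bits,
   so INT_BIT >= 32 and BLOCKLOG == 12): BLOCKSIZE is 1 << 12 == 4096
   bytes, and BLOCKIFY (10000) == (10000 + 4095) / 4096 == 2, i.e. a
   10000-byte request is rounded up to two whole blocks.  */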
/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP	(INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS	8
/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
	/* Zero for a large (multiblock) object, or positive giving the
	   logarithm to the base two of the fragment size.  */
	int type;
	union
	  {
	    struct
	      {
		size_t nfree;	/* Free frags in a fragmented block.  */
		size_t first;	/* First free fragment of the block.  */
	      } frag;
	    /* For a large object, in its first block, this has the number
	       of blocks in the object.  In the other blocks, this has a
	       negative number which says how far back the first block is.  */
	    ptrdiff_t size;
	  } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
	size_t size;	/* Size (in blocks) of a free cluster.  */
	size_t next;	/* Index of next free cluster.  */
	size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;
/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;
/* Address to block number and vice versa.  */
#define BLOCK(A)	(((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B)	((void *) (((B) - 1) * BLOCKSIZE + _heapbase))
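/* Round-trip sketch (an illustration): block numbers are 1-based so
   index 0 of the info table can anchor the free list.  For a valid
   block number B, BLOCK (ADDRESS (B)) == B; and ADDRESS (BLOCK (A))
   rounds an interior address A down to the start of its block.  */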
/* Current search index for the heap table.  */
extern size_t _heapindex;

/* Limit of valid info table indices.  */
extern size_t _heaplimit;
/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];
/* List of blocks allocated with aligned_alloc and friends.  */
struct alignlist
  {
    struct alignlist *next;
    void *aligned;	/* The address that aligned_alloc returned.  */
    void *exact;	/* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;
/* Instrumentation.  */
extern size_t _chunks_used;
extern size_t _bytes_used;
extern size_t _chunks_free;
extern size_t _bytes_free;
/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern void *_malloc_internal (size_t);
extern void *_realloc_internal (void *, size_t);
extern void _free_internal (void *);
extern void *_malloc_internal_nolock (size_t);
extern void *_realloc_internal_nolock (void *, size_t);
extern void _free_internal_nolock (void *);
#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;
#define LOCK()					\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_lock (&_malloc_mutex);	\
  } while (0)
#define UNLOCK()				\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_unlock (&_malloc_mutex);	\
  } while (0)
#define LOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_aligned_blocks_mutex);	\
  } while (0)
#define UNLOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_aligned_blocks_mutex);	\
  } while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif
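/* Usage sketch (an illustration): the public entry points bracket
   their work as

     LOCK ();
     result = _malloc_internal_nolock (size);
     UNLOCK ();

   (see _malloc_internal below), so the _nolock variants can call one
   another while the lock is already held without deadlocking.  */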
/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object.  */
extern void *malloc_find_object_address (void *ptr);

/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern void *(*__morecore) (ptrdiff_t size);

/* Default value of `__morecore'.  */
extern void *__default_morecore (ptrdiff_t size);

/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) (void);

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
extern size_t __malloc_extra_blocks;
/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize (void);

/* Hooks for debugging versions.  */
extern void (*__malloc_initialize_hook) (void);
extern void (*__free_hook) (void *ptr);
extern void *(*__malloc_hook) (size_t size);
extern void *(*__realloc_hook) (void *ptr, size_t size);
extern void *(*__memalign_hook) (size_t size, size_t alignment);
/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck (void (*abortfunc) (enum mcheck_status));
/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe (void *ptr);
/* Activate a standard collection of tracing hooks.  */
extern void mtrace (void);
extern void muntrace (void);
/* Statistics available to the user.  */
struct mstats
  {
    size_t bytes_total;	/* Total size of the heap.  */
    size_t chunks_used;	/* Chunks allocated by the user.  */
    size_t bytes_used;	/* Byte total of user-allocated chunks.  */
    size_t chunks_free;	/* Chunks in the free list.  */
    size_t bytes_free;	/* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats (void);
/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings (void *start, void (*warnfun) (const char *));
/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
/* On Cygwin there are two heaps.  temacs uses the static heap
   (defined in sheap.c and managed with bss_sbrk), and the dumped
   emacs uses the Cygwin heap (managed with sbrk).  When emacs starts
   on Cygwin, it reinitializes malloc, and we save the old info for
   use by free and realloc if they're called with a pointer into the
   static heap.

   Currently (2011-08-16) the Cygwin build doesn't use ralloc.c; if
   this is changed in the future, we'll have to similarly deal with
   reinitializing ralloc.  */

extern void *bss_sbrk (ptrdiff_t size);
extern int bss_sbrk_did_unexec;
char *bss_sbrk_heapbase;	/* _heapbase for static heap */
malloc_info *bss_sbrk_heapinfo;	/* _heapinfo for static heap */

void *(*__morecore) (ptrdiff_t size) = __default_morecore;
/* Debugging hook for `malloc'.  */
void *(*__malloc_hook) (size_t size);

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries.  */
static size_t heapsize;

/* Search index in the info table.  */
size_t _heapindex;

/* Limit of valid info table indices.  */
size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
size_t _chunks_used;
size_t _bytes_used;
size_t _chunks_free;
size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

size_t __malloc_extra_blocks;

void (*__malloc_initialize_hook) (void);
void (*__after_morecore_hook) (void);
#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */
#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static size_t last_state_size;
static malloc_info *last_heapinfo;
static void
protect_malloc_state (int protect_p)
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
		    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
	abort ();
    }
}
#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif
/* Aligned allocation.  */
static void *
align (size_t size)
{
  void *result;
  ptrdiff_t adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows the
     ptrdiff_t type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if (PTRDIFF_MAX < size)
    abort ();

  result = (*__morecore) (size);
  adj = (uintptr_t) result % BLOCKSIZE;
  if (adj != 0)
    {
      adj = BLOCKSIZE - adj;
      (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}
/* Get SIZE bytes, if we can get them starting at END.
   Return the address of the space we got.
   If we cannot get space at END, fail and return 0.  */
static void *
get_contiguous_space (ptrdiff_t size, void *position)
{
  void *before;
  void *after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (after == 0)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}
/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void
register_heapinfo (void)
{
  size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}
#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;
static void
malloc_atfork_handler_prepare (void)
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}
/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread (void)
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
		  malloc_atfork_handler_parent,
		  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif	/* USE_PTHREAD */
static void
malloc_initialize_1 (void)
{
#ifdef GC_MCHECK
  mcheck (NULL);
#endif

#ifdef CYGWIN
  if (bss_sbrk_did_unexec)
    /* we're reinitializing the dumped emacs */
    {
      bss_sbrk_heapbase = _heapbase;
      bss_sbrk_heapinfo = _heapinfo;
      memset (_fraghead, 0, BLOCKLOG * sizeof (struct list));
    }
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
}
/* Set everything up and remember that we have.
   main will call malloc which calls this function.  That is before any threads
   or signal handlers have been set up, so we don't need thread protection.  */
int
__malloc_initialize (void)
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}
static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static void *
morecore_nolock (size_t size)
{
  void *result;
  malloc_info *newinfo, *oldinfo;
  size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);
  /* Check if we need to grow the info table.  */
  if ((size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
	 added blocks in the table itself, as we hope to place them in
	 existing free space, which is already covered by part of the
	 existing table.  */
      newsize = heapsize;
      do
	newsize *= 2;
      while ((size_t) BLOCK ((char *) result + size) > newsize);
      /* We must not reuse existing core for the new info table when called
	 from realloc in the case of growing a large block, because the
	 block being grown is momentarily marked as free.  In this case
	 _heaplimit is zero so we know not to reuse space for internal
	 allocation.  */
      if (_heaplimit != 0)
	{
	  /* First try to allocate the new info table in core we already
	     have, in the usual way using realloc.  If realloc cannot
	     extend it in place or relocate it to existing sufficient core,
	     we will get called again, and the code above will notice the
	     `morecore_recursing' flag and return null.  */
	  int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
	  morecore_recursing = 1;
	  newinfo = _realloc_internal_nolock (_heapinfo,
					      newsize * sizeof (malloc_info));
	  morecore_recursing = 0;
	  if (newinfo == NULL)
	    errno = save;
	  else
	    {
	      /* We found some space in core, and realloc has put the old
		 table's blocks on the free list.  Now zero the new part
		 of the table and install the new table location.  */
	      memset (&newinfo[heapsize], 0,
		      (newsize - heapsize) * sizeof (malloc_info));
	      _heapinfo = newinfo;
	      heapsize = newsize;
	      goto got_heap;
	    }
	}
      /* Allocate new space for the malloc info table.  */
      while (1)
	{
	  newinfo = align (newsize * sizeof (malloc_info));

	  /* Did it fail?  */
	  if (newinfo == NULL)
	    {
	      (*__morecore) (-size);
	      return NULL;
	    }

	  /* Is it big enough to record status for its own space?
	     If so, we win.  */
	  if ((size_t) BLOCK ((char *) newinfo
			      + newsize * sizeof (malloc_info))
	      < newsize)
	    break;

	  /* Must try again.  First give back most of what we just got.  */
	  (*__morecore) (- newsize * sizeof (malloc_info));
	  newsize *= 2;
	}

      /* Copy the old table to the beginning of the new,
	 and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
	      (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
	 it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}
/* Allocate memory from the heap.  */
void *
_malloc_internal_nolock (size_t size)
{
  void *result;
  size_t block, blocks, lastblocks, start;
  register size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

  PROTECT_MALLOC_STATE (0);
  if (size < sizeof (struct list))
    size = sizeof (struct list);

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size.  */
      register size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;
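      /* Worked example (an illustration): for size == 100, after
	 --size the loop halves 99 down to 0, incrementing log six
	 times, so log == 7 and the request is served from the
	 1 << 7 == 128-byte fragment list.  */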
      /* Look in the fragment lists for a
	 free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters.  */
	  result = next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    _heapinfo[block].busy.info.frag.first =
	      (uintptr_t) next->next % BLOCKSIZE >> log;

	  /* Update the statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
	  result = _malloc_internal_nolock (BLOCKSIZE);
	  PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
	  result = _malloc_internal_nolock (BLOCKSIZE);
#else
	  result = malloc (BLOCKSIZE);
#endif
	  if (result == NULL)
	    {
	      PROTECT_MALLOC_STATE (1);
	      goto out;
	    }
	  /* Link all fragments but the first into the free list.  */
	  next = (struct list *) ((char *) result + (1 << log));
	  next->next = NULL;
	  next->prev = &_fraghead[log];
	  _fraghead[log].next = next;

	  for (i = 2; i < (size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      next->next->prev = next;
	    }

	  /* Initialize the nfree and first counters for this block.  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Get a little extra.  */
	      size_t wantblocks = blocks + __malloc_extra_blocks;
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      /* Check to see if the new core will be contiguous with the
		 final free block; if so we don't need to get as much.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  /* We can't do this if we will have to make the heap info
		     table bigger to accommodate the new space.  */
		  block + wantblocks <= heapsize &&
		  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
					ADDRESS (block + lastblocks)))
		{
		  /* We got it contiguously.  Which block we are extending
		     (the `final free block' referred to above) might have
		     changed, if it got combined with a freed info table.  */
		  block = _heapinfo[0].free.prev;
		  _heapinfo[block].free.size += (wantblocks - lastblocks);
		  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
		  _heaplimit += wantblocks - lastblocks;
		  continue;
		}
	      result = morecore_nolock (wantblocks * BLOCKSIZE);
	      if (result == NULL)
		goto out;
	      block = BLOCK (result);
	      /* Put the new block at the end of the free list.  */
	      _heapinfo[block].free.size = wantblocks;
	      _heapinfo[block].free.prev = _heapinfo[0].free.prev;
	      _heapinfo[block].free.next = 0;
	      _heapinfo[0].free.prev = block;
	      _heapinfo[_heapinfo[block].free.prev].free.next = block;
	      ++_chunks_free;
	      /* Now loop to use some of that block for this allocation.  */
	    }
	}
      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list.  */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list.  */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}
      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
	 first with a negative number so you can find the first block by
	 adding that adjustment.  */
      while (--blocks > 0)
	_heapinfo[block + blocks].busy.info.size = -blocks;
    }

 out:
  PROTECT_MALLOC_STATE (1);
  return result;
}
void *
_malloc_internal (size_t size)
{
  void *result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}

void *
malloc (size_t size)
{
  void *(*hook) (size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of __malloc_hook to an automatic variable in case
     __malloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not the right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to __malloc_hook) for now.  */
  hook = __malloc_hook;
  return (hook != NULL ? *hook : _malloc_internal) (size);
}
/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

extern void *_malloc (size_t);
extern void _free (void *);
extern void *_realloc (void *, size_t);

void *
_malloc (size_t size)
{
  return malloc (size);
}

void
_free (void *ptr)
{
  free (ptr);
}

void *
_realloc (void *ptr, size_t size)
{
  return realloc (ptr, size);
}
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
/* Debugging hook for free.  */
void (*__free_hook) (void *__ptr);

/* List of blocks allocated by aligned_alloc.  */
struct alignlist *_aligned_blocks = NULL;
/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (void *ptr)
{
  int type;
  size_t block, blocks;
  register size_t i;
  struct list *prev, *next;
  void *curbrk;
  const size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;
  register struct alignlist *l;

  if (ptr == NULL)
    return;

#ifdef CYGWIN
  if ((char *) ptr < _heapbase)
    /* We're being asked to free something in the static heap.  */
    return;
#endif

  PROTECT_MALLOC_STATE (0);
  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }
  UNLOCK_ALIGNED_BLOCKS ();
  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;
      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}
      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}
      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}
      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

	  size_t info_block = BLOCK (_heapinfo);
	  size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  size_t prev_block = _heapinfo[block].free.prev;
	  size_t prev_blocks = _heapinfo[prev_block].free.size;
	  size_t next_block = _heapinfo[block].free.next;
	  size_t next_blocks = _heapinfo[next_block].free.size;
	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold))))
	    {
	      malloc_info *newinfo;
	      size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal_nolock (_heapinfo);
	      _heaplimit = oldlimit;
	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      newinfo = _malloc_internal_nolock (info_blocks * BLOCKSIZE);
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }
	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      (*__morecore) (-bytes);
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}
      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;
      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
	  _free_internal_nolock (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block.  */
	  next = ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block.  */
	  prev = ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first =
	    (uintptr_t) ptr % BLOCKSIZE >> type;
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
}
/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (void *ptr)
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}

/* Return memory to the heap.  */
void
free (void *ptr)
{
  void (*hook) (void *) = __free_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}
/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (void *ptr)
{
  free (ptr);
}
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#define min(A, B) ((A) < (B) ? (A) : (B))

#ifdef CYGWIN
/* On Cygwin the dumped emacs may try to realloc storage allocated in
   the static heap.  We just malloc space in the new heap and copy the
   data.  */
void *
special_realloc (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, oldsize;

  block = ((char *) ptr - bss_sbrk_heapbase) / BLOCKSIZE + 1;
  type = bss_sbrk_heapinfo[block].busy.type;
  oldsize =
    type == 0 ? bss_sbrk_heapinfo[block].busy.info.size * BLOCKSIZE
      : (size_t) 1 << type;
  result = _malloc_internal_nolock (size);
  if (result)
    return memcpy (result, ptr, min (oldsize, size));
  return result;
}
#endif
/* Debugging hook for realloc.  */
void *(*__realloc_hook) (void *ptr, size_t size);

/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.  */
void *
_realloc_internal_nolock (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);
#ifdef CYGWIN
  if ((char *) ptr < _heapbase)
    /* ptr points into the static heap */
    return special_realloc (ptr, size);
#endif

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
	{
	  result = _malloc_internal_nolock (size);
	  if (result != NULL)
	    {
	      memcpy (result, ptr, size);
	      _free_internal_nolock (ptr);
	      goto out;
	    }
	}
      /* The new size is a large allocation as well;
	 see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
	{
	  /* The new size is smaller; return
	     excess memory to the free list.  */
	  _heapinfo[block + blocks].busy.type = 0;
	  _heapinfo[block + blocks].busy.info.size
	    = _heapinfo[block].busy.info.size - blocks;
	  _heapinfo[block].busy.info.size = blocks;
	  /* We have just created a new chunk by splitting a chunk in two.
	     Now we will free this chunk; increment the statistics counter
	     so it doesn't become wrong when _free_internal decrements it.  */
	  ++_chunks_used;
	  _free_internal_nolock (ADDRESS (block + blocks));
	  result = ptr;
	}
      else if (blocks == _heapinfo[block].busy.info.size)
	/* No size change necessary.  */
	result = ptr;
      else
	{
	  /* Won't fit, so allocate a new region that will.
	     Free the old region first in case there is sufficient
	     adjacent free space to grow without moving.  */
	  blocks = _heapinfo[block].busy.info.size;
	  /* Prevent free from actually returning memory to the system.  */
	  oldlimit = _heaplimit;
	  _heaplimit = 0;
	  _free_internal_nolock (ptr);
	  result = _malloc_internal_nolock (size);
	  PROTECT_MALLOC_STATE (0);
	  if (_heaplimit == 0)
	    _heaplimit = oldlimit;
	  if (result == NULL)
	    {
	      /* Now we're really in trouble.  We have to unfree
		 the thing we just freed.  Unfortunately it might
		 have been coalesced with its neighbors.  */
	      if (_heapindex == block)
		(void) _malloc_internal_nolock (blocks * BLOCKSIZE);
	      else
		{
		  void *previous
		    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
		  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
		  _free_internal_nolock (previous);
		}
	      goto out;
	    }
	  if (ptr != result)
	    memmove (result, ptr, blocks * BLOCKSIZE);
	}
      break;
    default:
      /* Old size is a fragment; type is logarithm
	 to base two of the fragment size.  */
      if (size > (size_t) (1 << (type - 1)) &&
	  size <= (size_t) (1 << type))
	/* The new size is the same kind of fragment.  */
	result = ptr;
      else
	{
	  /* The new size is different; allocate a new space,
	     and copy the lesser of the new size and the old.  */
	  result = _malloc_internal_nolock (size);
	  if (result == NULL)
	    goto out;
	  memcpy (result, ptr, min (size, (size_t) 1 << type));
	  _free_internal_nolock (ptr);
	}
      break;
    }

 out:
  PROTECT_MALLOC_STATE (1);
  return result;
}
void *
_realloc_internal (void *ptr, size_t size)
{
  void *result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}

void *
realloc (void *ptr, size_t size)
{
  void *(*hook) (void *, size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  hook = __realloc_hook;
  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
void *
calloc (size_t nmemb, size_t size)
{
  void *result;
  size_t bytes = nmemb * size;

  if (size != 0 && bytes / size != nmemb)
    {
      errno = ENOMEM;
      return NULL;
    }

  result = malloc (bytes);
  if (result)
    return memset (result, 0, bytes);
  return result;
}
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with the GNU C Library.  If not, see <http://www.gnu.org/licenses/>.  */
/* uClibc defines __GNU_LIBRARY__, but it is not completely
   compatible.  */
#if !defined (__GNU_LIBRARY__) || defined (__UCLIBC__)
#define	__sbrk	sbrk
#else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

extern void *__sbrk (ptrdiff_t increment);
#endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
void *
__default_morecore (ptrdiff_t increment)
{
  void *result;
#if defined (CYGWIN)
  if (!bss_sbrk_did_unexec)
    return bss_sbrk (increment);
#endif
  result = (void *) __sbrk (increment);
  if (result == (void *) -1)
    return NULL;
  return result;
}
/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.  */
void *(*__memalign_hook) (size_t size, size_t alignment);

void *
aligned_alloc (size_t alignment, size_t size)
{
  void *result;
  size_t adj, lastadj;
  void *(*hook) (size_t, size_t) = __memalign_hook;

  if (hook)
    return (*hook) (alignment, size);
  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  if (- size < alignment)
    {
      errno = ENOMEM;
      return NULL;
    }
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;
  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = alignment - (uintptr_t) result % alignment;
  if (adj == alignment)
    adj = 0;

  if (adj != alignment - 1)
    {
      do
	{
	  /* Reallocate the block with only as much excess as it
	     needs.  */
	  free (result);
	  result = malloc (size + adj);
	  if (result == NULL)	/* Impossible unless interrupted.  */
	    return NULL;

	  lastadj = adj;
	  adj = alignment - (uintptr_t) result % alignment;
	  if (adj == alignment)
	    adj = 0;
	  /* It's conceivable we might have been so unlucky as to get
	     a different block with weaker alignment.  If so, this
	     block is too short to contain SIZE after alignment
	     correction.  So we must try again and get another block,
	     slightly larger.  */
	} while (adj > lastadj);
    }
  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  l = malloc (sizeof *l);
	  if (l != NULL)
	    {
	      l->next = _aligned_blocks;
	      _aligned_blocks = l;
	    }
	}
      if (l != NULL)
	{
	  l->exact = result;
	  result = l->aligned = (char *) result + adj;
	}
      UNLOCK_ALIGNED_BLOCKS ();
      if (l == NULL)
	{
	  free (result);
	  result = NULL;
	}
    }

  return result;
}
/* An obsolete alias for aligned_alloc, for any old libraries that use
   this alias.  */
void *
memalign (size_t alignment, size_t size)
{
  return aligned_alloc (alignment, size);
}
int
posix_memalign (void **memptr, size_t alignment, size_t size)
{
  void *mem;

  if (alignment == 0
      || alignment % sizeof (void *) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = aligned_alloc (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;

  return 0;
}
/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
/* Allocate SIZE bytes on a page boundary.  */
extern void *valloc (size_t);

#if defined _SC_PAGESIZE || !defined HAVE_GETPAGESIZE
# include "getpagesize.h"
#elif !defined getpagesize
extern int getpagesize (void);
#endif

static size_t pagesize;

void *
valloc (size_t size)
{
  if (pagesize == 0)
    pagesize = getpagesize ();

  return aligned_alloc (pagesize, size);
}
#ifdef GC_MCHECK

/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
/* Old hook values.  */
static void (*old_free_hook) (void *ptr);
static void *(*old_malloc_hook) (size_t size);
static void *(*old_realloc_hook) (void *ptr, size_t size);

/* Function to call when something awful happens.  */
static void (*abortfunc) (enum mcheck_status);
/* Arbitrary magical numbers.  */
#define MAGICWORD	(SIZE_MAX / 11 ^ SIZE_MAX / 13 << 3)
#define MAGICFREE	(SIZE_MAX / 17 ^ SIZE_MAX / 19 << 4)
#define MAGICBYTE	((char) 0xd7)
#define MALLOCFLOOD	((char) 0x93)
#define FREEFLOOD	((char) 0x95)
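/* Layout sketch (an illustration, not part of the allocator proper):
   each checked block is laid out as

       [struct hdr: size, magic][user data: size bytes][MAGICBYTE]

   so clobbering the byte just past the user data trips MCHECK_TAIL,
   while a header whose magic is MAGICFREE (or anything other than
   MAGICWORD) trips MCHECK_FREE or MCHECK_HEAD in checkhdr below.  */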
struct hdr
  {
    size_t size;	/* Exact size requested by user.  */
    size_t magic;	/* Magic number to check header integrity.  */
  };
static enum mcheck_status
checkhdr (const struct hdr *hdr)
{
  enum mcheck_status status;
  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
	status = MCHECK_TAIL;
      else
	status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}
static void
freehook (void *ptr)
{
  struct hdr *hdr;

  if (ptr)
    {
      struct alignlist *l;

      /* If the block was allocated by aligned_alloc, its real pointer
	 to free is recorded in _aligned_blocks; find that.  */
      PROTECT_MALLOC_STATE (0);
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == ptr)
	  {
	    l->aligned = NULL;	/* Mark the slot in the list as free.  */
	    ptr = l->exact;
	    break;
	  }
      UNLOCK_ALIGNED_BLOCKS ();
      PROTECT_MALLOC_STATE (1);

      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      memset (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;
  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}
static void *
mallochook (size_t size)
{
  struct hdr *hdr;

  __malloc_hook = old_malloc_hook;
  hdr = malloc (sizeof *hdr + size + 1);
  __malloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  return memset (hdr + 1, MALLOCFLOOD, size);
}
static void *
reallochook (void *ptr, size_t size)
{
  struct hdr *hdr = NULL;
  size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
	memset ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = realloc (hdr, sizeof *hdr + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    memset ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return hdr + 1;
}
static void
mabort (enum mcheck_status status)
{
  const char *msg;
  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
  emacs_abort ();
#endif
}
static int mcheck_used = 0;

int
mcheck (void (*func) (enum mcheck_status))
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = __free_hook;
      __free_hook = freehook;
      old_malloc_hook = __malloc_hook;
      __malloc_hook = mallochook;
      old_realloc_hook = __realloc_hook;
      __realloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}

enum mcheck_status
mprobe (void *ptr)
{
  return mcheck_used ? checkhdr (ptr) : MCHECK_DISABLED;
}

#endif /* GC_MCHECK */