/* Declarations for `malloc' and friends.
   Copyright (C) 1990, 1991, 1992, 1993, 1995, 1996, 1999, 2002, 2003, 2004,
   2005, 2006, 2007 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
/* Allocate SIZE bytes of memory.  */
extern void *malloc (size_t size);

/* Re-allocate the previously allocated block in PTR,
   making the new block SIZE bytes long.  */
extern void *realloc (void *ptr, size_t size);

/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern void *calloc (size_t nmemb, size_t size);

/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
extern void free (void *ptr);
/* Allocate SIZE bytes aligned to ALIGNMENT bytes.  */
extern void *memalign (size_t, size_t);
extern int posix_memalign (void **, size_t, size_t);

/* Set up mutexes and make malloc etc. thread-safe.  */
extern void malloc_enable_thread (void);
/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT		(CHAR_BIT * sizeof (int))
#define BLOCKLOG	(INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE	(1 << BLOCKLOG)
#define BLOCKIFY(SIZE)	(((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
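/* Illustrative sketch (example only; `blockify_example' is a
   hypothetical name, not part of this allocator): on a host with
   32-bit int, INT_BIT is 32, so BLOCKLOG is 12 and BLOCKSIZE is 4096;
   BLOCKIFY rounds a byte count up to whole blocks.  */
#if 0	/* example only */
#include <assert.h>
static void
blockify_example (void)
{
  assert (BLOCKIFY (1) == 1);		  /* 1 byte -> 1 block */
  assert (BLOCKIFY (BLOCKSIZE) == 1);	  /* exactly one block */
  assert (BLOCKIFY (BLOCKSIZE + 1) == 2); /* spills into a second block */
}
#endif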
/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP		(INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS	8
/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
	/* Zero for a large (multiblock) object, or positive giving the
	   logarithm to the base two of the fragment size.  */
	int type;
	union
	  {
	    struct
	      {
		size_t nfree;	/* Free frags in a fragmented block.  */
		size_t first;	/* First free fragment of the block.  */
	      } frag;
	    /* For a large object, in its first block, this has the number
	       of blocks in the object.  In the other blocks, this has a
	       negative number which says how far back the first block is.  */
	    ptrdiff_t size;
	  } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
	size_t size;	/* Size (in blocks) of a free cluster.  */
	size_t next;	/* Index of next free cluster.  */
	size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;
/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A)	(((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B)	((void *) (((B) - 1) * BLOCKSIZE + _heapbase))
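/* Illustrative sketch (example only; `block_address_example' is a
   hypothetical name): BLOCK and ADDRESS invert each other, so any
   pointer into the heap can be mapped to its block index and back to
   the block's first byte.  */
#if 0	/* example only */
static void
block_address_example (void *p)
{
  size_t b = BLOCK (p);		/* index of the block containing P */
  void *start = ADDRESS (b);	/* first byte of that block */
  /* START <= P < (char *) START + BLOCKSIZE for any P in the heap.  */
  (void) start;
}
#endif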
/* Current search index for the heap table.  */
extern size_t _heapindex;

/* Limit of valid info table indices.  */
extern size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];
/* List of blocks allocated with `memalign' (or `valloc').  */
struct alignlist
  {
    struct alignlist *next;
    void *aligned;	/* The address that memaligned returned.  */
    void *exact;	/* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;
/* Instrumentation.  */
extern size_t _chunks_used;
extern size_t _bytes_used;
extern size_t _chunks_free;
extern size_t _bytes_free;
/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern void *_malloc_internal (size_t);
extern void *_realloc_internal (void *, size_t);
extern void _free_internal (void *);
extern void *_malloc_internal_nolock (size_t);
extern void *_realloc_internal_nolock (void *, size_t);
extern void _free_internal_nolock (void *);
#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;
#define LOCK()					\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_lock (&_malloc_mutex);	\
  } while (0)
#define UNLOCK()				\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_unlock (&_malloc_mutex);	\
  } while (0)
#define LOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_aligned_blocks_mutex);	\
  } while (0)
#define UNLOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_aligned_blocks_mutex);	\
  } while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif
/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object.  */
extern void *malloc_find_object_address (void *ptr);

/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern void *(*__morecore) (ptrdiff_t size);
/* Default value of `__morecore'.  */
extern void *__default_morecore (ptrdiff_t size);

/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) (void);
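/* Illustrative sketch (example only; `example_arena' and
   `example_morecore' are hypothetical names): `__morecore' may be
   pointed at any sbrk-like function, provided successive calls return
   contiguous memory and failures return NULL, as `__default_morecore'
   does.  */
#if 0	/* example only */
static char example_arena[1 << 20];	/* fixed backing store */
static ptrdiff_t example_break;		/* current "break" offset */

static void *
example_morecore (ptrdiff_t increment)
{
  ptrdiff_t newbrk = example_break + increment;
  void *result;

  if (newbrk < 0 || newbrk > (ptrdiff_t) sizeof example_arena)
    return NULL;			/* out of arena space */
  result = example_arena + example_break;
  example_break = newbrk;
  return result;			/* old break, as sbrk does */
}
/* Installed before the first allocation: __morecore = example_morecore;  */
#endif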
/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
extern size_t __malloc_extra_blocks;

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize (void);
/* Hooks for debugging versions.  */
extern void (*__malloc_initialize_hook) (void);
extern void (*__free_hook) (void *ptr);
extern void *(*__malloc_hook) (size_t size);
extern void *(*__realloc_hook) (void *ptr, size_t size);
extern void *(*__memalign_hook) (size_t size, size_t alignment);
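/* Illustrative sketch (example only; `logging_malloc_hook' is a
   hypothetical name): hooks follow the save/restore pattern used by
   the mcheck hooks later in this file, disarming themselves around
   the inner allocation to avoid recursion.  */
#if 0	/* example only */
static void *
logging_malloc_hook (size_t size)
{
  void *result;

  __malloc_hook = NULL;			/* disarm to avoid recursion */
  result = malloc (size);
  fprintf (stderr, "malloc (%lu) = %p\n", (unsigned long) size, result);
  __malloc_hook = logging_malloc_hook;	/* re-arm */
  return result;
}
#endif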
/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck (void (*abortfunc) (enum mcheck_status));
/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe (void *ptr);

/* Activate a standard collection of tracing hooks.  */
extern void mtrace (void);
extern void muntrace (void);
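/* Illustrative sketch (example only; `example_mcheck_usage' is a
   hypothetical name): `mcheck' only installs its hooks if called
   before the first allocation; `mprobe' can then audit any live
   block on demand.  */
#if 0	/* example only */
int
example_mcheck_usage (void)
{
  char *p;

  if (mcheck (NULL) != 0)	/* NULL: report on stderr and abort */
    return -1;			/* malloc was already in use */
  p = malloc (16);
  if (p == NULL)
    return -1;
  if (mprobe (p) != MCHECK_OK)	/* same checks free would perform */
    return -1;
  free (p);
  return 0;
}
#endif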
/* Statistics available to the user.  */
struct mstats
  {
    size_t bytes_total;	/* Total size of the heap.  */
    size_t chunks_used;	/* Chunks allocated by the user.  */
    size_t bytes_used;	/* Byte total of user-allocated chunks.  */
    size_t chunks_free;	/* Chunks in the free list.  */
    size_t bytes_free;	/* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats (void);

/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings (void *start, void (*warnfun) (const char *));
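/* Illustrative sketch (example only; `report_heap' is a hypothetical
   name): the statistics are returned by value, so callers can
   snapshot and compare heap usage over time.  */
#if 0	/* example only */
static void
report_heap (void)
{
  struct mstats m = mstats ();
  fprintf (stderr, "heap: %lu bytes total, %lu used in %lu chunks\n",
	   (unsigned long) m.bytes_total,
	   (unsigned long) m.bytes_used,
	   (unsigned long) m.chunks_used);
}
#endif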
/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifdef CYGWIN
/* On Cygwin there are two heaps.  temacs uses the static heap
   (defined in sheap.c and managed with bss_sbrk), and the dumped
   emacs uses the Cygwin heap (managed with sbrk).  When emacs starts
   on Cygwin, it reinitializes malloc, and we save the old info for
   use by free and realloc if they're called with a pointer into the
   static heap.

   Currently (2011-08-16) the Cygwin build doesn't use ralloc.c; if
   this is changed in the future, we'll have to similarly deal with
   reinitializing ralloc.  */

extern void *bss_sbrk (ptrdiff_t size);
extern int bss_sbrk_did_unexec;
char *bss_sbrk_heapbase;	/* _heapbase for static heap */
malloc_info *bss_sbrk_heapinfo;	/* _heapinfo for static heap */
#endif

void *(*__morecore) (ptrdiff_t size) = __default_morecore;
/* Debugging hook for `malloc'.  */
void *(*__malloc_hook) (size_t size);

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries.  */
static size_t heapsize;

/* Search index in the info table.  */
size_t _heapindex;

/* Limit of valid info table indices.  */
size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
size_t _chunks_used;
size_t _bytes_used;
size_t _chunks_free;
size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

size_t __malloc_extra_blocks;

void (*__malloc_initialize_hook) (void);
void (*__after_morecore_hook) (void);
#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static size_t last_state_size;
static malloc_info *last_heapinfo;

void
protect_malloc_state (int protect_p)
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo   = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
		    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
	abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif
/* Aligned allocation.  */
static void *
align (size_t size)
{
  void *result;
  ptrdiff_t adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows the
     ptrdiff_t type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if (PTRDIFF_MAX < size)
    result = 0;
  else
    result = (*__morecore) (size);
  adj = (uintptr_t) result % BLOCKSIZE;
  if (adj != 0)
    {
      adj = BLOCKSIZE - adj;
      (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}
/* Get SIZE bytes, if we can get them starting at END.
   Return the address of the space we got.
   If we cannot get space at END, fail and return 0.  */
static void *
get_contiguous_space (ptrdiff_t size, void *position)
{
  void *before;
  void *after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (after == 0)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}
/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void
register_heapinfo (void)
{
  size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}
#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;

static void
malloc_atfork_handler_prepare (void)
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}
/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread (void)
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
		  malloc_atfork_handler_parent,
		  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif /* USE_PTHREAD */
static void
malloc_initialize_1 (void)
{
#ifdef CYGWIN
  if (bss_sbrk_did_unexec)
    /* we're reinitializing the dumped emacs */
    {
      bss_sbrk_heapbase = _heapbase;
      bss_sbrk_heapinfo = _heapinfo;
      memset (_fraghead, 0, BLOCKLOG * sizeof (struct list));
    }
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}
/* Set everything up and remember that we have.
   main will call malloc which calls this function.  That is before any threads
   or signal handlers have been set up, so we don't need thread protection.  */
int
__malloc_initialize (void)
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}
static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static void *
morecore_nolock (size_t size)
{
  void *result;
  malloc_info *newinfo, *oldinfo;
  size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
	 added blocks in the table itself, as we hope to place them in
	 existing free space, which is already covered by part of the
	 existing table.  */
      newsize = heapsize;
      do
	newsize *= 2;
      while ((size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
	 from realloc in the case of growing a large block, because the
	 block being grown is momentarily marked as free.  In this case
	 _heaplimit is zero so we know not to reuse space for internal
	 allocation.  */
      if (_heaplimit != 0)
	{
	  /* First try to allocate the new info table in core we already
	     have, in the usual way using realloc.  If realloc cannot
	     extend it in place or relocate it to existing sufficient core,
	     we will get called again, and the code above will notice the
	     `morecore_recursing' flag and return null.  */
	  int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
	  morecore_recursing = 1;
	  newinfo = _realloc_internal_nolock (_heapinfo,
					      newsize * sizeof (malloc_info));
	  morecore_recursing = 0;
	  if (newinfo == NULL)
	    errno = save;
	  else
	    {
	      /* We found some space in core, and realloc has put the old
		 table's blocks on the free list.  Now zero the new part
		 of the table and install the new table location.  */
	      memset (&newinfo[heapsize], 0,
		      (newsize - heapsize) * sizeof (malloc_info));
	      _heapinfo = newinfo;
	      heapsize = newsize;
	      goto got_heap;
	    }
	}

      /* Allocate new space for the malloc info table.  */
      while (1)
	{
	  newinfo = align (newsize * sizeof (malloc_info));

	  /* Did it fail?  */
	  if (newinfo == NULL)
	    {
	      (*__morecore) (-size);
	      return NULL;
	    }

	  /* Is it big enough to record status for its own space?
	     If so, we win.  */
	  if ((size_t) BLOCK ((char *) newinfo
			      + newsize * sizeof (malloc_info))
	      < newsize)
	    break;

	  /* Must try again.  First give back most of what we just got.  */
	  (*__morecore) (- newsize * sizeof (malloc_info));
	  newsize *= 2;
	}

      /* Copy the old table to the beginning of the new,
	 and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
	      (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
	 it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}
/* Allocate memory from the heap.  */
void *
_malloc_internal_nolock (size_t size)
{
  void *result;
  size_t block, blocks, lastblocks, start;
  register size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size.  */
      register size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;

      /* Look in the fragment lists for a
	 free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters.  */
	  result = next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    _heapinfo[block].busy.info.frag.first =
	      (uintptr_t) next->next % BLOCKSIZE >> log;

	  /* Update the statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
	  result = _malloc_internal_nolock (BLOCKSIZE);
	  PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
	  result = _malloc_internal_nolock (BLOCKSIZE);
#else
	  result = malloc (BLOCKSIZE);
#endif
	  if (result == NULL)
	    {
	      PROTECT_MALLOC_STATE (1);
	      goto out;
	    }

	  /* Link all fragments but the first into the free list.  */
	  next = (struct list *) ((char *) result + (1 << log));
	  next->next = NULL;
	  next->prev = &_fraghead[log];
	  _fraghead[log].next = next;

	  for (i = 2; i < (size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      next->next->prev = next;
	    }

	  /* Initialize the nfree and first counters for this block.  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Get a little extra.  */
	      size_t wantblocks = blocks + __malloc_extra_blocks;
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      /* Check to see if the new core will be contiguous with the
		 final free block; if so we don't need to get as much.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  /* We can't do this if we will have to make the heap info
		     table bigger to accommodate the new space.  */
		  block + wantblocks <= heapsize &&
		  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
					ADDRESS (block + lastblocks)))
		{
		  /* We got it contiguously.  Which block we are extending
		     (the `final free block' referred to above) might have
		     changed, if it got combined with a freed info table.  */
		  block = _heapinfo[0].free.prev;
		  _heapinfo[block].free.size += (wantblocks - lastblocks);
		  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
		  _heaplimit += wantblocks - lastblocks;
		  continue;
		}
	      result = morecore_nolock (wantblocks * BLOCKSIZE);
	      if (result == NULL)
		goto out;
	      block = BLOCK (result);
	      /* Put the new block at the end of the free list.  */
	      _heapinfo[block].free.size = wantblocks;
	      _heapinfo[block].free.prev = _heapinfo[0].free.prev;
	      _heapinfo[block].free.next = 0;
	      _heapinfo[0].free.prev = block;
	      _heapinfo[_heapinfo[block].free.prev].free.next = block;
	      ++_chunks_free;
	      /* Now loop to use some of that block for this allocation.  */
	    }
	}

      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list.  */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list.  */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
	 first with a negative number so you can find the first block by
	 adding that adjustment.  */
      while (--blocks > 0)
	_heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}
void *
_malloc_internal (size_t size)
{
  void *result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}

void *
malloc (size_t size)
{
  void *(*hook) (size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of __malloc_hook to an automatic variable in case
     __malloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not the right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to __malloc_hook) for now.  */
  hook = __malloc_hook;
  return (hook != NULL ? *hook : _malloc_internal) (size);
}
#ifndef _LIBC

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

extern void *_malloc (size_t);
extern void _free (void *);
extern void *_realloc (void *, size_t);

void *
_malloc (size_t size)
{
  return malloc (size);
}

void
_free (void *ptr)
{
  free (ptr);
}

void *
_realloc (void *ptr, size_t size)
{
  return realloc (ptr, size);
}

#endif
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
/* Debugging hook for free.  */
void (*__free_hook) (void *__ptr);

/* List of blocks allocated by memalign.  */
struct alignlist *_aligned_blocks = NULL;
/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (void *ptr)
{
  int type;
  size_t block, blocks;
  register size_t i;
  struct list *prev, *next;
  void *curbrk;
  const size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

#ifdef CYGWIN
  if ((char *) ptr < _heapbase)
    /* We're being asked to free something in the static heap.  */
    return;
#endif

  PROTECT_MALLOC_STATE (0);

  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}

      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

	  size_t info_block = BLOCK (_heapinfo);
	  size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  size_t prev_block = _heapinfo[block].free.prev;
	  size_t prev_blocks = _heapinfo[prev_block].free.size;
	  size_t next_block = _heapinfo[block].free.next;
	  size_t next_blocks = _heapinfo[next_block].free.size;

	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold))))
	    {
	      malloc_info *newinfo;
	      size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal_nolock (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      newinfo = _malloc_internal_nolock (info_blocks * BLOCKSIZE);
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }

	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      (*__morecore) (-bytes);
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
	  _free_internal_nolock (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block.  */
	  next = ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block.  */
	  prev = ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first =
	    (uintptr_t) ptr % BLOCKSIZE >> type;
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
}
/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (void *ptr)
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}

/* Return memory to the heap.  */
void
free (void *ptr)
{
  void (*hook) (void *) = __free_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}

/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (void *ptr)
{
  free (ptr);
}
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#define min(A, B) ((A) < (B) ? (A) : (B))

#ifdef CYGWIN
/* On Cygwin the dumped emacs may try to realloc storage allocated in
   the static heap.  We just malloc space in the new heap and copy the
   data.  */
void *
special_realloc (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, oldsize;

  block = ((char *) ptr - bss_sbrk_heapbase) / BLOCKSIZE + 1;
  type = bss_sbrk_heapinfo[block].busy.type;
  oldsize =
    type == 0 ? bss_sbrk_heapinfo[block].busy.info.size * BLOCKSIZE
    : (size_t) 1 << type;
  result = _malloc_internal_nolock (size);
  if (result != NULL)
    memcpy (result, ptr, min (oldsize, size));
  return result;
}
#endif
1318 void *(*__realloc_hook
) (void *ptr
, size_t size
);
1320 /* Resize the given region to the new size, returning a pointer
1321 to the (possibly moved) region. This is optimized for speed;
1322 some benchmarks seem to indicate that greater compactness is
1323 achieved by unconditionally allocating and copying to a
1324 new region. This module has incestuous knowledge of the
1325 internals of both free and malloc. */
1327 _realloc_internal_nolock (void *ptr
, size_t size
)
1331 size_t block
, blocks
, oldlimit
;
1335 _free_internal_nolock (ptr
);
1336 return _malloc_internal_nolock (0);
1338 else if (ptr
== NULL
)
1339 return _malloc_internal_nolock (size
);
1342 if ((char *) ptr
< _heapbase
)
1343 /* ptr points into the static heap */
1344 return special_realloc (ptr
, size
);
1347 block
= BLOCK (ptr
);
1349 PROTECT_MALLOC_STATE (0);
1351 type
= _heapinfo
[block
].busy
.type
;
1355 /* Maybe reallocate a large block to a small fragment. */
1356 if (size
<= BLOCKSIZE
/ 2)
1358 result
= _malloc_internal_nolock (size
);
1361 memcpy (result
, ptr
, size
);
1362 _free_internal_nolock (ptr
);
1367 /* The new size is a large allocation as well;
1368 see if we can hold it in place. */
1369 blocks
= BLOCKIFY (size
);
1370 if (blocks
< _heapinfo
[block
].busy
.info
.size
)
1372 /* The new size is smaller; return
1373 excess memory to the free list. */
1374 _heapinfo
[block
+ blocks
].busy
.type
= 0;
1375 _heapinfo
[block
+ blocks
].busy
.info
.size
1376 = _heapinfo
[block
].busy
.info
.size
- blocks
;
1377 _heapinfo
[block
].busy
.info
.size
= blocks
;
1378 /* We have just created a new chunk by splitting a chunk in two.
1379 Now we will free this chunk; increment the statistics counter
1380 so it doesn't become wrong when _free_internal decrements it. */
1382 _free_internal_nolock (ADDRESS (block
+ blocks
));
1385 else if (blocks
== _heapinfo
[block
].busy
.info
.size
)
1386 /* No size change necessary. */
1390 /* Won't fit, so allocate a new region that will.
1391 Free the old region first in case there is sufficient
1392 adjacent free space to grow without moving. */
1393 blocks
= _heapinfo
[block
].busy
.info
.size
;
1394 /* Prevent free from actually returning memory to the system. */
1395 oldlimit
= _heaplimit
;
1397 _free_internal_nolock (ptr
);
1398 result
= _malloc_internal_nolock (size
);
1399 PROTECT_MALLOC_STATE (0);
1400 if (_heaplimit
== 0)
1401 _heaplimit
= oldlimit
;
1404 /* Now we're really in trouble. We have to unfree
1405 the thing we just freed. Unfortunately it might
1406 have been coalesced with its neighbors. */
1407 if (_heapindex
== block
)
1408 (void) _malloc_internal_nolock (blocks
* BLOCKSIZE
);
1412 = _malloc_internal_nolock ((block
- _heapindex
) * BLOCKSIZE
);
1413 (void) _malloc_internal_nolock (blocks
* BLOCKSIZE
);
1414 _free_internal_nolock (previous
);
1419 memmove (result
, ptr
, blocks
* BLOCKSIZE
);
1424 /* Old size is a fragment; type is logarithm
1425 to base two of the fragment size. */
1426 if (size
> (size_t) (1 << (type
- 1)) &&
1427 size
<= (size_t) (1 << type
))
1428 /* The new size is the same kind of fragment. */
1432 /* The new size is different; allocate a new space,
1433 and copy the lesser of the new size and the old. */
1434 result
= _malloc_internal_nolock (size
);
1437 memcpy (result
, ptr
, min (size
, (size_t) 1 << type
));
1438 _free_internal_nolock (ptr
);
1443 PROTECT_MALLOC_STATE (1);
void *
_realloc_internal (void *ptr, size_t size)
{
  void *result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}

void *
realloc (void *ptr, size_t size)
{
  void *(*hook) (void *, size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  hook = __realloc_hook;
  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
void *
calloc (register size_t nmemb, register size_t size)
{
  register void *result;

  /* Guard against arithmetic overflow in `nmemb * size'.  */
  if (nmemb != 0 && size > (size_t) -1 / nmemb)
    return NULL;

  result = malloc (nmemb * size);
  if (result != NULL)
    (void) memset (result, 0, nmemb * size);

  return result;
}
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with the GNU C Library; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
   MA 02110-1301, USA.  */
/* uClibc defines __GNU_LIBRARY__, but it is not completely
   compatible.  */
#if !defined (__GNU_LIBRARY__) || defined (__UCLIBC__)
#define __sbrk sbrk
#else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

extern void *__sbrk (ptrdiff_t increment);
#endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
void *
__default_morecore (ptrdiff_t increment)
{
  void *result;
#if defined (CYGWIN)
  if (!bss_sbrk_did_unexec)
    {
      return bss_sbrk (increment);
    }
#endif
  result = (void *) __sbrk (increment);
  if (result == (void *) -1)
    return NULL;
  return result;
}
/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.  */
void *(*__memalign_hook) (size_t size, size_t alignment);

void *
memalign (size_t alignment, size_t size)
{
  void *result;
  size_t adj, lastadj;
  void *(*hook) (size_t, size_t) = __memalign_hook;

  if (hook)
    return (*hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = (uintptr_t) result % alignment;

  do
    {
      /* Reallocate the block with only as much excess as it needs.  */
      free (result);
      result = malloc (adj + size);
      if (result == NULL)	/* Impossible unless interrupted.  */
	return NULL;

      lastadj = adj;
      adj = (uintptr_t) result % alignment;
      /* It's conceivable we might have been so unlucky as to get a
	 different block with weaker alignment.  If so, this block is too
	 short to contain SIZE after alignment correction.  So we must
	 try again and get another block, slightly larger.  */
    }
  while (adj > lastadj);

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  l = malloc (sizeof *l);
	  if (l != NULL)
	    {
	      l->next = _aligned_blocks;
	      _aligned_blocks = l;
	    }
	}
      if (l != NULL)
	{
	  l->exact = result;
	  result = l->aligned = (char *) result + alignment - adj;
	}
      UNLOCK_ALIGNED_BLOCKS ();
      if (l == NULL)
	{
	  free (result);
	  result = NULL;
	}
    }

  return result;
}
int
posix_memalign (void **memptr, size_t alignment, size_t size)
{
  void *mem;

  if (alignment == 0
      || alignment % sizeof (void *) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = memalign (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;

  return 0;
}
/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
/* Allocate SIZE bytes on a page boundary.  */
extern void *valloc (size_t);

#if defined _SC_PAGESIZE || !defined HAVE_GETPAGESIZE
# include "getpagesize.h"
#elif !defined getpagesize
extern int getpagesize (void);
#endif

static size_t pagesize;

void *
valloc (size_t size)
{
  if (pagesize == 0)
    pagesize = getpagesize ();

  return memalign (pagesize, size);
}
#ifdef GC_MCHECK

/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
/* Old hook values.  */
static void (*old_free_hook) (void *ptr);
static void *(*old_malloc_hook) (size_t size);
static void *(*old_realloc_hook) (void *ptr, size_t size);

/* Function to call when something awful happens.  */
static void (*abortfunc) (enum mcheck_status);

/* Arbitrary magical numbers.  */
#define MAGICWORD	(SIZE_MAX / 11 ^ SIZE_MAX / 13 << 3)
#define MAGICFREE	(SIZE_MAX / 17 ^ SIZE_MAX / 19 << 4)
#define MAGICBYTE	((char) 0xd7)
#define MALLOCFLOOD	((char) 0x93)
#define FREEFLOOD	((char) 0x95)

struct hdr
  {
    size_t size;	/* Exact size requested by user.  */
    size_t magic;	/* Magic number to check header integrity.  */
  };
static enum mcheck_status
checkhdr (const struct hdr *hdr)
{
  enum mcheck_status status;

  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
	status = MCHECK_TAIL;
      else
	status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}
static void
freehook (void *ptr)
{
  struct hdr *hdr;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      memset (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}
static void *
mallochook (size_t size)
{
  struct hdr *hdr;

  __malloc_hook = old_malloc_hook;
  hdr = malloc (sizeof *hdr + size + 1);
  __malloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  memset (hdr + 1, MALLOCFLOOD, size);
  return hdr + 1;
}
static void *
reallochook (void *ptr, size_t size)
{
  struct hdr *hdr = NULL;
  size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
	memset ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = realloc (hdr, sizeof *hdr + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    memset ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return hdr + 1;
}
static void
mabort (enum mcheck_status status)
{
  const char *msg;

  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
  abort ();
#endif
}
static int mcheck_used = 0;

int
mcheck (void (*func) (enum mcheck_status))
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = __free_hook;
      __free_hook = freehook;
      old_malloc_hook = __malloc_hook;
      __malloc_hook = mallochook;
      old_realloc_hook = __realloc_hook;
      __realloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}

enum mcheck_status
mprobe (void *ptr)
{
  /* Step back from the user pointer to its header, as `freehook' does;
     checkhdr expects the header address, not the user block.  */
  return mcheck_used ? checkhdr (((struct hdr *) ptr) - 1) : MCHECK_DISABLED;
}

#endif /* GC_MCHECK */