/* This file is no longer automatically generated from libc.  */

#define _MALLOC_INTERNAL

/* The malloc headers and source files from the C library follow here.  */
/* Declarations for `malloc' and friends.
   Copyright (C) 1990, 1991, 1992, 1993, 1995, 1996, 1999, 2002, 2003, 2004,
   2005, 2006, 2007 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifdef _MALLOC_INTERNAL

#ifdef HAVE_GTK_AND_PTHREAD
#define USE_PTHREAD
#endif

#if ((defined __cplusplus || (defined (__STDC__) && __STDC__) \
      || defined STDC_HEADERS || defined PROTOTYPES))
#undef PP
#define PP(args) args
#undef __ptr_t
#define __ptr_t void *
#else /* Not C++ or ANSI C.  */
#undef PP
#define PP(args) ()
#undef __ptr_t
#define __ptr_t char *
#endif /* C++ or ANSI C.  */
#if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
#include <string.h>
#else
#ifndef memset
#define memset(s, zero, n) bzero ((s), (n))
#endif
#ifndef memcpy
#define memcpy(d, s, n) bcopy ((s), (d), (n))
#endif
#endif

#endif /* _MALLOC_INTERNAL.  */
#ifdef STDC_HEADERS
#include <stddef.h>
#define __malloc_size_t size_t
#define __malloc_ptrdiff_t ptrdiff_t
#else
#ifdef __SIZE_TYPE__
#define __malloc_size_t __SIZE_TYPE__
#endif
#ifndef __malloc_size_t
#define __malloc_size_t unsigned int
#endif
#define __malloc_ptrdiff_t int
#endif
/* Allocate SIZE bytes of memory.  */
extern __ptr_t malloc PP ((__malloc_size_t __size));
/* Re-allocate the previously allocated block
   in __ptr_t, making the new block SIZE bytes long.  */
extern __ptr_t realloc PP ((__ptr_t __ptr, __malloc_size_t __size));
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern __ptr_t calloc PP ((__malloc_size_t __nmemb, __malloc_size_t __size));
/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
extern void free PP ((__ptr_t __ptr));
/* Allocate SIZE bytes aligned to a multiple of ALIGNMENT bytes.  */
#if !defined (_MALLOC_INTERNAL) || defined (MSDOS) /* Avoid conflict.  */
extern __ptr_t memalign PP ((__malloc_size_t __alignment,
			     __malloc_size_t __size));
extern int posix_memalign PP ((__ptr_t *, __malloc_size_t,
			       __malloc_size_t size));
#endif
/* Allocate SIZE bytes on a page boundary.  */
#if ! (defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC))
extern __ptr_t valloc PP ((__malloc_size_t __size));
#endif

#ifdef USE_PTHREAD
/* Set up mutexes and make malloc etc. thread-safe.  */
extern void malloc_enable_thread PP ((void));
#endif
#ifdef _MALLOC_INTERNAL

/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT		(CHAR_BIT * sizeof (int))
#define BLOCKLOG	(INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE	(1 << BLOCKLOG)
#define BLOCKIFY(SIZE)	(((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)

/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP		(INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS	8
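
/* Illustrative sketch (not part of the allocator): how the sizing
   macros above behave on a host where INT_BIT > 16, so BLOCKLOG == 12
   and BLOCKSIZE == 4096.  Requests larger than BLOCKSIZE / 2 are
   rounded up to whole blocks by BLOCKIFY; smaller ones are served from
   a power-of-two fragment instead.  */
#if 0
#include <assert.h>
static void
blockify_example (void)
{
  assert (BLOCKIFY (1) == 1);		/* One whole 4096-byte block.  */
  assert (BLOCKIFY (4096) == 1);	/* Exactly one block.  */
  assert (BLOCKIFY (4097) == 2);	/* Spills into a second block.  */
  /* A 100-byte request is small (<= BLOCKSIZE / 2), so it would be
     served from a 128-byte fragment (the next power of two).  */
}
#endif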
/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
	/* Zero for a large (multiblock) object, or positive giving the
	   logarithm to the base two of the fragment size.  */
	int type;
	union
	  {
	    struct
	      {
		__malloc_size_t nfree; /* Free frags in a fragmented block.  */
		__malloc_size_t first; /* First free fragment of the block.  */
	      } frag;
	    /* For a large object, in its first block, this has the number
	       of blocks in the object.  In the other blocks, this has a
	       negative number which says how far back the first block is.  */
	    __malloc_ptrdiff_t size;
	  } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
	__malloc_size_t size;	/* Size (in blocks) of a free cluster.  */
	__malloc_size_t next;	/* Index of next free cluster.  */
	__malloc_size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;
/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A)	(((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B)	((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
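
/* Illustrative sketch (not part of the allocator): BLOCK and ADDRESS
   are inverses.  Block numbers are 1-based, so index 0 of _heapinfo
   is free to anchor the free list.  */
#if 0
#include <assert.h>
static void
block_address_example (void)
{
  char *p = _heapbase + 5 * BLOCKSIZE;	/* Start of the 6th block.  */
  __malloc_size_t b = BLOCK (p);	/* b == 6.  */
  /* ADDRESS maps the block number back to the block's first byte.  */
  assert ((char *) ADDRESS (b) == p);
  /* Any interior address maps to the same block number.  */
  assert (BLOCK (p + BLOCKSIZE - 1) == b);
}
#endif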
/* Current search index for the heap table.  */
extern __malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
extern __malloc_size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];
/* List of blocks allocated with `memalign' (or `valloc').  */
struct alignlist
  {
    struct alignlist *next;
    __ptr_t aligned;		/* The address that memaligned returned.  */
    __ptr_t exact;		/* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;
/* Instrumentation.  */
extern __malloc_size_t _chunks_used;
extern __malloc_size_t _bytes_used;
extern __malloc_size_t _chunks_free;
extern __malloc_size_t _bytes_free;
/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern __ptr_t _malloc_internal PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal PP ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal PP ((__ptr_t __ptr));
extern __ptr_t _malloc_internal_nolock PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal_nolock PP ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal_nolock PP ((__ptr_t __ptr));
#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;
#define LOCK()					\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_lock (&_malloc_mutex);	\
  } while (0)
#define UNLOCK()				\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_unlock (&_malloc_mutex);	\
  } while (0)
#define LOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_aligned_blocks_mutex);	\
  } while (0)
#define UNLOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_aligned_blocks_mutex);	\
  } while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif
#endif /* _MALLOC_INTERNAL.  */

/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object.  */
extern __ptr_t malloc_find_object_address PP ((__ptr_t __ptr));

/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern __ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size));

/* Default value of `__morecore'.  */
extern __ptr_t __default_morecore PP ((__malloc_ptrdiff_t __size));
/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) PP ((void));

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
extern __malloc_size_t __malloc_extra_blocks;

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize PP ((void));
/* Hooks for debugging versions.  */
extern void (*__malloc_initialize_hook) PP ((void));
extern void (*__free_hook) PP ((__ptr_t __ptr));
extern __ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));
extern __ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
extern __ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
				       __malloc_size_t __alignment));
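
/* Illustrative sketch (not part of this file): a client can interpose
   on every allocation by saving and chaining a hook, in the same style
   the mcheck code below uses.  The names `counting_hook', `n_allocs'
   and `saved_malloc_hook' are hypothetical.  */
#if 0
static __malloc_size_t n_allocs;
static __ptr_t (*saved_malloc_hook) PP ((__malloc_size_t));

static __ptr_t
counting_hook (__malloc_size_t size)
{
  __ptr_t result;
  __malloc_hook = saved_malloc_hook;	/* Avoid recursing into ourselves.  */
  result = malloc (size);
  __malloc_hook = counting_hook;
  ++n_allocs;
  return result;
}

/* Installation, typically from __malloc_initialize_hook:
     saved_malloc_hook = __malloc_hook;
     __malloc_hook = counting_hook;  */
#endif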
/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };
/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck PP ((void (*__abortfunc) PP ((enum mcheck_status))));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe PP ((__ptr_t __ptr));

/* Activate a standard collection of tracing hooks.  */
extern void mtrace PP ((void));
extern void muntrace PP ((void));
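
/* Illustrative sketch (not part of this file): using the consistency
   checker.  mcheck must run before the first malloc; mprobe can then
   be applied to any live block.  The name `note_inconsistency' is
   hypothetical.  */
#if 0
static void
note_inconsistency (enum mcheck_status status)
{
  /* Called by the checker instead of aborting.  */
}

static void
mcheck_example (void)
{
  char *p;

  mcheck (note_inconsistency);	/* Must precede the first malloc.  */
  p = malloc (16);
  p[16] = 'x';			/* Overrun clobbers the trailing magic byte.  */
  mprobe (p);			/* Reports MCHECK_TAIL via note_inconsistency.  */
}
#endif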
/* Statistics available to the user.  */
struct mstats
  {
    __malloc_size_t bytes_total; /* Total size of the heap.  */
    __malloc_size_t chunks_used; /* Chunks allocated by the user.  */
    __malloc_size_t bytes_used;	/* Byte total of user-allocated chunks.  */
    __malloc_size_t chunks_free; /* Chunks in the free list.  */
    __malloc_size_t bytes_free;	/* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats PP ((void));
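
/* Illustrative sketch (not part of this file): sampling the
   instrumentation counters through mstats.  */
#if 0
#include <stdio.h>
static void
print_mstats (void)
{
  struct mstats s = mstats ();
  fprintf (stderr, "heap: %lu bytes, %lu used / %lu free\n",
	   (unsigned long) s.bytes_total,
	   (unsigned long) s.bytes_used,
	   (unsigned long) s.bytes_free);
}
#endif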
/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings PP ((__ptr_t __start,
				 void (*__warnfun) PP ((const char *))));
/* Relocating allocator.  */

/* Allocate SIZE bytes, and store the address in *HANDLEPTR.  */
extern __ptr_t r_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));

/* Free the storage allocated in HANDLEPTR.  */
extern void r_alloc_free PP ((__ptr_t *__handleptr));

/* Adjust the block at HANDLEPTR to be SIZE bytes long.  */
extern __ptr_t r_re_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));
#endif /* malloc.h  */
/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#endif

/* How to really get more memory.  */
extern __ptr_t bss_sbrk PP ((ptrdiff_t __size));
extern int bss_sbrk_did_unexec;

__ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size)) = __default_morecore;
/* Debugging hook for `malloc'.  */
__ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries.  */
static __malloc_size_t heapsize;

/* Search index in the info table.  */
__malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
__malloc_size_t _heaplimit;
/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
__malloc_size_t _chunks_used;
__malloc_size_t _bytes_used;
__malloc_size_t _chunks_free;
__malloc_size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

__malloc_size_t __malloc_extra_blocks;

void (*__malloc_initialize_hook) PP ((void));
void (*__after_morecore_hook) PP ((void));
#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static __malloc_size_t last_state_size;
static malloc_info *last_heapinfo;
void
protect_malloc_state (protect_p)
     int protect_p;
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
		    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
	abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif
/* Aligned allocation.  */
static __ptr_t align PP ((__malloc_size_t));
static __ptr_t
align (size)
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows a
     signed int type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if ((__malloc_ptrdiff_t) size < 0)
    result = 0;
  else
    {
      result = (*__morecore) (size);
      adj = (unsigned long int) ((unsigned long int) ((char *) result -
						      (char *) NULL)) % BLOCKSIZE;
      if (adj != 0)
	{
	  __ptr_t new;
	  adj = BLOCKSIZE - adj;
	  new = (*__morecore) (adj);
	  result = (char *) result + adj;
	}

      if (__after_morecore_hook)
	(*__after_morecore_hook) ();
    }

  return result;
}
/* Get SIZE bytes, if we can get them starting at END.
   Return the address of the space we got.
   If we cannot get space at END, fail and return 0.  */
static __ptr_t get_contiguous_space PP ((__malloc_ptrdiff_t, __ptr_t));
static __ptr_t
get_contiguous_space (size, position)
     __malloc_ptrdiff_t size;
     __ptr_t position;
{
  __ptr_t before;
  __ptr_t after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}
/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void register_heapinfo PP ((void));
static void
register_heapinfo ()
{
  __malloc_size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}
#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;

static void
malloc_atfork_handler_prepare ()
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent ()
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child ()
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}
/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread ()
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
		  malloc_atfork_handler_parent,
		  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif
static void
malloc_initialize_1 ()
{
  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}
/* Set everything up and remember that we have.
   main will call malloc which calls this function.  That is before any threads
   or signal handlers have been set up, so we don't need thread protection.  */
int
__malloc_initialize ()
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}
static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static __ptr_t morecore_nolock PP ((__malloc_size_t));
static __ptr_t
morecore_nolock (size)
     __malloc_size_t size;
{
  __ptr_t result;
  malloc_info *newinfo, *oldinfo;
  __malloc_size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
	 added blocks in the table itself, as we hope to place them in
	 existing free space, which is already covered by part of the
	 existing table.  */
      newsize = heapsize;
      do
	newsize *= 2;
      while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
	 from realloc in the case of growing a large block, because the
	 block being grown is momentarily marked as free.  In this case
	 _heaplimit is zero so we know not to reuse space for internal
	 allocation.  */
      if (_heaplimit != 0)
	{
	  /* First try to allocate the new info table in core we already
	     have, in the usual way using realloc.  If realloc cannot
	     extend it in place or relocate it to existing sufficient core,
	     we will get called again, and the code above will notice the
	     `morecore_recursing' flag and return null.  */
	  int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
	  morecore_recursing = 1;
	  newinfo = (malloc_info *) _realloc_internal_nolock
	    (_heapinfo, newsize * sizeof (malloc_info));
	  morecore_recursing = 0;
	  if (newinfo == NULL)
	    errno = save;
	  else
	    {
	      /* We found some space in core, and realloc has put the old
		 table's blocks on the free list.  Now zero the new part
		 of the table and install the new table location.  */
	      memset (&newinfo[heapsize], 0,
		      (newsize - heapsize) * sizeof (malloc_info));
	      _heapinfo = newinfo;
	      heapsize = newsize;
	      goto got_heap;
	    }
	}

      /* Allocate new space for the malloc info table.  */
      while (1)
	{
	  newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));

	  /* Did it fail?  */
	  if (newinfo == NULL)
	    {
	      (*__morecore) (-size);
	      return NULL;
	    }

	  /* Is it big enough to record status for its own space?
	     If so, we win.  */
	  if ((__malloc_size_t) BLOCK ((char *) newinfo
				       + newsize * sizeof (malloc_info))
	      < newsize)
	    break;

	  /* Must try again.  First give back most of what we just got.  */
	  (*__morecore) (- newsize * sizeof (malloc_info));
	  newsize *= 2;
	}

      /* Copy the old table to the beginning of the new,
	 and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
	      (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
	 it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}
/* Allocate memory from the heap.  */
__ptr_t
_malloc_internal_nolock (size)
     __malloc_size_t size;
{
  __ptr_t result;
  __malloc_size_t block, blocks, lastblocks, start;
  register __malloc_size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size.  */
      register __malloc_size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;

      /* Look in the fragment lists for a
	 free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters.  */
	  result = (__ptr_t) next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    _heapinfo[block].busy.info.frag.first = (unsigned long int)
	      ((unsigned long int) ((char *) next->next - (char *) NULL)
	       % BLOCKSIZE) >> log;

	  /* Update the statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
	  result = _malloc_internal_nolock (BLOCKSIZE);
	  PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
	  result = _malloc_internal_nolock (BLOCKSIZE);
#else
	  result = malloc (BLOCKSIZE);
#endif
	  if (result == NULL)
	    {
	      PROTECT_MALLOC_STATE (1);
	      goto out;
	    }

	  /* Link all fragments but the first into the free list.  */
	  next = (struct list *) ((char *) result + (1 << log));
	  next->next = NULL;
	  next->prev = &_fraghead[log];
	  _fraghead[log].next = next;

	  for (i = 2; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      next->next->prev = next;
	    }

	  /* Initialize the nfree and first counters for this block.  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Get a little extra.  */
	      __malloc_size_t wantblocks = blocks + __malloc_extra_blocks;
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      /* Check to see if the new core will be contiguous with the
		 final free block; if so we don't need to get as much.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  /* We can't do this if we will have to make the heap info
		     table bigger to accommodate the new space.  */
		  block + wantblocks <= heapsize &&
		  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
					ADDRESS (block + lastblocks)))
		{
		  /* We got it contiguously.  Which block we are extending
		     (the `final free block' referred to above) might have
		     changed, if it got combined with a freed info table.  */
		  block = _heapinfo[0].free.prev;
		  _heapinfo[block].free.size += (wantblocks - lastblocks);
		  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
		  _heaplimit += wantblocks - lastblocks;
		  continue;
		}
	      result = morecore_nolock (wantblocks * BLOCKSIZE);
	      if (result == NULL)
		goto out;
	      block = BLOCK (result);
	      /* Put the new block at the end of the free list.  */
	      _heapinfo[block].free.size = wantblocks;
	      _heapinfo[block].free.prev = _heapinfo[0].free.prev;
	      _heapinfo[block].free.next = 0;
	      _heapinfo[0].free.prev = block;
	      _heapinfo[_heapinfo[block].free.prev].free.next = block;
	      ++_chunks_free;
	      /* Now loop to use some of that block for this allocation.  */
	    }
	}

      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list.  */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list.  */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
	 first with a negative number so you can find the first block by
	 adding that adjustment.  */
      while (--blocks > 0)
	_heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}
__ptr_t
_malloc_internal (size)
     __malloc_size_t size;
{
  __ptr_t result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}
__ptr_t
malloc (size)
     __malloc_size_t size;
{
  __ptr_t (*hook) (__malloc_size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of __malloc_hook to an automatic variable in case
     __malloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not a right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to __malloc_hook) for now.  */
  hook = __malloc_hook;
  return (hook != NULL ? *hook : _malloc_internal) (size);
}
/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

__ptr_t
_malloc (size)
     __malloc_size_t size;
{
  return malloc (size);
}

void
_free (ptr)
     __ptr_t ptr;
{
  free (ptr);
}

__ptr_t
_realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  return realloc (ptr, size);
}
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#endif

/* Cope with systems lacking `memmove'.  */
#if (!defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))

#undef __malloc_safe_bcopy
#define __malloc_safe_bcopy safe_bcopy
/* This function is defined in realloc.c.  */
extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))

#endif

/* Debugging hook for free.  */
void (*__free_hook) PP ((__ptr_t __ptr));

/* List of blocks allocated by memalign.  */
struct alignlist *_aligned_blocks = NULL;
/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (ptr)
     __ptr_t ptr;
{
  int type;
  __malloc_size_t block, blocks;
  register __malloc_size_t i;
  struct list *prev, *next;
  __ptr_t curbrk;
  const __malloc_size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

  PROTECT_MALLOC_STATE (0);

  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}

      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

	  __malloc_size_t info_block = BLOCK (_heapinfo);
	  __malloc_size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  __malloc_size_t prev_block = _heapinfo[block].free.prev;
	  __malloc_size_t prev_blocks = _heapinfo[prev_block].free.size;
	  __malloc_size_t next_block = _heapinfo[block].free.next;
	  __malloc_size_t next_blocks = _heapinfo[next_block].free.size;

	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold))))
	    {
	      malloc_info *newinfo;
	      __malloc_size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal_nolock (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      newinfo = (malloc_info *) _malloc_internal_nolock (info_blocks
								 * BLOCKSIZE);
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }

	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register __malloc_size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      (*__morecore) (-bytes);
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
	  _free_internal_nolock (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block.  */
	  next = (struct list *) ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block.  */
	  prev = (struct list *) ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first = (unsigned long int)
	    ((unsigned long int) ((char *) ptr - (char *) NULL)
	     % BLOCKSIZE >> type);
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
}
/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (ptr)
     __ptr_t ptr;
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}

/* Return memory to the heap.  */
void
free (ptr)
     __ptr_t ptr;
{
  void (*hook) (__ptr_t) = __free_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}

/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (ptr)
     __ptr_t ptr;
{
  free (ptr);
}
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#endif

/* Cope with systems lacking `memmove'.  */
#if (!defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))

#undef __malloc_safe_bcopy
#define __malloc_safe_bcopy safe_bcopy
/* Snarfed directly from Emacs src/dispnew.c:
   XXX Should use system bcopy if it handles overlap.  */

/* Like bcopy except never gets confused by overlap.  */

void
__malloc_safe_bcopy (afrom, ato, size)
     __ptr_t afrom;
     __ptr_t ato;
     __malloc_size_t size;
{
  char *from = afrom, *to = ato;

  if (size <= 0 || from == to)
    return;

  /* If the source and destination don't overlap, then bcopy can
     handle it.  If they do overlap, but the destination is lower in
     memory than the source, we'll assume bcopy can handle that.  */
  if (to < from || from + size <= to)
    bcopy (from, to, size);

  /* Otherwise, we'll copy from the end.  */
  else
    {
      register char *endf = from + size;
      register char *endt = to + size;

      /* If TO - FROM is large, then we should break the copy into
	 nonoverlapping chunks of TO - FROM bytes each.  However, if
	 TO - FROM is small, then the bcopy function call overhead
	 makes this not worth it.  The crossover point could be about
	 anywhere.  Since I don't think the obvious copy loop is too
	 bad, I'm trying to err in its favor.  */
      if (to - from < 64)
	{
	  do
	    *--endt = *--endf;
	  while (endf != from);
	}
      else
	{
	  for (;;)
	    {
	      endt -= (to - from);
	      endf -= (to - from);

	      if (endt < to)
		break;

	      bcopy (endf, endt, to - from);
	    }

	  /* If SIZE wasn't a multiple of TO - FROM, there will be a
	     little left over.  The amount left over is
	     (endt + (to - from)) - to, which is endt - from.  */
	  bcopy (from, to, endt - from);
	}
    }
}
extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))

#endif

#define min(A, B) ((A) < (B) ? (A) : (B))
/* Debugging hook for realloc.  */
__ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));

/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.  */
__ptr_t
_realloc_internal_nolock (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;
  int type;
  __malloc_size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
	{
	  result = _malloc_internal_nolock (size);
	  if (result != NULL)
	    {
	      memcpy (result, ptr, size);
	      _free_internal_nolock (ptr);
	      goto out;
	    }
	}

      /* The new size is a large allocation as well;
	 see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
	{
	  /* The new size is smaller; return
	     excess memory to the free list.  */
	  _heapinfo[block + blocks].busy.type = 0;
	  _heapinfo[block + blocks].busy.info.size
	    = _heapinfo[block].busy.info.size - blocks;
	  _heapinfo[block].busy.info.size = blocks;
	  /* We have just created a new chunk by splitting a chunk in two.
	     Now we will free this chunk; increment the statistics counter
	     so it doesn't become wrong when _free_internal decrements it.  */
	  ++_chunks_used;
	  _free_internal_nolock (ADDRESS (block + blocks));
	  result = ptr;
	}
      else if (blocks == _heapinfo[block].busy.info.size)
	/* No size change necessary.  */
	result = ptr;
      else
	{
	  /* Won't fit, so allocate a new region that will.
	     Free the old region first in case there is sufficient
	     adjacent free space to grow without moving.  */
	  blocks = _heapinfo[block].busy.info.size;
	  /* Prevent free from actually returning memory to the system.  */
	  oldlimit = _heaplimit;
	  _heaplimit = 0;
	  _free_internal_nolock (ptr);
	  result = _malloc_internal_nolock (size);
	  PROTECT_MALLOC_STATE (0);
	  if (_heaplimit == 0)
	    _heaplimit = oldlimit;
	  if (result == NULL)
	    {
	      /* Now we're really in trouble.  We have to unfree
		 the thing we just freed.  Unfortunately it might
		 have been coalesced with its neighbors.  */
	      if (_heapindex == block)
		(void) _malloc_internal_nolock (blocks * BLOCKSIZE);
	      else
		{
		  __ptr_t previous
		    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
		  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
		  _free_internal_nolock (previous);
		}
	      goto out;
	    }
	  if (ptr != result)
	    memmove (result, ptr, blocks * BLOCKSIZE);
	}
      break;

    default:
      /* Old size is a fragment; type is logarithm
	 to base two of the fragment size.  */
      if (size > (__malloc_size_t) (1 << (type - 1)) &&
	  size <= (__malloc_size_t) (1 << type))
	/* The new size is the same kind of fragment.  */
	result = ptr;
      else
	{
	  /* The new size is different; allocate a new space,
	     and copy the lesser of the new size and the old.  */
	  result = _malloc_internal_nolock (size);
	  if (result == NULL)
	    goto out;
	  memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
	  _free_internal_nolock (ptr);
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}
__ptr_t
_realloc_internal (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}
__ptr_t
realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t (*hook) (__ptr_t, __malloc_size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  hook = __realloc_hook;
  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#endif

/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
__ptr_t
calloc (nmemb, size)
     register __malloc_size_t nmemb;
     register __malloc_size_t size;
{
  register __ptr_t result = malloc (nmemb * size);

  if (result != NULL)
    (void) memset (result, 0, nmemb * size);

  return result;
}
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with the GNU C Library; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
   MA 02110-1301, USA.  */
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#endif

/* uClibc defines __GNU_LIBRARY__, but it is not completely
   compatible.  */
#if !defined(__GNU_LIBRARY__) || defined(__UCLIBC__)
#define __sbrk sbrk
#else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

#include <stddef.h>
extern __ptr_t __sbrk PP ((ptrdiff_t increment));
#endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
__ptr_t
__default_morecore (increment)
     __malloc_ptrdiff_t increment;
{
  __ptr_t result;

  if (!bss_sbrk_did_unexec)
    return bss_sbrk (increment);

  result = (__ptr_t) __sbrk (increment);
  if (result == (__ptr_t) -1)
    return NULL;
  return result;
}
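
/* Illustrative sketch (not part of this file): because `__morecore' is
   a variable, an embedder can route the allocator at a static arena
   instead of sbrk, provided successive calls return contiguous memory.
   The names `arena', `arena_used' and `arena_morecore' are
   hypothetical.  */
#if 0
static char arena[16 * 1024 * 1024];
static __malloc_ptrdiff_t arena_used;

static __ptr_t
arena_morecore (increment)
     __malloc_ptrdiff_t increment;
{
  /* A bump pointer over a flat array: increment 0 returns the current
     break, positive grows, negative shrinks.  */
  if (arena_used + increment < 0
      || arena_used + increment > (__malloc_ptrdiff_t) sizeof arena)
    return NULL;
  arena_used += increment;
  return arena + arena_used - increment;
}

/* Before the first malloc:  __morecore = arena_morecore;  */
#endif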
/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.  */
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#endif

__ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
				__malloc_size_t __alignment));
__ptr_t
memalign (alignment, size)
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj, lastadj;
  __ptr_t (*hook) (__malloc_size_t, __malloc_size_t) = __memalign_hook;

  if (hook)
    return (*hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;

  do
    {
      /* Reallocate the block with only as much excess as it needs.  */
      free (result);
      result = malloc (adj + size);
      if (result == NULL)	/* Impossible unless interrupted.  */
	return NULL;

      lastadj = adj;
      adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;
      /* It's conceivable we might have been so unlucky as to get a
	 different block with weaker alignment.  If so, this block is too
	 short to contain SIZE after alignment correction.  So we must
	 try again and get another block, slightly larger.  */
    } while (adj > lastadj);

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  l = (struct alignlist *) malloc (sizeof (struct alignlist));
	  if (l != NULL)
	    {
	      l->next = _aligned_blocks;
	      _aligned_blocks = l;
	    }
	}
      if (l != NULL)
	{
	  l->exact = result;
	  result = l->aligned = (char *) result + alignment - adj;
	}
      UNLOCK_ALIGNED_BLOCKS ();
      if (l == NULL)
	{
	  free (result);
	  result = NULL;
	}
    }

  return result;
}
int
posix_memalign (memptr, alignment, size)
     __ptr_t *memptr;
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t mem;

  if (alignment == 0
      || alignment % sizeof (__ptr_t) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = memalign (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;

  return 0;
}
/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#if defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC)

/* Emacs defines GMALLOC_INHIBIT_VALLOC to avoid this definition
   on MSDOS, where it conflicts with a system header file.  */

#define ELIDE_VALLOC

#endif

#ifndef ELIDE_VALLOC

#if defined (__GNU_LIBRARY__) || defined (_LIBC)
#include <stddef.h>
#include <sys/cdefs.h>
#if defined (__GLIBC__) && __GLIBC__ >= 2
/* __getpagesize is already declared in <unistd.h> with return type int.  */
#else
extern size_t __getpagesize PP ((void));
#endif
#else
#include "getpagesize.h"
#define __getpagesize() getpagesize ()
#endif

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#endif

static __malloc_size_t pagesize;

__ptr_t
valloc (size)
     __malloc_size_t size;
{
  if (pagesize == 0)
    pagesize = __getpagesize ();

  return memalign (pagesize, size);
}

#endif /* Not ELIDE_VALLOC.  */
#ifdef GC_MCHECK

/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#endif

/* Old hook values.  */
static void (*old_free_hook) (__ptr_t ptr);
static __ptr_t (*old_malloc_hook) (__malloc_size_t size);
static __ptr_t (*old_realloc_hook) (__ptr_t ptr, __malloc_size_t size);

/* Function to call when something awful happens.  */
static void (*abortfunc) (enum mcheck_status);

/* Arbitrary magical numbers.  */
#define MAGICWORD	0xfedabeeb
#define MAGICFREE	0xd8675309
#define MAGICBYTE	((char) 0xd7)
#define MALLOCFLOOD	((char) 0x93)
#define FREEFLOOD	((char) 0x95)

struct hdr
  {
    __malloc_size_t size;	/* Exact size requested by user.  */
    unsigned long int magic;	/* Magic number to check header integrity.  */
  };
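
/* Illustrative layout note (an assumption drawn from the hook code
   below): each guarded allocation looks like

       [struct hdr: size, magic][user data: size bytes][MAGICBYTE]

   so mallochook requests sizeof (struct hdr) + size + 1 bytes.  A
   corrupted magic word yields MCHECK_HEAD, a corrupted trailing byte
   MCHECK_TAIL.  */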
#if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
#define flood memset
#else
static void flood (__ptr_t, int, __malloc_size_t);
static void
flood (ptr, val, size)
     __ptr_t ptr;
     int val;
     __malloc_size_t size;
{
  char *cp = ptr;

  while (size--)
    *cp++ = val;
}
#endif
static enum mcheck_status checkhdr (const struct hdr *);
static enum mcheck_status
checkhdr (hdr)
     const struct hdr *hdr;
{
  enum mcheck_status status;
  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
	status = MCHECK_TAIL;
      else
	status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}
static void freehook (__ptr_t);
static void
freehook (ptr)
     __ptr_t ptr;
{
  struct hdr *hdr;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      flood (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}
static __ptr_t mallochook (__malloc_size_t);
static __ptr_t
mallochook (size)
     __malloc_size_t size;
{
  struct hdr *hdr;

  __malloc_hook = old_malloc_hook;
  hdr = (struct hdr *) malloc (sizeof (struct hdr) + size + 1);
  __malloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  flood ((__ptr_t) (hdr + 1), MALLOCFLOOD, size);
  return (__ptr_t) (hdr + 1);
}
static __ptr_t reallochook (__ptr_t, __malloc_size_t);
static __ptr_t
reallochook (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  struct hdr *hdr = NULL;
  __malloc_size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
	flood ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = (struct hdr *) realloc ((__ptr_t) hdr, sizeof (struct hdr) + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    flood ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return (__ptr_t) (hdr + 1);
}
static void
mabort (status)
     enum mcheck_status status;
{
  const char *msg;
  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
  abort ();
#endif
}
static int mcheck_used = 0;

int
mcheck (func)
     void (*func) (enum mcheck_status);
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = __free_hook;
      __free_hook = freehook;
      old_malloc_hook = __malloc_hook;
      __malloc_hook = mallochook;
      old_realloc_hook = __realloc_hook;
      __realloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}

enum mcheck_status
mprobe (__ptr_t ptr)
{
  return mcheck_used ? checkhdr (ptr) : MCHECK_DISABLED;
}

#endif /* GC_MCHECK */