/* This file is no longer automatically generated from libc.  */

#define _MALLOC_INTERNAL

/* The malloc headers and source files from the C library follow here.  */

/* Declarations for `malloc' and friends.
   Copyright (C) 1990, 1991, 1992, 1993, 1995, 1996, 1999, 2002, 2003, 2004,
   2005, 2006, 2007 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifdef _MALLOC_INTERNAL

#ifdef HAVE_GTK_AND_PTHREAD
#define USE_PTHREAD
#endif

#if ((defined __cplusplus || (defined (__STDC__) && __STDC__) \
      || defined STDC_HEADERS || defined PROTOTYPES))
#undef PP
#define PP(args) args
#undef __ptr_t
#define __ptr_t void *
#else /* Not C++ or ANSI C.  */
#undef PP
#define PP(args) ()
#undef __ptr_t
#define __ptr_t char *
#endif /* C++ or ANSI C.  */

#if defined (_LIBC) || defined (STDC_HEADERS) || defined (USG)
#include <string.h>
#else
#define memset(s, zero, n) bzero ((s), (n))
#define memcpy(d, s, n) bcopy ((s), (d), (n))
#endif

#endif /* _MALLOC_INTERNAL.  */
#ifdef _LIBC
#define __malloc_size_t    size_t
#define __malloc_ptrdiff_t ptrdiff_t
#else
#ifdef __SIZE_TYPE__
#define __malloc_size_t    __SIZE_TYPE__
#endif
#ifndef __malloc_size_t
#define __malloc_size_t    unsigned int
#endif
#define __malloc_ptrdiff_t int
#endif
/* Allocate SIZE bytes of memory.  */
extern __ptr_t malloc PP ((__malloc_size_t __size));

/* Re-allocate the previously allocated block
   in __ptr_t, making the new block SIZE bytes long.  */
extern __ptr_t realloc PP ((__ptr_t __ptr, __malloc_size_t __size));

/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern __ptr_t calloc PP ((__malloc_size_t __nmemb, __malloc_size_t __size));

/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
extern void free PP ((__ptr_t __ptr));
/* Allocate SIZE bytes aligned to ALIGNMENT bytes.  */
#if !defined (_MALLOC_INTERNAL) || defined (MSDOS) /* Avoid conflict.  */
extern __ptr_t memalign PP ((__malloc_size_t __alignment,
                             __malloc_size_t __size));
extern int posix_memalign PP ((__ptr_t *, __malloc_size_t,
                               __malloc_size_t size));
#endif

/* Allocate SIZE bytes on a page boundary.  */
#if ! (defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC))
extern __ptr_t valloc PP ((__malloc_size_t __size));
#endif

/* Set up mutexes and make malloc etc. thread-safe.  */
extern void malloc_enable_thread PP ((void));
#ifdef _MALLOC_INTERNAL

/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT		(CHAR_BIT * sizeof (int))
#define BLOCKLOG	(INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE	(1 << BLOCKLOG)
#define BLOCKIFY(SIZE)	(((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
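
/* A worked example of the macros above (an illustrative sketch, not
   part of the allocator): with 32-bit ints, BLOCKLOG is 12 and
   BLOCKSIZE is 4096.  A 100-byte request is small enough to be served
   from a 128-byte fragment (the smallest power of two that fits),
   while a 10000-byte request takes BLOCKIFY (10000) == 3 whole blocks.  */
#if 0	/* Example sketch, not compiled.  */
#include <stdio.h>
int
main (void)
{
  printf ("BLOCKSIZE        = %d\n", BLOCKSIZE);		/* 4096 */
  printf ("BLOCKIFY (10000) = %d\n", (int) BLOCKIFY (10000));	/* 3 */
  return 0;
}
#endif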
/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP	(INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS	8
/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
        /* Zero for a large (multiblock) object, or positive giving the
           logarithm to the base two of the fragment size.  */
        int type;
        union
          {
            struct
              {
                __malloc_size_t nfree;	/* Free frags in a fragmented block.  */
                __malloc_size_t first;	/* First free fragment of the block.  */
              } frag;
            /* For a large object, in its first block, this has the number
               of blocks in the object.  In the other blocks, this has a
               negative number which says how far back the first block is.  */
            __malloc_ptrdiff_t size;
          } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
        __malloc_size_t size;	/* Size (in blocks) of a free cluster.  */
        __malloc_size_t next;	/* Index of next free cluster.  */
        __malloc_size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;
/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;
/* Address to block number and vice versa.  */
#define BLOCK(A)	(((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B)	((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
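
/* An illustrative sketch (not part of the allocator): block numbers
   are 1-based offsets from _heapbase, so BLOCK and ADDRESS are
   inverses for any valid block number.  */
#if 0	/* Example sketch, not compiled.  */
static void
block_address_example (void)
{
  __malloc_size_t b = 5;
  /* ADDRESS (b) is _heapbase + (b - 1) * BLOCKSIZE, hence:  */
  if (BLOCK (ADDRESS (b)) != b)
    abort ();
}
#endif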
/* Current search index for the heap table.  */
extern __malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
extern __malloc_size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];
/* List of blocks allocated with `memalign' (or `valloc').  */
struct alignlist
  {
    struct alignlist *next;
    __ptr_t aligned;		/* The address that memaligned returned.  */
    __ptr_t exact;		/* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;
/* Instrumentation.  */
extern __malloc_size_t _chunks_used;
extern __malloc_size_t _bytes_used;
extern __malloc_size_t _chunks_free;
extern __malloc_size_t _bytes_free;
/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern __ptr_t _malloc_internal PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal PP ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal PP ((__ptr_t __ptr));
extern __ptr_t _malloc_internal_nolock PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal_nolock PP ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal_nolock PP ((__ptr_t __ptr));
#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;
#define LOCK()					\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_lock (&_malloc_mutex);	\
  } while (0)
#define UNLOCK()				\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_unlock (&_malloc_mutex);	\
  } while (0)
#define LOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_aligned_blocks_mutex);	\
  } while (0)
#define UNLOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_aligned_blocks_mutex);	\
  } while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif
#endif /* _MALLOC_INTERNAL.  */

/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object.  */
extern __ptr_t malloc_find_object_address PP ((__ptr_t __ptr));
/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern __ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size));

/* Default value of `__morecore'.  */
extern __ptr_t __default_morecore PP ((__malloc_ptrdiff_t __size));

/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) PP ((void));

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
extern __malloc_size_t __malloc_extra_blocks;

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize PP ((void));
/* Hooks for debugging versions.  */
extern void (*__malloc_initialize_hook) PP ((void));
extern void (*__free_hook) PP ((__ptr_t __ptr));
extern __ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));
extern __ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
extern __ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
                                       __malloc_size_t __alignment));
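
/* A minimal sketch of installing one of these hooks (illustrative;
   names prefixed `example_' are hypothetical, not part of this file).
   The uninstall/reinstall dance avoids recursing into the hook when
   the hook itself calls malloc; the mcheck code later in this file
   uses the same pattern.  */
#if 0	/* Example sketch, not compiled.  */
static __ptr_t (*example_old_hook) PP ((__malloc_size_t));

static __ptr_t
example_logging_malloc_hook (__malloc_size_t size)
{
  __ptr_t result;
  __malloc_hook = example_old_hook;	/* Uninstall to avoid recursion.  */
  result = malloc (size);
  example_old_hook = __malloc_hook;	/* It may have changed.  */
  __malloc_hook = example_logging_malloc_hook;
  fprintf (stderr, "malloc (%lu) = %p\n", (unsigned long) size, result);
  return result;
}
#endif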
/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck PP ((void (*__abortfunc) PP ((enum mcheck_status))));
/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe PP ((__ptr_t __ptr));
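
/* Hypothetical usage sketch (illustrative): mcheck must run before the
   first allocation, after which mprobe can classify any live block.  */
#if 0	/* Example sketch, not compiled.  */
static int
mcheck_example (void)
{
  char *p;
  if (mcheck (NULL) != 0)	/* Too late; malloc was already used.  */
    return -1;
  p = malloc (16);
  p[16] = 'x';			/* Clobber the byte past the block...  */
  return mprobe (p) == MCHECK_TAIL;	/* ...and mprobe reports it.  */
}
#endif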
/* Activate a standard collection of tracing hooks.  */
extern void mtrace PP ((void));
extern void muntrace PP ((void));
/* Statistics available to the user.  */
struct mstats
  {
    __malloc_size_t bytes_total;  /* Total size of the heap.  */
    __malloc_size_t chunks_used;  /* Chunks allocated by the user.  */
    __malloc_size_t bytes_used;   /* Byte total of user-allocated chunks.  */
    __malloc_size_t chunks_free;  /* Chunks in the free list.  */
    __malloc_size_t bytes_free;   /* Byte total of chunks in the free list.  */
  };
/* Pick up the current statistics.  */
extern struct mstats mstats PP ((void));
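
/* An illustrative sketch of reading the statistics (assumes stdio is
   available; the function name is hypothetical).  bytes_used plus
   bytes_free accounts for the whole heap.  */
#if 0	/* Example sketch, not compiled.  */
static void
print_mstats_example (void)
{
  struct mstats s = mstats ();
  fprintf (stderr,
           "heap %lu bytes: %lu used in %lu chunks, %lu free in %lu chunks\n",
           (unsigned long) s.bytes_total,
           (unsigned long) s.bytes_used, (unsigned long) s.chunks_used,
           (unsigned long) s.bytes_free, (unsigned long) s.chunks_free);
}
#endif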
/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings PP ((__ptr_t __start,
                                 void (*__warnfun) PP ((const char *))));
/* Relocating allocator.  */

/* Allocate SIZE bytes, and store the address in *HANDLEPTR.  */
extern __ptr_t r_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));

/* Free the storage allocated in HANDLEPTR.  */
extern void r_alloc_free PP ((__ptr_t *__handleptr));

/* Adjust the block at HANDLEPTR to be SIZE bytes long.  */
extern __ptr_t r_re_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));
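
/* Hypothetical usage sketch of the relocating allocator (illustrative):
   the caller holds a handle, and any call may move the block and update
   the handle, so the block must only be addressed through the handle.  */
#if 0	/* Example sketch, not compiled.  */
static void
r_alloc_example (void)
{
  __ptr_t handle = NULL;
  r_alloc (&handle, 1024);	/* handle now points at 1024 bytes.  */
  r_re_alloc (&handle, 4096);	/* The block may move; handle is updated.  */
  r_alloc_free (&handle);
}
#endif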
#endif /* malloc.h  */

/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#endif
/* How to really get more memory.  */
extern __ptr_t bss_sbrk PP ((ptrdiff_t __size));
extern int bss_sbrk_did_unexec;

__ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size)) = __default_morecore;

/* Debugging hook for `malloc'.  */
__ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));
/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries.  */
static __malloc_size_t heapsize;

/* Search index in the info table.  */
__malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
__malloc_size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];
/* Instrumentation.  */
__malloc_size_t _chunks_used;
__malloc_size_t _bytes_used;
__malloc_size_t _chunks_free;
__malloc_size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

__malloc_size_t __malloc_extra_blocks;

void (*__malloc_initialize_hook) PP ((void));
void (*__after_morecore_hook) PP ((void));
#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */
#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static __malloc_size_t last_state_size;
static malloc_info *last_heapinfo;

void
protect_malloc_state (protect_p)
     int protect_p;
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo   = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
                    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
        abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif
/* Aligned allocation.  */
static __ptr_t align PP ((__malloc_size_t));
static __ptr_t
align (size)
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows a
     signed int type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if ((__malloc_ptrdiff_t) size < 0)
    result = 0;
  else
    {
      result = (*__morecore) (size);
      adj = (unsigned long int) ((unsigned long int) ((char *) result -
                                                      (char *) NULL)) % BLOCKSIZE;
      if (adj != 0)
        {
          __ptr_t new;
          adj = BLOCKSIZE - adj;
          new = (*__morecore) (adj);
          result = (char *) result + adj;
        }

      if (__after_morecore_hook)
        (*__after_morecore_hook) ();
    }

  return result;
}
/* Get SIZE bytes, if we can get them starting at END.
   Return the address of the space we got.
   If we cannot get space at END, fail and return 0.  */
static __ptr_t get_contiguous_space PP ((__malloc_ptrdiff_t, __ptr_t));
static __ptr_t
get_contiguous_space (size, position)
     __malloc_ptrdiff_t size;
     __ptr_t position;
{
  __ptr_t before;
  __ptr_t after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}
/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void register_heapinfo PP ((void));
static void
register_heapinfo ()
{
  __malloc_size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}
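
/* An illustrative sketch of what the back-pointers buy us (this is the
   idea behind malloc_find_object_address; the helper name here is
   hypothetical): from any block of a large object, adding the stored
   negative size steps back to the object's first block.  */
#if 0	/* Example sketch, not compiled.  */
static __malloc_size_t
first_block_of (__malloc_size_t block)
{
  while (_heapinfo[block].busy.info.size < 0)
    block += _heapinfo[block].busy.info.size;
  return block;
}
#endif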
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;
static void
malloc_atfork_handler_prepare ()
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent ()
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child ()
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}
/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread ()
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
                  malloc_atfork_handler_parent,
                  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
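
/* Hypothetical usage sketch (illustrative): the mutexes must be set up
   from the main thread before any other thread can call malloc.  */
#if 0	/* Example sketch, not compiled.  */
int
main (void)
{
  malloc_enable_thread ();	/* Before the first pthread_create.  */
  /* ... spawn threads that may malloc/free concurrently ...  */
  return 0;
}
#endif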
static void
malloc_initialize_1 ()
{
  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}
/* Set everything up and remember that we have.
   main will call malloc which calls this function.  That is before any threads
   or signal handlers have been set up, so we don't need thread protection.  */
int
__malloc_initialize ()
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}

static int morecore_recursing;
/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static __ptr_t morecore_nolock PP ((__malloc_size_t));
static __ptr_t
morecore_nolock (size)
     __malloc_size_t size;
{
  __ptr_t result;
  malloc_info *newinfo, *oldinfo;
  __malloc_size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
         added blocks in the table itself, as we hope to place them in
         existing free space, which is already covered by part of the
         existing table.  */
      newsize = heapsize;
      do
        newsize *= 2;
      while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
         from realloc in the case of growing a large block, because the
         block being grown is momentarily marked as free.  In this case
         _heaplimit is zero so we know not to reuse space for internal
         allocation.  */
      if (_heaplimit != 0)
        {
          /* First try to allocate the new info table in core we already
             have, in the usual way using realloc.  If realloc cannot
             extend it in place or relocate it to existing sufficient core,
             we will get called again, and the code above will notice the
             `morecore_recursing' flag and return null.  */
          int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
          morecore_recursing = 1;
          newinfo = (malloc_info *) _realloc_internal_nolock
            (_heapinfo, newsize * sizeof (malloc_info));
          morecore_recursing = 0;
          if (newinfo == NULL)
            errno = save;
          else
            {
              /* We found some space in core, and realloc has put the old
                 table's blocks on the free list.  Now zero the new part
                 of the table and install the new table location.  */
              memset (&newinfo[heapsize], 0,
                      (newsize - heapsize) * sizeof (malloc_info));
              _heapinfo = newinfo;
              heapsize = newsize;
              goto got_heap;
            }
        }

      /* Allocate new space for the malloc info table.  */
      while (1)
        {
          newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));

          /* Did it fail?  */
          if (newinfo == NULL)
            {
              (*__morecore) (-size);
              return NULL;
            }

          /* Is it big enough to record status for its own space?
             If so, we win.  */
          if ((__malloc_size_t) BLOCK ((char *) newinfo
                                       + newsize * sizeof (malloc_info))
              < newsize)
            break;

          /* Must try again.  First give back most of what we just got.  */
          (*__morecore) (- newsize * sizeof (malloc_info));
          newsize *= 2;
        }

      /* Copy the old table to the beginning of the new,
         and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
              (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
         it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}
/* Allocate memory from the heap.  */
__ptr_t
_malloc_internal_nolock (size)
     __malloc_size_t size;
{
  __ptr_t result;
  __malloc_size_t block, blocks, lastblocks, start;
  register __malloc_size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
         Determine the logarithm to base two of the fragment size.  */
      register __malloc_size_t log = 1;
      --size;
      while ((size /= 2) != 0)
        ++log;

      /* Look in the fragment lists for a
         free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
        {
          /* There are free fragments of this size.
             Pop a fragment out of the fragment list and return it.
             Update the block's nfree and first counters.  */
          result = (__ptr_t) next;
          next->prev->next = next->next;
          if (next->next != NULL)
            next->next->prev = next->prev;
          block = BLOCK (result);
          if (--_heapinfo[block].busy.info.frag.nfree != 0)
            _heapinfo[block].busy.info.frag.first = (unsigned long int)
              ((unsigned long int) ((char *) next->next - (char *) NULL)
               % BLOCKSIZE) >> log;

          /* Update the statistics.  */
          ++_chunks_used;
          _bytes_used += 1 << log;
          --_chunks_free;
          _bytes_free -= 1 << log;
        }
      else
        {
          /* No free fragments of the desired size, so get a new block
             and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
          result = _malloc_internal_nolock (BLOCKSIZE);
          PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
          result = _malloc_internal_nolock (BLOCKSIZE);
#else
          result = malloc (BLOCKSIZE);
#endif
          if (result == NULL)
            {
              PROTECT_MALLOC_STATE (1);
              goto out;
            }

          /* Link all fragments but the first into the free list.  */
          next = (struct list *) ((char *) result + (1 << log));
          next->next = NULL;
          next->prev = &_fraghead[log];
          _fraghead[log].next = next;

          for (i = 2; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
            {
              next = (struct list *) ((char *) result + (i << log));
              next->next = _fraghead[log].next;
              next->prev = &_fraghead[log];
              next->prev->next = next;
              next->next->prev = next;
            }

          /* Initialize the nfree and first counters for this block.  */
          block = BLOCK (result);
          _heapinfo[block].busy.type = log;
          _heapinfo[block].busy.info.frag.nfree = i - 1;
          _heapinfo[block].busy.info.frag.first = i - 1;

          _chunks_free += (BLOCKSIZE >> log) - 1;
          _bytes_free += BLOCKSIZE - (1 << log);
          _bytes_used -= BLOCKSIZE - (1 << log);
        }
    }
  else
    {
      /* Large allocation to receive one or more blocks.
         Search the free list in a circle starting at the last place visited.
         If we loop completely around without finding a large enough
         space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
        {
          block = _heapinfo[block].free.next;
          if (block == start)
            {
              /* Need to get more from the system.  Get a little extra.  */
              __malloc_size_t wantblocks = blocks + __malloc_extra_blocks;
              block = _heapinfo[0].free.prev;
              lastblocks = _heapinfo[block].free.size;
              /* Check to see if the new core will be contiguous with the
                 final free block; if so we don't need to get as much.  */
              if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
                  /* We can't do this if we will have to make the heap info
                     table bigger to accommodate the new space.  */
                  block + wantblocks <= heapsize &&
                  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
                                        ADDRESS (block + lastblocks)))
                {
                  /* We got it contiguously.  Which block we are extending
                     (the `final free block' referred to above) might have
                     changed, if it got combined with a freed info table.  */
                  block = _heapinfo[0].free.prev;
                  _heapinfo[block].free.size += (wantblocks - lastblocks);
                  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
                  _heaplimit += wantblocks - lastblocks;
                  continue;
                }
              result = morecore_nolock (wantblocks * BLOCKSIZE);
              if (result == NULL)
                goto out;
              block = BLOCK (result);
              /* Put the new block at the end of the free list.  */
              _heapinfo[block].free.size = wantblocks;
              _heapinfo[block].free.prev = _heapinfo[0].free.prev;
              _heapinfo[block].free.next = 0;
              _heapinfo[0].free.prev = block;
              _heapinfo[_heapinfo[block].free.prev].free.next = block;
              ++_chunks_free;
              /* Now loop to use some of that block for this allocation.  */
            }
        }

      /* At this point we have found a suitable free list entry.
         Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
        {
          /* The block we found has a bit left over,
             so relink the tail end back into the free list.  */
          _heapinfo[block + blocks].free.size
            = _heapinfo[block].free.size - blocks;
          _heapinfo[block + blocks].free.next
            = _heapinfo[block].free.next;
          _heapinfo[block + blocks].free.prev
            = _heapinfo[block].free.prev;
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapindex = block + blocks;
        }
      else
        {
          /* The block exactly matches our requirements,
             so just remove it from the list.  */
          _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapinfo[block].free.prev;
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapindex = _heapinfo[block].free.next;
          --_chunks_free;
        }

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
         first with a negative number so you can find the first block by
         adding that adjustment.  */
      while (--blocks > 0)
        _heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}
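
/* A worked example of the fragment size-class computation above
   (illustrative; the helper name is hypothetical).  The loop counts
   halvings of SIZE - 1, so the result is the base-two logarithm of
   the smallest power-of-two fragment that holds SIZE bytes.  */
#if 0	/* Example sketch, not compiled.  */
static __malloc_size_t
fragment_log (__malloc_size_t size)	/* Assumes size <= BLOCKSIZE / 2.  */
{
  __malloc_size_t log = 1;
  --size;
  while ((size /= 2) != 0)
    ++log;
  return log;	/* fragment_log (100) == 7: a 128-byte fragment.
                   fragment_log (64)  == 6: exactly a 64-byte fragment.  */
}
#endif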
__ptr_t
_malloc_internal (size)
     __malloc_size_t size;
{
  __ptr_t result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}

__ptr_t
malloc (size)
     __malloc_size_t size;
{
  __ptr_t (*hook) (__malloc_size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of __malloc_hook to an automatic variable in case
     __malloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not the right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to __malloc_hook) for now.  */
  hook = __malloc_hook;
  return (hook != NULL ? *hook : _malloc_internal) (size);
}
/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

__ptr_t
_malloc (size)
     __malloc_size_t size;
{
  return malloc (size);
}

void
_free (ptr)
     __ptr_t ptr;
{
  free (ptr);
}

__ptr_t
_realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  return realloc (ptr, size);
}
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#endif
/* Cope with systems lacking `memmove'.  */
#if (!defined (_LIBC) && !defined (STDC_HEADERS) && !defined (USG))

#undef __malloc_safe_bcopy
#define __malloc_safe_bcopy safe_bcopy

/* This function is defined in realloc.c.  */
extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))

#endif
/* Debugging hook for free.  */
void (*__free_hook) PP ((__ptr_t __ptr));

/* List of blocks allocated by memalign.  */
struct alignlist *_aligned_blocks = NULL;
/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (ptr)
     __ptr_t ptr;
{
  int type;
  __malloc_size_t block, blocks;
  register __malloc_size_t i;
  struct list *prev, *next;
  __ptr_t curbrk;
  const __malloc_size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

  PROTECT_MALLOC_STATE (0);

  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
        l->aligned = NULL;	/* Mark the slot in the list as free.  */
        ptr = l->exact;
        break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
         Start searching at the last block referenced; this may benefit
         programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
        while (i > block)
          i = _heapinfo[i].free.prev;
      else
        {
          do
            i = _heapinfo[i].free.next;
          while (i > 0 && i < block);
          i = _heapinfo[i].free.prev;
        }

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
        {
          /* Coalesce this block with its predecessor.  */
          _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
          block = i;
        }
      else
        {
          /* Really link this block back into the free list.  */
          _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
          _heapinfo[block].free.next = _heapinfo[i].free.next;
          _heapinfo[block].free.prev = i;
          _heapinfo[i].free.next = block;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          ++_chunks_free;
        }

      /* Now that the block is linked in, see if we can coalesce it
         with its successor (by deleting its successor from the list
         and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
        {
          _heapinfo[block].free.size
            += _heapinfo[_heapinfo[block].free.next].free.size;
          _heapinfo[block].free.next
            = _heapinfo[_heapinfo[block].free.next].free.next;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          --_chunks_free;
        }

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
        {
          /* The end of the malloc heap is at the end of accessible core.
             It's possible that moving _heapinfo will allow us to
             return some space to the system.  */

          __malloc_size_t info_block = BLOCK (_heapinfo);
          __malloc_size_t info_blocks = _heapinfo[info_block].busy.info.size;
          __malloc_size_t prev_block = _heapinfo[block].free.prev;
          __malloc_size_t prev_blocks = _heapinfo[prev_block].free.size;
          __malloc_size_t next_block = _heapinfo[block].free.next;
          __malloc_size_t next_blocks = _heapinfo[next_block].free.size;

          if (/* Win if this block being freed is last in core, the info table
                 is just before it, the previous free block is just before the
                 info table, and the two free blocks together form a useful
                 amount to return to the system.  */
              (block + blocks == _heaplimit &&
               info_block + info_blocks == block &&
               prev_block != 0 && prev_block + prev_blocks == info_block &&
               blocks + prev_blocks >= lesscore_threshold) ||
              /* Nope, not the case.  We can also win if this block being
                 freed is just before the info table, and the table extends
                 to the end of core or is followed only by a free block,
                 and the total free space is worth returning to the system.  */
              (block + blocks == info_block &&
               ((info_block + info_blocks == _heaplimit &&
                 blocks >= lesscore_threshold) ||
                (info_block + info_blocks == next_block &&
                 next_block + next_blocks == _heaplimit &&
                 blocks + next_blocks >= lesscore_threshold))))
            {
              malloc_info *newinfo;
              __malloc_size_t oldlimit = _heaplimit;

              /* Free the old info table, clearing _heaplimit to avoid
                 recursion into this code.  We don't want to return the
                 table's blocks to the system before we have copied them to
                 the new location.  */
              _heaplimit = 0;
              _free_internal_nolock (_heapinfo);
              _heaplimit = oldlimit;

              /* Tell malloc to search from the beginning of the heap for
                 free blocks, so it doesn't reuse the ones just freed.  */
              _heapindex = 0;

              /* Allocate new space for the info table and move its data.  */
              newinfo = (malloc_info *) _malloc_internal_nolock (info_blocks
                                                                 * BLOCKSIZE);
              PROTECT_MALLOC_STATE (0);
              memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
              _heapinfo = newinfo;

              /* We should now have coalesced the free block with the
                 blocks freed from the old info table.  Examine the entire
                 trailing free block to decide below whether to return some
                 to the system.  */
              block = _heapinfo[0].free.prev;
              blocks = _heapinfo[block].free.size;
            }

          /* Now see if we can return stuff to the system.  */
          if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
            {
              register __malloc_size_t bytes = blocks * BLOCKSIZE;
              _heaplimit -= blocks;
              (*__morecore) (-bytes);
              _heapinfo[_heapinfo[block].free.prev].free.next
                = _heapinfo[block].free.next;
              _heapinfo[_heapinfo[block].free.next].free.prev
                = _heapinfo[block].free.prev;
              block = _heapinfo[block].free.prev;
              --_chunks_free;
              _bytes_free -= bytes;
            }
        }

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
                              (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
        {
          /* If all fragments of this block are free, remove them
             from the fragment list and free the whole block.  */
          next = prev;
          for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
            next = next->next;
          prev->prev->next = next;
          if (next != NULL)
            next->prev = prev->prev;
          _heapinfo[block].busy.type = 0;
          _heapinfo[block].busy.info.size = 1;

          /* Keep the statistics accurate.  */
          ++_chunks_used;
          _bytes_used += BLOCKSIZE;
          _chunks_free -= BLOCKSIZE >> type;
          _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
          _free_internal_nolock (ADDRESS (block));
#else
          free (ADDRESS (block));
#endif
        }
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
        {
          /* If some fragments of this block are free, link this
             fragment into the fragment list after the first free
             fragment of this block.  */
          next = (struct list *) ptr;
          next->next = prev->next;
          next->prev = prev;
          prev->next = next;
          if (next->next != NULL)
            next->next->prev = next;
          ++_heapinfo[block].busy.info.frag.nfree;
        }
      else
        {
          /* No fragments of this block are free, so link this
             fragment into the fragment list and announce that
             it is the first free fragment of this block.  */
          prev = (struct list *) ptr;
          _heapinfo[block].busy.info.frag.nfree = 1;
          _heapinfo[block].busy.info.frag.first = (unsigned long int)
            ((unsigned long int) ((char *) ptr - (char *) NULL)
             % BLOCKSIZE >> type);
          prev->next = _fraghead[type].next;
          prev->prev = &_fraghead[type];
          prev->prev->next = prev;
          if (prev->next != NULL)
            prev->next->prev = prev;
        }
      break;
    }

  PROTECT_MALLOC_STATE (1);
}
/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (ptr)
     __ptr_t ptr;
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}
/* Return memory to the heap.  */
void
free (ptr)
     __ptr_t ptr;
{
  void (*hook) (__ptr_t) = __free_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}
/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#endif
/* Cope with systems lacking `memmove'.  */
#if (!defined (_LIBC) && !defined (STDC_HEADERS) && !defined (USG))

#ifdef emacs
#undef __malloc_safe_bcopy
#define __malloc_safe_bcopy safe_bcopy
#else

/* Snarfed directly from Emacs src/dispnew.c:
   XXX Should use system bcopy if it handles overlap.  */

/* Like bcopy except never gets confused by overlap.  */

void
__malloc_safe_bcopy (afrom, ato, size)
     __ptr_t afrom;
     __ptr_t ato;
     __malloc_size_t size;
{
  char *from = afrom, *to = ato;

  if (size <= 0 || from == to)
    return;

  /* If the source and destination don't overlap, then bcopy can
     handle it.  If they do overlap, but the destination is lower in
     memory than the source, we'll assume bcopy can handle that.  */
  if (to < from || from + size <= to)
    bcopy (from, to, size);

  /* Otherwise, we'll copy from the end.  */
  else
    {
      register char *endf = from + size;
      register char *endt = to + size;

      /* If TO - FROM is large, then we should break the copy into
         nonoverlapping chunks of TO - FROM bytes each.  However, if
         TO - FROM is small, then the bcopy function call overhead
         makes this not worth it.  The crossover point could be about
         anywhere.  Since I don't think the obvious copy loop is too
         bad, I'm trying to err in its favor.  */
      if (to - from < 64)
        {
          do
            *--endt = *--endf;
          while (endf != from);
        }
      else
        {
          for (;;)
            {
              endt -= (to - from);
              endf -= (to - from);

              if (endt < to)
                break;

              bcopy (endf, endt, to - from);
            }

          /* If SIZE wasn't a multiple of TO - FROM, there will be a
             little left over.  The amount left over is
             (endt + (to - from)) - to, which is endt - from.  */
          bcopy (from, to, endt - from);
        }
    }
}
#endif /* Not emacs.  */

extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))

#endif
#define min(A, B) ((A) < (B) ? (A) : (B))

/* Debugging hook for realloc.  */
__ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.  */
__ptr_t
_realloc_internal_nolock (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;
  int type;
  __malloc_size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
        {
          result = _malloc_internal_nolock (size);
          if (result != NULL)
            {
              memcpy (result, ptr, size);
              _free_internal_nolock (ptr);
              goto out;
            }
        }

      /* The new size is a large allocation as well;
         see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
        {
          /* The new size is smaller; return
             excess memory to the free list.  */
          _heapinfo[block + blocks].busy.type = 0;
          _heapinfo[block + blocks].busy.info.size
            = _heapinfo[block].busy.info.size - blocks;
          _heapinfo[block].busy.info.size = blocks;
          /* We have just created a new chunk by splitting a chunk in two.
             Now we will free this chunk; increment the statistics counter
             so it doesn't become wrong when _free_internal decrements it.  */
          ++_chunks_used;
          _free_internal_nolock (ADDRESS (block + blocks));
          result = ptr;
        }
      else if (blocks == _heapinfo[block].busy.info.size)
        /* No size change necessary.  */
        result = ptr;
      else
        {
          /* Won't fit, so allocate a new region that will.
             Free the old region first in case there is sufficient
             adjacent free space to grow without moving.  */
          blocks = _heapinfo[block].busy.info.size;
          /* Prevent free from actually returning memory to the system.  */
          oldlimit = _heaplimit;
          _heaplimit = 0;
          _free_internal_nolock (ptr);
          result = _malloc_internal_nolock (size);
          PROTECT_MALLOC_STATE (0);
          if (_heaplimit == 0)
            _heaplimit = oldlimit;
          if (result == NULL)
            {
              /* Now we're really in trouble.  We have to unfree
                 the thing we just freed.  Unfortunately it might
                 have been coalesced with its neighbors.  */
              if (_heapindex == block)
                (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
              else
                {
                  __ptr_t previous
                    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
                  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
                  _free_internal_nolock (previous);
                }
              goto out;
            }
          if (ptr != result)
            memmove (result, ptr, blocks * BLOCKSIZE);
        }
      break;

    default:
      /* Old size is a fragment; type is logarithm
         to base two of the fragment size.  */
      if (size > (__malloc_size_t) (1 << (type - 1)) &&
          size <= (__malloc_size_t) (1 << type))
        /* The new size is the same kind of fragment.  */
        result = ptr;
      else
        {
          /* The new size is different; allocate a new space,
             and copy the lesser of the new size and the old.  */
          result = _malloc_internal_nolock (size);
          if (result == NULL)
            goto out;
          memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
          _free_internal_nolock (ptr);
        }
      break;
    }

  PROTECT_MALLOC_STATE (1);

 out:
  return result;
}
__ptr_t
_realloc_internal (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}
__ptr_t
realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t (*hook) (__ptr_t, __malloc_size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  hook = __realloc_hook;
  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#endif
/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
__ptr_t
calloc (nmemb, size)
     register __malloc_size_t nmemb;
     register __malloc_size_t size;
{
  register __ptr_t result = malloc (nmemb * size);

  if (result != NULL)
    (void) memset (result, 0, nmemb * size);

  return result;
}
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with the GNU C Library; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
   MA 02110-1301, USA.  */
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#endif
/* uClibc defines __GNU_LIBRARY__, but it is not completely
   compatible.  */
#if !defined (__GNU_LIBRARY__) || defined (__UCLIBC__)
#define __sbrk sbrk
#else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

extern __ptr_t __sbrk PP ((ptrdiff_t increment));
#endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
__ptr_t
__default_morecore (increment)
     __malloc_ptrdiff_t increment;
{
  __ptr_t result;

  if (!bss_sbrk_did_unexec)
    return bss_sbrk (increment);

  result = (__ptr_t) __sbrk (increment);
  if (result == (__ptr_t) -1)
    return NULL;
  return result;
}
/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.  */
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#endif
__ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
                                __malloc_size_t __alignment));
__ptr_t
memalign (alignment, size)
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj, lastadj;
  __ptr_t (*hook) (__malloc_size_t, __malloc_size_t) = __memalign_hook;

  if (hook)
    return (*hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;

  do
    {
      /* Reallocate the block with only as much excess as it needs.  */
      free (result);
      result = malloc (adj + size);
      if (result == NULL)	/* Impossible unless interrupted.  */
        return NULL;

      lastadj = adj;
      adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;
      /* It's conceivable we might have been so unlucky as to get a
         different block with weaker alignment.  If so, this block is too
         short to contain SIZE after alignment correction.  So we must
         try again and get another block, slightly larger.  */
    } while (adj > lastadj);

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
         can identify the pointer it is passed, which will be in the middle
         of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
        if (l->aligned == NULL)
          /* This slot is free.  Use it.  */
          break;
      if (l == NULL)
        {
          l = (struct alignlist *) malloc (sizeof (struct alignlist));
          if (l != NULL)
            {
              l->next = _aligned_blocks;
              _aligned_blocks = l;
            }
        }
      if (l != NULL)
        {
          l->exact = result;
          result = l->aligned = (char *) result + alignment - adj;
        }
      UNLOCK_ALIGNED_BLOCKS ();
      if (l == NULL)
        {
          free (result);
          result = NULL;
        }
    }

  return result;
}
int
posix_memalign (memptr, alignment, size)
     __ptr_t *memptr;
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t mem;

  if (alignment == 0
      || alignment % sizeof (__ptr_t) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = memalign (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;

  return 0;
}
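
/* Hypothetical usage sketch (illustrative): posix_memalign requires a
   power-of-two alignment that is also a multiple of sizeof (void *),
   and returns an error code instead of setting errno.  */
#if 0	/* Example sketch, not compiled.  */
static void
posix_memalign_example (void)
{
  __ptr_t p;
  if (posix_memalign (&p, 64, 1024) == 0)
    {
      /* p is 64-byte aligned here.  */
      free (p);
    }
}
#endif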
/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#if defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC)

/* Emacs defines GMALLOC_INHIBIT_VALLOC to avoid this definition
   on MSDOS, where it conflicts with a system header file.  */

#define ELIDE_VALLOC

#endif

#ifndef ELIDE_VALLOC
#if defined (__GNU_LIBRARY__) || defined (_LIBC)
#include <sys/cdefs.h>
#if defined (__GLIBC__) && __GLIBC__ >= 2
/* __getpagesize is already declared in <unistd.h> with return type int.  */
#else
extern size_t __getpagesize PP ((void));
#endif
#else
#include "getpagesize.h"
#define __getpagesize() getpagesize ()
#endif
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#endif
static __malloc_size_t pagesize;

__ptr_t
valloc (size)
     __malloc_size_t size;
{
  if (pagesize == 0)
    pagesize = __getpagesize ();

  return memalign (pagesize, size);
}

#endif /* Not ELIDE_VALLOC.  */
/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#endif
/* Old hook values.  */
static void (*old_free_hook) __P ((__ptr_t ptr));
static __ptr_t (*old_malloc_hook) __P ((__malloc_size_t size));
static __ptr_t (*old_realloc_hook) __P ((__ptr_t ptr, __malloc_size_t size));

/* Function to call when something awful happens.  */
static void (*abortfunc) __P ((enum mcheck_status));
/* Arbitrary magical numbers.  */
#define MAGICWORD	0xfedabeeb
#define MAGICFREE	0xd8675309
#define MAGICBYTE	((char) 0xd7)
#define MALLOCFLOOD	((char) 0x93)
#define FREEFLOOD	((char) 0x95)

struct hdr
  {
    __malloc_size_t size;	/* Exact size requested by user.  */
    unsigned long int magic;	/* Magic number to check header integrity.  */
  };
#if defined (_LIBC) || defined (STDC_HEADERS) || defined (USG)
#define flood memset
#else
static void flood __P ((__ptr_t, int, __malloc_size_t));
static void
flood (ptr, val, size)
     __ptr_t ptr;
     int val;
     __malloc_size_t size;
{
  char *cp = ptr;
  while (size--)
    *cp++ = val;
}
#endif
static enum mcheck_status checkhdr __P ((const struct hdr *));
static enum mcheck_status
checkhdr (hdr)
     const struct hdr *hdr;
{
  enum mcheck_status status;
  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
        status = MCHECK_TAIL;
      else
        status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}
static void freehook __P ((__ptr_t));
static void
freehook (ptr)
     __ptr_t ptr;
{
  struct hdr *hdr;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      flood (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}
static __ptr_t mallochook __P ((__malloc_size_t));
static __ptr_t
mallochook (size)
     __malloc_size_t size;
{
  struct hdr *hdr;

  __malloc_hook = old_malloc_hook;
  hdr = (struct hdr *) malloc (sizeof (struct hdr) + size + 1);
  __malloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  flood ((__ptr_t) (hdr + 1), MALLOCFLOOD, size);
  return (__ptr_t) (hdr + 1);
}
static __ptr_t reallochook __P ((__ptr_t, __malloc_size_t));
static __ptr_t
reallochook (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  struct hdr *hdr = NULL;
  __malloc_size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
        flood ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = (struct hdr *) realloc ((__ptr_t) hdr, sizeof (struct hdr) + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    flood ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return (__ptr_t) (hdr + 1);
}
static void
mabort (status)
     enum mcheck_status status;
{
  const char *msg;
  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
  abort ();
#endif
}
static int mcheck_used = 0;

int
mcheck (func)
     void (*func) __P ((enum mcheck_status));
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = __free_hook;
      __free_hook = freehook;
      old_malloc_hook = __malloc_hook;
      __malloc_hook = mallochook;
      old_realloc_hook = __realloc_hook;
      __realloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}
enum mcheck_status
mprobe (__ptr_t ptr)
{
  return mcheck_used ? checkhdr (ptr) : MCHECK_DISABLED;
}

#endif /* GC_MCHECK */
/* arch-tag: 93dce5c0-f49a-41b5-86b1-f91c4169c02e
   (do not change this comment) */