/* This file is no longer automatically generated from libc.  */

#define _MALLOC_INTERNAL

/* The malloc headers and source files from the C library follow here.  */

/* Declarations for `malloc' and friends.
   Copyright (C) 1990, 1991, 1992, 1993, 1995, 1996, 1999, 2002, 2003, 2004,
   2005, 2006, 2007 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifdef _MALLOC_INTERNAL

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_GTK_AND_PTHREAD
#define USE_PTHREAD
#endif

#if ((defined __cplusplus || (defined (__STDC__) && __STDC__) \
      || defined STDC_HEADERS || defined PROTOTYPES) \
     && ! defined (BROKEN_PROTOTYPES))
#undef	PP
#define	PP(args)	args
#undef	__ptr_t
#define	__ptr_t		void *
#else /* Not C++ or ANSI C.  */
#undef	PP
#define	PP(args)	()
#undef	__ptr_t
#define	__ptr_t		char *
#endif /* C++ or ANSI C.  */

#if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
#include <string.h>
#else
#ifndef memset
#define	memset(s, zero, n)	bzero ((s), (n))
#endif
#ifndef memcpy
#define	memcpy(d, s, n)		bcopy ((s), (d), (n))
#endif
#endif

#ifdef HAVE_LIMITS_H
#include <limits.h>
#endif
#ifndef CHAR_BIT
#define CHAR_BIT 8
#endif

#endif /* _MALLOC_INTERNAL.  */
#ifdef __STDC__
#define	__malloc_size_t		size_t
#define	__malloc_ptrdiff_t	ptrdiff_t
#else
#ifdef __GNUC__
#include <stddef.h>
#define __malloc_size_t __SIZE_TYPE__
#endif
#ifndef __malloc_size_t
#define	__malloc_size_t		unsigned int
#endif
#define	__malloc_ptrdiff_t	int
#endif
/* Allocate SIZE bytes of memory.  */
extern __ptr_t malloc PP ((__malloc_size_t __size));
/* Re-allocate the previously allocated block
   in __ptr_t, making the new block SIZE bytes long.  */
extern __ptr_t realloc PP ((__ptr_t __ptr, __malloc_size_t __size));
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern __ptr_t calloc PP ((__malloc_size_t __nmemb, __malloc_size_t __size));
/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
extern void free PP ((__ptr_t __ptr));
/* Allocate SIZE bytes aligned to a multiple of ALIGNMENT bytes.  */
#if ! (defined (_MALLOC_INTERNAL) && __DJGPP__ - 0 == 1) /* Avoid conflict.  */
extern __ptr_t memalign PP ((__malloc_size_t __alignment,
			     __malloc_size_t __size));
extern int posix_memalign PP ((__ptr_t *, __malloc_size_t,
			       __malloc_size_t size));
#endif
/* Allocate SIZE bytes on a page boundary.  */
#if ! (defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC))
extern __ptr_t valloc PP ((__malloc_size_t __size));
#endif
#ifdef USE_PTHREAD
/* Set up mutexes and make malloc etc. thread-safe.  */
extern void malloc_enable_thread PP ((void));
#endif
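
/* Usage sketch (illustrative only, not part of the original source): a
   program that wants these allocation routines to be thread-safe calls
   malloc_enable_thread once, before creating any threads.  */
#if 0
#include <pthread.h>

int
main (void)
{
  malloc_enable_thread ();	/* Initialize the malloc mutexes first.  */
  /* ... only now may pthread_create be called and threads allocate.  */
  return 0;
}
#endif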
#ifdef _MALLOC_INTERNAL

/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT		(CHAR_BIT * sizeof(int))
#define BLOCKLOG	(INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE	(1 << BLOCKLOG)
#define BLOCKIFY(SIZE)	(((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
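
/* A minimal sketch of the block arithmetic above (illustrative only).
   With a 32-bit int, BLOCKLOG is 12 and BLOCKSIZE is 4096: a one-byte
   request still occupies one block, and 4097 bytes round up to two.  */
#if 0
#include <assert.h>

static void
blockify_example (void)
{
  assert (BLOCKSIZE == 4096);	/* Holds whenever INT_BIT > 16.  */
  assert (BLOCKIFY (1) == 1);
  assert (BLOCKIFY (4096) == 1);
  assert (BLOCKIFY (4097) == 2);
}
#endif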
/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP	(INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS	8
/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
	/* Zero for a large (multiblock) object, or positive giving the
	   logarithm to the base two of the fragment size.  */
	int type;
	union
	  {
	    struct
	      {
		__malloc_size_t nfree;	/* Free frags in a fragmented block.  */
		__malloc_size_t first;	/* First free fragment of the block.  */
	      } frag;
	    /* For a large object, in its first block, this has the number
	       of blocks in the object.  In the other blocks, this has a
	       negative number which says how far back the first block is.  */
	    __malloc_ptrdiff_t size;
	  } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
	__malloc_size_t size;	/* Size (in blocks) of a free cluster.  */
	__malloc_size_t next;	/* Index of next free cluster.  */
	__malloc_size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;
/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A)	(((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B)	((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
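
/* Illustrative sketch: BLOCK and ADDRESS are inverses for any address
   inside the heap.  Block numbers start at 1 so that index 0 can anchor
   the free list.  (Assumes an initialized heap; not part of the source.)  */
#if 0
#include <assert.h>

static void
block_address_roundtrip (__ptr_t p)
{
  __malloc_size_t b = BLOCK (p);	/* 1-based block index.  */
  char *base = (char *) ADDRESS (b);	/* Start of that block.  */
  assert (base <= (char *) p && (char *) p < base + BLOCKSIZE);
}
#endif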
/* Current search index for the heap table.  */
extern __malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
extern __malloc_size_t _heaplimit;
/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];
/* List of blocks allocated with `memalign' (or `valloc').  */
struct alignlist
  {
    struct alignlist *next;
    __ptr_t aligned;		/* The address that memaligned returned.  */
    __ptr_t exact;		/* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;
/* Instrumentation.  */
extern __malloc_size_t _chunks_used;
extern __malloc_size_t _bytes_used;
extern __malloc_size_t _chunks_free;
extern __malloc_size_t _bytes_free;
/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern __ptr_t _malloc_internal PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal PP ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal PP ((__ptr_t __ptr));
extern __ptr_t _malloc_internal_nolock PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal_nolock PP ((__ptr_t __ptr,
					     __malloc_size_t __size));
extern void _free_internal_nolock PP ((__ptr_t __ptr));

#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;
#define LOCK()					\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_lock (&_malloc_mutex);	\
  } while (0)
#define UNLOCK()				\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_unlock (&_malloc_mutex);	\
  } while (0)
#define LOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_aligned_blocks_mutex);	\
  } while (0)
#define UNLOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_aligned_blocks_mutex);	\
  } while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif

#endif /* _MALLOC_INTERNAL.  */
/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object.  */
extern __ptr_t malloc_find_object_address PP ((__ptr_t __ptr));
/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern __ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size));

/* Default value of `__morecore'.  */
extern __ptr_t __default_morecore PP ((__malloc_ptrdiff_t __size));
/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) PP ((void));

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
extern __malloc_size_t __malloc_extra_blocks;

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize PP ((void));
/* Hooks for debugging versions.  */
extern void (*__malloc_initialize_hook) PP ((void));
extern void (*__free_hook) PP ((__ptr_t __ptr));
extern __ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));
extern __ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
extern __ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
				       __malloc_size_t __alignment));
/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck PP ((void (*__abortfunc) PP ((enum mcheck_status))));
/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe PP ((__ptr_t __ptr));

/* Activate a standard collection of tracing hooks.  */
extern void mtrace PP ((void));
extern void muntrace PP ((void));
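
/* Usage sketch (illustrative only): install the checking hooks before
   the first allocation, then probe blocks explicitly.  */
#if 0
int
main (void)
{
  char *p;
  if (mcheck (NULL) != 0)	/* Must precede the first malloc.  */
    return 1;
  p = malloc (16);
  p[16] = 'x';			/* Clobber the trailing magic byte...  */
  mprobe (p);			/* ...and the default handler reports
				   MCHECK_TAIL and aborts.  */
  return 0;
}
#endif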
/* Statistics available to the user.  */
struct mstats
  {
    __malloc_size_t bytes_total;  /* Total size of the heap.  */
    __malloc_size_t chunks_used;  /* Chunks allocated by the user.  */
    __malloc_size_t bytes_used;	  /* Byte total of user-allocated chunks.  */
    __malloc_size_t chunks_free;  /* Chunks in the free list.  */
    __malloc_size_t bytes_free;	  /* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats PP ((void));
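
/* Usage sketch (illustrative only): the counters in struct mstats mirror
   the _chunks_used/_bytes_used/... instrumentation declared above.  */
#if 0
#include <stdio.h>

static void
print_heap_stats (void)
{
  struct mstats s = mstats ();
  printf ("heap %lu bytes: %lu used in %lu chunks, %lu free\n",
	  (unsigned long) s.bytes_total, (unsigned long) s.bytes_used,
	  (unsigned long) s.chunks_used, (unsigned long) s.bytes_free);
}
#endif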
/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings PP ((__ptr_t __start,
				 void (*__warnfun) PP ((const char *))));
/* Relocating allocator.  */

/* Allocate SIZE bytes, and store the address in *HANDLEPTR.  */
extern __ptr_t r_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));

/* Free the storage allocated in HANDLEPTR.  */
extern void r_alloc_free PP ((__ptr_t *__handleptr));

/* Adjust the block at HANDLEPTR to be SIZE bytes long.  */
extern __ptr_t r_re_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));
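
/* Usage sketch (illustrative only): the relocating allocator hands back
   storage through a handle; the block may move on later calls, so it is
   always re-read through the handle.  */
#if 0
static void
r_alloc_example (void)
{
  __ptr_t handle = NULL;
  r_alloc (&handle, 100);	/* handle now refers to 100 bytes.  */
  r_re_alloc (&handle, 200);	/* May move the block; handle is updated.  */
  r_alloc_free (&handle);	/* Release it again.  */
}
#endif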
#endif /* malloc.h  */
/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif
#include <errno.h>

/* How to really get more memory.  */
#ifdef CYGWIN
extern __ptr_t bss_sbrk PP ((ptrdiff_t __size));
extern int bss_sbrk_did_unexec;
#endif
__ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size)) = __default_morecore;
/* Debugging hook for `malloc'.  */
__ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));
/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;
/* Number of info entries.  */
static __malloc_size_t heapsize;

/* Search index in the info table.  */
__malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
__malloc_size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
__malloc_size_t _chunks_used;
__malloc_size_t _bytes_used;
__malloc_size_t _chunks_free;
__malloc_size_t _bytes_free;
/* Are you experienced?  */
int __malloc_initialized;

__malloc_size_t __malloc_extra_blocks;

void (*__malloc_initialize_hook) PP ((void));
void (*__after_morecore_hook) PP ((void));
#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static __malloc_size_t last_state_size;
static malloc_info *last_heapinfo;

void
protect_malloc_state (protect_p)
     int protect_p;
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo   = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
		    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
	abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state(PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif
/* Aligned allocation.  */
static __ptr_t align PP ((__malloc_size_t));
static __ptr_t
align (size)
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows a
     signed int type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if ((__malloc_ptrdiff_t)size < 0)
    result = 0;
  else
    {
      result = (*__morecore) (size);
      adj = (unsigned long int) ((unsigned long int) ((char *) result -
						      (char *) NULL)) % BLOCKSIZE;
      if (adj != 0)
	{
	  __ptr_t new;
	  adj = BLOCKSIZE - adj;
	  new = (*__morecore) (adj);
	  result = (char *) result + adj;
	}

      if (__after_morecore_hook)
	(*__after_morecore_hook) ();
    }

  return result;
}
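
/* Worked example of the adjustment above (illustrative only): with
   BLOCKSIZE == 4096, a core break of 0x1234 gives
   adj = 0x1234 % 4096 = 0x234, so align requests another
   4096 - 0x234 = 0xdcc bytes and returns 0x1234 + 0xdcc = 0x2000,
   the next block boundary.  */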
/* Get SIZE bytes, if we can get them starting at END.
   Return the address of the space we got.
   If we cannot get space at END, fail and return 0.  */
static __ptr_t get_contiguous_space PP ((__malloc_ptrdiff_t, __ptr_t));
static __ptr_t
get_contiguous_space (size, position)
     __malloc_ptrdiff_t size;
     __ptr_t position;
{
  __ptr_t before;
  __ptr_t after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (after == NULL)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}
/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void register_heapinfo PP ((void));
#ifdef __GNUC__
__inline__
#endif
static void
register_heapinfo ()
{
  __malloc_size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}
#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;

static void
malloc_atfork_handler_prepare ()
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent ()
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child ()
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread ()
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
		  malloc_atfork_handler_parent,
		  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif
static void
malloc_initialize_1 ()
{
#ifdef GC_MCHECK
  mcheck (NULL);
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}

/* Set everything up and remember that we have.
   main will call malloc which calls this function.  That is before any threads
   or signal handlers have been set up, so we don't need thread protection.  */
int
__malloc_initialize ()
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}
static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static __ptr_t morecore_nolock PP ((__malloc_size_t));
static __ptr_t
morecore_nolock (size)
     __malloc_size_t size;
{
  __ptr_t result;
  malloc_info *newinfo, *oldinfo;
  __malloc_size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
	 added blocks in the table itself, as we hope to place them in
	 existing free space, which is already covered by part of the
	 existing table.  */
      newsize = heapsize;
      do
	newsize *= 2;
      while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
	 from realloc in the case of growing a large block, because the
	 block being grown is momentarily marked as free.  In this case
	 _heaplimit is zero so we know not to reuse space for internal
	 allocation.  */
      if (_heaplimit != 0)
	{
	  /* First try to allocate the new info table in core we already
	     have, in the usual way using realloc.  If realloc cannot
	     extend it in place or relocate it to existing sufficient core,
	     we will get called again, and the code above will notice the
	     `morecore_recursing' flag and return null.  */
	  int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
	  morecore_recursing = 1;
	  newinfo = (malloc_info *) _realloc_internal_nolock
	    (_heapinfo, newsize * sizeof (malloc_info));
	  morecore_recursing = 0;
	  if (newinfo == NULL)
	    errno = save;
	  else
	    {
	      /* We found some space in core, and realloc has put the old
		 table's blocks on the free list.  Now zero the new part
		 of the table and install the new table location.  */
	      memset (&newinfo[heapsize], 0,
		      (newsize - heapsize) * sizeof (malloc_info));
	      _heapinfo = newinfo;
	      heapsize = newsize;
	      goto got_heap;
	    }
	}

      /* Allocate new space for the malloc info table.  */
      while (1)
	{
	  newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));

	  /* Did it fail?  */
	  if (newinfo == NULL)
	    {
	      (*__morecore) (-size);
	      return NULL;
	    }

	  /* Is it big enough to record status for its own space?
	     If so, we win.  */
	  if ((__malloc_size_t) BLOCK ((char *) newinfo
				       + newsize * sizeof (malloc_info))
	      < newsize)
	    break;

	  /* Must try again.  First give back most of what we just got.  */
	  (*__morecore) (- newsize * sizeof (malloc_info));
	  newsize *= 2;
	}

      /* Copy the old table to the beginning of the new,
	 and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
	      (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
	 it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}
/* Allocate memory from the heap.  */
__ptr_t
_malloc_internal_nolock (size)
     __malloc_size_t size;
{
  __ptr_t result;
  __malloc_size_t block, blocks, lastblocks, start;
  register __malloc_size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size.  */
      register __malloc_size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;

      /* Look in the fragment lists for a
	 free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters.  */
	  result = (__ptr_t) next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    _heapinfo[block].busy.info.frag.first = (unsigned long int)
	      ((unsigned long int) ((char *) next->next - (char *) NULL)
	       % BLOCKSIZE) >> log;

	  /* Update the statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
	  result = _malloc_internal_nolock (BLOCKSIZE);
	  PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
	  result = _malloc_internal_nolock (BLOCKSIZE);
#else
	  result = malloc (BLOCKSIZE);
#endif
	  if (result == NULL)
	    {
	      PROTECT_MALLOC_STATE (1);
	      goto out;
	    }

	  /* Link all fragments but the first into the free list.  */
	  next = (struct list *) ((char *) result + (1 << log));
	  next->next = NULL;
	  next->prev = &_fraghead[log];
	  _fraghead[log].next = next;

	  for (i = 2; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      next->next->prev = next;
	    }

	  /* Initialize the nfree and first counters for this block.  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Get a little extra.  */
	      __malloc_size_t wantblocks = blocks + __malloc_extra_blocks;
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      /* Check to see if the new core will be contiguous with the
		 final free block; if so we don't need to get as much.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  /* We can't do this if we will have to make the heap info
		     table bigger to accommodate the new space.  */
		  block + wantblocks <= heapsize &&
		  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
					ADDRESS (block + lastblocks)))
		{
		  /* We got it contiguously.  Which block we are extending
		     (the `final free block' referred to above) might have
		     changed, if it got combined with a freed info table.  */
		  block = _heapinfo[0].free.prev;
		  _heapinfo[block].free.size += (wantblocks - lastblocks);
		  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
		  _heaplimit += wantblocks - lastblocks;
		  continue;
		}
	      result = morecore_nolock (wantblocks * BLOCKSIZE);
	      if (result == NULL)
		goto out;
	      block = BLOCK (result);
	      /* Put the new block at the end of the free list.  */
	      _heapinfo[block].free.size = wantblocks;
	      _heapinfo[block].free.prev = _heapinfo[0].free.prev;
	      _heapinfo[block].free.next = 0;
	      _heapinfo[0].free.prev = block;
	      _heapinfo[_heapinfo[block].free.prev].free.next = block;
	      ++_chunks_free;
	      /* Now loop to use some of that block for this allocation.  */
	    }
	}

      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list. */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list. */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list. */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
	 first with a negative number so you can find the first block by
	 adding that adjustment.  */
      while (--blocks != 0)
	_heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}
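
/* Illustrative sketch of the fragment-size computation above (not part
   of the source): the `--size; while ((size /= 2) != 0) ++log;' loop
   yields the base-two logarithm of the smallest power-of-two fragment
   that holds the request.  E.g. a 24-byte request gives log = 5, i.e. a
   32-byte fragment; an 8-byte request gives log = 3, an exact fit.  */
#if 0
static int
fragment_log (__malloc_size_t size)	/* Assumes 0 < size <= BLOCKSIZE/2.  */
{
  int log = 1;
  --size;
  while ((size /= 2) != 0)
    ++log;
  return log;				/* The fragment is 1 << log bytes.  */
}
#endif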
__ptr_t
_malloc_internal (size)
     __malloc_size_t size;
{
  __ptr_t result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}
__ptr_t
malloc (size)
     __malloc_size_t size;
{
  __ptr_t (*hook) (__malloc_size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of __malloc_hook to an automatic variable in case
     __malloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not the right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to __malloc_hook) for now.  */
  hook = __malloc_hook;
  return (hook != NULL ? *hook : _malloc_internal) (size);
}
#ifndef _LIBC

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

__ptr_t
_malloc (size)
     __malloc_size_t size;
{
  return malloc (size);
}

void
_free (ptr)
     __ptr_t ptr;
{
  free (ptr);
}

__ptr_t
_realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  return realloc (ptr, size);
}

#endif
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* Cope with systems lacking `memmove'.  */
#if (!defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))

#ifdef emacs
#undef	__malloc_safe_bcopy
#define __malloc_safe_bcopy safe_bcopy
#else
/* This function is defined in realloc.c.  */
extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
#endif
#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))

#endif
/* Debugging hook for free.  */
void (*__free_hook) PP ((__ptr_t __ptr));

/* List of blocks allocated by memalign.  */
struct alignlist *_aligned_blocks = NULL;
/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (ptr)
     __ptr_t ptr;
{
  int type;
  __malloc_size_t block, blocks;
  register __malloc_size_t i;
  struct list *prev, *next;
  __ptr_t curbrk;
  const __malloc_size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

  PROTECT_MALLOC_STATE (0);

  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}

      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

	  __malloc_size_t info_block = BLOCK (_heapinfo);
	  __malloc_size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  __malloc_size_t prev_block = _heapinfo[block].free.prev;
	  __malloc_size_t prev_blocks = _heapinfo[prev_block].free.size;
	  __malloc_size_t next_block = _heapinfo[block].free.next;
	  __malloc_size_t next_blocks = _heapinfo[next_block].free.size;

	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold))))
	    {
	      malloc_info *newinfo;
	      __malloc_size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal_nolock (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      newinfo = (malloc_info *) _malloc_internal_nolock (info_blocks
								 * BLOCKSIZE);
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }

	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register __malloc_size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      (*__morecore) (-bytes);
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
	  _free_internal_nolock (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block.  */
	  next = (struct list *) ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block.  */
	  prev = (struct list *) ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first = (unsigned long int)
	    ((unsigned long int) ((char *) ptr - (char *) NULL)
	     % BLOCKSIZE >> type);
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
}
/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (ptr)
     __ptr_t ptr;
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}

/* Return memory to the heap.  */

void
free (ptr)
     __ptr_t ptr;
{
  void (*hook) (__ptr_t) = __free_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}

/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (ptr)
     __ptr_t ptr;
{
  free (ptr);
}
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* Cope with systems lacking `memmove'.  */
#if (!defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))

#ifdef emacs
#undef	__malloc_safe_bcopy
#define __malloc_safe_bcopy safe_bcopy
#else
/* Snarfed directly from Emacs src/dispnew.c:
   XXX Should use system bcopy if it handles overlap.  */

/* Like bcopy except never gets confused by overlap.  */

void
__malloc_safe_bcopy (afrom, ato, size)
     __ptr_t afrom;
     __ptr_t ato;
     __malloc_size_t size;
{
  char *from = afrom, *to = ato;

  if (size <= 0 || from == to)
    return;

  /* If the source and destination don't overlap, then bcopy can
     handle it.  If they do overlap, but the destination is lower in
     memory than the source, we'll assume bcopy can handle that.  */
  if (to < from || from + size <= to)
    bcopy (from, to, size);

  /* Otherwise, we'll copy from the end.  */
  else
    {
      register char *endf = from + size;
      register char *endt = to + size;

      /* If TO - FROM is large, then we should break the copy into
	 nonoverlapping chunks of TO - FROM bytes each.  However, if
	 TO - FROM is small, then the bcopy function call overhead
	 makes this not worth it.  The crossover point could be about
	 anywhere.  Since I don't think the obvious copy loop is too
	 bad, I'm trying to err in its favor.  */
      if (to - from < 64)
	{
	  do
	    *--endt = *--endf;
	  while (endf != from);
	}
      else
	{
	  for (;;)
	    {
	      endt -= (to - from);
	      endf -= (to - from);

	      if (endt < to)
		break;

	      bcopy (endf, endt, to - from);
	    }

	  /* If SIZE wasn't a multiple of TO - FROM, there will be a
	     little left over.  The amount left over is
	     (endt + (to - from)) - to, which is endt - from.  */
	  bcopy (from, to, endt - from);
	}
    }
}

#endif /* emacs.  */
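
/* Worked example of the backward chunked copy above (illustrative only):
   with from = buf, to = buf + 4, size = 10, the regions overlap and the
   destination is higher, so the copy runs from the end and a final bcopy
   moves the endt - from leftover.  */
#if 0
static void
overlap_copy_example (void)
{
  char buf[16] = "abcdefghij";
  __malloc_safe_bcopy (buf, buf + 4, 10);  /* buf+4..buf+13 = "abcdefghij".  */
}
#endif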
extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))

#endif
#define min(A, B) ((A) < (B) ? (A) : (B))

/* Debugging hook for realloc.  */
__ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc. */
__ptr_t
_realloc_internal_nolock (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;
  int type;
  __malloc_size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
	{
	  result = _malloc_internal_nolock (size);
	  if (result != NULL)
	    {
	      memcpy (result, ptr, size);
	      _free_internal_nolock (ptr);
	      goto out;
	    }
	}

      /* The new size is a large allocation as well;
	 see if we can hold it in place. */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
	{
	  /* The new size is smaller; return
	     excess memory to the free list. */
	  _heapinfo[block + blocks].busy.type = 0;
	  _heapinfo[block + blocks].busy.info.size
	    = _heapinfo[block].busy.info.size - blocks;
	  _heapinfo[block].busy.info.size = blocks;
	  /* We have just created a new chunk by splitting a chunk in two.
	     Now we will free this chunk; increment the statistics counter
	     so it doesn't become wrong when _free_internal decrements it.  */
	  ++_chunks_used;
	  _free_internal_nolock (ADDRESS (block + blocks));
	  result = ptr;
	}
      else if (blocks == _heapinfo[block].busy.info.size)
	/* No size change necessary.  */
	result = ptr;
      else
	{
	  /* Won't fit, so allocate a new region that will.
	     Free the old region first in case there is sufficient
	     adjacent free space to grow without moving. */
	  blocks = _heapinfo[block].busy.info.size;
	  /* Prevent free from actually returning memory to the system.  */
	  oldlimit = _heaplimit;
	  _heaplimit = 0;
	  _free_internal_nolock (ptr);
	  result = _malloc_internal_nolock (size);
	  PROTECT_MALLOC_STATE (0);
	  if (_heaplimit == 0)
	    _heaplimit = oldlimit;
	  if (result == NULL)
	    {
	      /* Now we're really in trouble.  We have to unfree
		 the thing we just freed.  Unfortunately it might
		 have been coalesced with its neighbors.  */
	      if (_heapindex == block)
		(void) _malloc_internal_nolock (blocks * BLOCKSIZE);
	      else
		{
		  __ptr_t previous
		    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
		  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
		  _free_internal_nolock (previous);
		}
	      goto out;
	    }
	  if (ptr != result)
	    memmove (result, ptr, blocks * BLOCKSIZE);
	}
      break;

    default:
      /* Old size is a fragment; type is logarithm
	 to base two of the fragment size.  */
      if (size > (__malloc_size_t) (1 << (type - 1)) &&
	  size <= (__malloc_size_t) (1 << type))
	/* The new size is the same kind of fragment.  */
	result = ptr;
      else
	{
	  /* The new size is different; allocate a new space,
	     and copy the lesser of the new size and the old. */
	  result = _malloc_internal_nolock (size);
	  if (result == NULL)
	    goto out;
	  memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
	  _free_internal_nolock (ptr);
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}
__ptr_t
_realloc_internal (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}
__ptr_t
realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t (*hook) (__ptr_t, __malloc_size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  hook = __realloc_hook;
  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
__ptr_t
calloc (nmemb, size)
     register __malloc_size_t nmemb;
     register __malloc_size_t size;
{
  register __ptr_t result = malloc (nmemb * size);

  if (result != NULL)
    (void) memset (result, 0, nmemb * size);

  return result;
}
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with the GNU C Library; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
   MA 02110-1301, USA.  */
1709 /* uClibc defines __GNU_LIBRARY__, but it is not completely
1711 #if !defined(__GNU_LIBRARY__) || defined(__UCLIBC__)
1713 #else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
1714 /* It is best not to declare this and cast its result on foreign operating
1715 systems with potentially hostile include files. */
1718 extern __ptr_t __sbrk
PP ((ptrdiff_t increment
));
1719 #endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
__ptr_t
__default_morecore (increment)
     __malloc_ptrdiff_t increment;
{
  __ptr_t result;
#ifdef CYGWIN
  if (!bss_sbrk_did_unexec)
    {
      return bss_sbrk (increment);
    }
#endif
  result = (__ptr_t) __sbrk (increment);
  if (result == (__ptr_t) -1)
    return NULL;
  return result;
}
1746 This library is free software; you can redistribute it and/or
1747 modify it under the terms of the GNU General Public License as
1748 published by the Free Software Foundation; either version 2 of the
1749 License, or (at your option) any later version.
1751 This library is distributed in the hope that it will be useful,
1752 but WITHOUT ANY WARRANTY; without even the implied warranty of
1753 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1754 General Public License for more details.
1756 You should have received a copy of the GNU General Public
1757 License along with this library; see the file COPYING. If
1758 not, write to the Free Software Foundation, Inc., 51 Franklin Street,
1759 Fifth Floor, Boston, MA 02110-1301, USA. */
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

#if __DJGPP__ - 0 == 1

/* There is some problem with memalign in DJGPP v1 and we are supposed
   to omit it.  No one told me why; they just told me to do it.  */

#else

__ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
				__malloc_size_t __alignment));
__ptr_t
memalign (alignment, size)
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj, lastadj;
  __ptr_t (*hook) (__malloc_size_t, __malloc_size_t) = __memalign_hook;

  if (hook)
    return (*hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;

  do
    {
      /* Reallocate the block with only as much excess as it needs.  */
      free (result);
      result = malloc (adj + size);
      if (result == NULL)	/* Impossible unless interrupted.  */
	return NULL;

      lastadj = adj;
      adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;
      /* It's conceivable we might have been so unlucky as to get a
	 different block with weaker alignment.  If so, this block is too
	 short to contain SIZE after alignment correction.  So we must
	 try again and get another block, slightly larger.  */
    } while (adj > lastadj);

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  l = (struct alignlist *) malloc (sizeof (struct alignlist));
	  if (l != NULL)
	    {
	      l->next = _aligned_blocks;
	      _aligned_blocks = l;
	    }
	}
      if (l != NULL)
	{
	  l->exact = result;
	  result = l->aligned = (char *) result + alignment - adj;
	}
      UNLOCK_ALIGNED_BLOCKS ();
      if (l == NULL)
	{
	  free (result);
	  result = NULL;
	}
    }

  return result;
}
int
posix_memalign (memptr, alignment, size)
     __ptr_t *memptr;
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t mem;

  if (alignment == 0
      || alignment % sizeof (__ptr_t) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = memalign (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;

  return 0;
}

#endif /* Not DJGPP v1 */
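
/* Usage sketch (illustrative only): both entry points over-allocate and
   record the returned address in _aligned_blocks, so that free can map it
   back to the block malloc really returned.  */
#if 0
static void
memalign_example (void)
{
  __ptr_t p = memalign (64, 200);	/* 200 bytes, 64-byte aligned.  */
  __ptr_t q;
  if (posix_memalign (&q, 64, 200) == 0)	/* POSIX-style interface.  */
    free (q);
  free (p);
}
#endif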
/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#if defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC)

/* Emacs defines GMALLOC_INHIBIT_VALLOC to avoid this definition
   on MSDOS, where it conflicts with a system header file.  */

#define ELIDE_VALLOC

#endif

#ifndef ELIDE_VALLOC

#if defined (__GNU_LIBRARY__) || defined (_LIBC)
#include <stddef.h>
#include <sys/cdefs.h>
#if defined (__GLIBC__) && __GLIBC__ >= 2
/* __getpagesize is already declared in <unistd.h> with return type int */
#else
extern size_t __getpagesize PP ((void));
#endif
#else
#include "getpagesize.h"
#define __getpagesize()	getpagesize()
#endif

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

static __malloc_size_t pagesize;

__ptr_t
valloc (size)
     __malloc_size_t size;
{
  if (pagesize == 0)
    pagesize = __getpagesize ();

  return memalign (pagesize, size);
}

#endif /* Not ELIDE_VALLOC.  */
/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifdef GC_MCHECK

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* Old hook values.  */
static void (*old_free_hook) __P ((__ptr_t ptr));
static __ptr_t (*old_malloc_hook) __P ((__malloc_size_t size));
static __ptr_t (*old_realloc_hook) __P ((__ptr_t ptr, __malloc_size_t size));

/* Function to call when something awful happens.  */
static void (*abortfunc) __P ((enum mcheck_status));

/* Arbitrary magical numbers.  */
#define MAGICWORD	0xfedabeeb
#define MAGICFREE	0xd8675309
#define MAGICBYTE	((char) 0xd7)
#define MALLOCFLOOD	((char) 0x93)
#define FREEFLOOD	((char) 0x95)

struct hdr
  {
    __malloc_size_t size;	/* Exact size requested by user.  */
    unsigned long int magic;	/* Magic number to check header integrity.  */
  };
#if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
#define flood memset
#else
static void flood __P ((__ptr_t, int, __malloc_size_t));
static void
flood (ptr, val, size)
     __ptr_t ptr;
     int val;
     __malloc_size_t size;
{
  char *cp = ptr;

  while (size--)
    *cp++ = val;
}
#endif
static enum mcheck_status checkhdr __P ((const struct hdr *));
static enum mcheck_status
checkhdr (hdr)
     const struct hdr *hdr;
{
  enum mcheck_status status;
  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
	status = MCHECK_TAIL;
      else
	status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}
static void freehook __P ((__ptr_t));
static void
freehook (ptr)
     __ptr_t ptr;
{
  struct hdr *hdr;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      flood (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}
static __ptr_t mallochook __P ((__malloc_size_t));
static __ptr_t
mallochook (size)
     __malloc_size_t size;
{
  struct hdr *hdr;

  __malloc_hook = old_malloc_hook;
  hdr = (struct hdr *) malloc (sizeof (struct hdr) + size + 1);
  __malloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  flood ((__ptr_t) (hdr + 1), MALLOCFLOOD, size);
  return (__ptr_t) (hdr + 1);
}
static __ptr_t reallochook __P ((__ptr_t, __malloc_size_t));
static __ptr_t
reallochook (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  struct hdr *hdr = NULL;
  __malloc_size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
	flood ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = (struct hdr *) realloc ((__ptr_t) hdr, sizeof (struct hdr) + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    flood ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return (__ptr_t) (hdr + 1);
}
static void
mabort (status)
     enum mcheck_status status;
{
  const char *msg;
  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
  abort ();
#endif
}
= 0;
2158 void (*func
) __P ((enum mcheck_status
));
2160 abortfunc
= (func
!= NULL
) ? func
: &mabort
;
2162 /* These hooks may not be safely inserted if malloc is already in use. */
2163 if (!__malloc_initialized
&& !mcheck_used
)
2165 old_free_hook
= __free_hook
;
2166 __free_hook
= freehook
;
2167 old_malloc_hook
= __malloc_hook
;
2168 __malloc_hook
= mallochook
;
2169 old_realloc_hook
= __realloc_hook
;
2170 __realloc_hook
= reallochook
;
2174 return mcheck_used
? 0 : -1;
enum mcheck_status
mprobe (__ptr_t ptr)
{
  return mcheck_used ? checkhdr (ptr) : MCHECK_DISABLED;
}

#endif /* GC_MCHECK */

/* arch-tag: 93dce5c0-f49a-41b5-86b1-f91c4169c02e
   (do not change this comment) */