/* This file is no longer automatically generated from libc.  */

#define _MALLOC_INTERNAL
#ifdef HAVE_GTK_AND_PTHREAD
#include <pthread.h>
#endif

/* The malloc headers and source files from the C library follow here.  */

/* Declarations for `malloc' and friends.
   Copyright (C) 1990, 1991, 1992, 1993, 1995, 1996, 1999, 2002, 2003, 2004,
   2005, 2006, 2007 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifdef _MALLOC_INTERNAL

#if ((defined __cplusplus || (defined (__STDC__) && __STDC__) \
      || defined STDC_HEADERS || defined PROTOTYPES) \
     && ! defined (BROKEN_PROTOTYPES))
#undef	PP
#define	PP(args)	args
#undef	__ptr_t
#define	__ptr_t		void *
#else /* Not C++ or ANSI C.  */
#undef	PP
#define	PP(args)	()
#undef	__ptr_t
#define	__ptr_t		char *
#endif /* C++ or ANSI C.  */

#if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
#include <string.h>
#else
#ifndef memset
#define	memset(s, zero, n)	bzero ((s), (n))
#endif
#ifndef memcpy
#define	memcpy(d, s, n)		bcopy ((s), (d), (n))
#endif
#endif

#endif /* _MALLOC_INTERNAL.  */
#ifdef __STDC__
#define	__malloc_size_t		size_t
#define	__malloc_ptrdiff_t	ptrdiff_t
#else
#ifdef __GNUC__
#define	__malloc_size_t		__SIZE_TYPE__
#endif
#endif

#ifndef __malloc_size_t
#define	__malloc_size_t		unsigned int
#endif
#ifndef __malloc_ptrdiff_t
#define	__malloc_ptrdiff_t	int
#endif

#ifndef FREE_RETURN_TYPE
#define	FREE_RETURN_TYPE	void
#endif
/* Allocate SIZE bytes of memory.  */
extern __ptr_t malloc PP ((__malloc_size_t __size));
/* Re-allocate the previously allocated block
   in __ptr_t, making the new block SIZE bytes long.  */
extern __ptr_t realloc PP ((__ptr_t __ptr, __malloc_size_t __size));
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern __ptr_t calloc PP ((__malloc_size_t __nmemb, __malloc_size_t __size));
/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
extern FREE_RETURN_TYPE free PP ((__ptr_t __ptr));
/* Allocate SIZE bytes aligned to ALIGNMENT bytes.  */
#if ! (defined (_MALLOC_INTERNAL) && __DJGPP__ - 0 == 1) /* Avoid conflict.  */
extern __ptr_t memalign PP ((__malloc_size_t __alignment,
			     __malloc_size_t __size));
#endif
/* Allocate SIZE bytes on a page boundary.  */
#if ! (defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC))
extern __ptr_t valloc PP ((__malloc_size_t __size));
#endif
#ifdef _MALLOC_INTERNAL

/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT		(CHAR_BIT * sizeof(int))
#define BLOCKLOG	(INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE	(1 << BLOCKLOG)
#define BLOCKIFY(SIZE)	(((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
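
/* Illustrative sketch, not part of the original source: on a host
   where INT_BIT > 16, BLOCKLOG is 12 and BLOCKSIZE is 4096, so
   BLOCKIFY rounds a byte count up to whole blocks:

       BLOCKIFY (1)    == (1 + 4095) / 4096    == 1
       BLOCKIFY (4096) == (4096 + 4095) / 4096 == 1
       BLOCKIFY (4097) == (4097 + 4095) / 4096 == 2

   i.e. the usual round-up integer division.  */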
/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP		(INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS	8
/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
	/* Zero for a large (multiblock) object, or positive giving the
	   logarithm to the base two of the fragment size.  */
	int type;
	union
	  {
	    struct
	      {
		__malloc_size_t nfree;	/* Free frags in a fragmented block.  */
		__malloc_size_t first;	/* First free fragment of the block.  */
	      } frag;
	    /* For a large object, in its first block, this has the number
	       of blocks in the object.  In the other blocks, this has a
	       negative number which says how far back the first block is.  */
	    __malloc_ptrdiff_t size;
	  } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
	__malloc_size_t size;	/* Size (in blocks) of a free cluster.  */
	__malloc_size_t next;	/* Index of next free cluster.  */
	__malloc_size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;
/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A)	(((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B)	((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
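
/* Illustrative sketch, not part of the original source: BLOCK and
   ADDRESS are inverses.  Block numbers are 1-based so that index 0 of
   _heapinfo is free to serve as the free-list header.  With
   BLOCKSIZE == 4096:

       BLOCK (_heapbase + 8192)  == 8192 / 4096 + 1 == 3
       ADDRESS (3)               == _heapbase + 2 * 4096

   Any address inside a block yields the same block number, since the
   division truncates.  */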
/* Current search index for the heap table.  */
extern __malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
extern __malloc_size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];
/* List of blocks allocated with `memalign' (or `valloc').  */
struct alignlist
  {
    struct alignlist *next;
    __ptr_t aligned;		/* The address that memaligned returned.  */
    __ptr_t exact;		/* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;
/* Instrumentation.  */
extern __malloc_size_t _chunks_used;
extern __malloc_size_t _bytes_used;
extern __malloc_size_t _chunks_free;
extern __malloc_size_t _bytes_free;
/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern __ptr_t _malloc_internal PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal PP ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal PP ((__ptr_t __ptr));

#ifdef HAVE_GTK_AND_PTHREAD
extern pthread_mutex_t _malloc_mutex;
#define LOCK()		pthread_mutex_lock (&_malloc_mutex)
#define UNLOCK()	pthread_mutex_unlock (&_malloc_mutex)
#else
#define LOCK()
#define UNLOCK()
#endif

#endif /* _MALLOC_INTERNAL.  */
/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object.  */
extern __ptr_t malloc_find_object_address PP ((__ptr_t __ptr));

/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern __ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size));

/* Default value of `__morecore'.  */
extern __ptr_t __default_morecore PP ((__malloc_ptrdiff_t __size));

/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) PP ((void));

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
extern __malloc_size_t __malloc_extra_blocks;

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize PP ((void));
/* Hooks for debugging versions.  */
extern void (*__malloc_initialize_hook) PP ((void));
extern void (*__free_hook) PP ((__ptr_t __ptr));
extern __ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));
extern __ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
extern __ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
				       __malloc_size_t __alignment));
/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };
/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck PP ((void (*__abortfunc) PP ((enum mcheck_status))));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe PP ((__ptr_t __ptr));

/* Activate a standard collection of tracing hooks.  */
extern void mtrace PP ((void));
extern void muntrace PP ((void));
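
/* Illustrative usage sketch, not part of the original source
   (`handle_corruption' is a hypothetical callback):  */
#if 0
  if (mcheck (NULL) == 0)	/* NULL: report on stderr, then abort.  */
    {
      char *p = malloc (32);
      if (mprobe (p) != MCHECK_OK) /* Same checks free/realloc perform.  */
	handle_corruption ();
      free (p);
    }
#endif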
/* Statistics available to the user.  */
struct mstats
  {
    __malloc_size_t bytes_total; /* Total size of the heap.  */
    __malloc_size_t chunks_used; /* Chunks allocated by the user.  */
    __malloc_size_t bytes_used;	 /* Byte total of user-allocated chunks.  */
    __malloc_size_t chunks_free; /* Chunks in the free list.  */
    __malloc_size_t bytes_free;	 /* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats PP ((void));
/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings PP ((__ptr_t __start,
				 void (*__warnfun) PP ((const char *))));
/* Relocating allocator.  */

/* Allocate SIZE bytes, and store the address in *HANDLEPTR.  */
extern __ptr_t r_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));

/* Free the storage allocated in HANDLEPTR.  */
extern void r_alloc_free PP ((__ptr_t *__handleptr));

/* Adjust the block at HANDLEPTR to be SIZE bytes long.  */
extern __ptr_t r_re_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));
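
/* Illustrative usage sketch, not part of the original source: the
   relocating allocator returns storage through a handle, so callers
   must reload the pointer from the handle after any call that may
   move blocks.  */
#if 0
  __ptr_t handle;
  char *buf = (char *) r_alloc (&handle, 1024);
  buf = (char *) r_re_alloc (&handle, 4096);	/* Block may move.  */
  r_alloc_free (&handle);
#endif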
#endif /* malloc.h  */
/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif
#include <errno.h>
/* How to really get more memory.  */
#ifdef CYGWIN
extern __ptr_t bss_sbrk PP ((ptrdiff_t __size));
extern int bss_sbrk_did_unexec;
#endif
__ptr_t (*__morecore) PP ((ptrdiff_t __size)) = __default_morecore;

/* Debugging hook for `malloc'.  */
__ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));
/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries.  */
static __malloc_size_t heapsize;

/* Search index in the info table.  */
__malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
__malloc_size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
__malloc_size_t _chunks_used;
__malloc_size_t _bytes_used;
__malloc_size_t _chunks_free;
__malloc_size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

__malloc_size_t __malloc_extra_blocks;

void (*__malloc_initialize_hook) PP ((void));
void (*__after_morecore_hook) PP ((void));
#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static __malloc_size_t last_state_size;
static malloc_info *last_heapinfo;

void
protect_malloc_state (protect_p)
     int protect_p;
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo   = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
		    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
	abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state(PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif
/* Aligned allocation.  */
static __ptr_t align PP ((__malloc_size_t));
static __ptr_t
align (size)
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows a
     signed int type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if ((__malloc_ptrdiff_t) size < 0)
    result = 0;
  else
    {
      result = (*__morecore) (size);
      adj = (unsigned long int) ((unsigned long int) ((char *) result -
						      (char *) NULL)) % BLOCKSIZE;
      if (adj != 0)
	{
	  __ptr_t new;
	  adj = BLOCKSIZE - adj;
	  new = (*__morecore) (adj);
	  result = (char *) result + adj;
	}

      if (__after_morecore_hook)
	(*__after_morecore_hook) ();
    }

  return result;
}
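
/* Illustrative sketch, not part of the original source: suppose
   BLOCKSIZE is 4096 and __morecore returns core at address 0x5010.
   Then adj == 0x5010 % 4096 == 0x10, so align requests another
   4096 - 0x10 == 0xff0 bytes and returns 0x5010 + 0xff0 == 0x6000,
   which is block-aligned.  The `(char *) result - (char *) NULL'
   subtraction is just a portable way to treat the pointer as an
   integer.  */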
/* Get SIZE bytes, if we can get them starting at END.
   Return the address of the space we got.
   If we cannot get space at END, fail and return 0.  */
static __ptr_t get_contiguous_space PP ((__malloc_ptrdiff_t, __ptr_t));
static __ptr_t
get_contiguous_space (size, position)
     __malloc_ptrdiff_t size;
     __ptr_t position;
{
  __ptr_t before;
  __ptr_t after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}
/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void register_heapinfo PP ((void));

static void
register_heapinfo ()
{
  __malloc_size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}
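
/* Illustrative sketch, not part of the original source: for a 3-block
   object starting at block 10, the info table reads

       _heapinfo[10].busy.info.size ==  3
       _heapinfo[11].busy.info.size == -1
       _heapinfo[12].busy.info.size == -2

   so from any interior block B, B + _heapinfo[B].busy.info.size (a
   negative adjustment) lands on the object's first block; this is
   what malloc_find_object_address relies on.  */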
#ifdef HAVE_GTK_AND_PTHREAD
static pthread_once_t malloc_init_once_control = PTHREAD_ONCE_INIT;
pthread_mutex_t _malloc_mutex;
#endif

static void
malloc_initialize_1 ()
{
  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

#ifdef HAVE_GTK_AND_PTHREAD
  {
    pthread_mutexattr_t attr;

    pthread_mutexattr_init (&attr);
    pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init (&_malloc_mutex, &attr);
    pthread_mutexattr_destroy (&attr);
  }
#endif
  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}
/* Set everything up and remember that we have.  */
int
__malloc_initialize ()
{
#ifdef HAVE_GTK_AND_PTHREAD
  pthread_once (&malloc_init_once_control, malloc_initialize_1);
#else
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();
#endif

  return __malloc_initialized;
}
static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static __ptr_t morecore PP ((__malloc_size_t));
static __ptr_t
morecore (size)
     __malloc_size_t size;
{
  __ptr_t result;
  malloc_info *newinfo, *oldinfo;
  __malloc_size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
	 added blocks in the table itself, as we hope to place them in
	 existing free space, which is already covered by part of the
	 existing table.  */
      newsize = heapsize;
      do
	newsize *= 2;
      while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
	 from realloc in the case of growing a large block, because the
	 block being grown is momentarily marked as free.  In this case
	 _heaplimit is zero so we know not to reuse space for internal
	 allocation.  */
      if (_heaplimit != 0)
	{
	  /* First try to allocate the new info table in core we already
	     have, in the usual way using realloc.  If realloc cannot
	     extend it in place or relocate it to existing sufficient core,
	     we will get called again, and the code above will notice the
	     `morecore_recursing' flag and return null.  */
	  int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
	  morecore_recursing = 1;
	  newinfo = (malloc_info *) _realloc_internal
	    (_heapinfo, newsize * sizeof (malloc_info));
	  morecore_recursing = 0;
	  if (newinfo == NULL)
	    errno = save;
	  else
	    {
	      /* We found some space in core, and realloc has put the old
		 table's blocks on the free list.  Now zero the new part
		 of the table and install the new table location.  */
	      memset (&newinfo[heapsize], 0,
		      (newsize - heapsize) * sizeof (malloc_info));
	      _heapinfo = newinfo;
	      heapsize = newsize;
	      goto got_heap;
	    }
	}

      /* Allocate new space for the malloc info table.  */
      while (1)
	{
	  newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));

	  /* Did it fail?  */
	  if (newinfo == NULL)
	    {
	      (*__morecore) (-size);
	      return NULL;
	    }

	  /* Is it big enough to record status for its own space?
	     If so, we win.  */
	  if ((__malloc_size_t) BLOCK ((char *) newinfo
				       + newsize * sizeof (malloc_info))
	      < newsize)
	    break;

	  /* Must try again.  First give back most of what we just got.  */
	  (*__morecore) (- newsize * sizeof (malloc_info));
	  newsize *= 2;
	}

      /* Copy the old table to the beginning of the new,
	 and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
	      (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
	 it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}
/* Allocate memory from the heap.  */
__ptr_t
_malloc_internal (size)
     __malloc_size_t size;
{
  __ptr_t result;
  __malloc_size_t block, blocks, lastblocks, start;
  register __malloc_size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

#ifdef SUNOS_LOCALTIME_BUG
  if (size < 16)
    size = 16;
#endif
  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size. */
      register __malloc_size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;
      /* Look in the fragment lists for a
	 free fragment of the desired size. */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters. */
	  result = (__ptr_t) next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    _heapinfo[block].busy.info.frag.first = (unsigned long int)
	      ((unsigned long int) ((char *) next->next - (char *) NULL)
	       % BLOCKSIZE) >> log;

	  /* Update the statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
	  result = _malloc_internal (BLOCKSIZE);
	  PROTECT_MALLOC_STATE (0);
#else
	  result = malloc (BLOCKSIZE);
#endif
	  if (result == NULL)
	    {
	      PROTECT_MALLOC_STATE (1);
	      return NULL;
	    }
	  /* Link all fragments but the first into the free list.  */
	  next = (struct list *) ((char *) result + (1 << log));
	  next->next = NULL;
	  next->prev = &_fraghead[log];
	  _fraghead[log].next = next;

	  for (i = 2; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      next->next->prev = next;
	    }

	  /* Initialize the nfree and first counters for this block.  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Get a little extra.  */
	      __malloc_size_t wantblocks = blocks + __malloc_extra_blocks;
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      /* Check to see if the new core will be contiguous with the
		 final free block; if so we don't need to get as much.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  /* We can't do this if we will have to make the heap info
		     table bigger to accommodate the new space.  */
		  block + wantblocks <= heapsize &&
		  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
					ADDRESS (block + lastblocks)))
		{
		  /* We got it contiguously.  Which block we are extending
		     (the `final free block' referred to above) might have
		     changed, if it got combined with a freed info table.  */
		  block = _heapinfo[0].free.prev;
		  _heapinfo[block].free.size += (wantblocks - lastblocks);
		  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
		  _heaplimit += wantblocks - lastblocks;
		  continue;
		}
	      result = morecore (wantblocks * BLOCKSIZE);
	      if (result == NULL)
		return NULL;
	      block = BLOCK (result);
	      /* Put the new block at the end of the free list.  */
	      _heapinfo[block].free.size = wantblocks;
	      _heapinfo[block].free.prev = _heapinfo[0].free.prev;
	      _heapinfo[block].free.next = 0;
	      _heapinfo[0].free.prev = block;
	      _heapinfo[_heapinfo[block].free.prev].free.next = block;
	      ++_chunks_free;
	      /* Now loop to use some of that block for this allocation.  */
	    }
	}

      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list. */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list. */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list. */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
	 first with a negative number so you can find the first block by
	 adding that adjustment.  */
      while (--blocks > 0)
	_heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
  return result;
}
__ptr_t
malloc (size)
     __malloc_size_t size;
{
  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  return (__malloc_hook != NULL ? *__malloc_hook : _malloc_internal) (size);
}
#ifndef _LIBC

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

__ptr_t
_malloc (size)
     __malloc_size_t size;
{
  return malloc (size);
}

void
_free (ptr)
     __ptr_t ptr;
{
  free (ptr);
}

__ptr_t
_realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  return realloc (ptr, size);
}

#endif
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* Cope with systems lacking `memmove'.  */
#if  (defined (MEMMOVE_MISSING) || \
      !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))

#ifdef emacs
#undef	__malloc_safe_bcopy
#define __malloc_safe_bcopy safe_bcopy
#else

/* This function is defined in realloc.c.  */
extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))

#endif
#endif
/* Debugging hook for free.  */
void (*__free_hook) PP ((__ptr_t __ptr));

/* List of blocks allocated by memalign.  */
struct alignlist *_aligned_blocks = NULL;
/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (ptr)
     __ptr_t ptr;
{
  int type;
  __malloc_size_t block, blocks;
  register __malloc_size_t i;
  struct list *prev, *next;
  __ptr_t curbrk;
  const __malloc_size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

  PROTECT_MALLOC_STATE (0);

  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}

      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

	  __malloc_size_t info_block = BLOCK (_heapinfo);
	  __malloc_size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  __malloc_size_t prev_block = _heapinfo[block].free.prev;
	  __malloc_size_t prev_blocks = _heapinfo[prev_block].free.size;
	  __malloc_size_t next_block = _heapinfo[block].free.next;
	  __malloc_size_t next_blocks = _heapinfo[next_block].free.size;

	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold))))
	    {
	      malloc_info *newinfo;
	      __malloc_size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      newinfo = (malloc_info *) _malloc_internal (info_blocks
							  * BLOCKSIZE);
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }

	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register __malloc_size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      (*__morecore) (-bytes);
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

#ifdef GC_MALLOC_CHECK
	  _free_internal (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block.  */
	  next = (struct list *) ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block.  */
	  prev = (struct list *) ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first = (unsigned long int)
	    ((unsigned long int) ((char *) ptr - (char *) NULL)
	     % BLOCKSIZE >> type);
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
}
/* Return memory to the heap.  */
FREE_RETURN_TYPE
free (ptr)
     __ptr_t ptr;
{
  if (__free_hook != NULL)
    (*__free_hook) (ptr);
  else
    _free_internal (ptr);
}
/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (ptr)
     __ptr_t ptr;
{
  free (ptr);
}
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* Cope with systems lacking `memmove'.  */
#if  (defined (MEMMOVE_MISSING) || \
      !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))

#ifdef emacs
#undef	__malloc_safe_bcopy
#define __malloc_safe_bcopy safe_bcopy
#else
/* Snarfed directly from Emacs src/dispnew.c:
   XXX Should use system bcopy if it handles overlap.  */

/* Like bcopy except never gets confused by overlap.  */

void
__malloc_safe_bcopy (afrom, ato, size)
     __ptr_t afrom;
     __ptr_t ato;
     __malloc_size_t size;
{
  char *from = afrom, *to = ato;

  if (size <= 0 || from == to)
    return;

  /* If the source and destination don't overlap, then bcopy can
     handle it.  If they do overlap, but the destination is lower in
     memory than the source, we'll assume bcopy can handle that.  */
  if (to < from || from + size <= to)
    bcopy (from, to, size);

  /* Otherwise, we'll copy from the end.  */
  else
    {
      register char *endf = from + size;
      register char *endt = to + size;

      /* If TO - FROM is large, then we should break the copy into
	 nonoverlapping chunks of TO - FROM bytes each.  However, if
	 TO - FROM is small, then the bcopy function call overhead
	 makes this not worth it.  The crossover point could be about
	 anywhere.  Since I don't think the obvious copy loop is too
	 bad, I'm trying to err in its favor.  */
      if (to - from < 64)
	{
	  do
	    *--endt = *--endf;
	  while (endf != from);
	}
      else
	{
	  for (;;)
	    {
	      endt -= (to - from);
	      endf -= (to - from);

	      if (endt < to)
		break;

	      bcopy (endf, endt, to - from);
	    }

	  /* If SIZE wasn't a multiple of TO - FROM, there will be a
	     little left over.  The amount left over is
	     (endt + (to - from)) - to, which is endt - from.  */
	  bcopy (from, to, endt - from);
	}
    }
}
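
/* Illustrative sketch, not part of the original source: with
   from == p, to == p + 100 and size == 250, the chunked branch above
   copies backward in nonoverlapping 100-byte chunks (bytes 150..249
   first, then 50..149) and finally the leftover endt - from == 50
   bytes at the front.  Each chunk is written entirely above the
   region it is read from, so no source byte is clobbered before it
   has been copied.  */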
#endif	/* emacs */

#ifndef memmove
extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))
#endif

#endif
#define min(A, B) ((A) < (B) ? (A) : (B))

/* Debugging hook for realloc.  */
__ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc. */
__ptr_t
_realloc_internal (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;
  int type;
  __malloc_size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal (ptr);
      return _malloc_internal (0);
    }
  else if (ptr == NULL)
    return _malloc_internal (size);

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
	{
	  result = _malloc_internal (size);
	  if (result != NULL)
	    {
	      memcpy (result, ptr, size);
	      _free_internal (ptr);
	      return result;
	    }
	}

      /* The new size is a large allocation as well;
	 see if we can hold it in place. */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
	{
	  /* The new size is smaller; return
	     excess memory to the free list. */
	  _heapinfo[block + blocks].busy.type = 0;
	  _heapinfo[block + blocks].busy.info.size
	    = _heapinfo[block].busy.info.size - blocks;
	  _heapinfo[block].busy.info.size = blocks;
	  /* We have just created a new chunk by splitting a chunk in two.
	     Now we will free this chunk; increment the statistics counter
	     so it doesn't become wrong when _free_internal decrements it.  */
	  ++_chunks_used;
	  _free_internal (ADDRESS (block + blocks));
	  result = ptr;
	}
      else if (blocks == _heapinfo[block].busy.info.size)
	/* No size change necessary.  */
	result = ptr;
      else
	{
	  /* Won't fit, so allocate a new region that will.
	     Free the old region first in case there is sufficient
	     adjacent free space to grow without moving.  */
	  blocks = _heapinfo[block].busy.info.size;
	  /* Prevent free from actually returning memory to the system.  */
	  oldlimit = _heaplimit;
	  _heaplimit = 0;
	  _free_internal (ptr);
	  result = _malloc_internal (size);
	  PROTECT_MALLOC_STATE (0);
	  if (_heaplimit == 0)
	    _heaplimit = oldlimit;
	  if (result == NULL)
	    {
	      /* Now we're really in trouble.  We have to unfree
		 the thing we just freed.  Unfortunately it might
		 have been coalesced with its neighbors.  */
	      if (_heapindex == block)
		(void) _malloc_internal (blocks * BLOCKSIZE);
	      else
		{
		  __ptr_t previous
		    = _malloc_internal ((block - _heapindex) * BLOCKSIZE);
		  (void) _malloc_internal (blocks * BLOCKSIZE);
		  _free_internal (previous);
		}
	      return NULL;
	    }
	  if (ptr != result)
	    memmove (result, ptr, blocks * BLOCKSIZE);
	}
      break;

    default:
      /* Old size is a fragment; type is logarithm
	 to base two of the fragment size.  */
      if (size > (__malloc_size_t) (1 << (type - 1)) &&
	  size <= (__malloc_size_t) (1 << type))
	/* The new size is the same kind of fragment.  */
	result = ptr;
      else
	{
	  /* The new size is different; allocate a new space,
	     and copy the lesser of the new size and the old. */
	  result = _malloc_internal (size);
	  if (result == NULL)
	    return NULL;
	  memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
	  _free_internal (ptr);
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
  return result;
}
__ptr_t
realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  return (__realloc_hook != NULL ? *__realloc_hook : _realloc_internal)
    (ptr, size);
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif
/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
__ptr_t
calloc (nmemb, size)
     register __malloc_size_t nmemb;
     register __malloc_size_t size;
{
  register __ptr_t result = malloc (nmemb * size);

  if (result != NULL)
    (void) memset (result, 0, nmemb * size);

  return result;
}
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with the GNU C Library; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
   MA 02110-1301, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif
#ifndef __GNU_LIBRARY__
#define __sbrk	sbrk
#endif

#ifdef __GNU_LIBRARY__
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

#include <stddef.h>
extern __ptr_t __sbrk PP ((ptrdiff_t increment));
#endif
/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
__ptr_t
__default_morecore (increment)
     __malloc_ptrdiff_t increment;
{
  __ptr_t result;
#ifdef CYGWIN
  if (!bss_sbrk_did_unexec)
    {
      return bss_sbrk (increment);
    }
#endif
  result = (__ptr_t) __sbrk (increment);
  if (result == (__ptr_t) -1)
    return NULL;
  return result;
}
/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif
#if __DJGPP__ - 0 == 1

/* There is some problem with memalign in DJGPP v1 and we are supposed
   to omit it.  No one told me why, they just told me to do it.  */

#else

__ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
				__malloc_size_t __alignment));
__ptr_t
memalign (alignment, size)
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj, lastadj;

  if (__memalign_hook)
    return (*__memalign_hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;

  do
    {
      /* Reallocate the block with only as much excess as it needs.  */
      free (result);
      result = malloc (adj + size);
      if (result == NULL)	/* Impossible unless interrupted.  */
	return NULL;

      lastadj = adj;
      adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;
      /* It's conceivable we might have been so unlucky as to get a
	 different block with weaker alignment.  If so, this block is too
	 short to contain SIZE after alignment correction.  So we must
	 try again and get another block, slightly larger.  */
    } while (adj > lastadj);

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  l = (struct alignlist *) malloc (sizeof (struct alignlist));
	  if (l == NULL)
	    {
	      free (result);
	      return NULL;
	    }
	  l->next = _aligned_blocks;
	  _aligned_blocks = l;
	}
      l->exact = result;
      result = l->aligned = (char *) result + alignment - adj;
    }

  return result;
}
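
/* Illustrative sketch, not part of the original source: for
   alignment == 64, if malloc returns address 0x1008 then
   adj == 0x1008 % 64 == 8, and the pointer recorded as l->aligned is
   0x1008 + 64 - 8 == 0x1040, a 64-byte boundary.  The l->exact /
   l->aligned pair is what lets `free' translate the aligned pointer
   back to the address malloc actually returned.  */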
#endif /* Not DJGPP v1 */
/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#if defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC)

/* Emacs defines GMALLOC_INHIBIT_VALLOC to avoid this definition
   on MSDOS, where it conflicts with a system header file.  */

#define ELIDE_VALLOC

#endif

#ifndef ELIDE_VALLOC
#if defined (__GNU_LIBRARY__) || defined (_LIBC)
#include <stddef.h>
#include <sys/cdefs.h>
#if defined (__GLIBC__) && __GLIBC__ >= 2
/* __getpagesize is already declared in <unistd.h> with return type int */
#else
extern size_t __getpagesize PP ((void));
#endif
#else
#include "getpagesize.h"
#define __getpagesize()	getpagesize()
#endif
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

static __malloc_size_t pagesize;

__ptr_t
valloc (size)
     __malloc_size_t size;
{
  if (pagesize == 0)
    pagesize = __getpagesize ();

  return memalign (pagesize, size);
}

#endif	/* Not ELIDE_VALLOC.  */
/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifdef GC_MCHECK

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

#include <stdio.h>
/* Old hook values.  */
static void (*old_free_hook) __P ((__ptr_t ptr));
static __ptr_t (*old_malloc_hook) __P ((__malloc_size_t size));
static __ptr_t (*old_realloc_hook) __P ((__ptr_t ptr, __malloc_size_t size));
/* Function to call when something awful happens.  */
static void (*abortfunc) __P ((enum mcheck_status));
/* Arbitrary magical numbers.  */
#define MAGICWORD	0xfedabeeb
#define MAGICFREE	0xd8675309
#define MAGICBYTE	((char) 0xd7)
#define MALLOCFLOOD	((char) 0x93)
#define FREEFLOOD	((char) 0x95)

struct hdr
  {
    __malloc_size_t size;	/* Exact size requested by user.  */
    unsigned long int magic;	/* Magic number to check header integrity.  */
  };
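
/* Illustrative sketch, not part of the original source: a checked
   block of SIZE bytes is laid out as

       | struct hdr { size; magic; } | SIZE user bytes | MAGICBYTE |

   mallochook over-allocates by sizeof (struct hdr) + 1, stores
   MAGICWORD in the header and MAGICBYTE just past the user area, and
   checkhdr later verifies both to detect underruns and overruns.  */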
#if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
#define flood memset
#else
static void flood __P ((__ptr_t, int, __malloc_size_t));
static void
flood (ptr, val, size)
     __ptr_t ptr;
     int val;
     __malloc_size_t size;
{
  char *cp = ptr;
  while (size--)
    *cp++ = val;
}
#endif
static enum mcheck_status checkhdr __P ((const struct hdr *));
static enum mcheck_status
checkhdr (hdr)
     const struct hdr *hdr;
{
  enum mcheck_status status;
  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
	status = MCHECK_TAIL;
      else
	status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}
static void freehook __P ((__ptr_t));
static void
freehook (ptr)
     __ptr_t ptr;
{
  struct hdr *hdr;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      flood (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}
static __ptr_t mallochook __P ((__malloc_size_t));
static __ptr_t
mallochook (size)
     __malloc_size_t size;
{
  struct hdr *hdr;

  __malloc_hook = old_malloc_hook;
  hdr = (struct hdr *) malloc (sizeof (struct hdr) + size + 1);
  __malloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  flood ((__ptr_t) (hdr + 1), MALLOCFLOOD, size);
  return (__ptr_t) (hdr + 1);
}
static __ptr_t reallochook __P ((__ptr_t, __malloc_size_t));
static __ptr_t
reallochook (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  struct hdr *hdr = NULL;
  __malloc_size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
	flood ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = (struct hdr *) realloc ((__ptr_t) hdr, sizeof (struct hdr) + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    flood ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return (__ptr_t) (hdr + 1);
}
static void
mabort (status)
     enum mcheck_status status;
{
  const char *msg;
  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
  abort ();
#endif
}
static int mcheck_used = 0;

int
mcheck (func)
     void (*func) __P ((enum mcheck_status));
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = __free_hook;
      __free_hook = freehook;
      old_malloc_hook = __malloc_hook;
      __malloc_hook = mallochook;
      old_realloc_hook = __realloc_hook;
      __realloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}
enum mcheck_status
mprobe (__ptr_t ptr)
{
  return mcheck_used ? checkhdr (ptr) : MCHECK_DISABLED;
}

#endif /* GC_MCHECK */
/* arch-tag: 93dce5c0-f49a-41b5-86b1-f91c4169c02e
   (do not change this comment) */