1 /* This file is no longer automatically generated from libc. */
3 #define _MALLOC_INTERNAL
5 /* The malloc headers and source files from the C library follow here. */
7 /* Declarations for `malloc' and friends.
8 Copyright 1990, 91, 92, 93, 95, 96, 99 Free Software Foundation, Inc.
9 Written May 1989 by Mike Haertel.
11 This library is free software; you can redistribute it and/or
12 modify it under the terms of the GNU Library General Public License as
13 published by the Free Software Foundation; either version 2 of the
14 License, or (at your option) any later version.
16 This library is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 Library General Public License for more details.
21 You should have received a copy of the GNU Library General Public
22 License along with this library; see the file COPYING.LIB. If
23 not, write to the Free Software Foundation, Inc., 51 Franklin Street,
24 Fifth Floor, Boston, MA 02110-1301, USA.
26 The author may be reached (Email) at the address mike@ai.mit.edu,
27 or (US mail) as Mike Haertel c/o Free Software Foundation. */
33 #ifdef _MALLOC_INTERNAL
/* Pick the generic-pointer type: `void *' for C++/ANSI-C compilers,
   `char *' for pre-ANSI compilers.  */
#if ((defined __cplusplus || (defined (__STDC__) && __STDC__) \
      || defined STDC_HEADERS || defined PROTOTYPES) \
     && ! defined (BROKEN_PROTOTYPES))
#define __ptr_t void *
#else /* Not C++ or ANSI C.  */
#define __ptr_t char *
#endif /* C++ or ANSI C.  */
53 #if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
57 #define memset(s, zero, n) bzero ((s), (n))
60 #define memcpy(d, s, n) bcopy ((s), (d), (n))
75 #endif /* _MALLOC_INTERNAL. */
85 #define __malloc_size_t size_t
86 #define __malloc_ptrdiff_t ptrdiff_t
91 #define __malloc_size_t __SIZE_TYPE__
94 #ifndef __malloc_size_t
95 #define __malloc_size_t unsigned int
97 #define __malloc_ptrdiff_t int
104 #ifndef FREE_RETURN_TYPE
105 #define FREE_RETURN_TYPE void
109 /* Allocate SIZE bytes of memory. */
110 extern __ptr_t malloc
PP ((__malloc_size_t __size
));
111 /* Re-allocate the previously allocated block
112 in __ptr_t, making the new block SIZE bytes long. */
113 extern __ptr_t realloc
PP ((__ptr_t __ptr
, __malloc_size_t __size
));
114 /* Allocate NMEMB elements of SIZE bytes each, all initialized to 0. */
115 extern __ptr_t calloc
PP ((__malloc_size_t __nmemb
, __malloc_size_t __size
));
116 /* Free a block allocated by `malloc', `realloc' or `calloc'. */
117 extern FREE_RETURN_TYPE free
PP ((__ptr_t __ptr
));
119 /* Allocate SIZE bytes allocated to ALIGNMENT bytes. */
120 #if ! (defined (_MALLOC_INTERNAL) && __DJGPP__ - 0 == 1) /* Avoid conflict. */
121 extern __ptr_t memalign
PP ((__malloc_size_t __alignment
,
122 __malloc_size_t __size
));
125 /* Allocate SIZE bytes on a page boundary. */
126 #if ! (defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC))
127 extern __ptr_t valloc
PP ((__malloc_size_t __size
));
131 #ifdef _MALLOC_INTERNAL
/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */

/* Number of bits in an `int'; used to choose block parameters.  */
#define INT_BIT (CHAR_BIT * sizeof(int))
/* Base-2 log of the block size: 4096-byte blocks when `int' is wider
   than 16 bits, 512-byte blocks otherwise.  */
#define BLOCKLOG (INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE (1 << BLOCKLOG)
/* Round a byte count SIZE up to a whole number of blocks.  */
#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP (INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS 8
151 /* Data structure giving per-block information. */
154 /* Heap information for a busy block. */
157 /* Zero for a large (multiblock) object, or positive giving the
158 logarithm to the base two of the fragment size. */
164 __malloc_size_t nfree
; /* Free frags in a fragmented block. */
165 __malloc_size_t first
; /* First free fragment of the block. */
167 /* For a large object, in its first block, this has the number
168 of blocks in the object. In the other blocks, this has a
169 negative number which says how far back the first block is. */
170 __malloc_ptrdiff_t size
;
173 /* Heap information for a free block
174 (that may be the first of a free cluster). */
177 __malloc_size_t size
; /* Size (in blocks) of a free cluster. */
178 __malloc_size_t next
; /* Index of next free cluster. */
179 __malloc_size_t prev
; /* Index of previous free cluster. */
183 /* Pointer to first block of the heap. */
184 extern char *_heapbase
;
186 /* Table indexed by block number giving per-block information. */
187 extern malloc_info
*_heapinfo
;
189 /* Address to block number and vice versa. */
190 #define BLOCK(A) (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
191 #define ADDRESS(B) ((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
193 /* Current search index for the heap table. */
194 extern __malloc_size_t _heapindex
;
196 /* Limit of valid info table indices. */
197 extern __malloc_size_t _heaplimit
;
199 /* Doubly linked lists of free fragments. */
206 /* Free list headers for each fragment size. */
207 extern struct list _fraghead
[];
209 /* List of blocks allocated with `memalign' (or `valloc'). */
212 struct alignlist
*next
;
213 __ptr_t aligned
; /* The address that memalign returned. */
214 __ptr_t exact
; /* The address that malloc returned. */
216 extern struct alignlist
*_aligned_blocks
;
218 /* Instrumentation. */
219 extern __malloc_size_t _chunks_used
;
220 extern __malloc_size_t _bytes_used
;
221 extern __malloc_size_t _chunks_free
;
222 extern __malloc_size_t _bytes_free
;
224 /* Internal versions of `malloc', `realloc', and `free'
225 used when these functions need to call each other.
226 They are the same but don't call the hooks. */
227 extern __ptr_t _malloc_internal
PP ((__malloc_size_t __size
));
228 extern __ptr_t _realloc_internal
PP ((__ptr_t __ptr
, __malloc_size_t __size
));
229 extern void _free_internal
PP ((__ptr_t __ptr
));
231 #endif /* _MALLOC_INTERNAL. */
233 /* Given an address in the middle of a malloc'd object,
234 return the address of the beginning of the object. */
235 extern __ptr_t malloc_find_object_address
PP ((__ptr_t __ptr
));
237 /* Underlying allocation function; successive calls should
238 return contiguous pieces of memory. */
239 extern __ptr_t (*__morecore
) PP ((__malloc_ptrdiff_t __size
));
241 /* Default value of `__morecore'. */
242 extern __ptr_t __default_morecore
PP ((__malloc_ptrdiff_t __size
));
244 /* If not NULL, this function is called after each time
245 `__morecore' is called to increase the data size. */
246 extern void (*__after_morecore_hook
) PP ((void));
248 /* Number of extra blocks to get each time we ask for more core.
249 This reduces the frequency of calling `(*__morecore)'. */
250 extern __malloc_size_t __malloc_extra_blocks
;
252 /* Nonzero if `malloc' has been called and done its initialization. */
253 extern int __malloc_initialized
;
254 /* Function called to initialize malloc data structures. */
255 extern int __malloc_initialize
PP ((void));
257 /* Hooks for debugging versions. */
258 extern void (*__malloc_initialize_hook
) PP ((void));
259 extern void (*__free_hook
) PP ((__ptr_t __ptr
));
260 extern __ptr_t (*__malloc_hook
) PP ((__malloc_size_t __size
));
261 extern __ptr_t (*__realloc_hook
) PP ((__ptr_t __ptr
, __malloc_size_t __size
));
262 extern __ptr_t (*__memalign_hook
) PP ((__malloc_size_t __size
,
263 __malloc_size_t __alignment
));
265 /* Return values for `mprobe': these are the kinds of inconsistencies that
266 `mcheck' enables detection of. */
269 MCHECK_DISABLED
= -1, /* Consistency checking is not turned on. */
270 MCHECK_OK
, /* Block is fine. */
271 MCHECK_FREE
, /* Block freed twice. */
272 MCHECK_HEAD
, /* Memory before the block was clobbered. */
273 MCHECK_TAIL
/* Memory after the block was clobbered. */
276 /* Activate a standard collection of debugging hooks. This must be called
277 before `malloc' is ever called. ABORTFUNC is called with an error code
278 (see enum above) when an inconsistency is detected. If ABORTFUNC is
279 null, the standard function prints on stderr and then calls `abort'. */
280 extern int mcheck
PP ((void (*__abortfunc
) PP ((enum mcheck_status
))));
282 /* Check for aberrations in a particular malloc'd block. You must have
283 called `mcheck' already. These are the same checks that `mcheck' does
284 when you free or reallocate a block. */
285 extern enum mcheck_status mprobe
PP ((__ptr_t __ptr
));
287 /* Activate a standard collection of tracing hooks. */
288 extern void mtrace
PP ((void));
289 extern void muntrace
PP ((void));
291 /* Statistics available to the user. */
294 __malloc_size_t bytes_total
; /* Total size of the heap. */
295 __malloc_size_t chunks_used
; /* Chunks allocated by the user. */
296 __malloc_size_t bytes_used
; /* Byte total of user-allocated chunks. */
297 __malloc_size_t chunks_free
; /* Chunks in the free list. */
298 __malloc_size_t bytes_free
; /* Byte total of chunks in the free list. */
301 /* Pick up the current statistics. */
302 extern struct mstats mstats
PP ((void));
304 /* Call WARNFUN with a warning message when memory usage is high. */
305 extern void memory_warnings
PP ((__ptr_t __start
,
306 void (*__warnfun
) PP ((const char *))));
309 /* Relocating allocator. */
311 /* Allocate SIZE bytes, and store the address in *HANDLEPTR. */
312 extern __ptr_t r_alloc
PP ((__ptr_t
*__handleptr
, __malloc_size_t __size
));
314 /* Free the storage allocated in HANDLEPTR. */
315 extern void r_alloc_free
PP ((__ptr_t
*__handleptr
));
317 /* Adjust the block at HANDLEPTR to be SIZE bytes long. */
318 extern __ptr_t r_re_alloc
PP ((__ptr_t
*__handleptr
, __malloc_size_t __size
));
325 #endif /* malloc.h */
326 /* Memory allocator `malloc'.
327 Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
328 Written May 1989 by Mike Haertel.
330 This library is free software; you can redistribute it and/or
331 modify it under the terms of the GNU Library General Public License as
332 published by the Free Software Foundation; either version 2 of the
333 License, or (at your option) any later version.
335 This library is distributed in the hope that it will be useful,
336 but WITHOUT ANY WARRANTY; without even the implied warranty of
337 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
338 Library General Public License for more details.
340 You should have received a copy of the GNU Library General Public
341 License along with this library; see the file COPYING.LIB. If
342 not, write to the Free Software Foundation, Inc., 51 Franklin Street,
343 Fifth Floor, Boston, MA 02110-1301, USA.
345 The author may be reached (Email) at the address mike@ai.mit.edu,
346 or (US mail) as Mike Haertel c/o Free Software Foundation. */
348 #ifndef _MALLOC_INTERNAL
349 #define _MALLOC_INTERNAL
354 /* How to really get more memory. */
356 extern __ptr_t bss_sbrk
PP ((ptrdiff_t __size
));
357 extern int bss_sbrk_did_unexec
;
359 __ptr_t (*__morecore
) PP ((ptrdiff_t __size
)) = __default_morecore
;
361 /* Debugging hook for `malloc'. */
362 __ptr_t (*__malloc_hook
) PP ((__malloc_size_t __size
));
364 /* Pointer to the base of the first block. */
367 /* Block information table. Allocated with align/__free (not malloc/free). */
368 malloc_info
*_heapinfo
;
370 /* Number of info entries. */
371 static __malloc_size_t heapsize
;
373 /* Search index in the info table. */
374 __malloc_size_t _heapindex
;
376 /* Limit of valid info table indices. */
377 __malloc_size_t _heaplimit
;
379 /* Free lists for each fragment size. */
380 struct list _fraghead
[BLOCKLOG
];
382 /* Instrumentation. */
383 __malloc_size_t _chunks_used
;
384 __malloc_size_t _bytes_used
;
385 __malloc_size_t _chunks_free
;
386 __malloc_size_t _bytes_free
;
388 /* Are you experienced? */
389 int __malloc_initialized
;
391 __malloc_size_t __malloc_extra_blocks
;
393 void (*__malloc_initialize_hook
) PP ((void));
394 void (*__after_morecore_hook
) PP ((void));
396 #if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE
398 /* Some code for hunting a bug writing into _heapinfo.
400 Call this macro with argument PROT non-zero to protect internal
401 malloc state against writing to it, call it with a zero argument to
402 make it readable and writable.
404 Note that this only works if BLOCKSIZE == page size, which is
405 the case on the i386. */
407 #include <sys/types.h>
408 #include <sys/mman.h>
410 static int state_protected_p
;
411 static __malloc_size_t last_state_size
;
412 static malloc_info
*last_heapinfo
;
415 protect_malloc_state (protect_p
)
418 /* If _heapinfo has been relocated, make sure its old location
419 isn't left read-only; it will be reused by malloc. */
420 if (_heapinfo
!= last_heapinfo
422 && state_protected_p
)
423 mprotect (last_heapinfo
, last_state_size
, PROT_READ
| PROT_WRITE
);
425 last_state_size
= _heaplimit
* sizeof *_heapinfo
;
426 last_heapinfo
= _heapinfo
;
428 if (protect_p
!= state_protected_p
)
430 state_protected_p
= protect_p
;
431 if (mprotect (_heapinfo
, last_state_size
,
432 protect_p
? PROT_READ
: PROT_READ
| PROT_WRITE
) != 0)
437 #define PROTECT_MALLOC_STATE(PROT) protect_malloc_state(PROT)
440 #define PROTECT_MALLOC_STATE(PROT) /* empty */
444 /* Aligned allocation. */
445 static __ptr_t align
PP ((__malloc_size_t
));
448 __malloc_size_t size
;
451 unsigned long int adj
;
453 /* align accepts an unsigned argument, but __morecore accepts a
454 signed one. This could lead to trouble if SIZE overflows a
455 signed int type accepted by __morecore. We just punt in that
456 case, since they are requesting a ludicrous amount anyway. */
457 if ((__malloc_ptrdiff_t
)size
< 0)
460 result
= (*__morecore
) (size
);
461 adj
= (unsigned long int) ((unsigned long int) ((char *) result
-
462 (char *) NULL
)) % BLOCKSIZE
;
466 adj
= BLOCKSIZE
- adj
;
467 new = (*__morecore
) (adj
);
468 result
= (char *) result
+ adj
;
471 if (__after_morecore_hook
)
472 (*__after_morecore_hook
) ();
477 /* Get SIZE bytes, if we can get them starting at END.
478 Return the address of the space we got.
479 If we cannot get space at END, fail and return 0. */
480 static __ptr_t get_contiguous_space
PP ((__malloc_ptrdiff_t
, __ptr_t
));
482 get_contiguous_space (size
, position
)
483 __malloc_ptrdiff_t size
;
489 before
= (*__morecore
) (0);
490 /* If we can tell in advance that the break is at the wrong place,
492 if (before
!= position
)
495 /* Allocate SIZE bytes and get the address of them. */
496 after
= (*__morecore
) (size
);
500 /* It was not contiguous--reject it. */
501 if (after
!= position
)
503 (*__morecore
) (- size
);
511 /* This is called when `_heapinfo' and `heapsize' have just
512 been set to describe a new info table. Set up the table
513 to describe itself and account for it in the statistics. */
514 static void register_heapinfo
PP ((void));
521 __malloc_size_t block
, blocks
;
523 block
= BLOCK (_heapinfo
);
524 blocks
= BLOCKIFY (heapsize
* sizeof (malloc_info
));
526 /* Account for the _heapinfo block itself in the statistics. */
527 _bytes_used
+= blocks
* BLOCKSIZE
;
530 /* Describe the heapinfo block itself in the heapinfo. */
531 _heapinfo
[block
].busy
.type
= 0;
532 _heapinfo
[block
].busy
.info
.size
= blocks
;
533 /* Leave back-pointers for malloc_find_address. */
535 _heapinfo
[block
+ blocks
].busy
.info
.size
= -blocks
;
538 /* Set everything up and remember that we have. */
540 __malloc_initialize ()
542 if (__malloc_initialized
)
549 if (__malloc_initialize_hook
)
550 (*__malloc_initialize_hook
) ();
552 heapsize
= HEAP
/ BLOCKSIZE
;
553 _heapinfo
= (malloc_info
*) align (heapsize
* sizeof (malloc_info
));
554 if (_heapinfo
== NULL
)
556 memset (_heapinfo
, 0, heapsize
* sizeof (malloc_info
));
557 _heapinfo
[0].free
.size
= 0;
558 _heapinfo
[0].free
.next
= _heapinfo
[0].free
.prev
= 0;
560 _heapbase
= (char *) _heapinfo
;
561 _heaplimit
= BLOCK (_heapbase
+ heapsize
* sizeof (malloc_info
));
563 register_heapinfo ();
565 __malloc_initialized
= 1;
566 PROTECT_MALLOC_STATE (1);
570 static int morecore_recursing
;
572 /* Get neatly aligned memory, initializing or
573 growing the heap info table as necessary. */
574 static __ptr_t morecore
PP ((__malloc_size_t
));
577 __malloc_size_t size
;
580 malloc_info
*newinfo
, *oldinfo
;
581 __malloc_size_t newsize
;
583 if (morecore_recursing
)
584 /* Avoid recursion. The caller will know how to handle a null return. */
587 result
= align (size
);
591 PROTECT_MALLOC_STATE (0);
593 /* Check if we need to grow the info table. */
594 if ((__malloc_size_t
) BLOCK ((char *) result
+ size
) > heapsize
)
596 /* Calculate the new _heapinfo table size. We do not account for the
597 added blocks in the table itself, as we hope to place them in
598 existing free space, which is already covered by part of the
603 while ((__malloc_size_t
) BLOCK ((char *) result
+ size
) > newsize
);
605 /* We must not reuse existing core for the new info table when called
606 from realloc in the case of growing a large block, because the
607 block being grown is momentarily marked as free. In this case
608 _heaplimit is zero so we know not to reuse space for internal
612 /* First try to allocate the new info table in core we already
613 have, in the usual way using realloc. If realloc cannot
614 extend it in place or relocate it to existing sufficient core,
615 we will get called again, and the code above will notice the
616 `morecore_recursing' flag and return null. */
617 int save
= errno
; /* Don't want to clobber errno with ENOMEM. */
618 morecore_recursing
= 1;
619 newinfo
= (malloc_info
*) _realloc_internal
620 (_heapinfo
, newsize
* sizeof (malloc_info
));
621 morecore_recursing
= 0;
626 /* We found some space in core, and realloc has put the old
627 table's blocks on the free list. Now zero the new part
628 of the table and install the new table location. */
629 memset (&newinfo
[heapsize
], 0,
630 (newsize
- heapsize
) * sizeof (malloc_info
));
637 /* Allocate new space for the malloc info table. */
640 newinfo
= (malloc_info
*) align (newsize
* sizeof (malloc_info
));
645 (*__morecore
) (-size
);
649 /* Is it big enough to record status for its own space?
651 if ((__malloc_size_t
) BLOCK ((char *) newinfo
652 + newsize
* sizeof (malloc_info
))
656 /* Must try again. First give back most of what we just got. */
657 (*__morecore
) (- newsize
* sizeof (malloc_info
));
661 /* Copy the old table to the beginning of the new,
662 and zero the rest of the new table. */
663 memcpy (newinfo
, _heapinfo
, heapsize
* sizeof (malloc_info
));
664 memset (&newinfo
[heapsize
], 0,
665 (newsize
- heapsize
) * sizeof (malloc_info
));
670 register_heapinfo ();
672 /* Reset _heaplimit so _free_internal never decides
673 it can relocate or resize the info table. */
675 _free_internal (oldinfo
);
676 PROTECT_MALLOC_STATE (0);
678 /* The new heap limit includes the new table just allocated. */
679 _heaplimit
= BLOCK ((char *) newinfo
+ heapsize
* sizeof (malloc_info
));
684 _heaplimit
= BLOCK ((char *) result
+ size
);
688 /* Allocate memory from the heap. */
690 _malloc_internal (size
)
691 __malloc_size_t size
;
694 __malloc_size_t block
, blocks
, lastblocks
, start
;
695 register __malloc_size_t i
;
698 /* ANSI C allows `malloc (0)' to either return NULL, or to return a
699 valid address you can realloc and free (though not dereference).
701 It turns out that some extant code (sunrpc, at least Ultrix's version)
702 expects `malloc (0)' to return non-NULL and breaks otherwise.
710 PROTECT_MALLOC_STATE (0);
712 if (size
< sizeof (struct list
))
713 size
= sizeof (struct list
);
715 #ifdef SUNOS_LOCALTIME_BUG
720 /* Determine the allocation policy based on the request size. */
721 if (size
<= BLOCKSIZE
/ 2)
723 /* Small allocation to receive a fragment of a block.
724 Determine the logarithm to base two of the fragment size. */
725 register __malloc_size_t log
= 1;
727 while ((size
/= 2) != 0)
730 /* Look in the fragment lists for a
731 free fragment of the desired size. */
732 next
= _fraghead
[log
].next
;
735 /* There are free fragments of this size.
736 Pop a fragment out of the fragment list and return it.
737 Update the block's nfree and first counters. */
738 result
= (__ptr_t
) next
;
739 next
->prev
->next
= next
->next
;
740 if (next
->next
!= NULL
)
741 next
->next
->prev
= next
->prev
;
742 block
= BLOCK (result
);
743 if (--_heapinfo
[block
].busy
.info
.frag
.nfree
!= 0)
744 _heapinfo
[block
].busy
.info
.frag
.first
= (unsigned long int)
745 ((unsigned long int) ((char *) next
->next
- (char *) NULL
)
748 /* Update the statistics. */
750 _bytes_used
+= 1 << log
;
752 _bytes_free
-= 1 << log
;
756 /* No free fragments of the desired size, so get a new block
757 and break it into fragments, returning the first. */
758 #ifdef GC_MALLOC_CHECK
759 result
= _malloc_internal (BLOCKSIZE
);
760 PROTECT_MALLOC_STATE (0);
762 result
= malloc (BLOCKSIZE
);
766 PROTECT_MALLOC_STATE (1);
770 /* Link all fragments but the first into the free list. */
771 next
= (struct list
*) ((char *) result
+ (1 << log
));
773 next
->prev
= &_fraghead
[log
];
774 _fraghead
[log
].next
= next
;
776 for (i
= 2; i
< (__malloc_size_t
) (BLOCKSIZE
>> log
); ++i
)
778 next
= (struct list
*) ((char *) result
+ (i
<< log
));
779 next
->next
= _fraghead
[log
].next
;
780 next
->prev
= &_fraghead
[log
];
781 next
->prev
->next
= next
;
782 next
->next
->prev
= next
;
785 /* Initialize the nfree and first counters for this block. */
786 block
= BLOCK (result
);
787 _heapinfo
[block
].busy
.type
= log
;
788 _heapinfo
[block
].busy
.info
.frag
.nfree
= i
- 1;
789 _heapinfo
[block
].busy
.info
.frag
.first
= i
- 1;
791 _chunks_free
+= (BLOCKSIZE
>> log
) - 1;
792 _bytes_free
+= BLOCKSIZE
- (1 << log
);
793 _bytes_used
-= BLOCKSIZE
- (1 << log
);
798 /* Large allocation to receive one or more blocks.
799 Search the free list in a circle starting at the last place visited.
800 If we loop completely around without finding a large enough
801 space we will have to get more memory from the system. */
802 blocks
= BLOCKIFY (size
);
803 start
= block
= _heapindex
;
804 while (_heapinfo
[block
].free
.size
< blocks
)
806 block
= _heapinfo
[block
].free
.next
;
809 /* Need to get more from the system. Get a little extra. */
810 __malloc_size_t wantblocks
= blocks
+ __malloc_extra_blocks
;
811 block
= _heapinfo
[0].free
.prev
;
812 lastblocks
= _heapinfo
[block
].free
.size
;
813 /* Check to see if the new core will be contiguous with the
814 final free block; if so we don't need to get as much. */
815 if (_heaplimit
!= 0 && block
+ lastblocks
== _heaplimit
&&
816 /* We can't do this if we will have to make the heap info
817 table bigger to accommodate the new space. */
818 block
+ wantblocks
<= heapsize
&&
819 get_contiguous_space ((wantblocks
- lastblocks
) * BLOCKSIZE
,
820 ADDRESS (block
+ lastblocks
)))
822 /* We got it contiguously. Which block we are extending
823 (the `final free block' referred to above) might have
824 changed, if it got combined with a freed info table. */
825 block
= _heapinfo
[0].free
.prev
;
826 _heapinfo
[block
].free
.size
+= (wantblocks
- lastblocks
);
827 _bytes_free
+= (wantblocks
- lastblocks
) * BLOCKSIZE
;
828 _heaplimit
+= wantblocks
- lastblocks
;
831 result
= morecore (wantblocks
* BLOCKSIZE
);
834 block
= BLOCK (result
);
835 /* Put the new block at the end of the free list. */
836 _heapinfo
[block
].free
.size
= wantblocks
;
837 _heapinfo
[block
].free
.prev
= _heapinfo
[0].free
.prev
;
838 _heapinfo
[block
].free
.next
= 0;
839 _heapinfo
[0].free
.prev
= block
;
840 _heapinfo
[_heapinfo
[block
].free
.prev
].free
.next
= block
;
842 /* Now loop to use some of that block for this allocation. */
846 /* At this point we have found a suitable free list entry.
847 Figure out how to remove what we need from the list. */
848 result
= ADDRESS (block
);
849 if (_heapinfo
[block
].free
.size
> blocks
)
851 /* The block we found has a bit left over,
852 so relink the tail end back into the free list. */
853 _heapinfo
[block
+ blocks
].free
.size
854 = _heapinfo
[block
].free
.size
- blocks
;
855 _heapinfo
[block
+ blocks
].free
.next
856 = _heapinfo
[block
].free
.next
;
857 _heapinfo
[block
+ blocks
].free
.prev
858 = _heapinfo
[block
].free
.prev
;
859 _heapinfo
[_heapinfo
[block
].free
.prev
].free
.next
860 = _heapinfo
[_heapinfo
[block
].free
.next
].free
.prev
861 = _heapindex
= block
+ blocks
;
865 /* The block exactly matches our requirements,
866 so just remove it from the list. */
867 _heapinfo
[_heapinfo
[block
].free
.next
].free
.prev
868 = _heapinfo
[block
].free
.prev
;
869 _heapinfo
[_heapinfo
[block
].free
.prev
].free
.next
870 = _heapindex
= _heapinfo
[block
].free
.next
;
874 _heapinfo
[block
].busy
.type
= 0;
875 _heapinfo
[block
].busy
.info
.size
= blocks
;
877 _bytes_used
+= blocks
* BLOCKSIZE
;
878 _bytes_free
-= blocks
* BLOCKSIZE
;
880 /* Mark all the blocks of the object just allocated except for the
881 first with a negative number so you can find the first block by
882 adding that adjustment. */
884 _heapinfo
[block
+ blocks
].busy
.info
.size
= -blocks
;
887 PROTECT_MALLOC_STATE (1);
893 __malloc_size_t size
;
895 if (!__malloc_initialized
&& !__malloc_initialize ())
898 return (__malloc_hook
!= NULL
? *__malloc_hook
: _malloc_internal
) (size
);
903 /* On some ANSI C systems, some libc functions call _malloc, _free
904 and _realloc. Make them use the GNU functions. */
908 __malloc_size_t size
;
910 return malloc (size
);
923 __malloc_size_t size
;
925 return realloc (ptr
, size
);
929 /* Free a block of memory allocated by `malloc'.
930 Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
931 Written May 1989 by Mike Haertel.
933 This library is free software; you can redistribute it and/or
934 modify it under the terms of the GNU Library General Public License as
935 published by the Free Software Foundation; either version 2 of the
936 License, or (at your option) any later version.
938 This library is distributed in the hope that it will be useful,
939 but WITHOUT ANY WARRANTY; without even the implied warranty of
940 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
941 Library General Public License for more details.
943 You should have received a copy of the GNU Library General Public
944 License along with this library; see the file COPYING.LIB. If
945 not, write to the Free Software Foundation, Inc., 51 Franklin Street,
946 Fifth Floor, Boston, MA 02110-1301, USA.
948 The author may be reached (Email) at the address mike@ai.mit.edu,
949 or (US mail) as Mike Haertel c/o Free Software Foundation. */
951 #ifndef _MALLOC_INTERNAL
952 #define _MALLOC_INTERNAL
957 /* Cope with systems lacking `memmove'. */
959 #if (defined (MEMMOVE_MISSING) || \
960 !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))
962 #undef __malloc_safe_bcopy
963 #define __malloc_safe_bcopy safe_bcopy
965 /* This function is defined in realloc.c. */
966 extern void __malloc_safe_bcopy
PP ((__ptr_t
, __ptr_t
, __malloc_size_t
));
967 #define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))
972 /* Debugging hook for free. */
973 void (*__free_hook
) PP ((__ptr_t __ptr
));
975 /* List of blocks allocated by memalign. */
976 struct alignlist
*_aligned_blocks
= NULL
;
978 /* Return memory to the heap.
979 Like `free' but don't call a __free_hook if there is one. */
985 __malloc_size_t block
, blocks
;
986 register __malloc_size_t i
;
987 struct list
*prev
, *next
;
989 const __malloc_size_t lesscore_threshold
990 /* Threshold of free space at which we will return some to the system. */
991 = FINAL_FREE_BLOCKS
+ 2 * __malloc_extra_blocks
;
993 register struct alignlist
*l
;
998 PROTECT_MALLOC_STATE (0);
1000 for (l
= _aligned_blocks
; l
!= NULL
; l
= l
->next
)
1001 if (l
->aligned
== ptr
)
1003 l
->aligned
= NULL
; /* Mark the slot in the list as free. */
1008 block
= BLOCK (ptr
);
1010 type
= _heapinfo
[block
].busy
.type
;
1014 /* Get as many statistics as early as we can. */
1016 _bytes_used
-= _heapinfo
[block
].busy
.info
.size
* BLOCKSIZE
;
1017 _bytes_free
+= _heapinfo
[block
].busy
.info
.size
* BLOCKSIZE
;
1019 /* Find the free cluster previous to this one in the free list.
1020 Start searching at the last block referenced; this may benefit
1021 programs with locality of allocation. */
1025 i
= _heapinfo
[i
].free
.prev
;
1029 i
= _heapinfo
[i
].free
.next
;
1030 while (i
> 0 && i
< block
);
1031 i
= _heapinfo
[i
].free
.prev
;
1034 /* Determine how to link this block into the free list. */
1035 if (block
== i
+ _heapinfo
[i
].free
.size
)
1037 /* Coalesce this block with its predecessor. */
1038 _heapinfo
[i
].free
.size
+= _heapinfo
[block
].busy
.info
.size
;
1043 /* Really link this block back into the free list. */
1044 _heapinfo
[block
].free
.size
= _heapinfo
[block
].busy
.info
.size
;
1045 _heapinfo
[block
].free
.next
= _heapinfo
[i
].free
.next
;
1046 _heapinfo
[block
].free
.prev
= i
;
1047 _heapinfo
[i
].free
.next
= block
;
1048 _heapinfo
[_heapinfo
[block
].free
.next
].free
.prev
= block
;
1052 /* Now that the block is linked in, see if we can coalesce it
1053 with its successor (by deleting its successor from the list
1054 and adding in its size). */
1055 if (block
+ _heapinfo
[block
].free
.size
== _heapinfo
[block
].free
.next
)
1057 _heapinfo
[block
].free
.size
1058 += _heapinfo
[_heapinfo
[block
].free
.next
].free
.size
;
1059 _heapinfo
[block
].free
.next
1060 = _heapinfo
[_heapinfo
[block
].free
.next
].free
.next
;
1061 _heapinfo
[_heapinfo
[block
].free
.next
].free
.prev
= block
;
1065 /* How many trailing free blocks are there now? */
1066 blocks
= _heapinfo
[block
].free
.size
;
1068 /* Where is the current end of accessible core? */
1069 curbrk
= (*__morecore
) (0);
1071 if (_heaplimit
!= 0 && curbrk
== ADDRESS (_heaplimit
))
1073 /* The end of the malloc heap is at the end of accessible core.
1074 It's possible that moving _heapinfo will allow us to
1075 return some space to the system. */
1077 __malloc_size_t info_block
= BLOCK (_heapinfo
);
1078 __malloc_size_t info_blocks
= _heapinfo
[info_block
].busy
.info
.size
;
1079 __malloc_size_t prev_block
= _heapinfo
[block
].free
.prev
;
1080 __malloc_size_t prev_blocks
= _heapinfo
[prev_block
].free
.size
;
1081 __malloc_size_t next_block
= _heapinfo
[block
].free
.next
;
1082 __malloc_size_t next_blocks
= _heapinfo
[next_block
].free
.size
;
1084 if (/* Win if this block being freed is last in core, the info table
1085 is just before it, the previous free block is just before the
1086 info table, and the two free blocks together form a useful
1087 amount to return to the system. */
1088 (block
+ blocks
== _heaplimit
&&
1089 info_block
+ info_blocks
== block
&&
1090 prev_block
!= 0 && prev_block
+ prev_blocks
== info_block
&&
1091 blocks
+ prev_blocks
>= lesscore_threshold
) ||
1092 /* Nope, not the case. We can also win if this block being
1093 freed is just before the info table, and the table extends
1094 to the end of core or is followed only by a free block,
1095 and the total free space is worth returning to the system. */
1096 (block
+ blocks
== info_block
&&
1097 ((info_block
+ info_blocks
== _heaplimit
&&
1098 blocks
>= lesscore_threshold
) ||
1099 (info_block
+ info_blocks
== next_block
&&
1100 next_block
+ next_blocks
== _heaplimit
&&
1101 blocks
+ next_blocks
>= lesscore_threshold
)))
1104 malloc_info
*newinfo
;
1105 __malloc_size_t oldlimit
= _heaplimit
;
1107 /* Free the old info table, clearing _heaplimit to avoid
1108 recursion into this code. We don't want to return the
1109 table's blocks to the system before we have copied them to
1110 the new location. */
1112 _free_internal (_heapinfo
);
1113 _heaplimit
= oldlimit
;
1115 /* Tell malloc to search from the beginning of the heap for
1116 free blocks, so it doesn't reuse the ones just freed. */
1119 /* Allocate new space for the info table and move its data. */
1120 newinfo
= (malloc_info
*) _malloc_internal (info_blocks
1122 PROTECT_MALLOC_STATE (0);
1123 memmove (newinfo
, _heapinfo
, info_blocks
* BLOCKSIZE
);
1124 _heapinfo
= newinfo
;
1126 /* We should now have coalesced the free block with the
1127 blocks freed from the old info table. Examine the entire
1128 trailing free block to decide below whether to return some
1130 block
= _heapinfo
[0].free
.prev
;
1131 blocks
= _heapinfo
[block
].free
.size
;
1134 /* Now see if we can return stuff to the system. */
1135 if (block
+ blocks
== _heaplimit
&& blocks
>= lesscore_threshold
)
1137 register __malloc_size_t bytes
= blocks
* BLOCKSIZE
;
1138 _heaplimit
-= blocks
;
1139 (*__morecore
) (-bytes
);
1140 _heapinfo
[_heapinfo
[block
].free
.prev
].free
.next
1141 = _heapinfo
[block
].free
.next
;
1142 _heapinfo
[_heapinfo
[block
].free
.next
].free
.prev
1143 = _heapinfo
[block
].free
.prev
;
1144 block
= _heapinfo
[block
].free
.prev
;
1146 _bytes_free
-= bytes
;
1150 /* Set the next search to begin at this block. */
1155 /* Do some of the statistics. */
1157 _bytes_used
-= 1 << type
;
1159 _bytes_free
+= 1 << type
;
1161 /* Get the address of the first free fragment in this block. */
1162 prev
= (struct list
*) ((char *) ADDRESS (block
) +
1163 (_heapinfo
[block
].busy
.info
.frag
.first
<< type
));
1165 if (_heapinfo
[block
].busy
.info
.frag
.nfree
== (BLOCKSIZE
>> type
) - 1)
1167 /* If all fragments of this block are free, remove them
1168 from the fragment list and free the whole block. */
1170 for (i
= 1; i
< (__malloc_size_t
) (BLOCKSIZE
>> type
); ++i
)
1172 prev
->prev
->next
= next
;
1174 next
->prev
= prev
->prev
;
1175 _heapinfo
[block
].busy
.type
= 0;
1176 _heapinfo
[block
].busy
.info
.size
= 1;
1178 /* Keep the statistics accurate. */
1180 _bytes_used
+= BLOCKSIZE
;
1181 _chunks_free
-= BLOCKSIZE
>> type
;
1182 _bytes_free
-= BLOCKSIZE
;
1184 #ifdef GC_MALLOC_CHECK
1185 _free_internal (ADDRESS (block
));
1187 free (ADDRESS (block
));
1190 else if (_heapinfo
[block
].busy
.info
.frag
.nfree
!= 0)
1192 /* If some fragments of this block are free, link this
1193 fragment into the fragment list after the first free
1194 fragment of this block. */
1195 next
= (struct list
*) ptr
;
1196 next
->next
= prev
->next
;
1199 if (next
->next
!= NULL
)
1200 next
->next
->prev
= next
;
1201 ++_heapinfo
[block
].busy
.info
.frag
.nfree
;
1205 /* No fragments of this block are free, so link this
1206 fragment into the fragment list and announce that
1207 it is the first free fragment of this block. */
1208 prev
= (struct list
*) ptr
;
1209 _heapinfo
[block
].busy
.info
.frag
.nfree
= 1;
1210 _heapinfo
[block
].busy
.info
.frag
.first
= (unsigned long int)
1211 ((unsigned long int) ((char *) ptr
- (char *) NULL
)
1212 % BLOCKSIZE
>> type
);
1213 prev
->next
= _fraghead
[type
].next
;
1214 prev
->prev
= &_fraghead
[type
];
1215 prev
->prev
->next
= prev
;
1216 if (prev
->next
!= NULL
)
1217 prev
->next
->prev
= prev
;
1222 PROTECT_MALLOC_STATE (1);
1225 /* Return memory to the heap. */
1231 if (__free_hook
!= NULL
)
1232 (*__free_hook
) (ptr
);
1234 _free_internal (ptr
);
1237 /* Define the `cfree' alias for `free'. */
1239 weak_alias (free
, cfree
)
1248 /* Change the size of a block allocated by `malloc'.
1249 Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
1250 Written May 1989 by Mike Haertel.
1252 This library is free software; you can redistribute it and/or
1253 modify it under the terms of the GNU Library General Public License as
1254 published by the Free Software Foundation; either version 2 of the
1255 License, or (at your option) any later version.
1257 This library is distributed in the hope that it will be useful,
1258 but WITHOUT ANY WARRANTY; without even the implied warranty of
1259 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1260 Library General Public License for more details.
1262 You should have received a copy of the GNU Library General Public
1263 License along with this library; see the file COPYING.LIB. If
1264 not, write to the Free Software Foundation, Inc., 51 Franklin Street,
1265 Fifth Floor, Boston, MA 02110-1301, USA.
1267 The author may be reached (Email) at the address mike@ai.mit.edu,
1268 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1270 #ifndef _MALLOC_INTERNAL
1271 #define _MALLOC_INTERNAL
1277 /* Cope with systems lacking `memmove'. */
1278 #if (defined (MEMMOVE_MISSING) || \
1279 !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))
1282 #undef __malloc_safe_bcopy
1283 #define __malloc_safe_bcopy safe_bcopy
1286 /* Snarfed directly from Emacs src/dispnew.c:
1287 XXX Should use system bcopy if it handles overlap. */
1289 /* Like bcopy except never gets confused by overlap. */
1292 __malloc_safe_bcopy (afrom
, ato
, size
)
1295 __malloc_size_t size
;
1297 char *from
= afrom
, *to
= ato
;
1299 if (size
<= 0 || from
== to
)
1302 /* If the source and destination don't overlap, then bcopy can
1303 handle it. If they do overlap, but the destination is lower in
1304 memory than the source, we'll assume bcopy can handle that. */
1305 if (to
< from
|| from
+ size
<= to
)
1306 bcopy (from
, to
, size
);
1308 /* Otherwise, we'll copy from the end. */
1311 register char *endf
= from
+ size
;
1312 register char *endt
= to
+ size
;
1314 /* If TO - FROM is large, then we should break the copy into
1315 nonoverlapping chunks of TO - FROM bytes each. However, if
1316 TO - FROM is small, then the bcopy function call overhead
1317 makes this not worth it. The crossover point could be about
1318 anywhere. Since I don't think the obvious copy loop is too
1319 bad, I'm trying to err in its favor. */
1324 while (endf
!= from
);
1330 endt
-= (to
- from
);
1331 endf
-= (to
- from
);
1336 bcopy (endf
, endt
, to
- from
);
1339 /* If SIZE wasn't a multiple of TO - FROM, there will be a
1340 little left over. The amount left over is
1341 (endt + (to - from)) - to, which is endt - from. */
1342 bcopy (from
, to
, endt
- from
);
1349 extern void __malloc_safe_bcopy
PP ((__ptr_t
, __ptr_t
, __malloc_size_t
));
1350 #define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))
1356 #define min(A, B) ((A) < (B) ? (A) : (B))
1358 /* Debugging hook for realloc. */
1359 __ptr_t (*__realloc_hook
) PP ((__ptr_t __ptr
, __malloc_size_t __size
));
1361 /* Resize the given region to the new size, returning a pointer
1362 to the (possibly moved) region. This is optimized for speed;
1363 some benchmarks seem to indicate that greater compactness is
1364 achieved by unconditionally allocating and copying to a
1365 new region. This module has incestuous knowledge of the
1366 internals of both free and malloc. */
1368 _realloc_internal (ptr
, size
)
1370 __malloc_size_t size
;
1374 __malloc_size_t block
, blocks
, oldlimit
;
1378 _free_internal (ptr
);
1379 return _malloc_internal (0);
1381 else if (ptr
== NULL
)
1382 return _malloc_internal (size
);
1384 block
= BLOCK (ptr
);
1386 PROTECT_MALLOC_STATE (0);
1388 type
= _heapinfo
[block
].busy
.type
;
1392 /* Maybe reallocate a large block to a small fragment. */
1393 if (size
<= BLOCKSIZE
/ 2)
1395 result
= _malloc_internal (size
);
1398 memcpy (result
, ptr
, size
);
1399 _free_internal (ptr
);
1404 /* The new size is a large allocation as well;
1405 see if we can hold it in place. */
1406 blocks
= BLOCKIFY (size
);
1407 if (blocks
< _heapinfo
[block
].busy
.info
.size
)
1409 /* The new size is smaller; return
1410 excess memory to the free list. */
1411 _heapinfo
[block
+ blocks
].busy
.type
= 0;
1412 _heapinfo
[block
+ blocks
].busy
.info
.size
1413 = _heapinfo
[block
].busy
.info
.size
- blocks
;
1414 _heapinfo
[block
].busy
.info
.size
= blocks
;
1415 /* We have just created a new chunk by splitting a chunk in two.
1416 Now we will free this chunk; increment the statistics counter
1417 so it doesn't become wrong when _free_internal decrements it. */
1419 _free_internal (ADDRESS (block
+ blocks
));
1422 else if (blocks
== _heapinfo
[block
].busy
.info
.size
)
1423 /* No size change necessary. */
1427 /* Won't fit, so allocate a new region that will.
1428 Free the old region first in case there is sufficient
1429 adjacent free space to grow without moving. */
1430 blocks
= _heapinfo
[block
].busy
.info
.size
;
1431 /* Prevent free from actually returning memory to the system. */
1432 oldlimit
= _heaplimit
;
1434 _free_internal (ptr
);
1435 result
= _malloc_internal (size
);
1436 PROTECT_MALLOC_STATE (0);
1437 if (_heaplimit
== 0)
1438 _heaplimit
= oldlimit
;
1441 /* Now we're really in trouble. We have to unfree
1442 the thing we just freed. Unfortunately it might
1443 have been coalesced with its neighbors. */
1444 if (_heapindex
== block
)
1445 (void) _malloc_internal (blocks
* BLOCKSIZE
);
1449 = _malloc_internal ((block
- _heapindex
) * BLOCKSIZE
);
1450 (void) _malloc_internal (blocks
* BLOCKSIZE
);
1451 _free_internal (previous
);
1456 memmove (result
, ptr
, blocks
* BLOCKSIZE
);
1461 /* Old size is a fragment; type is logarithm
1462 to base two of the fragment size. */
1463 if (size
> (__malloc_size_t
) (1 << (type
- 1)) &&
1464 size
<= (__malloc_size_t
) (1 << type
))
1465 /* The new size is the same kind of fragment. */
1469 /* The new size is different; allocate a new space,
1470 and copy the lesser of the new size and the old. */
1471 result
= _malloc_internal (size
);
1474 memcpy (result
, ptr
, min (size
, (__malloc_size_t
) 1 << type
));
1475 _free_internal (ptr
);
1480 PROTECT_MALLOC_STATE (1);
1487 __malloc_size_t size
;
1489 if (!__malloc_initialized
&& !__malloc_initialize ())
1492 return (__realloc_hook
!= NULL
? *__realloc_hook
: _realloc_internal
)
1495 /* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.
1497 This library is free software; you can redistribute it and/or
1498 modify it under the terms of the GNU Library General Public License as
1499 published by the Free Software Foundation; either version 2 of the
1500 License, or (at your option) any later version.
1502 This library is distributed in the hope that it will be useful,
1503 but WITHOUT ANY WARRANTY; without even the implied warranty of
1504 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1505 Library General Public License for more details.
1507 You should have received a copy of the GNU Library General Public
1508 License along with this library; see the file COPYING.LIB. If
1509 not, write to the Free Software Foundation, Inc., 51 Franklin Street,
1510 Fifth Floor, Boston, MA 02110-1301, USA.
1512 The author may be reached (Email) at the address mike@ai.mit.edu,
1513 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1515 #ifndef _MALLOC_INTERNAL
1516 #define _MALLOC_INTERNAL
1520 /* Allocate an array of NMEMB elements each SIZE bytes long.
1521 The entire array is initialized to zeros. */
1523 calloc (nmemb
, size
)
1524 register __malloc_size_t nmemb
;
1525 register __malloc_size_t size
;
1527 register __ptr_t result
= malloc (nmemb
* size
);
1530 (void) memset (result
, 0, nmemb
* size
);
1534 /* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
1535 This file is part of the GNU C Library.
1537 The GNU C Library is free software; you can redistribute it and/or modify
1538 it under the terms of the GNU General Public License as published by
1539 the Free Software Foundation; either version 2, or (at your option)
1542 The GNU C Library is distributed in the hope that it will be useful,
1543 but WITHOUT ANY WARRANTY; without even the implied warranty of
1544 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1545 GNU General Public License for more details.
1547 You should have received a copy of the GNU General Public License
1548 along with the GNU C Library; see the file COPYING. If not, write to
1549 the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
1550 MA 02110-1301, USA. */
1552 #ifndef _MALLOC_INTERNAL
1553 #define _MALLOC_INTERNAL
1557 #ifndef __GNU_LIBRARY__
1561 #ifdef __GNU_LIBRARY__
1562 /* It is best not to declare this and cast its result on foreign operating
1563 systems with potentially hostile include files. */
1566 extern __ptr_t __sbrk
PP ((ptrdiff_t increment
));
1573 /* Allocate INCREMENT more bytes of data space,
1574 and return the start of data space, or NULL on errors.
1575 If INCREMENT is negative, shrink data space. */
1577 __default_morecore (increment
)
1578 __malloc_ptrdiff_t increment
;
1582 if (!bss_sbrk_did_unexec
)
1584 return bss_sbrk (increment
);
1587 result
= (__ptr_t
) __sbrk (increment
);
1588 if (result
== (__ptr_t
) -1)
1592 /* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.
1594 This library is free software; you can redistribute it and/or
1595 modify it under the terms of the GNU Library General Public License as
1596 published by the Free Software Foundation; either version 2 of the
1597 License, or (at your option) any later version.
1599 This library is distributed in the hope that it will be useful,
1600 but WITHOUT ANY WARRANTY; without even the implied warranty of
1601 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1602 Library General Public License for more details.
1604 You should have received a copy of the GNU Library General Public
1605 License along with this library; see the file COPYING.LIB. If
1606 not, write to the Free Software Foundation, Inc., 51 Franklin Street,
1607 Fifth Floor, Boston, MA 02110-1301, USA. */
1609 #ifndef _MALLOC_INTERNAL
1610 #define _MALLOC_INTERNAL
1614 #if __DJGPP__ - 0 == 1
/* There is some problem with memalign in DJGPP v1 and we are supposed
   to omit it.  No one told me why, they just told me to do it.  */
1621 __ptr_t (*__memalign_hook
) PP ((__malloc_size_t __size
,
1622 __malloc_size_t __alignment
));
1625 memalign (alignment
, size
)
1626 __malloc_size_t alignment
;
1627 __malloc_size_t size
;
1630 unsigned long int adj
, lastadj
;
1632 if (__memalign_hook
)
1633 return (*__memalign_hook
) (alignment
, size
);
1635 /* Allocate a block with enough extra space to pad the block with up to
1636 (ALIGNMENT - 1) bytes if necessary. */
1637 result
= malloc (size
+ alignment
- 1);
1641 /* Figure out how much we will need to pad this particular block
1642 to achieve the required alignment. */
1643 adj
= (unsigned long int) ((char *) result
- (char *) NULL
) % alignment
;
1647 /* Reallocate the block with only as much excess as it needs. */
1649 result
= malloc (adj
+ size
);
1650 if (result
== NULL
) /* Impossible unless interrupted. */
1654 adj
= (unsigned long int) ((char *) result
- (char *) NULL
) % alignment
;
1655 /* It's conceivable we might have been so unlucky as to get a
1656 different block with weaker alignment. If so, this block is too
1657 short to contain SIZE after alignment correction. So we must
1658 try again and get another block, slightly larger. */
1659 } while (adj
> lastadj
);
1663 /* Record this block in the list of aligned blocks, so that `free'
1664 can identify the pointer it is passed, which will be in the middle
1665 of an allocated block. */
1667 struct alignlist
*l
;
1668 for (l
= _aligned_blocks
; l
!= NULL
; l
= l
->next
)
1669 if (l
->aligned
== NULL
)
1670 /* This slot is free. Use it. */
1674 l
= (struct alignlist
*) malloc (sizeof (struct alignlist
));
1680 l
->next
= _aligned_blocks
;
1681 _aligned_blocks
= l
;
1684 result
= l
->aligned
= (char *) result
+ alignment
- adj
;
1690 #endif /* Not DJGPP v1 */
1691 /* Allocate memory on a page boundary.
1692 Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.
1694 This library is free software; you can redistribute it and/or
1695 modify it under the terms of the GNU Library General Public License as
1696 published by the Free Software Foundation; either version 2 of the
1697 License, or (at your option) any later version.
1699 This library is distributed in the hope that it will be useful,
1700 but WITHOUT ANY WARRANTY; without even the implied warranty of
1701 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1702 Library General Public License for more details.
1704 You should have received a copy of the GNU Library General Public
1705 License along with this library; see the file COPYING.LIB. If
1706 not, write to the Free Software Foundation, Inc., 51 Franklin Street,
1707 Fifth Floor, Boston, MA 02110-1301, USA.
1709 The author may be reached (Email) at the address mike@ai.mit.edu,
1710 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1712 #if defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC)
1714 /* Emacs defines GMALLOC_INHIBIT_VALLOC to avoid this definition
1715 on MSDOS, where it conflicts with a system header file. */
1717 #define ELIDE_VALLOC
1721 #ifndef ELIDE_VALLOC
1723 #if defined (__GNU_LIBRARY__) || defined (_LIBC)
1725 #include <sys/cdefs.h>
1726 #if defined (__GLIBC__) && __GLIBC__ >= 2
1727 /* __getpagesize is already declared in <unistd.h> with return type int */
1729 extern size_t __getpagesize
PP ((void));
1732 #include "getpagesize.h"
1733 #define __getpagesize() getpagesize()
1736 #ifndef _MALLOC_INTERNAL
1737 #define _MALLOC_INTERNAL
1741 static __malloc_size_t pagesize
;
1745 __malloc_size_t size
;
1748 pagesize
= __getpagesize ();
1750 return memalign (pagesize
, size
);
1753 #endif /* Not ELIDE_VALLOC. */
1757 /* Standard debugging hooks for `malloc'.
1758 Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
1759 Written May 1989 by Mike Haertel.
1761 This library is free software; you can redistribute it and/or
1762 modify it under the terms of the GNU Library General Public License as
1763 published by the Free Software Foundation; either version 2 of the
1764 License, or (at your option) any later version.
1766 This library is distributed in the hope that it will be useful,
1767 but WITHOUT ANY WARRANTY; without even the implied warranty of
1768 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1769 Library General Public License for more details.
1771 You should have received a copy of the GNU Library General Public
1772 License along with this library; see the file COPYING.LIB. If
1773 not, write to the Free Software Foundation, Inc., 51 Franklin Street,
1774 Fifth Floor, Boston, MA 02110-1301, USA.
1776 The author may be reached (Email) at the address mike@ai.mit.edu,
1777 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1782 #ifndef _MALLOC_INTERNAL
1783 #define _MALLOC_INTERNAL
1789 /* Old hook values. */
1790 static void (*old_free_hook
) __P ((__ptr_t ptr
));
1791 static __ptr_t (*old_malloc_hook
) __P ((__malloc_size_t size
));
1792 static __ptr_t (*old_realloc_hook
) __P ((__ptr_t ptr
, __malloc_size_t size
));
1794 /* Function to call when something awful happens. */
1795 static void (*abortfunc
) __P ((enum mcheck_status
));
1797 /* Arbitrary magical numbers. */
1798 #define MAGICWORD 0xfedabeeb
1799 #define MAGICFREE 0xd8675309
1800 #define MAGICBYTE ((char) 0xd7)
1801 #define MALLOCFLOOD ((char) 0x93)
1802 #define FREEFLOOD ((char) 0x95)
1806 __malloc_size_t size
; /* Exact size requested by user. */
1807 unsigned long int magic
; /* Magic number to check header integrity. */
1810 #if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
1811 #define flood memset
1813 static void flood
__P ((__ptr_t
, int, __malloc_size_t
));
1815 flood (ptr
, val
, size
)
1818 __malloc_size_t size
;
1826 static enum mcheck_status checkhdr
__P ((const struct hdr
*));
1827 static enum mcheck_status
1829 const struct hdr
*hdr
;
1831 enum mcheck_status status
;
1835 status
= MCHECK_HEAD
;
1838 status
= MCHECK_FREE
;
1841 if (((char *) &hdr
[1])[hdr
->size
] != MAGICBYTE
)
1842 status
= MCHECK_TAIL
;
1847 if (status
!= MCHECK_OK
)
1848 (*abortfunc
) (status
);
1852 static void freehook
__P ((__ptr_t
));
1861 hdr
= ((struct hdr
*) ptr
) - 1;
1863 hdr
->magic
= MAGICFREE
;
1864 flood (ptr
, FREEFLOOD
, hdr
->size
);
1869 __free_hook
= old_free_hook
;
1871 __free_hook
= freehook
;
1874 static __ptr_t mallochook
__P ((__malloc_size_t
));
1877 __malloc_size_t size
;
1881 __malloc_hook
= old_malloc_hook
;
1882 hdr
= (struct hdr
*) malloc (sizeof (struct hdr
) + size
+ 1);
1883 __malloc_hook
= mallochook
;
1888 hdr
->magic
= MAGICWORD
;
1889 ((char *) &hdr
[1])[size
] = MAGICBYTE
;
1890 flood ((__ptr_t
) (hdr
+ 1), MALLOCFLOOD
, size
);
1891 return (__ptr_t
) (hdr
+ 1);
1894 static __ptr_t reallochook
__P ((__ptr_t
, __malloc_size_t
));
1896 reallochook (ptr
, size
)
1898 __malloc_size_t size
;
1900 struct hdr
*hdr
= NULL
;
1901 __malloc_size_t osize
= 0;
1905 hdr
= ((struct hdr
*) ptr
) - 1;
1910 flood ((char *) ptr
+ size
, FREEFLOOD
, osize
- size
);
1913 __free_hook
= old_free_hook
;
1914 __malloc_hook
= old_malloc_hook
;
1915 __realloc_hook
= old_realloc_hook
;
1916 hdr
= (struct hdr
*) realloc ((__ptr_t
) hdr
, sizeof (struct hdr
) + size
+ 1);
1917 __free_hook
= freehook
;
1918 __malloc_hook
= mallochook
;
1919 __realloc_hook
= reallochook
;
1924 hdr
->magic
= MAGICWORD
;
1925 ((char *) &hdr
[1])[size
] = MAGICBYTE
;
1927 flood ((char *) (hdr
+ 1) + osize
, MALLOCFLOOD
, size
- osize
);
1928 return (__ptr_t
) (hdr
+ 1);
1933 enum mcheck_status status
;
1939 msg
= "memory is consistent, library is buggy";
1942 msg
= "memory clobbered before allocated block";
1945 msg
= "memory clobbered past end of allocated block";
1948 msg
= "block freed twice";
1951 msg
= "bogus mcheck_status, library is buggy";
1954 #ifdef __GNU_LIBRARY__
1957 fprintf (stderr
, "mcheck: %s\n", msg
);
1963 static int mcheck_used
= 0;
1967 void (*func
) __P ((enum mcheck_status
));
1969 abortfunc
= (func
!= NULL
) ? func
: &mabort
;
1971 /* These hooks may not be safely inserted if malloc is already in use. */
1972 if (!__malloc_initialized
&& !mcheck_used
)
1974 old_free_hook
= __free_hook
;
1975 __free_hook
= freehook
;
1976 old_malloc_hook
= __malloc_hook
;
1977 __malloc_hook
= mallochook
;
1978 old_realloc_hook
= __realloc_hook
;
1979 __realloc_hook
= reallochook
;
1983 return mcheck_used
? 0 : -1;
1987 mprobe (__ptr_t ptr
)
1989 return mcheck_used
? checkhdr (ptr
) : MCHECK_DISABLED
;
1992 #endif /* GC_MCHECK */
1994 /* arch-tag: 93dce5c0-f49a-41b5-86b1-f91c4169c02e
1995 (do not change this comment) */