/* This file is no longer automatically generated from libc.  */

#define _MALLOC_INTERNAL

/* The malloc headers and source files from the C library follow here.  */

/* Declarations for `malloc' and friends.
   Copyright (C) 1990, 1991, 1992, 1993, 1995, 1996, 1999, 2002, 2003, 2004,
   2005, 2006, 2007 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifndef _MALLOC_H
#define _MALLOC_H	1

#ifdef _MALLOC_INTERNAL

#include <stddef.h>		/* size_t, ptrdiff_t */
#include <string.h>		/* memset, memcpy, memmove */
#include <limits.h>		/* CHAR_BIT */
#include <errno.h>

/* PP wraps prototype argument lists; with ANSI C it passes them
   through unchanged.  */
#define PP(args)	args
#define __ptr_t		void *

#endif /* _MALLOC_INTERNAL.  */

#define __malloc_size_t		size_t
#define __malloc_ptrdiff_t	ptrdiff_t
/* Allocate SIZE bytes of memory.  */
extern __ptr_t malloc PP ((__malloc_size_t __size));
/* Re-allocate the previously allocated block
   in __ptr_t, making the new block SIZE bytes long.  */
extern __ptr_t realloc PP ((__ptr_t __ptr, __malloc_size_t __size));
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern __ptr_t calloc PP ((__malloc_size_t __nmemb, __malloc_size_t __size));
/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
extern void free PP ((__ptr_t __ptr));
/* Allocate SIZE bytes aligned to ALIGNMENT bytes.  */
#if !defined (_MALLOC_INTERNAL) || defined (MSDOS) /* Avoid conflict.  */
extern __ptr_t memalign PP ((__malloc_size_t __alignment,
			     __malloc_size_t __size));
extern int posix_memalign PP ((__ptr_t *, __malloc_size_t,
			       __malloc_size_t size));
#endif
/* Allocate SIZE bytes on a page boundary.  */
#if ! (defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC))
extern __ptr_t valloc PP ((__malloc_size_t __size));
#endif

#ifdef USE_PTHREAD
/* Set up mutexes and make malloc etc. thread-safe.  */
extern void malloc_enable_thread PP ((void));
#endif

#ifdef _MALLOC_INTERNAL
/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT		(CHAR_BIT * sizeof (int))
#define BLOCKLOG	(INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE	(1 << BLOCKLOG)
#define BLOCKIFY(SIZE)	(((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)

/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP		(INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS	8
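
/* Illustrative sketch (not part of the original source, compiled out
   with #if 0): with 32-bit ints, BLOCKLOG is 12, so BLOCKSIZE is 4096
   and BLOCKIFY rounds a byte count up to whole blocks.  */
#if 0
#include <assert.h>
static void
blockify_example (void)
{
  assert (BLOCKIFY (1) == 1);	 /* One byte still occupies a whole block.  */
  assert (BLOCKIFY (4096) == 1); /* An exact multiple is not rounded up.  */
  assert (BLOCKIFY (4097) == 2); /* One byte over spills into a second block.  */
}
#endif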
/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
	/* Zero for a large (multiblock) object, or positive giving the
	   logarithm to the base two of the fragment size.  */
	int type;
	union
	  {
	    struct
	      {
		__malloc_size_t nfree; /* Free frags in a fragmented block.  */
		__malloc_size_t first; /* First free fragment of the block.  */
	      } frag;
	    /* For a large object, in its first block, this has the number
	       of blocks in the object.  In the other blocks, this has a
	       negative number which says how far back the first block is.  */
	    __malloc_ptrdiff_t size;
	  } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
	__malloc_size_t size;	/* Size (in blocks) of a free cluster.  */
	__malloc_size_t next;	/* Index of next free cluster.  */
	__malloc_size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;
/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A)	(((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B)	((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
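
/* Illustrative sketch (hypothetical, compiled out): BLOCK and ADDRESS
   are inverses over block-aligned addresses.  Block numbers start at 1
   so that index 0 of _heapinfo can anchor the free list.  */
#if 0
#include <assert.h>
static void
block_address_example (void)
{
  /* The first block of the heap is number 1, and ADDRESS undoes BLOCK.  */
  assert (BLOCK (_heapbase) == 1);
  assert (ADDRESS (BLOCK (_heapbase)) == (__ptr_t) _heapbase);
}
#endif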
/* Current search index for the heap table.  */
extern __malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
extern __malloc_size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];

/* List of blocks allocated with `memalign' (or `valloc').  */
struct alignlist
  {
    struct alignlist *next;
    __ptr_t aligned;		/* The address that `memalign' returned.  */
    __ptr_t exact;		/* The address that `malloc' returned.  */
  };
extern struct alignlist *_aligned_blocks;
/* Instrumentation.  */
extern __malloc_size_t _chunks_used;
extern __malloc_size_t _bytes_used;
extern __malloc_size_t _chunks_free;
extern __malloc_size_t _bytes_free;

/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern __ptr_t _malloc_internal PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal PP ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal PP ((__ptr_t __ptr));
extern __ptr_t _malloc_internal_nolock PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal_nolock PP ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal_nolock PP ((__ptr_t __ptr));
#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;
#define LOCK()					\
  do						\
    {						\
      if (_malloc_thread_enabled_p)		\
	pthread_mutex_lock (&_malloc_mutex);	\
    }						\
  while (0)
#define UNLOCK()				\
  do						\
    {						\
      if (_malloc_thread_enabled_p)		\
	pthread_mutex_unlock (&_malloc_mutex);	\
    }						\
  while (0)
#define LOCK_ALIGNED_BLOCKS()				\
  do							\
    {							\
      if (_malloc_thread_enabled_p)			\
	pthread_mutex_lock (&_aligned_blocks_mutex);	\
    }							\
  while (0)
#define UNLOCK_ALIGNED_BLOCKS()				\
  do							\
    {							\
      if (_malloc_thread_enabled_p)			\
	pthread_mutex_unlock (&_aligned_blocks_mutex);	\
    }							\
  while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif

#endif /* _MALLOC_INTERNAL.  */
/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object.  */
extern __ptr_t malloc_find_object_address PP ((__ptr_t __ptr));

/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern __ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size));

/* Default value of `__morecore'.  */
extern __ptr_t __default_morecore PP ((__malloc_ptrdiff_t __size));

/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) PP ((void));

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
extern __malloc_size_t __malloc_extra_blocks;

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize PP ((void));

/* Hooks for debugging versions.  */
extern void (*__malloc_initialize_hook) PP ((void));
extern void (*__free_hook) PP ((__ptr_t __ptr));
extern __ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));
extern __ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
extern __ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
				       __malloc_size_t __alignment));
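
/* Illustrative sketch (hypothetical, compiled out): a counting
   debugging hook.  A hook chains to _malloc_internal, the hook-free
   entry point, so it is not re-entered through __malloc_hook.  */
#if 0
static __malloc_size_t allocation_count;

static __ptr_t
counting_malloc_hook (__malloc_size_t size)
{
  ++allocation_count;		  /* Record the request...  */
  return _malloc_internal (size); /* ...then allocate without hooks.  */
}

/* Installed with:  __malloc_hook = counting_malloc_hook;  */
#endif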
/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };
/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck PP ((void (*__abortfunc) PP ((enum mcheck_status))));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe PP ((__ptr_t __ptr));

/* Activate a standard collection of tracing hooks.  */
extern void mtrace PP ((void));
extern void muntrace PP ((void));
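
/* Illustrative sketch (hypothetical, compiled out): enabling mcheck
   before the first allocation, then probing a live block.  */
#if 0
#include <stdio.h>
static void
mcheck_example (void)
{
  mcheck (NULL);		/* Must precede the very first malloc.  */
  char *p = malloc (16);
  if (mprobe (p) != MCHECK_OK)	/* Same checks `free' would perform.  */
    fprintf (stderr, "heap corruption detected\n");
  free (p);
}
#endif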
/* Statistics available to the user.  */
struct mstats
  {
    __malloc_size_t bytes_total;  /* Total size of the heap.  */
    __malloc_size_t chunks_used;  /* Chunks allocated by the user.  */
    __malloc_size_t bytes_used;	  /* Byte total of user-allocated chunks.  */
    __malloc_size_t chunks_free;  /* Chunks in the free list.  */
    __malloc_size_t bytes_free;	  /* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats PP ((void));
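
/* Illustrative sketch (hypothetical, compiled out): sampling the
   allocator's counters; bytes_used plus bytes_free accounts for the
   bytes_total heap.  */
#if 0
#include <stdio.h>
static void
mstats_example (void)
{
  struct mstats s = mstats ();
  printf ("heap: %lu total, %lu used, %lu free\n",
	  (unsigned long) s.bytes_total,
	  (unsigned long) s.bytes_used,
	  (unsigned long) s.bytes_free);
}
#endif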
/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings PP ((__ptr_t __start,
				 void (*__warnfun) PP ((const char *))));

/* Relocating allocator.  */

/* Allocate SIZE bytes, and store the address in *HANDLEPTR.  */
extern __ptr_t r_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));

/* Free the storage allocated in HANDLEPTR.  */
extern void r_alloc_free PP ((__ptr_t *__handleptr));

/* Adjust the block at HANDLEPTR to be SIZE bytes long.  */
extern __ptr_t r_re_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));

#endif /* malloc.h  */
/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif
/* On Cygwin there are two heaps.  temacs uses the static heap
   (defined in sheap.c and managed with bss_sbrk), and the dumped
   emacs uses the Cygwin heap (managed with sbrk).  When emacs starts
   on Cygwin, it reinitializes malloc, and we save the old info for
   use by free and realloc if they're called with a pointer into the
   static heap.

   Currently (2011-08-16) the Cygwin build doesn't use ralloc.c; if
   this is changed in the future, we'll have to similarly deal with
   reinitializing ralloc.  */
extern __ptr_t bss_sbrk PP ((ptrdiff_t __size));
extern int bss_sbrk_did_unexec;
char *bss_sbrk_heapbase;	/* _heapbase for static heap */
malloc_info *bss_sbrk_heapinfo;	/* _heapinfo for static heap */

__ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size)) = __default_morecore;

/* Debugging hook for `malloc'.  */
__ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));
/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries.  */
static __malloc_size_t heapsize;

/* Search index in the info table.  */
__malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
__malloc_size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
__malloc_size_t _chunks_used;
__malloc_size_t _bytes_used;
__malloc_size_t _chunks_free;
__malloc_size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

__malloc_size_t __malloc_extra_blocks;

void (*__malloc_initialize_hook) PP ((void));
void (*__after_morecore_hook) PP ((void));
#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static __malloc_size_t last_state_size;
static malloc_info *last_heapinfo;

static void
protect_malloc_state (protect_p)
     int protect_p;
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
		    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
	abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif
/* Aligned allocation.  */
static __ptr_t align PP ((__malloc_size_t));
static __ptr_t
align (size)
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows a
     signed int type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if ((__malloc_ptrdiff_t) size < 0)
    result = 0;
  else
    {
      result = (*__morecore) (size);
      adj = (unsigned long int) ((unsigned long int) ((char *) result -
						      (char *) NULL)) % BLOCKSIZE;
      if (adj != 0)
	{
	  __ptr_t new;
	  adj = BLOCKSIZE - adj;
	  new = (*__morecore) (adj);
	  result = (char *) result + adj;
	}

      if (__after_morecore_hook)
	(*__after_morecore_hook) ();
    }

  return result;
}
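
/* Worked example (not in the original): if __morecore returns core at
   address 0x1003 with BLOCKSIZE 4096, then adj = 0x1003 % 4096 = 3, so
   align requests BLOCKSIZE - 3 = 4093 further bytes and advances result
   by the same amount, landing exactly on the block boundary 0x2000.  */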
/* Get SIZE bytes, if we can get them starting at END.
   Return the address of the space we got.
   If we cannot get space at END, fail and return 0.  */
static __ptr_t get_contiguous_space PP ((__malloc_ptrdiff_t, __ptr_t));
static __ptr_t
get_contiguous_space (size, position)
     __malloc_ptrdiff_t size;
     __ptr_t position;
{
  __ptr_t before;
  __ptr_t after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}

/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void
register_heapinfo (void)
{
  __malloc_size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}
#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;

static void
malloc_atfork_handler_prepare ()
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent ()
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child ()
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread ()
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
		  malloc_atfork_handler_parent,
		  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif /* USE_PTHREAD */
static void
malloc_initialize_1 ()
{
#ifdef GC_MCHECK
  mcheck (NULL);
#endif

  if (bss_sbrk_did_unexec)
    /* we're reinitializing the dumped emacs */
    {
      bss_sbrk_heapbase = _heapbase;
      bss_sbrk_heapinfo = _heapinfo;
      memset (_fraghead, 0, BLOCKLOG * sizeof (struct list));
    }

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}

/* Set everything up and remember that we have.
   main will call malloc which calls this function.  That is before any threads
   or signal handlers have been set up, so we don't need thread protection.  */
int
__malloc_initialize ()
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}
static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static __ptr_t morecore_nolock PP ((__malloc_size_t));
static __ptr_t
morecore_nolock (size)
     __malloc_size_t size;
{
  __ptr_t result;
  malloc_info *newinfo, *oldinfo;
  __malloc_size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
	 added blocks in the table itself, as we hope to place them in
	 existing free space, which is already covered by part of the
	 existing table.  */
      newsize = heapsize;
      do
	newsize *= 2;
      while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
	 from realloc in the case of growing a large block, because the
	 block being grown is momentarily marked as free.  In this case
	 _heaplimit is zero so we know not to reuse space for internal
	 allocation.  */
      if (_heaplimit != 0)
	{
	  /* First try to allocate the new info table in core we already
	     have, in the usual way using realloc.  If realloc cannot
	     extend it in place or relocate it to existing sufficient core,
	     we will get called again, and the code above will notice the
	     `morecore_recursing' flag and return null.  */
	  int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
	  morecore_recursing = 1;
	  newinfo = (malloc_info *) _realloc_internal_nolock
	    (_heapinfo, newsize * sizeof (malloc_info));
	  morecore_recursing = 0;
	  if (newinfo == NULL)
	    errno = save;
	  else
	    {
	      /* We found some space in core, and realloc has put the old
		 table's blocks on the free list.  Now zero the new part
		 of the table and install the new table location.  */
	      memset (&newinfo[heapsize], 0,
		      (newsize - heapsize) * sizeof (malloc_info));
	      _heapinfo = newinfo;
	      heapsize = newsize;
	      goto got_heap;
	    }
	}

      /* Allocate new space for the malloc info table.  */
      while (1)
	{
	  newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));

	  /* Did it fail?  */
	  if (newinfo == NULL)
	    {
	      (*__morecore) (-size);
	      return NULL;
	    }

	  /* Is it big enough to record status for its own space?
	     If so, we win.  */
	  if ((__malloc_size_t) BLOCK ((char *) newinfo
				       + newsize * sizeof (malloc_info))
	      < newsize)
	    break;

	  /* Must try again.  First give back most of what we just got.  */
	  (*__morecore) (- newsize * sizeof (malloc_info));
	  newsize *= 2;
	}

      /* Copy the old table to the beginning of the new,
	 and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
	      (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
	 it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}

/* Allocate memory from the heap.  */
__ptr_t
_malloc_internal_nolock (size)
     __malloc_size_t size;
{
  __ptr_t result;
  __malloc_size_t block, blocks, lastblocks, start;
  register __malloc_size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size. */
      register __malloc_size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;

      /* Look in the fragment lists for a
	 free fragment of the desired size. */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters. */
	  result = (__ptr_t) next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    _heapinfo[block].busy.info.frag.first = (unsigned long int)
	      ((unsigned long int) ((char *) next->next - (char *) NULL)
	       % BLOCKSIZE) >> log;

	  /* Update the statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
	  result = _malloc_internal_nolock (BLOCKSIZE);
	  PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
	  result = _malloc_internal_nolock (BLOCKSIZE);
#else
	  result = malloc (BLOCKSIZE);
#endif
	  if (result == NULL)
	    {
	      PROTECT_MALLOC_STATE (1);
	      goto out;
	    }

	  /* Link all fragments but the first into the free list.  */
	  next = (struct list *) ((char *) result + (1 << log));
	  next->next = NULL;
	  next->prev = &_fraghead[log];
	  _fraghead[log].next = next;

	  for (i = 2; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      next->next->prev = next;
	    }

	  /* Initialize the nfree and first counters for this block.  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Get a little extra.  */
	      __malloc_size_t wantblocks = blocks + __malloc_extra_blocks;
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      /* Check to see if the new core will be contiguous with the
		 final free block; if so we don't need to get as much.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  /* We can't do this if we will have to make the heap info
		     table bigger to accommodate the new space.  */
		  block + wantblocks <= heapsize &&
		  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
					ADDRESS (block + lastblocks)))
		{
		  /* We got it contiguously.  Which block we are extending
		     (the `final free block' referred to above) might have
		     changed, if it got combined with a freed info table.  */
		  block = _heapinfo[0].free.prev;
		  _heapinfo[block].free.size += (wantblocks - lastblocks);
		  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
		  _heaplimit += wantblocks - lastblocks;
		  continue;
		}
	      result = morecore_nolock (wantblocks * BLOCKSIZE);
	      if (result == NULL)
		goto out;
	      block = BLOCK (result);
	      /* Put the new block at the end of the free list.  */
	      _heapinfo[block].free.size = wantblocks;
	      _heapinfo[block].free.prev = _heapinfo[0].free.prev;
	      _heapinfo[block].free.next = 0;
	      _heapinfo[0].free.prev = block;
	      _heapinfo[_heapinfo[block].free.prev].free.next = block;
	      ++_chunks_free;
	      /* Now loop to use some of that block for this allocation.  */
	    }
	}

      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list. */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list. */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list. */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
	 first with a negative number so you can find the first block by
	 adding that adjustment.  */
      while (--blocks > 0)
	_heapinfo[block + blocks].busy.info.size = -blocks;
    }

 out:
  PROTECT_MALLOC_STATE (1);
  return result;
}
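
/* Illustrative sketch (hypothetical helper, compiled out): the
   size-to-fragment mapping computed by the loop above.  Requests of at
   most BLOCKSIZE / 2 bytes get a power-of-two fragment; anything larger
   gets BLOCKIFY (size) whole blocks.  */
#if 0
static __malloc_size_t
fragment_log_example (__malloc_size_t size)
{
  /* Mirrors _malloc_internal_nolock: log2 of the smallest power of two
     that can hold SIZE, with a minimum fragment of 2 bytes.  */
  __malloc_size_t log = 1;
  --size;
  while ((size /= 2) != 0)
    ++log;
  return log;	/* e.g. size 100 -> log 7, i.e. a 128-byte fragment.  */
}
#endif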
__ptr_t
_malloc_internal (size)
     __malloc_size_t size;
{
  __ptr_t result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}

__ptr_t
malloc (size)
     __malloc_size_t size;
{
  __ptr_t (*hook) (__malloc_size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of __malloc_hook to an automatic variable in case
     __malloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not a right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to __malloc_hook) for now.  */
  hook = __malloc_hook;
  return (hook != NULL ? *hook : _malloc_internal) (size);
}
#ifndef _LIBC

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

__ptr_t
_malloc (size)
     __malloc_size_t size;
{
  return malloc (size);
}

void
_free (ptr)
     __ptr_t ptr;
{
  free (ptr);
}

__ptr_t
_realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  return realloc (ptr, size);
}

#endif
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif
/* Debugging hook for free.  */
void (*__free_hook) PP ((__ptr_t __ptr));

/* List of blocks allocated by memalign.  */
struct alignlist *_aligned_blocks = NULL;
/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (ptr)
     __ptr_t ptr;
{
  int type;
  __malloc_size_t block, blocks;
  register __malloc_size_t i;
  struct list *prev, *next;
  __ptr_t curbrk;
  const __malloc_size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

  if (ptr < _heapbase)
    /* We're being asked to free something in the static heap. */
    return;

  PROTECT_MALLOC_STATE (0);

  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}

      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

	  __malloc_size_t info_block = BLOCK (_heapinfo);
	  __malloc_size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  __malloc_size_t prev_block = _heapinfo[block].free.prev;
	  __malloc_size_t prev_blocks = _heapinfo[prev_block].free.size;
	  __malloc_size_t next_block = _heapinfo[block].free.next;
	  __malloc_size_t next_blocks = _heapinfo[next_block].free.size;

	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold))))
	    {
	      malloc_info *newinfo;
	      __malloc_size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal_nolock (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      newinfo = (malloc_info *) _malloc_internal_nolock (info_blocks
								 * BLOCKSIZE);
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }

	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register __malloc_size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      (*__morecore) (-bytes);
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
	  _free_internal_nolock (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block.  */
	  next = (struct list *) ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block.  */
	  prev = (struct list *) ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first = (unsigned long int)
	    ((unsigned long int) ((char *) ptr - (char *) NULL)
	     % BLOCKSIZE >> type);
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
}
/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (ptr)
     __ptr_t ptr;
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}

/* Return memory to the heap.  */
void
free (ptr)
     __ptr_t ptr;
{
  void (*hook) (__ptr_t) = __free_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}

/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (ptr)
     __ptr_t ptr;
{
  free (ptr);
}
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

#define min(A, B) ((A) < (B) ? (A) : (B))
/* On Cygwin the dumped emacs may try to realloc storage allocated in
   the static heap.  We just malloc space in the new heap and copy the
   data.  */
__ptr_t
special_realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;
  int type;
  __malloc_size_t block, oldsize;

  block = ((char *) ptr - bss_sbrk_heapbase) / BLOCKSIZE + 1;
  type = bss_sbrk_heapinfo[block].busy.type;
  oldsize =
    type == 0 ? bss_sbrk_heapinfo[block].busy.info.size * BLOCKSIZE
    : (__malloc_size_t) 1 << type;
  result = _malloc_internal_nolock (size);
  if (result != NULL)
    memcpy (result, ptr, min (oldsize, size));
  return result;
}
/* Debugging hook for realloc.  */
__ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc. */
__ptr_t
_realloc_internal_nolock (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;
  int type;
  __malloc_size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

  if (ptr < _heapbase)
    /* ptr points into the static heap */
    return special_realloc (ptr, size);

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
	{
	  result = _malloc_internal_nolock (size);
	  if (result != NULL)
	    {
	      memcpy (result, ptr, size);
	      _free_internal_nolock (ptr);
	      goto out;
	    }
	}

      /* The new size is a large allocation as well;
	 see if we can hold it in place. */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
	{
	  /* The new size is smaller; return
	     excess memory to the free list. */
	  _heapinfo[block + blocks].busy.type = 0;
	  _heapinfo[block + blocks].busy.info.size
	    = _heapinfo[block].busy.info.size - blocks;
	  _heapinfo[block].busy.info.size = blocks;
	  /* We have just created a new chunk by splitting a chunk in two.
	     Now we will free this chunk; increment the statistics counter
	     so it doesn't become wrong when _free_internal decrements it.  */
	  ++_chunks_used;
	  _free_internal_nolock (ADDRESS (block + blocks));
	  result = ptr;
	}
      else if (blocks == _heapinfo[block].busy.info.size)
	/* No size change necessary.  */
	result = ptr;
      else
	{
	  /* Won't fit, so allocate a new region that will.
	     Free the old region first in case there is sufficient
	     adjacent free space to grow without moving. */
	  blocks = _heapinfo[block].busy.info.size;
	  /* Prevent free from actually returning memory to the system.  */
	  oldlimit = _heaplimit;
	  _heaplimit = 0;
	  _free_internal_nolock (ptr);
	  result = _malloc_internal_nolock (size);
	  PROTECT_MALLOC_STATE (0);
	  if (_heaplimit == 0)
	    _heaplimit = oldlimit;
	  if (result == NULL)
	    {
	      /* Now we're really in trouble.  We have to unfree
		 the thing we just freed.  Unfortunately it might
		 have been coalesced with its neighbors.  */
	      if (_heapindex == block)
		(void) _malloc_internal_nolock (blocks * BLOCKSIZE);
	      else
		{
		  __ptr_t previous
		    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
		  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
		  _free_internal_nolock (previous);
		}
	      goto out;
	    }
	  if (ptr != result)
	    memmove (result, ptr, blocks * BLOCKSIZE);
	}
      break;

    default:
      /* Old size is a fragment; type is logarithm
	 to base two of the fragment size.  */
      if (size > (__malloc_size_t) (1 << (type - 1)) &&
	  size <= (__malloc_size_t) (1 << type))
	/* The new size is the same kind of fragment.  */
	result = ptr;
      else
	{
	  /* The new size is different; allocate a new space,
	     and copy the lesser of the new size and the old. */
	  result = _malloc_internal_nolock (size);
	  if (result == NULL)
	    goto out;
	  memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
	  _free_internal_nolock (ptr);
	}
      break;
    }

 out:
  PROTECT_MALLOC_STATE (1);
  return result;
}
__ptr_t
_realloc_internal (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}

__ptr_t
realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t (*hook) (__ptr_t, __malloc_size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  hook = __realloc_hook;
  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif
/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
__ptr_t
calloc (nmemb, size)
     register __malloc_size_t nmemb;
     register __malloc_size_t size;
{
  register __ptr_t result;

  /* Guard against overflow in NMEMB * SIZE; the historical code
     multiplied unchecked.  */
  if (size != 0 && nmemb > (__malloc_size_t) -1 / size)
    return NULL;

  result = malloc (nmemb * size);
  if (result != NULL)
    (void) memset (result, 0, nmemb * size);

  return result;
}
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with the GNU C Library; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
   MA 02110-1301, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* uClibc defines __GNU_LIBRARY__, but it is not completely
   compatible.  */
#if !defined (__GNU_LIBRARY__) || defined (__UCLIBC__)
#define __sbrk	sbrk
#else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */
extern __ptr_t __sbrk PP ((ptrdiff_t increment));
#endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
__ptr_t
__default_morecore (increment)
     __malloc_ptrdiff_t increment;
{
  __ptr_t result;
#if defined (CYGWIN)
  if (!bss_sbrk_did_unexec)
    {
      return bss_sbrk (increment);
    }
#endif
  result = (__ptr_t) __sbrk (increment);
  if (result == (__ptr_t) -1)
    return NULL;
  return result;
}
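
/* Illustrative sketch (hypothetical, compiled out): __morecore can be
   repointed at any sbrk-like function.  This toy replacement carves
   core out of a static arena instead of moving the system break; like
   sbrk, it returns the previous break, so successive calls hand back
   contiguous memory as the allocator requires.  */
#if 0
static char arena[1 << 20];
static __malloc_ptrdiff_t arena_break;

static __ptr_t
arena_morecore (__malloc_ptrdiff_t increment)
{
  if (arena_break + increment < 0
      || arena_break + increment > (__malloc_ptrdiff_t) sizeof arena)
    return NULL;		/* Out of arena; callers see a failure.  */
  arena_break += increment;
  return arena + arena_break - increment;	/* Previous break.  */
}

/* Installed with:  __morecore = arena_morecore;  */
#endif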
/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif
__ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
				__malloc_size_t __alignment));

__ptr_t
memalign (alignment, size)
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj, lastadj;
  __ptr_t (*hook) (__malloc_size_t, __malloc_size_t) = __memalign_hook;

  if (hook)
    return (*hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;

  do
    {
      /* Reallocate the block with only as much excess as it needs.  */
      free (result);
      result = malloc (adj + size);
      if (result == NULL)	/* Impossible unless interrupted.  */
	return NULL;

      lastadj = adj;
      adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;
      /* It's conceivable we might have been so unlucky as to get a
	 different block with weaker alignment.  If so, this block is too
	 short to contain SIZE after alignment correction.  So we must
	 try again and get another block, slightly larger.  */
    } while (adj > lastadj);

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  l = (struct alignlist *) malloc (sizeof (struct alignlist));
	  if (l != NULL)
	    {
	      l->next = _aligned_blocks;
	      _aligned_blocks = l;
	    }
	}
      if (l != NULL)
	{
	  l->exact = result;
	  result = l->aligned = (char *) result + alignment - adj;
	}
      UNLOCK_ALIGNED_BLOCKS ();
      if (l == NULL)
	{
	  free (result);
	  result = NULL;
	}
    }

  return result;
}
int
posix_memalign (memptr, alignment, size)
     __ptr_t *memptr;
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t mem;

  if (alignment == 0
      || alignment % sizeof (__ptr_t) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = memalign (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;

  return 0;
}
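
/* Illustrative sketch (hypothetical, compiled out): requesting a
   64-byte-aligned buffer.  The alignment must be a power of two and a
   multiple of sizeof (__ptr_t), or EINVAL comes back.  */
#if 0
static void
posix_memalign_example (void)
{
  __ptr_t buf;
  if (posix_memalign (&buf, 64, 1024) == 0)	/* 0 means success.  */
    {
      /* buf is 64-byte aligned; plain free () accepts it because
	 memalign recorded the block in _aligned_blocks.  */
      free (buf);
    }
}
#endif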
/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#if defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC)

/* Emacs defines GMALLOC_INHIBIT_VALLOC to avoid this definition
   on MSDOS, where it conflicts with a system header file.  */

#define ELIDE_VALLOC

#endif

#ifndef ELIDE_VALLOC

#if defined (__GNU_LIBRARY__) || defined (_LIBC)
#include <stddef.h>
#include <sys/cdefs.h>
#if defined (__GLIBC__) && __GLIBC__ >= 2
/* __getpagesize is already declared in <unistd.h> with return type int */
#else
extern size_t __getpagesize PP ((void));
#endif
#else
#include "getpagesize.h"
#define __getpagesize()	getpagesize ()
#endif

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

static __malloc_size_t pagesize;

__ptr_t
valloc (size)
     __malloc_size_t size;
{
  if (pagesize == 0)
    pagesize = __getpagesize ();

  return memalign (pagesize, size);
}

#endif /* Not ELIDE_VALLOC.  */
/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifdef GC_MCHECK

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif
#include <stdio.h>
/* Old hook values.  */
static void (*old_free_hook) (__ptr_t ptr);
static __ptr_t (*old_malloc_hook) (__malloc_size_t size);
static __ptr_t (*old_realloc_hook) (__ptr_t ptr, __malloc_size_t size);

/* Function to call when something awful happens.  */
static void (*abortfunc) (enum mcheck_status);

/* Arbitrary magical numbers.  */
#define MAGICWORD	0xfedabeeb
#define MAGICFREE	0xd8675309
#define MAGICBYTE	((char) 0xd7)
#define MALLOCFLOOD	((char) 0x93)
#define FREEFLOOD	((char) 0x95)

struct hdr
  {
    __malloc_size_t size;	/* Exact size requested by user.  */
    unsigned long int magic;	/* Magic number to check header integrity.  */
  };
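
/* Illustrative layout note (not in the original): a checked block of
   SIZE user bytes is allocated as

       [ struct hdr | SIZE user bytes | MAGICBYTE ]

   so checkhdr can verify hdr->magic in front of the user region and
   the single trailing byte ((char *) &hdr[1])[hdr->size] behind it.  */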
static enum mcheck_status checkhdr (const struct hdr *);
static enum mcheck_status
checkhdr (hdr)
     const struct hdr *hdr;
{
  enum mcheck_status status;
  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
	status = MCHECK_TAIL;
      else
	status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}

static void freehook (__ptr_t);
static void
freehook (ptr)
     __ptr_t ptr;
{
  struct hdr *hdr;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      memset (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}

static __ptr_t mallochook (__malloc_size_t);
static __ptr_t
mallochook (size)
     __malloc_size_t size;
{
  struct hdr *hdr;

  __malloc_hook = old_malloc_hook;
  hdr = (struct hdr *) malloc (sizeof (struct hdr) + size + 1);
  __malloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  memset ((__ptr_t) (hdr + 1), MALLOCFLOOD, size);
  return (__ptr_t) (hdr + 1);
}
reallochook (__ptr_t
, __malloc_size_t
);
1980 reallochook (ptr
, size
)
1982 __malloc_size_t size
;
1984 struct hdr
*hdr
= NULL
;
1985 __malloc_size_t osize
= 0;
1989 hdr
= ((struct hdr
*) ptr
) - 1;
1994 memset ((char *) ptr
+ size
, FREEFLOOD
, osize
- size
);
1997 __free_hook
= old_free_hook
;
1998 __malloc_hook
= old_malloc_hook
;
1999 __realloc_hook
= old_realloc_hook
;
2000 hdr
= (struct hdr
*) realloc ((__ptr_t
) hdr
, sizeof (struct hdr
) + size
+ 1);
2001 __free_hook
= freehook
;
2002 __malloc_hook
= mallochook
;
2003 __realloc_hook
= reallochook
;
2008 hdr
->magic
= MAGICWORD
;
2009 ((char *) &hdr
[1])[size
] = MAGICBYTE
;
2011 memset ((char *) (hdr
+ 1) + osize
, MALLOCFLOOD
, size
- osize
);
2012 return (__ptr_t
) (hdr
+ 1);
static void
mabort (status)
     enum mcheck_status status;
{
  const char *msg;
  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
  abort ();
#endif
}
static int mcheck_used = 0;

int
mcheck (func)
     void (*func) (enum mcheck_status);
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = __free_hook;
      __free_hook = freehook;
      old_malloc_hook = __malloc_hook;
      __malloc_hook = mallochook;
      old_realloc_hook = __realloc_hook;
      __realloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}

enum mcheck_status
mprobe (__ptr_t ptr)
{
  return mcheck_used ? checkhdr (ptr) : MCHECK_DISABLED;
}

#endif /* GC_MCHECK */