/* Declarations for `malloc' and friends.
   Copyright (C) 1990-1993, 1995-1996, 1999, 2002-2007, 2013-2014 Free
   Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#include <config.h>

#include <string.h>
#include <limits.h>
#include <stdint.h>
#include <unistd.h>
#include <errno.h>
#include <stdio.h>

#ifdef USE_PTHREAD
#include <pthread.h>
#endif

#ifdef WINDOWSNT
#include <w32heap.h>	/* for sbrk */
#endif
/* Allocate SIZE bytes of memory.  */
extern void *malloc (size_t size);
/* Re-allocate the previously allocated block
   in ptr, making the new block SIZE bytes long.  */
extern void *realloc (void *ptr, size_t size);
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern void *calloc (size_t nmemb, size_t size);
/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
extern void free (void *ptr);
/* Allocate SIZE bytes aligned to ALIGNMENT bytes.  */
extern void *aligned_alloc (size_t, size_t);
extern void *memalign (size_t, size_t);
extern int posix_memalign (void **, size_t, size_t);
/* Set up mutexes and make malloc etc. thread-safe.  */
extern void malloc_enable_thread (void);
/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT		(CHAR_BIT * sizeof (int))
#define BLOCKLOG	(INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE	(1 << BLOCKLOG)
#define BLOCKIFY(SIZE)	(((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
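/* Worked example (an illustration, not part of the original source):
   on a host with 32-bit int, INT_BIT == 32, so BLOCKLOG == 12 and
   BLOCKSIZE == 4096.  BLOCKIFY rounds a byte count up to whole
   blocks: BLOCKIFY (1) == 1, BLOCKIFY (4096) == 1, and
   BLOCKIFY (4097) == 2.  Requests of at most BLOCKSIZE / 2 bytes
   are served from power-of-two fragments instead of whole blocks.  */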
/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP	(INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS	8
/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
	/* Zero for a large (multiblock) object, or positive giving the
	   logarithm to the base two of the fragment size.  */
	int type;
	union
	  {
	    struct
	      {
		size_t nfree;	/* Free frags in a fragmented block.  */
		size_t first;	/* First free fragment of the block.  */
	      } frag;
	    /* For a large object, in its first block, this has the number
	       of blocks in the object.  In the other blocks, this has a
	       negative number which says how far back the first block is.  */
	    ptrdiff_t size;
	  } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
	size_t size;	/* Size (in blocks) of a free cluster.  */
	size_t next;	/* Index of next free cluster.  */
	size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;
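/* Illustration (not from the original source): a block carved into
   32-byte fragments has busy.type == 5 (the base-2 log of 32).  With
   BLOCKSIZE == 4096 it holds 4096 >> 5 == 128 fragments;
   busy.info.frag.nfree counts how many are currently free, and
   busy.info.frag.first is the index of the first free fragment
   within the block.  */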
/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A)	(((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B)	((void *) (((B) - 1) * BLOCKSIZE + _heapbase))
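/* Worked example (not from the original source): with BLOCKSIZE ==
   4096 and _heapbase == 0x10000, the address 0x11000 lies one block
   past the base, so BLOCK (0x11000) == 2 and ADDRESS (2) == 0x11000.
   Block numbers are 1-based so that _heapinfo[0] can serve as the
   anchor of the circular free list.  */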
/* Current search index for the heap table.  */
extern size_t _heapindex;

/* Limit of valid info table indices.  */
extern size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];
/* List of blocks allocated with aligned_alloc and friends.  */
struct alignlist
  {
    struct alignlist *next;
    void *aligned;		/* The address that aligned_alloc returned.  */
    void *exact;		/* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;
/* Instrumentation.  */
extern size_t _chunks_used;
extern size_t _bytes_used;
extern size_t _chunks_free;
extern size_t _bytes_free;
/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern void *_malloc_internal (size_t);
extern void *_realloc_internal (void *, size_t);
extern void _free_internal (void *);
extern void *_malloc_internal_nolock (size_t);
extern void *_realloc_internal_nolock (void *, size_t);
extern void _free_internal_nolock (void *);
#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;

#define LOCK()						\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_malloc_mutex);		\
  } while (0)
#define UNLOCK()					\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_malloc_mutex);		\
  } while (0)
#define LOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_aligned_blocks_mutex);	\
  } while (0)
#define UNLOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_aligned_blocks_mutex);	\
  } while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif
/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object.  */
extern void *malloc_find_object_address (void *ptr);
/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern void *(*__morecore) (ptrdiff_t size);

/* Default value of `__morecore'.  */
extern void *__default_morecore (ptrdiff_t size);

/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) (void);

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
extern size_t __malloc_extra_blocks;
/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize (void);

/* Hooks for debugging versions.  */
extern void (*__malloc_initialize_hook) (void);
extern void (*__free_hook) (void *ptr);
extern void *(*__malloc_hook) (size_t size);
extern void *(*__realloc_hook) (void *ptr, size_t size);
extern void *(*__memalign_hook) (size_t size, size_t alignment);
/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck (void (*abortfunc) (enum mcheck_status));
/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe (void *ptr);

/* Activate a standard collection of tracing hooks.  */
extern void mtrace (void);
extern void muntrace (void);
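/* Usage sketch (illustrative, not from the original source):

     mcheck (NULL);      -- install hooks; must precede the first malloc
     char *p = malloc (16);
     p[16] = 0;          -- clobbers the trailing magic byte
     mprobe (p);         -- the abort function reports MCHECK_TAIL

   mcheck installs the checking hooks only if malloc has not been used
   yet, which is why it must run before the first allocation.  */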
/* Statistics available to the user.  */
struct mstats
  {
    size_t bytes_total;	/* Total size of the heap.  */
    size_t chunks_used;	/* Chunks allocated by the user.  */
    size_t bytes_used;	/* Byte total of user-allocated chunks.  */
    size_t chunks_free;	/* Chunks in the free list.  */
    size_t bytes_free;	/* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats (void);
/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings (void *start, void (*warnfun) (const char *));
/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
/* On Cygwin there are two heaps.  temacs uses the static heap
   (defined in sheap.c and managed with bss_sbrk), and the dumped
   emacs uses the Cygwin heap (managed with sbrk).  When emacs starts
   on Cygwin, it reinitializes malloc, and we save the old info for
   use by free and realloc if they're called with a pointer into the
   static heap.

   Currently (2011-08-16) the Cygwin build doesn't use ralloc.c; if
   this is changed in the future, we'll have to similarly deal with
   reinitializing ralloc. */
#ifdef CYGWIN
extern void *bss_sbrk (ptrdiff_t size);
extern int bss_sbrk_did_unexec;
char *bss_sbrk_heapbase;	/* _heapbase for static heap */
malloc_info *bss_sbrk_heapinfo;	/* _heapinfo for static heap */
#endif

void *(*__morecore) (ptrdiff_t size) = __default_morecore;
/* Debugging hook for `malloc'.  */
void *(*__malloc_hook) (size_t size);

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries.  */
static size_t heapsize;

/* Search index in the info table.  */
size_t _heapindex;

/* Limit of valid info table indices.  */
size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
size_t _chunks_used;
size_t _bytes_used;
size_t _chunks_free;
size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

size_t __malloc_extra_blocks;

void (*__malloc_initialize_hook) (void);
void (*__after_morecore_hook) (void);
#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */
#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static size_t last_state_size;
static malloc_info *last_heapinfo;
static void
protect_malloc_state (int protect_p)
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
		    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
	abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif
/* Aligned allocation.  */
static void *
align (size_t size)
{
  void *result;
  ptrdiff_t adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows the
     ptrdiff_t type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if (PTRDIFF_MAX < size)
    result = 0;
  else
    result = (*__morecore) (size);
  adj = (uintptr_t) result % BLOCKSIZE;
  if (adj != 0)
    {
      adj = BLOCKSIZE - adj;
      (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}
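/* Illustration (not from the original source): if __morecore returns
   0x10010 with BLOCKSIZE == 4096 (0x1000), then adj == 0x10, so
   another 0xff0 bytes are requested from the system and the result is
   bumped to 0x11000, the next block boundary; the 0xff0 skipped bytes
   are simply sacrificed.  */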
/* Get SIZE bytes, if we can get them starting at END.
   Return the address of the space we got.
   If we cannot get space at END, fail and return 0.  */
static void *
get_contiguous_space (ptrdiff_t size, void *position)
{
  void *before;
  void *after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}
/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void
register_heapinfo (void)
{
  size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}
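/* Illustration (not from the original source): if the info table
   occupies 3 blocks starting at block B, the loop above leaves
     _heapinfo[B].busy.info.size     ==  3
     _heapinfo[B + 1].busy.info.size == -1
     _heapinfo[B + 2].busy.info.size == -2
   so the first block of the object can be recovered from any interior
   block by adding the stored negative offset to the block index.  */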
#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;

static void
malloc_atfork_handler_prepare (void)
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}
/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread (void)
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
		  malloc_atfork_handler_parent,
		  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif /* USE_PTHREAD */
static void
malloc_initialize_1 (void)
{
#ifdef CYGWIN
  if (bss_sbrk_did_unexec)
    /* we're reinitializing the dumped emacs */
    {
      bss_sbrk_heapbase = _heapbase;
      bss_sbrk_heapinfo = _heapinfo;
      memset (_fraghead, 0, BLOCKLOG * sizeof (struct list));
    }
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
}
/* Set everything up and remember that we have.
   main will call malloc which calls this function.  That is before any threads
   or signal handlers have been set up, so we don't need thread protection.  */
int
__malloc_initialize (void)
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}
static int morecore_recursing;
/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static void *
morecore_nolock (size_t size)
{
  void *result;
  malloc_info *newinfo, *oldinfo;
  size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
	 added blocks in the table itself, as we hope to place them in
	 existing free space, which is already covered by part of the
	 existing table.  */
      newsize = heapsize;
      do
	newsize *= 2;
      while ((size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
	 from realloc in the case of growing a large block, because the
	 block being grown is momentarily marked as free.  In this case
	 _heaplimit is zero so we know not to reuse space for internal
	 allocation.  */
      if (_heaplimit != 0)
	{
	  /* First try to allocate the new info table in core we already
	     have, in the usual way using realloc.  If realloc cannot
	     extend it in place or relocate it to existing sufficient core,
	     we will get called again, and the code above will notice the
	     `morecore_recursing' flag and return null.  */
	  int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
	  morecore_recursing = 1;
	  newinfo = _realloc_internal_nolock (_heapinfo,
					      newsize * sizeof (malloc_info));
	  morecore_recursing = 0;
	  if (newinfo == NULL)
	    errno = save;
	  else
	    {
	      /* We found some space in core, and realloc has put the old
		 table's blocks on the free list.  Now zero the new part
		 of the table and install the new table location.  */
	      memset (&newinfo[heapsize], 0,
		      (newsize - heapsize) * sizeof (malloc_info));
	      _heapinfo = newinfo;
	      heapsize = newsize;
	      goto got_heap;
	    }
	}

      /* Allocate new space for the malloc info table.  */
      while (1)
	{
	  newinfo = align (newsize * sizeof (malloc_info));

	  /* Did it fail?  */
	  if (newinfo == NULL)
	    {
	      (*__morecore) (-size);
	      return NULL;
	    }

	  /* Is it big enough to record status for its own space?
	     If so, we win.  */
	  if ((size_t) BLOCK ((char *) newinfo
			      + newsize * sizeof (malloc_info))
	      < newsize)
	    break;

	  /* Must try again.  First give back most of what we just got.  */
	  (*__morecore) (- newsize * sizeof (malloc_info));
	  newsize *= 2;
	}

      /* Copy the old table to the beginning of the new,
	 and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
	      (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
	 it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}
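/* Growth illustration (not from the original source): with the
   default HEAP of 4194304 and BLOCKSIZE of 4096, the initial table
   has 1024 entries.  Each time the heap outgrows it, newsize doubles
   (1024 -> 2048 -> 4096 ...), so the table is reallocated only
   O(log n) times however large the heap grows.  */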
/* Allocate memory from the heap.  */
void *
_malloc_internal_nolock (size_t size)
{
  void *result;
  size_t block, blocks, lastblocks, start;
  register size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);
  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size. */
      register size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;

      /* Look in the fragment lists for a
	 free fragment of the desired size. */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters. */
	  result = next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    _heapinfo[block].busy.info.frag.first =
	      (uintptr_t) next->next % BLOCKSIZE >> log;

	  /* Update the statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
	  result = _malloc_internal_nolock (BLOCKSIZE);
	  PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
	  result = _malloc_internal_nolock (BLOCKSIZE);
#else
	  result = malloc (BLOCKSIZE);
#endif
	  if (result == NULL)
	    {
	      PROTECT_MALLOC_STATE (1);
	      goto out;
	    }

	  /* Link all fragments but the first into the free list.  */
	  next = (struct list *) ((char *) result + (1 << log));
	  next->next = NULL;
	  next->prev = &_fraghead[log];
	  _fraghead[log].next = next;

	  for (i = 2; i < (size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      next->next->prev = next;
	    }

	  /* Initialize the nfree and first counters for this block.  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Get a little extra.  */
	      size_t wantblocks = blocks + __malloc_extra_blocks;
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      /* Check to see if the new core will be contiguous with the
		 final free block; if so we don't need to get as much.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  /* We can't do this if we will have to make the heap info
		     table bigger to accommodate the new space.  */
		  block + wantblocks <= heapsize &&
		  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
					ADDRESS (block + lastblocks)))
		{
		  /* We got it contiguously.  Which block we are extending
		     (the `final free block' referred to above) might have
		     changed, if it got combined with a freed info table.  */
		  block = _heapinfo[0].free.prev;
		  _heapinfo[block].free.size += (wantblocks - lastblocks);
		  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
		  _heaplimit += wantblocks - lastblocks;
		  continue;
		}
	      result = morecore_nolock (wantblocks * BLOCKSIZE);
	      if (result == NULL)
		goto out;
	      block = BLOCK (result);
	      /* Put the new block at the end of the free list.  */
	      _heapinfo[block].free.size = wantblocks;
	      _heapinfo[block].free.prev = _heapinfo[0].free.prev;
	      _heapinfo[block].free.next = 0;
	      _heapinfo[0].free.prev = block;
	      _heapinfo[_heapinfo[block].free.prev].free.next = block;
	      ++_chunks_free;
	      /* Now loop to use some of that block for this allocation.  */
	    }
	}

      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list. */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list. */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list. */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
	 first with a negative number so you can find the first block by
	 adding that adjustment.  */
      while (--blocks > 0)
	_heapinfo[block + blocks].busy.info.size = -blocks;
    }

 out:
  PROTECT_MALLOC_STATE (1);
  return result;
}
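/* Worked example (not from the original source): a request for 100
   bytes is <= BLOCKSIZE / 2, so it takes the fragment path: after
   `--size' the loop leaves log == 7, i.e. a 128-byte fragment, and a
   fresh block yields 4096 >> 7 == 32 such fragments.  A request for
   5000 bytes takes the block path and gets BLOCKIFY (5000) == 2
   contiguous blocks (assuming BLOCKSIZE == 4096 throughout).  */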
void *
_malloc_internal (size_t size)
{
  void *result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}

void *
malloc (size_t size)
{
  void *(*hook) (size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of __malloc_hook to an automatic variable in case
     __malloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not a right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to __malloc_hook) for now.  */
  hook = __malloc_hook;
  return (hook != NULL ? *hook : _malloc_internal) (size);
}
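/* Hook usage sketch (illustrative only; the counter below is
   hypothetical, not part of this file):

     static size_t my_alloc_count;

     static void *
     counting_malloc_hook (size_t size)
     {
       ++my_alloc_count;
       return _malloc_internal (size);
     }

     __malloc_hook = counting_malloc_hook;

   The mcheck hooks at the end of this file follow the same pattern,
   but must also restore the old hooks around their own calls back
   into malloc.  */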
/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

extern void *_malloc (size_t);
extern void _free (void *);
extern void *_realloc (void *, size_t);

void *
_malloc (size_t size)
{
  return malloc (size);
}

void
_free (void *ptr)
{
  free (ptr);
}

void *
_realloc (void *ptr, size_t size)
{
  return realloc (ptr, size);
}
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
/* Debugging hook for free.  */
void (*__free_hook) (void *__ptr);

/* List of blocks allocated by aligned_alloc.  */
struct alignlist *_aligned_blocks = NULL;
/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (void *ptr)
{
  int type;
  size_t block, blocks;
  register size_t i;
  struct list *prev, *next;
  void *curbrk;
  const size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

#ifdef CYGWIN
  if ((char *) ptr < _heapbase)
    /* We're being asked to free something in the static heap. */
    return;
#endif

  PROTECT_MALLOC_STATE (0);
  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}

      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}
      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

	  size_t info_block = BLOCK (_heapinfo);
	  size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  size_t prev_block = _heapinfo[block].free.prev;
	  size_t prev_blocks = _heapinfo[prev_block].free.size;
	  size_t next_block = _heapinfo[block].free.next;
	  size_t next_blocks = _heapinfo[next_block].free.size;

	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold))))
	    {
	      malloc_info *newinfo;
	      size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal_nolock (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      newinfo = _malloc_internal_nolock (info_blocks * BLOCKSIZE);
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }

	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      (*__morecore) (-bytes);
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;
    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
	  _free_internal_nolock (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block.  */
	  next = ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block.  */
	  prev = ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first =
	    (uintptr_t) ptr % BLOCKSIZE >> type;
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
}
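/* Coalescing illustration (not from the original source): freeing a
   2-block object at block 10 while clusters 8-9 and 12-14 are free
   first merges with the predecessor (8 + 2 == 10, so the cluster at 8
   grows to 4 blocks), then with the successor (8 + 4 == 12), leaving
   a single 7-block cluster at block 8 in the free list.  */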
/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (void *ptr)
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}
/* Return memory to the heap.  */
void
free (void *ptr)
{
  void (*hook) (void *) = __free_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}
/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#define min(A, B) ((A) < (B) ? (A) : (B))
/* On Cygwin the dumped emacs may try to realloc storage allocated in
   the static heap.  We just malloc space in the new heap and copy the
   data.  */
#ifdef CYGWIN
void *
special_realloc (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, oldsize;

  block = ((char *) ptr - bss_sbrk_heapbase) / BLOCKSIZE + 1;
  type = bss_sbrk_heapinfo[block].busy.type;
  oldsize =
    type == 0 ? bss_sbrk_heapinfo[block].busy.info.size * BLOCKSIZE
    : (size_t) 1 << type;
  result = _malloc_internal_nolock (size);
  if (result != NULL)
    memcpy (result, ptr, min (oldsize, size));
  return result;
}
#endif
/* Debugging hook for realloc.  */
void *(*__realloc_hook) (void *ptr, size_t size);
/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc. */
void *
_realloc_internal_nolock (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

#ifdef CYGWIN
  if ((char *) ptr < _heapbase)
    /* ptr points into the static heap */
    return special_realloc (ptr, size);
#endif

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
	{
	  result = _malloc_internal_nolock (size);
	  if (result != NULL)
	    {
	      memcpy (result, ptr, size);
	      _free_internal_nolock (ptr);
	      goto out;
	    }
	}

      /* The new size is a large allocation as well;
	 see if we can hold it in place. */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
	{
	  /* The new size is smaller; return
	     excess memory to the free list. */
	  _heapinfo[block + blocks].busy.type = 0;
	  _heapinfo[block + blocks].busy.info.size
	    = _heapinfo[block].busy.info.size - blocks;
	  _heapinfo[block].busy.info.size = blocks;
	  /* We have just created a new chunk by splitting a chunk in two.
	     Now we will free this chunk; increment the statistics counter
	     so it doesn't become wrong when _free_internal decrements it.  */
	  ++_chunks_used;
	  _free_internal_nolock (ADDRESS (block + blocks));
	  result = ptr;
	}
      else if (blocks == _heapinfo[block].busy.info.size)
	/* No size change necessary.  */
	result = ptr;
      else
	{
	  /* Won't fit, so allocate a new region that will.
	     Free the old region first in case there is sufficient
	     adjacent free space to grow without moving. */
	  blocks = _heapinfo[block].busy.info.size;
	  /* Prevent free from actually returning memory to the system.  */
	  oldlimit = _heaplimit;
	  _heaplimit = 0;
	  _free_internal_nolock (ptr);
	  result = _malloc_internal_nolock (size);
	  PROTECT_MALLOC_STATE (0);
	  if (_heaplimit == 0)
	    _heaplimit = oldlimit;
	  if (result == NULL)
	    {
	      /* Now we're really in trouble.  We have to unfree
		 the thing we just freed.  Unfortunately it might
		 have been coalesced with its neighbors.  */
	      if (_heapindex == block)
		(void) _malloc_internal_nolock (blocks * BLOCKSIZE);
	      else
		{
		  void *previous
		    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
		  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
		  _free_internal_nolock (previous);
		}
	      goto out;
	    }
	  if (ptr != result)
	    memmove (result, ptr, blocks * BLOCKSIZE);
	}
      break;

    default:
      /* Old size is a fragment; type is logarithm
	 to base two of the fragment size.  */
      if (size > (size_t) (1 << (type - 1)) &&
	  size <= (size_t) (1 << type))
	/* The new size is the same kind of fragment.  */
	result = ptr;
      else
	{
	  /* The new size is different; allocate a new space,
	     and copy the lesser of the new size and the old. */
	  result = _malloc_internal_nolock (size);
	  if (result == NULL)
	    goto out;
	  memcpy (result, ptr, min (size, (size_t) 1 << type));
	  _free_internal_nolock (ptr);
	}
      break;
    }

 out:
  PROTECT_MALLOC_STATE (1);
  return result;
}
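/* Note on the recovery path above (explanatory reasoning, not from
   the original source): after the speculative free, _heapindex points
   at the freed cluster or the cluster it was merged into, so
   allocating (block - _heapindex) blocks and then `blocks' blocks
   re-carves the old object exactly, and freeing the first allocation
   returns the prefix; the caller's data was never moved, only
   momentarily marked free.  */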
void *
_realloc_internal (void *ptr, size_t size)
{
  void *result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}
void *
realloc (void *ptr, size_t size)
{
  void *(*hook) (void *, size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  hook = __realloc_hook;
  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
void *
calloc (size_t nmemb, size_t size)
{
  void *result;
  size_t bytes = nmemb * size;

  if (size != 0 && bytes / size != nmemb)
    {
      errno = ENOMEM;
      return NULL;
    }

  result = malloc (bytes);
  if (result)
    return memset (result, 0, bytes);
  return result;
}
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with the GNU C Library.  If not, see <http://www.gnu.org/licenses/>.  */
/* uClibc defines __GNU_LIBRARY__, but it is not completely
   compatible.  */
#if !defined (__GNU_LIBRARY__) || defined (__UCLIBC__)
#define __sbrk	sbrk
#else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */
extern void *__sbrk (ptrdiff_t increment);
#endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
void *
__default_morecore (ptrdiff_t increment)
{
  void *result;
#if defined (CYGWIN)
  if (!bss_sbrk_did_unexec)
    return bss_sbrk (increment);
#endif
  result = (void *) __sbrk (increment);
  if (result == (void *) -1)
    return NULL;
  return result;
}
/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.  */
void *(*__memalign_hook) (size_t size, size_t alignment);
void *
aligned_alloc (size_t alignment, size_t size)
{
  void *result;
  size_t adj, lastadj;
  void *(*hook) (size_t, size_t) = __memalign_hook;

  if (hook)
    return (*hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  if (- size < alignment)
    {
      errno = ENOMEM;
      return NULL;
    }
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = (uintptr_t) result % alignment;
  if (adj != 0)
    do
      {
	/* Reallocate the block with only as much excess as it needs.  */
	free (result);
	result = malloc (adj + size);
	if (result == NULL)	/* Impossible unless interrupted.  */
	  return NULL;

	lastadj = adj;
	adj = (uintptr_t) result % alignment;
	/* It's conceivable we might have been so unlucky as to get a
	   different block with weaker alignment.  If so, this block is too
	   short to contain SIZE after alignment correction.  So we must
	   try again and get another block, slightly larger.  */
      } while (adj > lastadj);

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  l = malloc (sizeof *l);
	  if (l != NULL)
	    {
	      l->next = _aligned_blocks;
	      _aligned_blocks = l;
	    }
	}
      if (l != NULL)
	{
	  l->exact = result;
	  result = l->aligned = (char *) result + alignment - adj;
	}
      UNLOCK_ALIGNED_BLOCKS ();
      if (l == NULL)
	{
	  free (result);
	  result = NULL;
	}
    }

  return result;
}
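/* Illustration (not from the original source): with alignment 64, if
   malloc returns 0x1007 for the padded request, adj == 0x1007 % 64 ==
   7, the block is retried at size adj + size, and the user pointer
   becomes result + 64 - 7 == 0x1040, a 64-byte boundary.  The check
   `- size < alignment' rejects requests where size + alignment - 1
   would wrap around SIZE_MAX, since - size == SIZE_MAX - size + 1.  */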
/* An obsolete alias for aligned_alloc, for any old libraries that use
   this alias.  */
void *
memalign (size_t alignment, size_t size)
{
  return aligned_alloc (alignment, size);
}
int
posix_memalign (void **memptr, size_t alignment, size_t size)
{
  void *mem;

  if (alignment == 0
      || alignment % sizeof (void *) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = aligned_alloc (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;

  return 0;
}
/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
/* Allocate SIZE bytes on a page boundary.  */
extern void *valloc (size_t);

#if defined _SC_PAGESIZE || !defined HAVE_GETPAGESIZE
# include "getpagesize.h"
#elif !defined getpagesize
extern int getpagesize (void);
#endif

static size_t pagesize;

void *
valloc (size_t size)
{
  if (pagesize == 0)
    pagesize = getpagesize ();

  return aligned_alloc (pagesize, size);
}
#ifdef GC_MCHECK

/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
/* Old hook values.  */
static void (*old_free_hook) (void *ptr);
static void *(*old_malloc_hook) (size_t size);
static void *(*old_realloc_hook) (void *ptr, size_t size);

/* Function to call when something awful happens.  */
static void (*abortfunc) (enum mcheck_status);
/* Arbitrary magical numbers.  */
#define MAGICWORD	(SIZE_MAX / 11 ^ SIZE_MAX / 13 << 3)
#define MAGICFREE	(SIZE_MAX / 17 ^ SIZE_MAX / 19 << 4)
#define MAGICBYTE	((char) 0xd7)
#define MALLOCFLOOD	((char) 0x93)
#define FREEFLOOD	((char) 0x95)
struct hdr
  {
    size_t size;	/* Exact size requested by user.  */
    size_t magic;	/* Magic number to check header integrity.  */
  };
static enum mcheck_status
checkhdr (const struct hdr *hdr)
{
  enum mcheck_status status;
  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
	status = MCHECK_TAIL;
      else
	status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}
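/* Layout sketch (illustrative, not from the original source): a
   checked allocation of SIZE bytes really occupies

     [struct hdr][SIZE user bytes][1 trailer byte]

   hdr->magic holds MAGICWORD while live and MAGICFREE after free;
   the trailer byte holds MAGICBYTE, so a one-byte overrun flips it
   and checkhdr reports MCHECK_TAIL.  */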
static void
freehook (void *ptr)
{
  struct hdr *hdr;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      memset (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}
static void *
mallochook (size_t size)
{
  struct hdr *hdr;

  __malloc_hook = old_malloc_hook;
  hdr = malloc (sizeof *hdr + size + 1);
  __malloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  return memset (hdr + 1, MALLOCFLOOD, size);
}
static void *
reallochook (void *ptr, size_t size)
{
  struct hdr *hdr = NULL;
  size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
	memset ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = realloc (hdr, sizeof *hdr + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    memset ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return hdr + 1;
}
static void
mabort (enum mcheck_status status)
{
  const char *msg;
  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
  abort ();
#endif
}
static int mcheck_used = 0;

int
mcheck (void (*func) (enum mcheck_status))
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = __free_hook;
      __free_hook = freehook;
      old_malloc_hook = __malloc_hook;
      __malloc_hook = mallochook;
      old_realloc_hook = __realloc_hook;
      __realloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}
enum mcheck_status
mprobe (void *ptr)
{
  return mcheck_used ? checkhdr (ptr) : MCHECK_DISABLED;
}

#endif /* GC_MCHECK */