/* Declarations for `malloc' and friends.
   Copyright (C) 1990-1993, 1995-1996, 1999, 2002-2007, 2013-2014 Free
   Software Foundation, Inc.
		  Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
/* If HYBRID_MALLOC is defined in config.h, then conf_post.h #defines
   malloc and friends as macros before including stdlib.h.  In this
   file we will need the prototypes for the system malloc, so we must
   include stdlib.h before config.h.  And we have to do this
   unconditionally, since HYBRID_MALLOC hasn't been defined yet.  */
#include <stdlib.h>

#include <config.h>
#if defined HAVE_PTHREAD && !defined HYBRID_MALLOC
#define USE_PTHREAD
#endif

#include <string.h>
#include <limits.h>
#include <stdint.h>

#ifdef HYBRID_GET_CURRENT_DIR_NAME
#undef get_current_dir_name
#endif

#include <unistd.h>

#ifdef USE_PTHREAD
#include <pthread.h>
#endif

#ifdef WINDOWSNT
#include <w32heap.h>	/* for sbrk */
#endif

#ifdef emacs
extern void emacs_abort (void);
#endif
/* If HYBRID_MALLOC is defined, then temacs will use malloc,
   realloc... as defined in this file (and renamed gmalloc,
   grealloc... via the macros that follow).  The dumped emacs,
   however, will use the system malloc, realloc....  In other source
   files, malloc, realloc... are renamed hybrid_malloc,
   hybrid_realloc... via macros in conf_post.h.  hybrid_malloc and
   friends are wrapper functions defined later in this file.
   aligned_alloc is defined as a macro only in alloc.c.

   As of this writing (August 2014), Cygwin is the only platform on
   which HYBRID_MALLOC is defined.  Any other platform that wants to
   define it will have to define the macros DUMPED and
   ALLOCATED_BEFORE_DUMPING, defined below for Cygwin.  */
#ifdef HYBRID_MALLOC
#undef malloc
#undef realloc
#undef calloc
#undef free
#define malloc gmalloc
#define realloc grealloc
#define calloc gcalloc
#define aligned_alloc galigned_alloc
#define free gfree
#endif  /* HYBRID_MALLOC */
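
/* Illustration (not part of the build): with the renaming above, a
   definition such as

     void *malloc (size_t size) { ... }

   in this file compiles as `gmalloc', while other Emacs sources call
   `hybrid_malloc' via the conf_post.h macros.  The hybrid wrappers
   near the end of this file then pick one of the two implementations
   at run time, based on DUMPED.  */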
#ifdef CYGWIN
extern void *bss_sbrk (ptrdiff_t size);
extern int bss_sbrk_did_unexec;
extern char *bss_sbrk_buffer;
extern char *bss_sbrk_buffer_end;
#define DUMPED bss_sbrk_did_unexec
#define ALLOCATED_BEFORE_DUMPING(P) \
  ((char *) (P) < bss_sbrk_buffer_end && (char *) (P) >= bss_sbrk_buffer)
#endif

#ifdef __cplusplus
extern "C"
{
#endif

#include <stddef.h>
/* Allocate SIZE bytes of memory.  */
extern void *malloc (size_t size) ATTRIBUTE_MALLOC_SIZE ((1));
/* Re-allocate the previously allocated block
   in ptr, making the new block SIZE bytes long.  */
extern void *realloc (void *ptr, size_t size) ATTRIBUTE_ALLOC_SIZE ((2));
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern void *calloc (size_t nmemb, size_t size) ATTRIBUTE_MALLOC_SIZE ((1,2));
/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
extern void free (void *ptr);

/* Allocate SIZE bytes aligned to ALIGNMENT bytes.  */
#ifdef MSDOS
extern void *aligned_alloc (size_t, size_t);
extern void *memalign (size_t, size_t);
extern int posix_memalign (void **, size_t, size_t);
#endif
#ifdef USE_PTHREAD
/* Set up mutexes and make malloc etc. thread-safe.  */
extern void malloc_enable_thread (void);
#endif

#ifdef emacs
extern void emacs_abort (void);
#endif
/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT		(CHAR_BIT * sizeof (int))
#define BLOCKLOG	(INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE	(1 << BLOCKLOG)
#define BLOCKIFY(SIZE)	(((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
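
/* (Worked example, illustrative only: on hosts where int is 32 bits,
   INT_BIT is 32, so BLOCKLOG is 12 and BLOCKSIZE is 4096.  A request
   for 10000 bytes then spans BLOCKIFY (10000) == (10000 + 4095) / 4096
   == 3 whole blocks.)  */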
/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP	(INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS	8
/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
	/* Zero for a large (multiblock) object, or positive giving the
	   logarithm to the base two of the fragment size.  */
	int type;
	union
	  {
	    struct
	      {
		size_t nfree;	/* Free frags in a fragmented block.  */
		size_t first;	/* First free fragment of the block.  */
	      } frag;
	    /* For a large object, in its first block, this has the number
	       of blocks in the object.  In the other blocks, this has a
	       negative number which says how far back the first block is.  */
	    ptrdiff_t size;
	  } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
	size_t size;	/* Size (in blocks) of a free cluster.  */
	size_t next;	/* Index of next free cluster.  */
	size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;
/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A)	(((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B)	((void *) (((B) - 1) * BLOCKSIZE + _heapbase))
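
/* (Worked example, illustrative only: block numbers are 1-based, so
   BLOCK (_heapbase) == 1 and ADDRESS (1) == _heapbase.  The address
   _heapbase + 3 * BLOCKSIZE + 5, i.e. 5 bytes into the fourth block,
   maps to BLOCK == 4, and ADDRESS (4) gives that block's start,
   _heapbase + 3 * BLOCKSIZE.)  */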
/* Current search index for the heap table.  */
extern size_t _heapindex;

/* Limit of valid info table indices.  */
extern size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];
/* List of blocks allocated with aligned_alloc and friends.  */
struct alignlist
  {
    struct alignlist *next;
    void *aligned;	/* The address that aligned_alloc returned.  */
    void *exact;	/* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;
/* Instrumentation.  */
extern size_t _chunks_used;
extern size_t _bytes_used;
extern size_t _chunks_free;
extern size_t _bytes_free;

/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern void *_malloc_internal (size_t);
extern void *_realloc_internal (void *, size_t);
extern void _free_internal (void *);
extern void *_malloc_internal_nolock (size_t);
extern void *_realloc_internal_nolock (void *, size_t);
extern void _free_internal_nolock (void *);
#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;
#define LOCK()					\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_lock (&_malloc_mutex);	\
  } while (0)
#define UNLOCK()				\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_unlock (&_malloc_mutex);	\
  } while (0)
#define LOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_aligned_blocks_mutex);	\
  } while (0)
#define UNLOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_aligned_blocks_mutex);	\
  } while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif
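
/* (Usage sketch, illustrative only: the public entry points wrap the
   _nolock variants in a LOCK/UNLOCK pair, e.g.

     LOCK ();
     result = _malloc_internal_nolock (size);
     UNLOCK ();

   Both macros are no-ops until malloc_enable_thread has set
   _malloc_thread_enabled_p.)  */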
/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object.  */
extern void *malloc_find_object_address (void *ptr);

/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern void *(*__morecore) (ptrdiff_t size);

/* Default value of `__morecore'.  */
extern void *__default_morecore (ptrdiff_t size);

/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) (void);

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
extern size_t __malloc_extra_blocks;

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize (void);

/* Hooks for debugging versions.  */
extern void (*__malloc_initialize_hook) (void);
extern void (*__free_hook) (void *ptr);
extern void *(*__malloc_hook) (size_t size);
extern void *(*__realloc_hook) (void *ptr, size_t size);
extern void *(*__memalign_hook) (size_t size, size_t alignment);
/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };
/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck (void (*abortfunc) (enum mcheck_status));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe (void *ptr);

/* Activate a standard collection of tracing hooks.  */
extern void mtrace (void);
extern void muntrace (void);
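
#if 0	/* Usage sketch, illustrative only (not compiled; the function
	   name and sizes are hypothetical).  mcheck must run before the
	   first call to malloc; mprobe then validates any live block
	   with the same checks free and realloc would perform.  */
#include <stdio.h>

int
mcheck_demo (void)
{
  char *p;
  if (mcheck (NULL) != 0)	/* NULL means use the default abort function.  */
    fprintf (stderr, "mcheck: malloc already in use\n");
  p = malloc (16);
  if (mprobe (p) == MCHECK_OK)
    puts ("block is consistent");
  free (p);
  return 0;
}
#endif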
/* Statistics available to the user.  */
struct mstats
  {
    size_t bytes_total;	/* Total size of the heap.  */
    size_t chunks_used;	/* Chunks allocated by the user.  */
    size_t bytes_used;	/* Byte total of user-allocated chunks.  */
    size_t chunks_free;	/* Chunks in the free list.  */
    size_t bytes_free;	/* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats (void);
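
#if 0	/* Usage sketch, illustrative only (not compiled; the function
	   name is hypothetical): dump the allocator counters exported
	   above via mstats.  */
#include <stdio.h>

void
print_mstats (void)
{
  struct mstats m = mstats ();
  printf ("heap: %zu bytes total, %zu used in %zu chunks, %zu free\n",
	  m.bytes_total, m.bytes_used, m.chunks_used, m.bytes_free);
}
#endif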
/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings (void *start, void (*warnfun) (const char *));

#ifdef __cplusplus
}
#endif
/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
		  Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#include <errno.h>

void *(*__morecore) (ptrdiff_t size) = __default_morecore;

/* Debugging hook for `malloc'.  */
void *(*__malloc_hook) (size_t size);
/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries.  */
static size_t heapsize;

/* Search index in the info table.  */
size_t _heapindex;

/* Limit of valid info table indices.  */
size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
size_t _chunks_used;
size_t _bytes_used;
size_t _chunks_free;
size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

size_t __malloc_extra_blocks;

void (*__malloc_initialize_hook) (void);
void (*__after_morecore_hook) (void);
#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static size_t last_state_size;
static malloc_info *last_heapinfo;

void
protect_malloc_state (int protect_p)
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo   = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
		    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
	abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif
/* Aligned allocation.  */
static void *
align (size_t size)
{
  void *result;
  ptrdiff_t adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows the
     ptrdiff_t type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if (PTRDIFF_MAX < size)
    result = 0;
  else
    result = (*__morecore) (size);
  adj = (uintptr_t) result % BLOCKSIZE;
  if (adj != 0)
    {
      adj = BLOCKSIZE - adj;
      (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}
/* Get SIZE bytes, if we can get them starting at END.
   Return the address of the space we got.
   If we cannot get space at END, fail and return 0.  */
static void *
get_contiguous_space (ptrdiff_t size, void *position)
{
  void *before;
  void *after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}
/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void
register_heapinfo (void)
{
  size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_object_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}
#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;

static void
malloc_atfork_handler_prepare (void)
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread (void)
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
		  malloc_atfork_handler_parent,
		  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif /* USE_PTHREAD */
static void
malloc_initialize_1 (void)
{
#ifdef GC_MCHECK
  mcheck (NULL);
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}
/* Set everything up and remember that we have.
   main will call malloc which calls this function.  That is before any
   threads or signal handlers have been set up, so we don't need thread
   protection.  */
int
__malloc_initialize (void)
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}

static int morecore_recursing;
/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static void *
morecore_nolock (size_t size)
{
  void *result;
  malloc_info *newinfo, *oldinfo;
  size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
	 added blocks in the table itself, as we hope to place them in
	 existing free space, which is already covered by part of the
	 existing table.  */
      newsize = heapsize;
      do
	newsize *= 2;
      while ((size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
	 from realloc in the case of growing a large block, because the
	 block being grown is momentarily marked as free.  In this case
	 _heaplimit is zero so we know not to reuse space for internal
	 allocation.  */
      if (_heaplimit != 0)
	{
	  /* First try to allocate the new info table in core we already
	     have, in the usual way using realloc.  If realloc cannot
	     extend it in place or relocate it to existing sufficient core,
	     we will get called again, and the code above will notice the
	     `morecore_recursing' flag and return null.  */
	  int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
	  morecore_recursing = 1;
	  newinfo = _realloc_internal_nolock (_heapinfo,
					      newsize * sizeof (malloc_info));
	  morecore_recursing = 0;
	  if (newinfo == NULL)
	    errno = save;
	  else
	    {
	      /* We found some space in core, and realloc has put the old
		 table's blocks on the free list.  Now zero the new part
		 of the table and install the new table location.  */
	      memset (&newinfo[heapsize], 0,
		      (newsize - heapsize) * sizeof (malloc_info));
	      _heapinfo = newinfo;
	      heapsize = newsize;
	      goto got_heap;
	    }
	}

      /* Allocate new space for the malloc info table.  */
      while (1)
	{
	  newinfo = align (newsize * sizeof (malloc_info));

	  /* Did it fail?  */
	  if (newinfo == NULL)
	    {
	      (*__morecore) (-size);
	      return NULL;
	    }

	  /* Is it big enough to record status for its own space?
	     If so, we win.  */
	  if ((size_t) BLOCK ((char *) newinfo
			      + newsize * sizeof (malloc_info))
	      < newsize)
	    break;

	  /* Must try again.  First give back most of what we just got.  */
	  (*__morecore) (- newsize * sizeof (malloc_info));
	  newsize *= 2;
	}

      /* Copy the old table to the beginning of the new,
	 and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
	      (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
	 it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}
/* Allocate memory from the heap.  */
void *
_malloc_internal_nolock (size_t size)
{
  void *result;
  size_t block, blocks, lastblocks, start;
  register size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size.  */
      register size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;
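      /* (Illustrative example: a request for 100 bytes becomes 99
	 after the decrement; 99 halves to zero in six further steps,
	 giving log == 7 and a 128-byte fragment, the smallest power
	 of two that holds the request.)  */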
      /* Look in the fragment lists for a
	 free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters.  */
	  result = next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    _heapinfo[block].busy.info.frag.first =
	      (uintptr_t) next->next % BLOCKSIZE >> log;

	  /* Update the statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
	  result = _malloc_internal_nolock (BLOCKSIZE);
	  PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
	  result = _malloc_internal_nolock (BLOCKSIZE);
#else
	  result = malloc (BLOCKSIZE);
#endif
	  if (result == NULL)
	    {
	      PROTECT_MALLOC_STATE (1);
	      goto out;
	    }

	  /* Link all fragments but the first into the free list.  */
	  next = (struct list *) ((char *) result + (1 << log));
	  next->next = NULL;
	  next->prev = &_fraghead[log];
	  _fraghead[log].next = next;

	  for (i = 2; i < (size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      next->next->prev = next;
	    }

	  /* Initialize the nfree and first counters for this block.  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Get a little extra.  */
	      size_t wantblocks = blocks + __malloc_extra_blocks;
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      /* Check to see if the new core will be contiguous with the
		 final free block; if so we don't need to get as much.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  /* We can't do this if we will have to make the heap info
		     table bigger to accommodate the new space.  */
		  block + wantblocks <= heapsize &&
		  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
					ADDRESS (block + lastblocks)))
		{
		  /* We got it contiguously.  Which block we are extending
		     (the `final free block' referred to above) might have
		     changed, if it got combined with a freed info table.  */
		  block = _heapinfo[0].free.prev;
		  _heapinfo[block].free.size += (wantblocks - lastblocks);
		  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
		  _heaplimit += wantblocks - lastblocks;
		  continue;
		}
	      result = morecore_nolock (wantblocks * BLOCKSIZE);
	      if (result == NULL)
		goto out;
	      block = BLOCK (result);
	      /* Put the new block at the end of the free list.  */
	      _heapinfo[block].free.size = wantblocks;
	      _heapinfo[block].free.prev = _heapinfo[0].free.prev;
	      _heapinfo[block].free.next = 0;
	      _heapinfo[0].free.prev = block;
	      _heapinfo[_heapinfo[block].free.prev].free.next = block;
	      ++_chunks_free;
	      /* Now loop to use some of that block for this allocation.  */
	    }
	}

      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list.  */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list.  */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
	 first with a negative number so you can find the first block by
	 adding that adjustment.  */
      while (--blocks > 0)
	_heapinfo[block + blocks].busy.info.size = -blocks;
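      /* (Illustrative example: a 3-block object starting at block B is
	 recorded as size 3 at B, -1 at B + 1, and -2 at B + 2, so
	 adding busy.info.size to any interior block index recovers B.)  */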
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}
void *
_malloc_internal (size_t size)
{
  void *result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}

void *
malloc (size_t size)
{
  void *(*hook) (size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of __malloc_hook to an automatic variable in case
     __malloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not the right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to __malloc_hook) for now.  */
  hook = __malloc_hook;
  return (hook != NULL ? *hook : _malloc_internal) (size);
}
#ifndef _LIBC

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

extern void *_malloc (size_t);
extern void _free (void *);
extern void *_realloc (void *, size_t);

void *
_malloc (size_t size)
{
  return malloc (size);
}

void
_free (void *ptr)
{
  free (ptr);
}

void *
_realloc (void *ptr, size_t size)
{
  return realloc (ptr, size);
}

#endif
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
		  Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

/* Debugging hook for free.  */
void (*__free_hook) (void *__ptr);

/* List of blocks allocated by aligned_alloc.  */
struct alignlist *_aligned_blocks = NULL;
/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (void *ptr)
{
  int type;
  size_t block, blocks;
  register size_t i;
  struct list *prev, *next;
  void *curbrk;
  const size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

  PROTECT_MALLOC_STATE (0);

  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}

      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

	  size_t info_block = BLOCK (_heapinfo);
	  size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  size_t prev_block = _heapinfo[block].free.prev;
	  size_t prev_blocks = _heapinfo[prev_block].free.size;
	  size_t next_block = _heapinfo[block].free.next;
	  size_t next_blocks = _heapinfo[next_block].free.size;

	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold)))
	      )
	    {
	      malloc_info *newinfo;
	      size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal_nolock (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      newinfo = _malloc_internal_nolock (info_blocks * BLOCKSIZE);
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }

	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      (*__morecore) (-bytes);
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
	  _free_internal_nolock (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block.  */
	  next = ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block.  */
	  prev = ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first =
	    (uintptr_t) ptr % BLOCKSIZE >> type;
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
}
/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (void *ptr)
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}

/* Return memory to the heap.  */

void
free (void *ptr)
{
  void (*hook) (void *) = __free_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}

/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (void *ptr)
{
  free (ptr);
}
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
		     Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef min
#define min(A, B) ((A) < (B) ? (A) : (B))
#endif

/* Debugging hook for realloc.  */
void *(*__realloc_hook) (void *ptr, size_t size);
/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.  */
void *
_realloc_internal_nolock (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
	{
	  result = _malloc_internal_nolock (size);
	  if (result != NULL)
	    {
	      memcpy (result, ptr, size);
	      _free_internal_nolock (ptr);
	      goto out;
	    }
	}

      /* The new size is a large allocation as well;
	 see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
	{
	  /* The new size is smaller; return
	     excess memory to the free list.  */
	  _heapinfo[block + blocks].busy.type = 0;
	  _heapinfo[block + blocks].busy.info.size
	    = _heapinfo[block].busy.info.size - blocks;
	  _heapinfo[block].busy.info.size = blocks;
	  /* We have just created a new chunk by splitting a chunk in two.
	     Now we will free this chunk; increment the statistics counter
	     so it doesn't become wrong when _free_internal decrements it.  */
	  ++_chunks_used;
	  _free_internal_nolock (ADDRESS (block + blocks));
	  result = ptr;
	}
      else if (blocks == _heapinfo[block].busy.info.size)
	/* No size change necessary.  */
	result = ptr;
      else
	{
	  /* Won't fit, so allocate a new region that will.
	     Free the old region first in case there is sufficient
	     adjacent free space to grow without moving.  */
	  blocks = _heapinfo[block].busy.info.size;
	  /* Prevent free from actually returning memory to the system.  */
	  oldlimit = _heaplimit;
	  _heaplimit = 0;
	  _free_internal_nolock (ptr);
	  result = _malloc_internal_nolock (size);
	  PROTECT_MALLOC_STATE (0);
	  if (_heaplimit == 0)
	    _heaplimit = oldlimit;
	  if (result == NULL)
	    {
	      /* Now we're really in trouble.  We have to unfree
		 the thing we just freed.  Unfortunately it might
		 have been coalesced with its neighbors.  */
	      if (_heapindex == block)
		(void) _malloc_internal_nolock (blocks * BLOCKSIZE);
	      else
		{
		  void *previous
		    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
		  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
		  _free_internal_nolock (previous);
		}
	      goto out;
	    }
	  if (ptr != result)
	    memmove (result, ptr, blocks * BLOCKSIZE);
	}
      break;

    default:
      /* Old size is a fragment; type is logarithm
	 to base two of the fragment size.  */
      if (size > (size_t) (1 << (type - 1)) &&
	  size <= (size_t) (1 << type))
	/* The new size is the same kind of fragment.  */
	result = ptr;
      else
	{
	  /* The new size is different; allocate a new space,
	     and copy the lesser of the new size and the old.  */
	  result = _malloc_internal_nolock (size);
	  if (result == NULL)
	    goto out;
	  memcpy (result, ptr, min (size, (size_t) 1 << type));
	  _free_internal_nolock (ptr);
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}
void *
_realloc_internal (void *ptr, size_t size)
{
  void *result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}

void *
realloc (void *ptr, size_t size)
{
  void *(*hook) (void *, size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  hook = __realloc_hook;
  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
void *
calloc (size_t nmemb, size_t size)
{
  void *result;
  size_t bytes = nmemb * size;

  if (size != 0 && bytes / size != nmemb)
    {
      errno = ENOMEM;
      return NULL;
    }

  result = malloc (bytes);
  if (result)
    return memset (result, 0, bytes);
  return result;
}
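
/* (Illustrative example of why the overflow check above is needed:
   with 32-bit size_t, nmemb == 65537 and size == 65537 make
   nmemb * size wrap around to 131073, so bytes / size == 1 != nmemb
   and calloc correctly fails with ENOMEM instead of returning a block
   far smaller than the caller expects.)  */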
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
This file is part of the GNU C Library.

The GNU C Library is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with the GNU C Library.  If not, see <http://www.gnu.org/licenses/>.  */

/* uClibc defines __GNU_LIBRARY__, but it is not completely
   compatible.  */
#if !defined (__GNU_LIBRARY__) || defined (__UCLIBC__)
#define	__sbrk	sbrk
#else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

extern void *__sbrk (ptrdiff_t increment);
#endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
void *
__default_morecore (ptrdiff_t increment)
{
  void *result;
#if defined (CYGWIN)
  if (!DUMPED)
    {
      return bss_sbrk (increment);
    }
#endif
  result = (void *) __sbrk (increment);
  if (result == (void *) -1)
    return NULL;
  return result;
}
/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.  */

void *(*__memalign_hook) (size_t size, size_t alignment);
void *
aligned_alloc (size_t alignment, size_t size)
{
  void *result;
  size_t adj, lastadj;
  void *(*hook) (size_t, size_t) = __memalign_hook;

  if (hook)
    return (*hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  if (- size < alignment)
    {
      errno = ENOMEM;
      return NULL;
    }
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = alignment - (uintptr_t) result % alignment;
  if (adj == alignment)
    adj = 0;
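
  /* (Illustrative example: for alignment 64, a result whose address is
     48 bytes past a 64-byte boundary gives adj == 64 - 48 == 16, so
     the returned pointer must be moved up 16 bytes; an already aligned
     result gives adj == 64, which the test above resets to 0.)  */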
  if (adj != alignment - 1)
    {
      do
	{
	  /* Reallocate the block with only as much excess as it
	     needs.  */
	  free (result);
	  result = malloc (size + adj);
	  if (result == NULL)	/* Impossible unless interrupted.  */
	    return NULL;

	  lastadj = adj;
	  adj = alignment - (uintptr_t) result % alignment;
	  if (adj == alignment)
	    adj = 0;
	  /* It's conceivable we might have been so unlucky as to get
	     a different block with weaker alignment.  If so, this
	     block is too short to contain SIZE after alignment
	     correction.  So we must try again and get another block,
	     slightly larger.  */
	} while (adj > lastadj);
    }

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  l = malloc (sizeof *l);
	  if (l != NULL)
	    {
	      l->next = _aligned_blocks;
	      _aligned_blocks = l;
	    }
	}
      if (l != NULL)
	{
	  l->exact = result;
	  result = l->aligned = (char *) result + adj;
	}
      UNLOCK_ALIGNED_BLOCKS ();
      if (l == NULL)
	{
	  free (result);
	  result = NULL;
	}
    }

  return result;
}
/* An obsolete alias for aligned_alloc, for any old libraries that use
   this alias.  */

void *
memalign (size_t alignment, size_t size)
{
  return aligned_alloc (alignment, size);
}

/* If HYBRID_MALLOC is defined, we may want to use the system
   posix_memalign below.  */
#ifndef HYBRID_MALLOC
int
posix_memalign (void **memptr, size_t alignment, size_t size)
{
  void *mem;

  if (alignment == 0
      || alignment % sizeof (void *) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = aligned_alloc (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;

  return 0;
}
#endif
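
#if 0	/* Usage sketch, illustrative only (not compiled; the function
	   name and sizes are hypothetical): the POSIX interface to the
	   aligned allocator defined above.  */
#include <stdio.h>

void
posix_memalign_demo (void)
{
  void *p;
  /* 64 satisfies the checks above: a power of two that is a multiple
     of sizeof (void *).  */
  int err = posix_memalign (&p, 64, 1000);
  if (err == 0)
    {
      printf ("aligned block at %p\n", p);
      free (p);
    }
}
#endif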
/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
/* Allocate SIZE bytes on a page boundary.  */
extern void *valloc (size_t);

#if defined _SC_PAGESIZE || !defined HAVE_GETPAGESIZE
# include "getpagesize.h"
#elif !defined getpagesize
extern int getpagesize (void);
#endif

static size_t pagesize;

void *
valloc (size_t size)
{
  if (pagesize == 0)
    pagesize = getpagesize ();

  return aligned_alloc (pagesize, size);
}
#ifdef HYBRID_MALLOC
#undef malloc
#undef realloc
#undef calloc
#undef aligned_alloc
#undef free

/* See the comments near the beginning of this file for explanations
   of the following functions.  */

void *
hybrid_malloc (size_t size)
{
  if (DUMPED)
    return malloc (size);
  return gmalloc (size);
}

void *
hybrid_calloc (size_t nmemb, size_t size)
{
  if (DUMPED)
    return calloc (nmemb, size);
  return gcalloc (nmemb, size);
}

void
hybrid_free (void *ptr)
{
  if (!DUMPED)
    gfree (ptr);
  else if (!ALLOCATED_BEFORE_DUMPING (ptr))
    free (ptr);
  /* Otherwise the dumped emacs is trying to free something allocated
     before dumping; do nothing.  */
  return;
}

#if defined HAVE_ALIGNED_ALLOC || defined HAVE_POSIX_MEMALIGN
void *
hybrid_aligned_alloc (size_t alignment, size_t size)
{
  if (!DUMPED)
    return galigned_alloc (alignment, size);
  /* The following is copied from alloc.c  */
#ifdef HAVE_ALIGNED_ALLOC
  return aligned_alloc (alignment, size);
#else  /* HAVE_POSIX_MEMALIGN */
  void *p;
  return posix_memalign (&p, alignment, size) == 0 ? p : 0;
#endif
}
#endif

void *
hybrid_realloc (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, oldsize;

  if (!DUMPED)
    return grealloc (ptr, size);
  if (!ALLOCATED_BEFORE_DUMPING (ptr))
    return realloc (ptr, size);

  /* The dumped emacs is trying to realloc storage allocated before
     dumping.  We just malloc new space and copy the data.  */
  if (size == 0 || ptr == NULL)
    return malloc (size);
  block = ((char *) ptr - _heapbase) / BLOCKSIZE + 1;
  type = _heapinfo[block].busy.type;
  oldsize =
    type == 0 ? _heapinfo[block].busy.info.size * BLOCKSIZE
    : (size_t) 1 << type;
  result = malloc (size);
  if (result)
    return memcpy (result, ptr, min (oldsize, size));
  return result;
}

#ifdef HYBRID_GET_CURRENT_DIR_NAME
/* Defined in sysdep.c.  */
char *gget_current_dir_name (void);

char *
hybrid_get_current_dir_name (void)
{
  if (DUMPED)
    return get_current_dir_name ();
  return gget_current_dir_name ();
}
#endif

#endif	/* HYBRID_MALLOC */
#ifdef GC_MCHECK

/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#include <stdio.h>
/* Old hook values.  */
static void (*old_free_hook) (void *ptr);
static void *(*old_malloc_hook) (size_t size);
static void *(*old_realloc_hook) (void *ptr, size_t size);

/* Function to call when something awful happens.  */
static void (*abortfunc) (enum mcheck_status);

/* Arbitrary magical numbers.  */
#define MAGICWORD	(SIZE_MAX / 11 ^ SIZE_MAX / 13 << 3)
#define MAGICFREE	(SIZE_MAX / 17 ^ SIZE_MAX / 19 << 4)
#define MAGICBYTE	((char) 0xd7)
#define MALLOCFLOOD	((char) 0x93)
#define FREEFLOOD	((char) 0x95)
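
/* (Illustrative layout: each checked allocation is laid out as
   [struct hdr][SIZE user bytes][MAGICBYTE]; mallochook below requests
   sizeof (struct hdr) + size + 1 bytes, and checkhdr verifies both the
   magic word in the header and the trailing MAGICBYTE.)  */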
struct hdr
  {
    size_t size;	/* Exact size requested by user.  */
    size_t magic;	/* Magic number to check header integrity.  */
  };

static enum mcheck_status
checkhdr (const struct hdr *hdr)
{
  enum mcheck_status status;
  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
	status = MCHECK_TAIL;
      else
	status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}
static void
freehook (void *ptr)
{
  struct hdr *hdr;

  if (ptr)
    {
      struct alignlist *l;

      /* If the block was allocated by aligned_alloc, its real pointer
	 to free is recorded in _aligned_blocks; find that.  */
      PROTECT_MALLOC_STATE (0);
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == ptr)
	  {
	    l->aligned = NULL;	/* Mark the slot in the list as free.  */
	    ptr = l->exact;
	    break;
	  }
      UNLOCK_ALIGNED_BLOCKS ();
      PROTECT_MALLOC_STATE (1);

      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      memset (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}
static void *
mallochook (size_t size)
{
  struct hdr *hdr;

  __malloc_hook = old_malloc_hook;
  hdr = malloc (sizeof *hdr + size + 1);
  __malloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  return memset (hdr + 1, MALLOCFLOOD, size);
}
static void *
reallochook (void *ptr, size_t size)
{
  struct hdr *hdr = NULL;
  size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
	memset ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = realloc (hdr, sizeof *hdr + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    memset ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return hdr + 1;
}
static void
mabort (enum mcheck_status status)
{
  const char *msg;
  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
# ifdef emacs
  emacs_abort ();
# else
  abort ();
# endif
#endif
}
static int mcheck_used = 0;

int
mcheck (void (*func) (enum mcheck_status))
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = __free_hook;
      __free_hook = freehook;
      old_malloc_hook = __malloc_hook;
      __malloc_hook = mallochook;
      old_realloc_hook = __realloc_hook;
      __realloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}
enum mcheck_status
mprobe (void *ptr)
{
  return mcheck_used ? checkhdr (ptr) : MCHECK_DISABLED;
}

#endif /* GC_MCHECK */