/*
 * libc/stdlib/malloc/malloc.c -- malloc function
 *
 * Copyright (C) 2002,03 NEC Electronics Corporation
 * Copyright (C) 2002,03 Miles Bader <miles@gnu.org>
 *
 * This file is subject to the terms and conditions of the GNU Lesser
 * General Public License.  See the file COPYING.LIB in the main
 * directory of this archive for more details.
 *
 * Written by Miles Bader <miles@gnu.org>
 */
24 /* The malloc heap. We provide a bit of initial static space so that
25 programs can do a little mallocing without mmaping in more space. */
26 HEAP_DECLARE_STATIC_FREE_AREA (initial_fa
, 256);
27 struct heap_free_area
*__malloc_heap
= HEAP_INIT_WITH_FA (initial_fa
);
28 #ifdef HEAP_USE_LOCKING
29 __UCLIBC_MUTEX_INIT(__malloc_heap_lock
,PTHREAD_MUTEX_INITIALIZER
);
32 #if defined(MALLOC_USE_LOCKING) && defined(MALLOC_USE_SBRK)
33 /* A lock protecting our use of sbrk. */
34 __UCLIBC_MUTEX(__malloc_sbrk_lock
);
35 #endif /* MALLOC_USE_LOCKING && MALLOC_USE_SBRK */
#ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
/* A list of all malloc_mmb structures describing blocks that
   malloc has mmapped, ordered by the block address.  */
struct malloc_mmb *__malloc_mmapped_blocks = 0;

/* A heap used for allocating malloc_mmb structures.  We could allocate
   them from the main heap, but that tends to cause heap fragmentation in
   annoying ways.  */
HEAP_DECLARE_STATIC_FREE_AREA (initial_mmb_fa, 48); /* enough for 3 mmbs */
struct heap_free_area *__malloc_mmb_heap = HEAP_INIT_WITH_FA (initial_mmb_fa);
#ifdef HEAP_USE_LOCKING
/* Recursive, because __malloc_from_heap re-enters malloc_from_heap on
   the mmb heap while already holding this lock.  */
__UCLIBC_MUTEX_INIT(__malloc_mmb_heap_lock,PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP);
#endif
#endif /* __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
/* Forward the lock argument to __malloc_from_heap only when locking is
   compiled in; otherwise it is silently discarded.  */
#ifdef HEAP_USE_LOCKING
#define malloc_from_heap(size, heap, lck) __malloc_from_heap(size, heap, lck)
#else
#define malloc_from_heap(size, heap, lck) __malloc_from_heap(size, heap)
#endif
60 __malloc_from_heap (size_t size
, struct heap_free_area
**heap
61 #ifdef HEAP_USE_LOCKING
62 , __UCLIBC_MUTEX_TYPE
*heap_lock
68 MALLOC_DEBUG (1, "malloc: %d bytes", size
);
70 /* Include extra space to record the size of the allocated block. */
71 size
+= MALLOC_HEADER_SIZE
;
73 __heap_lock (heap_lock
);
75 /* First try to get memory that's already in our heap. */
76 mem
= __heap_alloc (heap
, &size
);
78 __heap_unlock (heap_lock
);
81 /* We couldn't allocate from the heap, so grab some more
82 from the system, add it to the heap, and try again. */
84 /* If we're trying to allocate a block bigger than the default
85 MALLOC_HEAP_EXTEND_SIZE, make sure we get enough to hold it. */
88 = (size
< MALLOC_HEAP_EXTEND_SIZE
89 ? MALLOC_HEAP_EXTEND_SIZE
90 : MALLOC_ROUND_UP_TO_PAGE_SIZE (size
));
92 /* Allocate the new heap block. */
93 #ifdef MALLOC_USE_SBRK
95 __malloc_lock_sbrk ();
97 /* Use sbrk we can, as it's faster than mmap, and guarantees
98 contiguous allocation. */
99 block
= sbrk (block_size
);
100 if (likely (block
!= (void *)-1))
102 /* Because sbrk can return results of arbitrary
103 alignment, align the result to a MALLOC_ALIGNMENT boundary. */
104 long aligned_block
= MALLOC_ROUND_UP ((long)block
, MALLOC_ALIGNMENT
);
105 if (block
!= (void *)aligned_block
)
106 /* Have to adjust. We should only have to actually do this
107 the first time (after which we will have aligned the brk
110 /* Move the brk to reflect the alignment; our next allocation
111 should start on exactly the right alignment. */
112 sbrk (aligned_block
- (long)block
);
113 block
= (void *)aligned_block
;
117 __malloc_unlock_sbrk ();
119 #else /* !MALLOC_USE_SBRK */
121 /* Otherwise, use mmap. */
122 #ifdef __ARCH_USE_MMU__
123 block
= mmap ((void *)0, block_size
, PROT_READ
| PROT_WRITE
,
124 MAP_PRIVATE
| MAP_ANONYMOUS
, 0, 0);
126 block
= mmap ((void *)0, block_size
, PROT_READ
| PROT_WRITE
,
127 MAP_SHARED
| MAP_ANONYMOUS
| MAP_UNINITIALIZED
, 0, 0);
130 #endif /* MALLOC_USE_SBRK */
132 if (likely (block
!= (void *)-1))
134 #if !defined(MALLOC_USE_SBRK) && defined(__UCLIBC_UCLINUX_BROKEN_MUNMAP__)
135 struct malloc_mmb
*mmb
, *prev_mmb
, *new_mmb
;
138 MALLOC_DEBUG (1, "adding system memory to heap: 0x%lx - 0x%lx (%d bytes)",
139 (long)block
, (long)block
+ block_size
, block_size
);
141 /* Get back the heap lock. */
142 __heap_lock (heap_lock
);
144 /* Put BLOCK into the heap. */
145 __heap_free (heap
, block
, block_size
);
147 MALLOC_DEBUG_INDENT (-1);
149 /* Try again to allocate. */
150 mem
= __heap_alloc (heap
, &size
);
153 #if !defined(MALLOC_USE_SBRK) && defined(__UCLIBC_UCLINUX_BROKEN_MUNMAP__)
154 /* Insert a record of BLOCK in sorted order into the
155 __malloc_mmapped_blocks list. */
157 new_mmb
= malloc_from_heap (sizeof *new_mmb
, &__malloc_mmb_heap
, &__malloc_mmb_heap_lock
);
159 for (prev_mmb
= 0, mmb
= __malloc_mmapped_blocks
;
161 prev_mmb
= mmb
, mmb
= mmb
->next
)
162 if (block
< mmb
->mem
)
166 new_mmb
->mem
= block
;
167 new_mmb
->size
= block_size
;
170 prev_mmb
->next
= new_mmb
;
172 __malloc_mmapped_blocks
= new_mmb
;
174 MALLOC_MMB_DEBUG (0, "new mmb at 0x%x: 0x%x[%d]",
176 (unsigned)new_mmb
->mem
, block_size
);
177 #endif /* !MALLOC_USE_SBRK && __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
178 __heap_unlock (heap_lock
);
183 /* Record the size of the block and get the user address. */
185 mem
= MALLOC_SETUP (mem
, size
);
187 MALLOC_DEBUG (-1, "malloc: returning 0x%lx (base:0x%lx, total_size:%ld)",
188 (long)mem
, (long)MALLOC_BASE(mem
), (long)MALLOC_SIZE(mem
));
191 MALLOC_DEBUG (-1, "malloc: returning 0");
/* NOTE(review): the fragments below belong to the public malloc()
   entry point.  Its signature precedes this span and its tail
   (oom handling, return) follows it, so only comments are added
   here — the code text is left untouched.  */
200 #ifdef MALLOC_DEBUGGING
/* One-time lazy initialization of malloc's debugging machinery.  */
201 static smallint debugging_initialized
;
202 if (! debugging_initialized
)
204 debugging_initialized
= 1;
205 __malloc_debug_init ();
/* Sanity-check the whole heap before servicing the request.  */
208 __heap_check (__malloc_heap
, "malloc");
/* Zero-size requests are special-cased (handling continues outside
   this visible span).  */
211 if (unlikely (size
== 0))
214 /* Check if they are doing something dumb like malloc(-1) */
/* Rejects sizes so large that adding MALLOC_HEADER_SIZE would wrap.  */
215 if (unlikely(((unsigned long)size
> (unsigned long)(MALLOC_HEADER_SIZE
*-2))))
/* Delegate the real work to the main heap (with its lock).  */
218 mem
= malloc_from_heap (size
, &__malloc_heap
, &__malloc_heap_lock
);
/* Allocation failed: report out-of-memory per ISO C / POSIX.  */
222 __set_errno (ENOMEM
);