#include "hurdmalloc.h"		/* XXX see that file */

#define vm_allocate __vm_allocate
#define vm_page_size __vm_page_size
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 *
 * Revision 1.15 2001/09/19 03:04:09 drepper
 * (realloc): Replace bcopy with memcpy.
 *
 * Revision 1.14 2001/04/01 05:03:14 roland
 * 2001-03-11  Roland McGrath  <roland@frob.com>
 *
 * 	* mach/mach_error.h: Fix ancient #endif syntax.
 * 	* hurd/hurdmalloc.c: Likewise.
 *
 * Revision 1.13 1996/12/20 01:32:01 drepper
 * Update from main archive 961219
 *
 * Revision 1.12 1996/11/15 19:44:13 thomas
 * Tue Nov 12 16:58:41 1996  Thomas Bushnell, n/BSG  <thomas@gnu.ai.mit.edu>
 *
 * 	* mach/msg-destroy.c (mach_msg_destroy_port,
 * 	mach_msg_destroy_memory): Use prototype syntax.
 * 	* hurd/hurdmalloc.c (more_memory, malloc_fork_prepare,
 * 	malloc_fork_parent, malloc_fork_child): Likewise.
 *
 * Revision 1.11 1996/06/06 15:13:47 miles
 * Changes to bring in line with the hurd libthreads/malloc.c:
 * (more_memory): Use assert_perror instead of MACH_CALL.
 * "cthread_internals.h": Include removed.
 * (realloc): Use LOG2_MIN_SIZE.
 * (LOG2_MIN_SIZE): New macro.
 * (realloc): Don't bother allocating a new block if the
 *   new size request fits in the old one and doesn't waste any space.
 *   Only free the old block if we successfully got a new one.
 * [MCHECK] (struct header): New type.
 * (union header): Only define if !MCHECK.
 * (HEADER_SIZE, HEADER_NEXT, HEADER_FREE, HEADER_CHECK): New macros.
 * [MCHECK] (MIN_SIZE): Add correct definition for this case.
 * (more_memory, malloc, free, realloc): Use above macros, and add
 *   appropriate checks & frobs in MCHECK case.
 *
 * Revision 1.6 1996/03/07 21:13:08 miles
 * Don't bother allocating a new block if the new size request fits in the old
 *   one and doesn't waste any space.
 * Only free the old block if we successfully got a new one.
 * (LOG2_MIN_SIZE): New macro.
 *
 * Revision 1.5 1996/03/06 23:51:04 miles
 * [MCHECK] (struct header): New type.
 * (union header): Only define if !MCHECK.
 * (HEADER_SIZE, HEADER_NEXT, HEADER_FREE, HEADER_CHECK): New macros.
 * [MCHECK] (MIN_SIZE): Add correct definition for this case.
 * (more_memory, malloc, free, realloc):
 *   Use above macros, and add appropriate checks & frobs in MCHECK case.
 *
 * Revision 1.4 1994/05/05 11:21:42 roland
 *
 * Revision 2.7 91/05/14 17:57:34 mrt
 * 	Correcting copyright
 *
 * Revision 2.6 91/02/14 14:20:26 mrt
 * 	Added new Mach copyright
 * 	[91/02/13 12:41:21 mrt]
 *
 * Revision 2.5 90/11/05 14:37:33 rpd
 * 	Added malloc_fork* code.
 *
 * Revision 2.4 90/08/07 14:31:28 rpd
 * 	Removed RCS keyword nonsense.
 *
 * Revision 2.3 90/06/02 15:14:00 rpd
 * 	Converted to new IPC.
 * 	[90/03/20 20:56:57 rpd]
 *
 * Revision 2.2 89/12/08 19:53:59 rwd
 * 	Removed conditionals.
 *
 * Revision 2.1 89/08/03 17:09:46 rwd
 *
 * 13-Sep-88  Eric Cooper (ecc) at Carnegie Mellon University
 * 	Changed realloc() to copy min(old size, new size) bytes.
 * 	Bug found by Mike Kupfer at Olivetti.
 *
 * Author: Eric Cooper, Carnegie Mellon University
 *
 * Memory allocator for use with multiple threads.

#include <cthreads.h>

/*
 * Structure of memory block header.
 * When free, next points to next block on free list.
 * When allocated, fl points to free list.
 * Size of header is 4 bytes, so minimum usable block size is 8 bytes.
 */
#ifdef MCHECK
#define CHECK_BUSY  0x8a3c743e
#define CHECK_FREE  0x66688b92

typedef struct header
  {
    union
      {
        struct header *next;    /* next free block when on a free list */
        struct free_list *fl;   /* owning free list when allocated */
      } u;
    long check;                 /* CHECK_FREE or CHECK_BUSY */
  } *header_t;

#define HEADER_SIZE sizeof (struct header)
#define HEADER_NEXT(h) ((h)->u.next)
#define HEADER_FREE(h) ((h)->u.fl)
#define HEADER_CHECK(h) ((h)->check)

#define MIN_SIZE 16     /* minimum block size (2 ^ LOG2_MIN_SIZE) */
#define LOG2_MIN_SIZE 4

#else /* ! MCHECK */

typedef union header
  {
    union header *next;         /* next free block when on a free list */
    struct free_list *fl;       /* owning free list when allocated */
  } *header_t;

#define HEADER_SIZE sizeof (union header)
#define HEADER_NEXT(h) ((h)->next)
#define HEADER_FREE(h) ((h)->fl)

#define MIN_SIZE 8      /* minimum block size */
#define LOG2_MIN_SIZE 3

#endif /* MCHECK */

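/*
 * Illustrative sketch (editorial, not part of the original code): a block
 * handed out by this allocator looks like
 *
 *   +--------------------------+----------------------------------+
 *   | header (next/fl [+check])| payload returned to malloc caller|
 *   +--------------------------+----------------------------------+
 *   ^                          ^
 *   h                          ((char *) h) + HEADER_SIZE
 *
 * malloc returns the address just past the header; free and realloc
 * recover the header again by subtracting HEADER_SIZE from the pointer
 * they are given.
 */
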
typedef struct free_list
  {
    spin_lock_t lock;   /* spin lock for mutual exclusion */
    header_t head;      /* head of free list for this size */
    int in_use;         /* # mallocs - # frees */
  } *free_list_t;

/*
 * Free list with index i contains blocks of size 2 ^ (i + LOG2_MIN_SIZE)
 * including header.  Smallest block size is MIN_SIZE, with MIN_SIZE -
 * HEADER_SIZE bytes available to user.  Size argument to malloc is a signed
 * integer for sanity checking, so largest block size is 2^31.
 */

#define NBUCKETS 29     /* enough buckets for blocks up to 2^31 bytes */

static struct free_list malloc_free_list[NBUCKETS];
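
/*
 * Worked example (editorial sketch, not part of the original allocator):
 * with LOG2_MIN_SIZE == 3 and a 4-byte header, a request for 100 bytes
 * needs 104 bytes including the header, so it is served from the bucket
 * whose blocks are 128 == 2 ^ (4 + LOG2_MIN_SIZE) bytes, i.e. i == 4.
 * The hypothetical helper below mirrors the power-of-two search that
 * malloc performs inline; it is illustration only and never compiled.
 */
#if 0
static int
example_bucket_for_size (vm_size_t size)
{
  int i;

  /* Find the smallest i such that a block of 2 ^ (i + LOG2_MIN_SIZE)
     bytes can hold SIZE bytes of payload plus the block header.  */
  for (i = 0; (1 << (i + LOG2_MIN_SIZE)) - HEADER_SIZE < size; i++)
    ;
  return i;
}
#endif
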
/* Initialization just sets everything to zero, but might be necessary on a
   machine where spin_lock_init does otherwise, and is necessary when
   running an executable that was written by something like Emacs's unexec.
   It preserves the values of data variables like malloc_free_list, but
   does not save the vm_allocate'd space allocated by this malloc.  */
  for (i = 0; i < NBUCKETS; ++i)
    {
      spin_lock_init (&malloc_free_list[i].lock);
      malloc_free_list[i].head = NULL;
      malloc_free_list[i].in_use = 0;
    }

/* This not only suppresses a `defined but not used' warning,
   but it is ABSOLUTELY NECESSARY to keep the hyperclever
   compiler from "optimizing out" the entire function!  */
more_memory(int size, free_list_t fl)
  if (size <= vm_page_size) {
    amount = vm_page_size;
    n = vm_page_size / size;
    /* We lose vm_page_size - n*size bytes here.  */
  r = vm_allocate(mach_task_self(), &where, (vm_size_t) amount, TRUE);
  h = (header_t) where;
  HEADER_NEXT (h) = fl->head;
  HEADER_CHECK (h) = CHECK_FREE;
  h = (header_t) ((char *) h + size);

/* Declaration changed to standard one for GNU.  */
  register size_t size;
  register free_list_t fl;
  if ((int) size < 0)		/* sanity check */
  /*
   * Find smallest power-of-two block size
   * big enough to hold requested size plus header.
   */
  ASSERT(i < NBUCKETS);
  fl = &malloc_free_list[i];
  spin_lock(&fl->lock);
  /*
   * Free list is empty;
   * allocate more blocks.
   */
  spin_unlock(&fl->lock);
  /*
   * Pop block from free list.
   */
  fl->head = HEADER_NEXT (h);
  assert (HEADER_CHECK (h) == CHECK_FREE);
  HEADER_CHECK (h) = CHECK_BUSY;
  spin_unlock(&fl->lock);
  /*
   * Store free list pointer in block header
   * so we can figure out where it goes.
   */
  HEADER_FREE (h) = fl;
  /*
   * Return pointer past the block header.
   */
  return ((char *) h) + HEADER_SIZE;

/* Declaration changed to standard one for GNU.  */
  register free_list_t fl;
  /*
   * Find free list for block.
   */
  h = (header_t) (base - HEADER_SIZE);
  assert (HEADER_CHECK (h) == CHECK_BUSY);
  fl = HEADER_FREE (h);
  i = fl - malloc_free_list;
  if (i < 0 || i >= NBUCKETS) {
    ASSERT(0 <= i && i < NBUCKETS);
  if (fl != &malloc_free_list[i]) {
    ASSERT(fl == &malloc_free_list[i]);
  /*
   * Push block on free list.
   */
  spin_lock(&fl->lock);
  HEADER_NEXT (h) = fl->head;
  HEADER_CHECK (h) = CHECK_FREE;
  spin_unlock(&fl->lock);

/* Declaration changed to standard one for GNU.  */
realloc(old_base, new_size)
  register free_list_t fl;
  unsigned int old_size;

  if (old_base == 0)
    return malloc (new_size);
  /*
   * Find size of old block.
   */
  h = (header_t) (old_base - HEADER_SIZE);
  assert (HEADER_CHECK (h) == CHECK_BUSY);
  fl = HEADER_FREE (h);
  i = fl - malloc_free_list;
  if (i < 0 || i >= NBUCKETS) {
    ASSERT(0 <= i && i < NBUCKETS);
  if (fl != &malloc_free_list[i]) {
    ASSERT(fl == &malloc_free_list[i]);
  /*
   * Free list with index i contains blocks of size
   * 2 ^ (i + LOG2_MIN_SIZE) including header.
   */
  old_size = (1 << (i + LOG2_MIN_SIZE)) - HEADER_SIZE;
  if (new_size <= old_size
      && new_size > (((old_size + HEADER_SIZE) >> 1) - HEADER_SIZE))
    /* The new size still fits in the same block, and wouldn't fit in
       the next smaller block!  */
    return old_base;
  /*
   * Allocate new block, copy old bytes, and free old block.
   */
  new_base = malloc(new_size);
  memcpy (new_base, old_base,
          (int) (old_size < new_size ? old_size : new_size));
  if (new_base || new_size == 0)
    /* Free OLD_BASE, but only if the malloc didn't fail.  */
    free (old_base);

print_malloc_free_list()
  register int i, size;
  register free_list_t fl;
  fprintf(stderr, " Size In Use Free Total\n");
  for (i = 0, size = MIN_SIZE, fl = malloc_free_list;
       i < NBUCKETS;
       i += 1, size <<= 1, fl += 1) {
    spin_lock(&fl->lock);
    if (fl->in_use != 0 || fl->head != 0) {
      total_used += fl->in_use * size;
      for (n = 0, h = fl->head; h != 0; h = HEADER_NEXT (h), n += 1)
        ;
      total_free += n * size;
      fprintf(stderr, "%10d %10d %10d %10d\n",
              size, fl->in_use, n, fl->in_use + n);
    spin_unlock(&fl->lock);
  fprintf(stderr, " all sizes %10d %10d %10d\n",
          total_used, total_free, total_used + total_free);

malloc_fork_prepare(void)
/*
 * Prepare the malloc module for a fork by ensuring that no thread is in a
 * malloc critical section.
 */
  for (i = 0; i < NBUCKETS; i++) {
    spin_lock(&malloc_free_list[i].lock);

malloc_fork_parent(void)
/*
 * Called in the parent process after a fork() to resume normal operation.
 */
  for (i = NBUCKETS-1; i >= 0; i--) {
    spin_unlock(&malloc_free_list[i].lock);

malloc_fork_child(void)
/*
 * Called in the child process after a fork() to resume normal operation.
 */
  for (i = NBUCKETS-1; i >= 0; i--) {
    spin_unlock(&malloc_free_list[i].lock);

text_set_element (_hurd_fork_prepare_hook, malloc_fork_prepare);
text_set_element (_hurd_fork_parent_hook, malloc_fork_parent);
text_set_element (_hurd_fork_child_hook, malloc_fork_child);
text_set_element (_hurd_preinit_hook, malloc_init);