/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
#include "private/gc_priv.h"

/*
 * These are extra allocation routines which are likely to be less
 * frequently used than those in malloc.c.  They are separate in the
 * hope that the .o file will be excluded from statically linked
 * executables.  We should probably break this up further.
 */
#include <stdio.h>
#include <string.h>

#ifdef MSWINCE
# ifndef WIN32_LEAN_AND_MEAN
#   define WIN32_LEAN_AND_MEAN 1
# endif
# define NOSERVICE
# include <windows.h>
#else
# include <errno.h>
#endif
/* Some externally visible but unadvertised variables to allow access to */
/* free lists from inlined allocators without including gc_priv.h        */
/* or introducing dependencies on internal data structure layouts.       */
void ** const GC_objfreelist_ptr = GC_objfreelist;
void ** const GC_aobjfreelist_ptr = GC_aobjfreelist;
void ** const GC_uobjfreelist_ptr = GC_uobjfreelist;
# ifdef ATOMIC_UNCOLLECTABLE
    void ** const GC_auobjfreelist_ptr = GC_auobjfreelist;
# endif
STATIC void * GC_generic_or_special_malloc(size_t lb, int knd)
{
    switch(knd) {
#     ifdef STUBBORN_ALLOC
        case STUBBORN:
            return(GC_malloc_stubborn((size_t)lb));
#     endif
        case PTRFREE:
            return(GC_malloc_atomic((size_t)lb));
        case NORMAL:
            return(GC_malloc((size_t)lb));
        case UNCOLLECTABLE:
            return(GC_malloc_uncollectable((size_t)lb));
#       ifdef ATOMIC_UNCOLLECTABLE
          case AUNCOLLECTABLE:
            return(GC_malloc_atomic_uncollectable((size_t)lb));
#       endif /* ATOMIC_UNCOLLECTABLE */
        default:
            return(GC_generic_malloc(lb, knd));
    }
}
/* Change the size of the block pointed to by p to contain at least    */
/* lb bytes.  The object may be (and quite likely will be) moved.      */
/* The kind (e.g. atomic) is the same as that of the old.              */
/* Shrinking of large blocks is not implemented well.                  */
GC_API void * GC_CALL GC_realloc(void * p, size_t lb)
{
    struct hblk * h;
    hdr * hhdr;
    size_t sz;       /* Current size in bytes    */
    size_t orig_sz;  /* Original sz in bytes     */
    int obj_kind;

    if (p == 0) return(GC_malloc(lb));  /* Required by ANSI */
    h = HBLKPTR(p);
    hhdr = HDR(h);
    sz = hhdr -> hb_sz;
    obj_kind = hhdr -> hb_obj_kind;
    orig_sz = sz;

    if (sz > MAXOBJBYTES) {
        /* Round it up to the next whole heap block */
          word descr;

          sz = (sz+HBLKSIZE-1) & (~HBLKMASK);
          hhdr -> hb_sz = sz;
          descr = GC_obj_kinds[obj_kind].ok_descriptor;
          if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
          hhdr -> hb_descr = descr;
#         ifdef MARK_BIT_PER_OBJ
            GC_ASSERT(hhdr -> hb_inv_sz == LARGE_INV_SZ);
#         else
            GC_ASSERT(hhdr -> hb_large_block &&
                      hhdr -> hb_map[ANY_INDEX] == 1);
#         endif
          if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
          /* Extra area is already cleared by GC_alloc_large_and_clear. */
    }
    if (ADD_SLOP(lb) <= sz) {
        if (lb >= (sz >> 1)) {
#           ifdef STUBBORN_ALLOC
                if (obj_kind == STUBBORN) GC_change_stubborn(p);
#           endif
            if (orig_sz > lb) {
              /* Clear unneeded part of object to avoid bogus pointer */
              /* tracing.                                             */
              /* Safe for stubborn objects.                           */
                BZERO(((ptr_t)p) + lb, orig_sz - lb);
            }
            return(p);
        } else {
            /* shrink */
              void * result =
                        GC_generic_or_special_malloc((word)lb, obj_kind);

              if (result == 0) return(0);
                  /* Could also return original object.  But this     */
                  /* gives the client warning of imminent disaster.   */
              BCOPY(p, result, lb);
#             ifndef IGNORE_FREE
                GC_free(p);
#             endif
              return(result);
        }
    } else {
        /* grow */
          void * result = GC_generic_or_special_malloc((word)lb, obj_kind);

          if (result == 0) return(0);
          BCOPY(p, result, sz);
#         ifndef IGNORE_FREE
            GC_free(p);
#         endif
          return(result);
    }
}
# if defined(REDIRECT_MALLOC) && !defined(REDIRECT_REALLOC)
#   define REDIRECT_REALLOC GC_realloc
# endif

# ifdef REDIRECT_REALLOC

/* As with malloc, avoid two levels of extra calls here. */
# define GC_debug_realloc_replacement(p, lb) \
        GC_debug_realloc(p, lb, GC_DBG_RA "unknown", 0)

void * realloc(void * p, size_t lb)
{
    return(REDIRECT_REALLOC(p, lb));
}

# undef GC_debug_realloc_replacement
# endif /* REDIRECT_REALLOC */
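/* A hedged configuration sketch: with the collector built using, e.g., */
/* -DREDIRECT_MALLOC=GC_malloc and no explicit REDIRECT_REALLOC, the     */
/* block above also intercepts plain realloc() calls, so client code    */
/* such as p = realloc(p, n); ends up in GC_realloc.  The exact flags   */
/* depend on the build setup.                                           */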
/* Allocate memory such that only pointers to near the          */
/* beginning of the object are considered.                      */
/* We avoid holding allocation lock while we clear memory.      */
GC_INNER void * GC_generic_malloc_ignore_off_page(size_t lb, int k)
{
    void *result;
    size_t lg;
    size_t lb_rounded;
    word n_blocks;
    GC_bool init;
    DCL_LOCK_STATE;

    if (SMALL_OBJ(lb))
        return(GC_generic_malloc((word)lb, k));
    lg = ROUNDED_UP_GRANULES(lb);
    lb_rounded = GRANULES_TO_BYTES(lg);
    if (lb_rounded < lb)
        return((*GC_get_oom_fn())(lb));
    n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
    init = GC_obj_kinds[k].ok_init;
    if (EXPECT(GC_have_errors, FALSE))
      GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    GC_DBG_COLLECT_AT_MALLOC(lb);
    LOCK();
    result = (ptr_t)GC_alloc_large(ADD_SLOP(lb), k, IGNORE_OFF_PAGE);
    if (0 != result) {
        if (GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
        } else {
#           ifdef THREADS
              /* Clear any memory that might be used for GC descriptors */
              /* before we release the lock.                            */
                ((word *)result)[0] = 0;
                ((word *)result)[1] = 0;
                ((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0;
                ((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0;
#           endif
        }
    }
    GC_bytes_allocd += lb_rounded;
    if (0 == result) {
        GC_oom_func oom_fn = GC_oom_fn;
        UNLOCK();
        return((*oom_fn)(lb));
    } else {
        UNLOCK();
        if (init && !GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
        }
        return(result);
    }
}
GC_API void * GC_CALL GC_malloc_ignore_off_page(size_t lb)
{
    return((void *)GC_generic_malloc_ignore_off_page(lb, NORMAL));
}

GC_API void * GC_CALL GC_malloc_atomic_ignore_off_page(size_t lb)
{
    return((void *)GC_generic_malloc_ignore_off_page(lb, PTRFREE));
}
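/* Illustrative (not compiled) usage sketch: the _ignore_off_page       */
/* allocators suit large objects for which the client promises to keep  */
/* a pointer to (near) the beginning alive; interior pointers alone     */
/* will not keep such an object from being collected.  The names below  */
/* are hypothetical.                                                    */
#if 0
  void large_buffer_example(void)
  {
      /* Keep base reachable as long as any part of the buffer is in use. */
      char *base = GC_malloc_atomic_ignore_off_page(1 << 20);

      if (base != NULL) {
          base[0] = 'x';        /* pointer-free data; not scanned */
      }
  }
#endif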
/* Increment GC_bytes_allocd from code that doesn't have direct access */
/* to GC_arrays.                                                       */
GC_API void GC_CALL GC_incr_bytes_allocd(size_t n)
{
    GC_bytes_allocd += n;
}

/* The same for GC_bytes_freed. */
GC_API void GC_CALL GC_incr_bytes_freed(size_t n)
{
    GC_bytes_freed += n;
}
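/* A hedged sketch of why these entry points exist: a client-side       */
/* allocator that carves objects out of a GC_malloc_many() list can     */
/* report the bytes it hands out, keeping the collector's allocation    */
/* accounting roughly accurate.  The function below is hypothetical;    */
/* GC_NEXT() is the list-traversal macro from gc.h.                     */
#if 0
  void *my_alloc_from_private_list(void **flh, size_t lb)
  {
      void *op = *flh;

      if (op != NULL) {
          *flh = GC_NEXT(op);       /* unlink from the private list */
          GC_NEXT(op) = NULL;       /* client should clear the link */
          GC_incr_bytes_allocd(lb); /* keep GC accounting current   */
      }
      return op;
  }
#endif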
#ifdef PARALLEL_MARK
  STATIC volatile AO_t GC_bytes_allocd_tmp = 0;
                        /* Number of bytes of memory allocated since    */
                        /* we released the GC lock.  Instead of         */
                        /* reacquiring the GC lock just to add this in, */
                        /* we add it in the next time we reacquire      */
                        /* the lock.  (Atomically adding it doesn't     */
                        /* work, since we would have to atomically      */
                        /* update it in GC_malloc, which is too         */
                        /* expensive.)                                  */
#endif /* PARALLEL_MARK */
/* Return a list of 1 or more objects of the indicated size, linked     */
/* through the first word in the object.  This has the advantage that   */
/* it acquires the allocation lock only once, and may greatly reduce    */
/* time wasted contending for the allocation lock.  Typical usage would */
/* be in a thread that requires many items of the same size.  It would  */
/* keep its own free list in thread-local storage, and call             */
/* GC_malloc_many or friends to replenish it.  (We do not round up      */
/* object sizes, since a call indicates the intention to consume many   */
/* objects of exactly this size.)                                       */
/* We assume that the size is a multiple of GRANULE_BYTES.              */
/* We return the free list by assigning it to *result, since it is      */
/* not safe to return, e.g. a linked list of pointer-free objects,      */
/* since the collector would not retain the entire list if it were      */
/* invoked just as we were returning.                                   */
/* Note that the client should usually clear the link field.            */
GC_API void GC_CALL GC_generic_malloc_many(size_t lb, int k, void **result)
{
    void *op;
    void *p;
    void **opp;
    size_t lw;      /* Length in words.     */
    size_t lg;      /* Length in granules.  */
    signed_word my_bytes_allocd = 0;
    struct obj_kind * ok = &(GC_obj_kinds[k]);
    DCL_LOCK_STATE;

    GC_ASSERT(lb != 0 && (lb & (GRANULE_BYTES-1)) == 0);
    if (!SMALL_OBJ(lb)) {
        op = GC_generic_malloc(lb, k);
        if (EXPECT(0 != op, TRUE))
            obj_link(op) = 0;
        *result = op;
        return;
    }
    lw = BYTES_TO_WORDS(lb);
    lg = BYTES_TO_GRANULES(lb);
    if (EXPECT(GC_have_errors, FALSE))
      GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    GC_DBG_COLLECT_AT_MALLOC(lb);
    LOCK();
    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
    /* Do our share of marking work */
      if (GC_incremental && !GC_dont_gc) {
        ENTER_GC();
        GC_collect_a_little_inner(1);
        EXIT_GC();
      }
    /* First see if we can reclaim a page of objects waiting to be */
    /* reclaimed.                                                  */
    {
        struct hblk ** rlh = ok -> ok_reclaim_list;
        struct hblk * hbp;
        hdr * hhdr;

        rlh += lg;
        while ((hbp = *rlh) != 0) {
            hhdr = HDR(hbp);
            *rlh = hhdr -> hb_next;
            GC_ASSERT(hhdr -> hb_sz == lb);
            hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
#           ifdef PARALLEL_MARK
              if (GC_parallel) {
                  signed_word my_bytes_allocd_tmp =
                                (signed_word)AO_load(&GC_bytes_allocd_tmp);
                  GC_ASSERT(my_bytes_allocd_tmp >= 0);
                  /* We only decrement it while holding the GC lock.   */
                  /* Thus we can't accidentally adjust it down in more */
                  /* than one thread simultaneously.                   */

                  if (my_bytes_allocd_tmp != 0) {
                    (void)AO_fetch_and_add(&GC_bytes_allocd_tmp,
                                           (AO_t)(-my_bytes_allocd_tmp));
                    GC_bytes_allocd += my_bytes_allocd_tmp;
                  }
                  GC_acquire_mark_lock();
                  ++ GC_fl_builder_count;
                  UNLOCK();
                  GC_release_mark_lock();
              }
#           endif
            op = GC_reclaim_generic(hbp, hhdr, lb,
                                    ok -> ok_init, 0, &my_bytes_allocd);
            if (op != 0) {
              /* We also reclaimed memory, so we need to adjust */
              /* that count.                                    */
              /* This should be atomic, so the results may be   */
              /* inaccurate.                                    */
              GC_bytes_found += my_bytes_allocd;
#             ifdef PARALLEL_MARK
                if (GC_parallel) {
                  *result = op;
                  (void)AO_fetch_and_add(&GC_bytes_allocd_tmp,
                                         (AO_t)my_bytes_allocd);
                  GC_acquire_mark_lock();
                  -- GC_fl_builder_count;
                  if (GC_fl_builder_count == 0) GC_notify_all_builder();
                  GC_release_mark_lock();
                  (void) GC_clear_stack(0);
                  return;
                }
#             endif
              GC_bytes_allocd += my_bytes_allocd;
              goto out;
            }
#           ifdef PARALLEL_MARK
              if (GC_parallel) {
                GC_acquire_mark_lock();
                -- GC_fl_builder_count;
                if (GC_fl_builder_count == 0) GC_notify_all_builder();
                GC_release_mark_lock();
                LOCK();
                /* GC lock is needed for reclaim list access.  We      */
                /* must decrement fl_builder_count before reacquiring  */
                /* the GC lock.  Hopefully this path is rare.          */
              }
#           endif
        }
    }
    /* Next try to use prefix of global free list if there is one.    */
    /* We don't refill it, but we need to use it up before allocating */
    /* a new block ourselves.                                         */
      opp = &(GC_obj_kinds[k].ok_freelist[lg]);
      if ( (op = *opp) != 0 ) {
        *opp = 0;
        my_bytes_allocd = 0;
        for (p = op; p != 0; p = obj_link(p)) {
          my_bytes_allocd += lb;
          if ((word)my_bytes_allocd >= HBLKSIZE) {
            *opp = obj_link(p);
            obj_link(p) = 0;
            break;
          }
        }
        GC_bytes_allocd += my_bytes_allocd;
        goto out;
      }
    /* Next try to allocate a new block worth of objects of this size. */
    {
        struct hblk *h = GC_allochblk(lb, k, 0);
        if (h != 0) {
          if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
          GC_bytes_allocd += HBLKSIZE - HBLKSIZE % lb;
#         ifdef PARALLEL_MARK
            if (GC_parallel) {
              GC_acquire_mark_lock();
              ++ GC_fl_builder_count;
              UNLOCK();
              GC_release_mark_lock();

              op = GC_build_fl(h, lw,
                        (ok -> ok_init || GC_debugging_started), 0);

              *result = op;
              GC_acquire_mark_lock();
              -- GC_fl_builder_count;
              if (GC_fl_builder_count == 0) GC_notify_all_builder();
              GC_release_mark_lock();
              (void) GC_clear_stack(0);
              return;
            }
#         endif
          op = GC_build_fl(h, lw, (ok -> ok_init || GC_debugging_started), 0);
          goto out;
        }
    }

    /* As a last attempt, try allocating a single object.  Note that */
    /* this may trigger a collection or expand the heap.             */
      op = GC_generic_malloc_inner(lb, k);
      if (0 != op) obj_link(op) = 0;

  out:
    *result = op;
    UNLOCK();
    (void) GC_clear_stack(0);
}
/* Note that the "atomic" version of this would be unsafe, since the    */
/* links would not be seen by the collector.                            */
GC_API void * GC_CALL GC_malloc_many(size_t lb)
{
    void *result;
    GC_generic_malloc_many((lb + EXTRA_BYTES + GRANULE_BYTES-1)
                           & ~(GRANULE_BYTES-1),
                           NORMAL, &result);
    return result;
}
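/* Illustrative (not compiled) usage sketch, following the comment      */
/* above GC_generic_malloc_many: a thread keeps a private free list     */
/* (here a hypothetical variable, ideally in thread-local storage) and  */
/* refills it with a single GC_malloc_many() call, clearing the link    */
/* word of each object it pops.  GC_NEXT() is the list-traversal macro  */
/* from gc.h.                                                           */
#if 0
  static void *my_free_list = NULL;     /* ideally thread-local */

  void *my_fast_alloc(size_t lb)
  {
      void *op = my_free_list;

      if (op == NULL) {
          op = GC_malloc_many(lb); /* replenish: 1 or more linked objects */
          if (op == NULL) return NULL;
      }
      my_free_list = GC_NEXT(op);
      GC_NEXT(op) = NULL;          /* client should clear the link field */
      return op;
  }
#endif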
/* Not well tested nor integrated.                 */
/* Debug version is tricky and currently missing.  */
#include <limits.h>

GC_API void * GC_CALL GC_memalign(size_t align, size_t lb)
{
    size_t new_lb;
    size_t offset;
    ptr_t result;

    if (align <= GRANULE_BYTES) return GC_malloc(lb);
    if (align >= HBLKSIZE/2 || lb >= HBLKSIZE/2) {
        if (align > HBLKSIZE) {
          return (*GC_get_oom_fn())(LONG_MAX-1024); /* Fail */
        }
        return GC_malloc(lb <= HBLKSIZE? HBLKSIZE : lb);
            /* Will be HBLKSIZE aligned. */
    }
    /* We could also try to make sure that the real rounded-up object size */
    /* is a multiple of align.  That would be correct up to HBLKSIZE.      */
    new_lb = lb + align - 1;
    result = GC_malloc(new_lb);
            /* It is OK not to check result for NULL as in that case    */
            /* GC_memalign returns NULL too since (0 + 0 % align) is 0. */
    offset = (word)result % align;
    if (offset != 0) {
        offset = align - offset;
        if (!GC_all_interior_pointers) {
            if (offset >= VALID_OFFSET_SZ) return GC_malloc(HBLKSIZE);
            GC_register_displacement(offset);
        }
    }
    result = (void *) ((ptr_t)result + offset);
    GC_ASSERT((word)result % align == 0);
    return result;
}
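/* Illustrative (not compiled) usage sketch: GC_memalign behaves like   */
/* memalign(), returning collectable storage whose address is a         */
/* multiple of align.  The names below are hypothetical.                */
#if 0
  void alignment_example(void)
  {
      /* e.g. a 64-byte-aligned block for cache-line-sized records */
      void *p = GC_memalign(64, 256);

      GC_ASSERT(p == NULL || (GC_word)p % 64 == 0);
  }
#endif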
/* This one exists largely to redirect posix_memalign for leak detection. */
GC_API int GC_CALL GC_posix_memalign(void **memptr, size_t align, size_t lb)
{
    /* Check alignment properly. */
    if (((align - 1) & align) != 0 || align < sizeof(void *)) {
#     ifdef MSWINCE
        return ERROR_INVALID_PARAMETER;
#     else
        return EINVAL;
#     endif
    }

    if ((*memptr = GC_memalign(align, lb)) == NULL) {
#     ifdef MSWINCE
        return ERROR_NOT_ENOUGH_MEMORY;
#     else
        return ENOMEM;
#     endif
    }
    return 0;
}
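/* Illustrative (not compiled) usage sketch: unlike GC_memalign, this   */
/* entry point reports failure through a POSIX-style return code rather */
/* than a NULL result.  The name below is hypothetical.                 */
#if 0
  int posix_style_example(void)
  {
      void *p;
      int err = GC_posix_memalign(&p, 2 * sizeof(void *), 1024);

      return err; /* 0 on success, an EINVAL/ENOMEM-style code otherwise */
  }
#endif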
#ifdef ATOMIC_UNCOLLECTABLE
  /* Allocate lb bytes of pointer-free, untraced, uncollectable data.   */
  /* This is normally roughly equivalent to the system malloc.          */
  /* But it may be useful if malloc is redefined.                       */
  GC_API void * GC_CALL GC_malloc_atomic_uncollectable(size_t lb)
  {
    void *op;
    void **opp;
    size_t lg;
    DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
        GC_DBG_COLLECT_AT_MALLOC(lb);
        if (EXTRA_BYTES != 0 && lb != 0) lb--;
                  /* We don't need the extra byte, since this won't be */
                  /* collected anyway.                                 */
        lg = GC_size_map[lb];
        opp = &(GC_auobjfreelist[lg]);
        LOCK();
        op = *opp;
        if (EXPECT(0 != op, TRUE)) {
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_bytes_allocd += GRANULES_TO_BYTES(lg);
            /* Mark bit was already set while object was on free list. */
            GC_non_gc_bytes += GRANULES_TO_BYTES(lg);
            UNLOCK();
        } else {
            UNLOCK();
            op = (ptr_t)GC_generic_malloc(lb, AUNCOLLECTABLE);
        }
        GC_ASSERT(0 == op || GC_is_marked(op));
        return((void *) op);
    } else {
        hdr * hhdr;

        op = (ptr_t)GC_generic_malloc(lb, AUNCOLLECTABLE);
        if (0 == op) return(0);

        GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0);
        hhdr = HDR(op);

        LOCK();
        set_mark_bit_from_hdr(hhdr, 0); /* Only object. */
#       ifndef THREADS
          GC_ASSERT(hhdr -> hb_n_marks == 0);
#       endif
        hhdr -> hb_n_marks = 1;
        UNLOCK();
        return((void *) op);
    }
  }
#endif /* ATOMIC_UNCOLLECTABLE */
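/* Illustrative (not compiled) usage sketch: atomic uncollectable       */
/* objects are neither scanned for pointers nor reclaimed               */
/* automatically, so they behave much like system malloc storage and    */
/* must be released with GC_free() when no longer needed.  The name     */
/* below is hypothetical.                                               */
#if 0
  void uncollectable_example(void)
  {
      char *raw = GC_malloc_atomic_uncollectable(128);

      /* ... use raw; the collector will never free it on its own ... */
      GC_free(raw);
  }
#endif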
/* Provide a version of strdup() that uses the collector to allocate   */
/* the copy of the string.                                             */
GC_API char * GC_CALL GC_strdup(const char *s)
{
  char *copy;
  size_t lb;

  if (s == NULL) return NULL;
  lb = strlen(s) + 1;
  if ((copy = GC_malloc_atomic(lb)) == NULL) {
#   ifndef MSWINCE
      errno = ENOMEM;
#   endif
    return NULL;
  }
  BCOPY(s, copy, lb);
  return copy;
}
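/* Illustrative (not compiled) usage sketch: the returned copy lives in */
/* atomic (pointer-free) collectable storage, so no explicit free is    */
/* required once the copy becomes unreachable.                          */
#if 0
  const char *dup_example(void)
  {
      return GC_strdup("hello"); /* reclaimed automatically when dead */
  }
#endif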
GC_API char * GC_CALL GC_strndup(const char *str, size_t size)
{
  char *copy;
  size_t len = strlen(str); /* str is expected to be non-NULL */

  if (len > size)
    len = size;
  copy = GC_malloc_atomic(len + 1);
  if (copy == NULL) {
#   ifndef MSWINCE
      errno = ENOMEM;
#   endif
    return NULL;
  }
  BCOPY(str, copy, len);
  copy[len] = '\0';
  return copy;
}
#ifdef GC_REQUIRE_WCSDUP
# include <wchar.h> /* for wcslen() */

  GC_API wchar_t * GC_CALL GC_wcsdup(const wchar_t *str)
  {
    size_t lb = (wcslen(str) + 1) * sizeof(wchar_t);
    wchar_t *copy = GC_malloc_atomic(lb);

    if (copy == NULL) {
#     ifndef MSWINCE
        errno = ENOMEM;
#     endif
      return NULL;
    }
    BCOPY(str, copy, lb);
    return copy;
  }
#endif /* GC_REQUIRE_WCSDUP */