/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/*
 * These are extra allocation routines which are likely to be less
 * frequently used than those in malloc.c.  They are separate in the
 * hope that the .o file will be excluded from statically linked
 * executables.  We should probably break this up further.
 */
#include "private/gc_priv.h"
extern ptr_t GC_clear_stack();      /* in misc.c, behaves like identity */
void GC_extend_size_map();          /* in misc.c. */
GC_bool GC_alloc_reclaim_list();    /* in malloc.c */
/* Some externally visible but unadvertised variables to allow access to */
/* free lists from inlined allocators without including gc_priv.h        */
/* or introducing dependencies on internal data structure layouts.       */
ptr_t * GC_CONST GC_objfreelist_ptr = GC_objfreelist;
ptr_t * GC_CONST GC_aobjfreelist_ptr = GC_aobjfreelist;
ptr_t * GC_CONST GC_uobjfreelist_ptr = GC_uobjfreelist;
# ifdef ATOMIC_UNCOLLECTABLE
    ptr_t * GC_CONST GC_auobjfreelist_ptr = GC_auobjfreelist;
# endif
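/*
 * Illustrative sketch only (not part of the collector proper): an inlined
 * allocator, such as the macros in gc_inline.h, can pop a small object of
 * lw words directly off one of the exported free lists.  Locking,
 * initialization, and the empty-list case are omitted here; real clients
 * should use the gc_inline.h macros rather than open-coding this.
 *
 *      ptr_t *flh = &(GC_objfreelist_ptr[lw]);
 *      ptr_t op = *flh;
 *      if (op != 0) {
 *          *flh = obj_link(op);        unlink the first object
 *          obj_link(op) = 0;           clear its link field
 *      }
 */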
GC_PTR GC_generic_or_special_malloc(lb,knd)
word lb;
int knd;
{
    switch(knd) {
#       ifdef STUBBORN_ALLOC
        case STUBBORN:
            return(GC_malloc_stubborn((size_t)lb));
#       endif
        case PTRFREE:
            return(GC_malloc_atomic((size_t)lb));
        case NORMAL:
            return(GC_malloc((size_t)lb));
        case UNCOLLECTABLE:
            return(GC_malloc_uncollectable((size_t)lb));
#       ifdef ATOMIC_UNCOLLECTABLE
        case AUNCOLLECTABLE:
            return(GC_malloc_atomic_uncollectable((size_t)lb));
#       endif /* ATOMIC_UNCOLLECTABLE */
        default:
            return(GC_generic_malloc(lb,knd));
    }
}
/* Change the size of the block pointed to by p to contain at least   */
/* lb bytes.  The object may be (and quite likely will be) moved.     */
/* The kind (e.g. atomic) is the same as that of the old.             */
/* Shrinking of large blocks is not implemented well.                 */
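/*
 * Illustrative usage sketch (not part of the collector; the sizes are
 * arbitrary).  A null first argument behaves like GC_malloc, and the old
 * contents are copied if the object has to move:
 *
 *      char *buf = (char *)GC_malloc(100);
 *      ...
 *      buf = (char *)GC_realloc(buf, 200);     returns 0 on failure
 */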
# ifdef __STDC__
    GC_PTR GC_realloc(GC_PTR p, size_t lb)
# else
    GC_PTR GC_realloc(p,lb)
    GC_PTR p;
    size_t lb;
# endif
{
register struct hblk * h;
register hdr * hhdr;
register word sz;        /* Current size in bytes        */
register word orig_sz;   /* Original sz in bytes         */
int obj_kind;
    if (p == 0) return(GC_malloc(lb));  /* Required by ANSI */
    h = HBLKPTR(p);
    hhdr = HDR(h);
    sz = hhdr -> hb_sz;
    obj_kind = hhdr -> hb_obj_kind;
    sz = WORDS_TO_BYTES(sz);
    orig_sz = sz;

    if (sz > MAXOBJBYTES) {
        /* Round it up to the next whole heap block */
          register word descr;

          sz = (sz+HBLKSIZE-1) & (~HBLKMASK);
          hhdr -> hb_sz = BYTES_TO_WORDS(sz);
          descr = GC_obj_kinds[obj_kind].ok_descriptor;
          if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
          hhdr -> hb_descr = descr;
          if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
          /* Extra area is already cleared by GC_alloc_large_and_clear. */
    }
    if (ADD_SLOP(lb) <= sz) {
        if (lb >= (sz >> 1)) {
#           ifdef STUBBORN_ALLOC
                if (obj_kind == STUBBORN) GC_change_stubborn(p);
#           endif
            if (orig_sz > lb) {
              /* Clear unneeded part of object to avoid bogus pointer */
              /* tracing.                                             */
              /* Safe for stubborn objects.                           */
                BZERO(((ptr_t)p) + lb, orig_sz - lb);
            }
            return(p);
        } else {
            /* shrink */
              GC_PTR result =
                        GC_generic_or_special_malloc((word)lb, obj_kind);

              if (result == 0) return(0);
                  /* Could also return original object.  But this      */
                  /* gives the client warning of imminent disaster.    */
              BCOPY(p, result, lb);
#             ifndef IGNORE_FREE
                GC_free(p);
#             endif
              return(result);
        }
    } else {
        /* grow */
          GC_PTR result =
                GC_generic_or_special_malloc((word)lb, obj_kind);

          if (result == 0) return(0);
          BCOPY(p, result, sz);
#         ifndef IGNORE_FREE
            GC_free(p);
#         endif
          return(result);
    }
}
# if defined(REDIRECT_MALLOC) && !defined(REDIRECT_REALLOC)
#   define REDIRECT_REALLOC GC_realloc
# endif

# ifdef REDIRECT_REALLOC

/* As with malloc, avoid two levels of extra calls here.       */
# ifdef GC_ADD_CALLER
#   define RA GC_RETURN_ADDR,
# else
#   define RA
# endif
# define GC_debug_realloc_replacement(p, lb) \
        GC_debug_realloc(p, lb, RA "unknown", 0)

GC_PTR realloc(GC_PTR p, size_t lb)
{
    return(REDIRECT_REALLOC(p, lb));
}

# undef GC_debug_realloc_replacement
# endif /* REDIRECT_REALLOC */
/* Allocate memory such that only pointers to near the         */
/* beginning of the object are considered.                     */
/* We avoid holding allocation lock while we clear memory.     */
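/*
 * Illustrative usage sketch (not part of the collector): intended for
 * large objects, where the caller promises to keep a pointer to (near)
 * the beginning of the object live, so the collector need not recognize
 * interior pointers far past the start.  The size below is arbitrary:
 *
 *      char *big = (char *)GC_malloc_ignore_off_page(1000000);
 *      keep "big" itself reachable for as long as the object is needed
 */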
ptr_t GC_generic_malloc_ignore_off_page(lb, k)
register size_t lb;
register int k;
{
    register ptr_t result;
    word lw;
    word n_blocks;
    GC_bool init;
    DCL_LOCK_STATE;

    if (SMALL_OBJ(lb))
        return(GC_generic_malloc((word)lb, k));
    lw = ROUNDED_UP_WORDS(lb);
    n_blocks = OBJ_SZ_TO_BLOCKS(lw);
    init = GC_obj_kinds[k].ok_init;
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    result = (ptr_t)GC_alloc_large(lw, k, IGNORE_OFF_PAGE);
    if (0 != result) {
        if (GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
        } else {
            /* Clear any memory that might be used for GC descriptors */
            /* before we release the lock.                             */
              ((word *)result)[0] = 0;
              ((word *)result)[1] = 0;
              ((word *)result)[lw-1] = 0;
              ((word *)result)[lw-2] = 0;
        }
    }
    GC_words_allocd += lw;
    UNLOCK();
    ENABLE_SIGNALS();
    if (0 == result) {
        return((*GC_oom_fn)(lb));
    } else {
        if (init && !GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
        }
        return(result);
    }
}
# if defined(__STDC__) || defined(__cplusplus)
  void * GC_malloc_ignore_off_page(size_t lb)
# else
  char * GC_malloc_ignore_off_page(lb)
  register size_t lb;
# endif
{
    return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, NORMAL));
}
# if defined(__STDC__) || defined(__cplusplus)
  void * GC_malloc_atomic_ignore_off_page(size_t lb)
# else
  char * GC_malloc_atomic_ignore_off_page(lb)
  register size_t lb;
# endif
{
    return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, PTRFREE));
}
/* Increment GC_words_allocd from code that doesn't have direct access */
/* to GC_arrays.                                                        */
# ifdef __STDC__
void GC_incr_words_allocd(size_t n)
{
    GC_words_allocd += n;
}

/* The same for GC_mem_freed.                           */
void GC_incr_mem_freed(size_t n)
{
    GC_mem_freed += n;
}
# endif /* __STDC__ */
/* Analogous to the above, but assumes a small object size, and        */
/* bypasses MERGE_SIZES mechanism.  Used by gc_inline.h.               */
ptr_t GC_generic_malloc_words_small_inner(lw, k)
register word lw;
register int k;
{
register ptr_t op;
register ptr_t *opp;
register struct obj_kind * kind = GC_obj_kinds + k;

    opp = &(kind -> ok_freelist[lw]);
    if( (op = *opp) == 0 ) {
        if (!GC_is_initialized) {
            GC_init_inner();
        }
        if (kind -> ok_reclaim_list != 0 || GC_alloc_reclaim_list(kind)) {
            op = GC_clear_stack(GC_allocobj((word)lw, k));
        }
        if (op == 0) {
            UNLOCK();
            ENABLE_SIGNALS();
            return ((*GC_oom_fn)(WORDS_TO_BYTES(lw)));
        }
    }
    *opp = obj_link(op);
    obj_link(op) = 0;
    GC_words_allocd += lw;
    return((ptr_t)op);
}
/* Analogous to the above, but assumes a small object size, and        */
/* bypasses MERGE_SIZES mechanism.  Used by gc_inline.h.               */
#ifdef __STDC__
  ptr_t GC_generic_malloc_words_small(size_t lw, int k)
#else
  ptr_t GC_generic_malloc_words_small(lw, k)
  register word lw;
  register int k;
#endif
{
register ptr_t op;
DCL_LOCK_STATE;

    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    op = GC_generic_malloc_words_small_inner(lw, k);
    UNLOCK();
    ENABLE_SIGNALS();
    return((ptr_t)op);
}
#if defined(THREADS) && !defined(SRC_M3)

extern signed_word GC_mem_found;        /* Protected by GC lock.  */

#ifdef PARALLEL_MARK
volatile signed_word GC_words_allocd_tmp = 0;
                        /* Number of words of memory allocated since    */
                        /* we released the GC lock.  Instead of         */
                        /* reacquiring the GC lock just to add this in, */
                        /* we add it in the next time we reacquire      */
                        /* the lock.  (Atomically adding it doesn't     */
                        /* work, since we would have to atomically      */
                        /* update it in GC_malloc, which is too         */
                        /* expensive.)                                   */
#endif /* PARALLEL_MARK */

extern ptr_t GC_reclaim_generic();
/* Return a list of 1 or more objects of the indicated size, linked    */
/* through the first word in the object.  This has the advantage that  */
/* it acquires the allocation lock only once, and may greatly reduce   */
/* time wasted contending for the allocation lock.  Typical usage would */
/* be in a thread that requires many items of the same size.  It would */
/* keep its own free list in thread-local storage, and call            */
/* GC_malloc_many or friends to replenish it.  (We do not round up     */
/* object sizes, since a call indicates the intention to consume many  */
/* objects of exactly this size.)                                      */
/* We return the free-list by assigning it to *result, since it is     */
/* not safe to return, e.g. a linked list of pointer-free objects,     */
/* since the collector would not retain the entire list if it were     */
/* invoked just as we were returning.                                  */
/* Note that the client should usually clear the link field.           */
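/*
 * Illustrative sketch of the intended client usage (not part of the
 * collector): a thread keeps a private free list and refills it from
 * GC_malloc_many when it runs dry.  The gc.h macro GC_NEXT(p) reads the
 * link stored in the first word; "local_free_list" and NODE_SZ below are
 * hypothetical names.
 *
 *      static GC_PTR local_free_list = 0;      thread-local in real code
 *
 *      GC_PTR get_node(void)
 *      {
 *          GC_PTR op;
 *
 *          if (local_free_list == 0) {
 *              local_free_list = GC_malloc_many(NODE_SZ);
 *              if (local_free_list == 0) return 0;
 *          }
 *          op = local_free_list;
 *          local_free_list = GC_NEXT(op);
 *          GC_NEXT(op) = 0;            clear the link field
 *          return op;
 *      }
 */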
void GC_generic_malloc_many(lb, k, result)
register word lb;
register int k;
ptr_t *result;
{
ptr_t op;
ptr_t p;
ptr_t *opp;
word lw;
word my_words_allocd = 0;
struct obj_kind * ok = &(GC_obj_kinds[k]);
DCL_LOCK_STATE;
#   if defined(GATHERSTATS) || defined(PARALLEL_MARK)
#     define COUNT_ARG , &my_words_allocd
#   else
#     define COUNT_ARG
#     define NEED_TO_COUNT
#   endif
    if (!SMALL_OBJ(lb)) {
        op = GC_generic_malloc(lb, k);
        if(0 != op) obj_link(op) = 0;
        *result = op;
        return;
    }
    lw = ALIGNED_WORDS(lb);
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    if (!GC_is_initialized) GC_init_inner();
    /* Do our share of marking work */
      if (GC_incremental && !GC_dont_gc) {
        ENTER_GC();
        GC_collect_a_little_inner(1);
        EXIT_GC();
      }
    /* First see if we can reclaim a page of objects waiting to be     */
    /* reclaimed.                                                      */
    {
        struct hblk ** rlh = ok -> ok_reclaim_list;
        struct hblk * hbp;
        hdr * hhdr;

        rlh += lw;
        while ((hbp = *rlh) != 0) {
            hhdr = HDR(hbp);
            *rlh = hhdr -> hb_next;
            hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
#           ifdef PARALLEL_MARK
                {
                  signed_word my_words_allocd_tmp = GC_words_allocd_tmp;

                  GC_ASSERT(my_words_allocd_tmp >= 0);
                  /* We only decrement it while holding the GC lock.   */
                  /* Thus we can't accidentally adjust it down in more */
                  /* than one thread simultaneously.                   */
                  if (my_words_allocd_tmp != 0) {
                    (void)GC_atomic_add(
                                (volatile GC_word *)(&GC_words_allocd_tmp),
                                (GC_word)(-my_words_allocd_tmp));
                    GC_words_allocd += my_words_allocd_tmp;
                  }
                }
#           endif
            GC_acquire_mark_lock();
            ++ GC_fl_builder_count;
            UNLOCK();
            ENABLE_SIGNALS();
            GC_release_mark_lock();
            op = GC_reclaim_generic(hbp, hhdr, lw,
                                    ok -> ok_init, 0 COUNT_ARG);
            if (op != 0) {
#               ifdef NEED_TO_COUNT
                  /* We are neither gathering statistics, nor marking in */
                  /* parallel.  Thus GC_reclaim_generic doesn't count    */
                  /* for us.                                             */
                  for (p = op; p != 0; p = obj_link(p)) {
                    my_words_allocd += lw;
                  }
#               endif
#               if defined(GATHERSTATS)
                  /* We also reclaimed memory, so we need to adjust      */
                  /* that count.                                         */
                  /* This should be atomic, so the results may be        */
                  /* inaccurate.                                         */
                  GC_mem_found += my_words_allocd;
#               endif
#               ifdef PARALLEL_MARK
                  *result = op;
                  (void)GC_atomic_add(
                                (volatile GC_word *)(&GC_words_allocd_tmp),
                                (GC_word)(my_words_allocd));
                  GC_acquire_mark_lock();
                  -- GC_fl_builder_count;
                  if (GC_fl_builder_count == 0) GC_notify_all_builder();
                  GC_release_mark_lock();
                  (void) GC_clear_stack(0);
                  return;
#               else
                  GC_words_allocd += my_words_allocd;
                  goto out;
#               endif
            }
#           ifdef PARALLEL_MARK
              GC_acquire_mark_lock();
              -- GC_fl_builder_count;
              if (GC_fl_builder_count == 0) GC_notify_all_builder();
              GC_release_mark_lock();
#           endif
            DISABLE_SIGNALS();
            LOCK();
              /* GC lock is needed for reclaim list access.  We        */
              /* must decrement fl_builder_count before reacquiring GC */
              /* lock.  Hopefully this path is rare.                   */
        }
    }
    /* Next try to use prefix of global free list if there is one.     */
    /* We don't refill it, but we need to use it up before allocating  */
    /* a new block ourselves.                                          */
      opp = &(GC_obj_kinds[k].ok_freelist[lw]);
      if ( (op = *opp) != 0 ) {
        *opp = 0;
        my_words_allocd = 0;
        for (p = op; p != 0; p = obj_link(p)) {
          my_words_allocd += lw;
          if (my_words_allocd >= BODY_SZ) {
            *opp = obj_link(p);
            obj_link(p) = 0;
            break;
          }
        }
        GC_words_allocd += my_words_allocd;
        goto out;
      }
    /* Next try to allocate a new block worth of objects of this size. */
    {
        struct hblk *h = GC_allochblk(lw, k, 0);
        if (h != 0) {
          if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
          GC_words_allocd += BYTES_TO_WORDS(HBLKSIZE)
                               - BYTES_TO_WORDS(HBLKSIZE) % lw;
#         ifdef PARALLEL_MARK
            GC_acquire_mark_lock();
            ++ GC_fl_builder_count;
            UNLOCK();
            ENABLE_SIGNALS();
            GC_release_mark_lock();
#         endif

          op = GC_build_fl(h, lw, ok -> ok_init, 0);
#         ifdef PARALLEL_MARK
            *result = op;
            GC_acquire_mark_lock();
            -- GC_fl_builder_count;
            if (GC_fl_builder_count == 0) GC_notify_all_builder();
            GC_release_mark_lock();
            (void) GC_clear_stack(0);
            return;
#         else
            DISABLE_SIGNALS();
            LOCK();
            goto out;
#         endif
        }
    }
    /* As a last attempt, try allocating a single object.  Note that   */
    /* this may trigger a collection or expand the heap.               */
      op = GC_generic_malloc_inner(lb, k);
      if (0 != op) obj_link(op) = 0;

  out:
    *result = op;
    UNLOCK();
    ENABLE_SIGNALS();
    (void) GC_clear_stack(0);
}
GC_PTR GC_malloc_many(size_t lb)
{
    ptr_t result;
    GC_generic_malloc_many(lb, NORMAL, &result);
    return result;
}
/* Note that the "atomic" version of this would be unsafe, since the   */
/* links would not be seen by the collector.                           */
/* Allocate lb bytes of pointerful, traced, but not collectable data   */
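/*
 * Illustrative usage sketch (not part of the collector): an uncollectable
 * object is traced, so pointers stored in it keep their targets live, but
 * the object itself is never reclaimed automatically; free it explicitly
 * with GC_free when it is no longer needed.  The struct below is
 * hypothetical:
 *
 *      struct registry { GC_PTR slots[64]; };
 *      struct registry *r =
 *          (struct registry *)GC_malloc_uncollectable(sizeof *r);
 *      r->slots[0] = GC_malloc(32);    kept live via r
 *      ...
 *      GC_free(r);
 */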
# ifdef __STDC__
    GC_PTR GC_malloc_uncollectable(size_t lb)
# else
    GC_PTR GC_malloc_uncollectable(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
          if (EXTRA_BYTES != 0 && lb != 0) lb--;
                /* We don't need the extra byte, since this won't be   */
                /* collected anyway.                                   */
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_uobjfreelist[lw]);
        FASTLOCK();
        if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
            /* See above comment on signals.    */
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_words_allocd += lw;
            /* Mark bit was already set on free list.  It will be      */
            /* cleared only temporarily during a collection, as a      */
            /* result of the normal free list mark bit clearing.       */
            GC_non_gc_bytes += WORDS_TO_BYTES(lw);
            FASTUNLOCK();
            return((GC_PTR) op);
        }
        FASTUNLOCK();
        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
    } else {
        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
    }
    if (0 == op) return(0);
    /* We don't need the lock here, since we have an undisguised       */
    /* pointer.  We do need to hold the lock while we adjust           */
    /* mark bits.                                                      */
    {
        register struct hblk * h;

        h = HBLKPTR(op);
        lw = HDR(h) -> hb_sz;

        DISABLE_SIGNALS();
        LOCK();
        GC_set_mark_bit(op);
        GC_non_gc_bytes += WORDS_TO_BYTES(lw);
        UNLOCK();
        ENABLE_SIGNALS();
        return((GC_PTR) op);
    }
}
/* Not well tested nor integrated.                      */
/* Debug version is tricky and currently missing.       */
#include <limits.h>     /* for LONG_MAX, used below */
GC_PTR GC_memalign(size_t align, size_t lb)
{
    size_t new_lb;
    size_t offset;
    ptr_t result;

#   ifdef ALIGN_DOUBLE
        if (align <= WORDS_TO_BYTES(2) && lb > align) return GC_malloc(lb);
#   endif
    if (align <= WORDS_TO_BYTES(1)) return GC_malloc(lb);
    if (align >= HBLKSIZE/2 || lb >= HBLKSIZE/2) {
        if (align > HBLKSIZE) return GC_oom_fn(LONG_MAX-1024) /* Fail */;
        return GC_malloc(lb <= HBLKSIZE? HBLKSIZE : lb);
            /* Will be HBLKSIZE aligned.        */
    }
    /* We could also try to make sure that the real rounded-up object size */
    /* is a multiple of align.  That would be correct up to HBLKSIZE.      */
    new_lb = lb + align - 1;
    result = GC_malloc(new_lb);
    offset = (word)result % align;
    if (offset != 0) {
        offset = align - offset;
        if (!GC_all_interior_pointers) {
            if (offset >= VALID_OFFSET_SZ) return GC_malloc(HBLKSIZE);
            GC_register_displacement(offset);
        }
    }
    result = (GC_PTR) ((ptr_t)result + offset);
    GC_ASSERT((word)result % align == 0);
    return result;
}
# ifdef ATOMIC_UNCOLLECTABLE
/* Allocate lb bytes of pointerfree, untraced, uncollectable data      */
/* This is normally roughly equivalent to the system malloc.           */
/* But it may be useful if malloc is redefined.                        */
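/*
 * Illustrative usage sketch (not part of the collector): suitable for
 * pointer-free buffers that should never be scanned or collected, much
 * like system malloc; release explicitly with GC_free.  The size is
 * arbitrary:
 *
 *      char *iobuf = (char *)GC_malloc_atomic_uncollectable(4096);
 *      ...
 *      GC_free(iobuf);
 */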
# ifdef __STDC__
    GC_PTR GC_malloc_atomic_uncollectable(size_t lb)
# else
    GC_PTR GC_malloc_atomic_uncollectable(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
          if (EXTRA_BYTES != 0 && lb != 0) lb--;
                /* We don't need the extra byte, since this won't be   */
                /* collected anyway.                                   */
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_auobjfreelist[lw]);
        FASTLOCK();
        if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
            /* See above comment on signals.    */
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_words_allocd += lw;
            /* Mark bit was already set while object was on free list. */
            GC_non_gc_bytes += WORDS_TO_BYTES(lw);
            FASTUNLOCK();
            return((GC_PTR) op);
        }
        FASTUNLOCK();
        op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
    } else {
        op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
    }
    if (0 == op) return(0);
    /* We don't need the lock here, since we have an undisguised       */
    /* pointer.  We do need to hold the lock while we adjust           */
    /* mark bits.                                                      */
    {
        register struct hblk * h;

        h = HBLKPTR(op);
        lw = HDR(h) -> hb_sz;

        DISABLE_SIGNALS();
        LOCK();
        GC_set_mark_bit(op);
        GC_non_gc_bytes += WORDS_TO_BYTES(lw);
        UNLOCK();
        ENABLE_SIGNALS();
        return((GC_PTR) op);
    }
}

#endif /* ATOMIC_UNCOLLECTABLE */