/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/*
 * These are extra allocation routines which are likely to be less
 * frequently used than those in malloc.c.  They are separate in the
 * hope that the .o file will be excluded from statically linked
 * executables.  We should probably break this up further.
 */
#include "private/gc_priv.h"

extern ptr_t GC_clear_stack();	/* in misc.c, behaves like identity */
void GC_extend_size_map();	/* in misc.c */
GC_bool GC_alloc_reclaim_list();	/* in malloc.c */
/* Some externally visible but unadvertised variables to allow access to */
/* free lists from inlined allocators without including gc_priv.h	  */
/* or introducing dependencies on internal data structure layouts.	  */
ptr_t * GC_CONST GC_objfreelist_ptr = GC_objfreelist;
ptr_t * GC_CONST GC_aobjfreelist_ptr = GC_aobjfreelist;
ptr_t * GC_CONST GC_uobjfreelist_ptr = GC_uobjfreelist;
# ifdef ATOMIC_UNCOLLECTABLE
    ptr_t * GC_CONST GC_auobjfreelist_ptr = GC_auobjfreelist;
# endif
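/*
 * Illustrative sketch (an assumption-laden example, not part of the
 * collector): an inlined allocator might pop an lw-word NORMAL object off
 * the free list reached through GC_objfreelist_ptr, assuming a
 * single-threaded build where no allocation lock is needed (the macros in
 * gc_inline.h add FASTLOCK handling around the same sequence).  Here
 * obj_link(p) is just the first word of the object, *(ptr_t *)(p), and
 * GC_incr_words_allocd keeps the allocation statistics consistent for
 * code without direct access to GC_arrays.
 *
 *	ptr_t *flh = &(GC_objfreelist_ptr[lw]);
 *	ptr_t op = *flh;
 *	if (op == 0) {
 *	    op = GC_generic_malloc_words_small(lw, NORMAL);   (slow path)
 *	} else {
 *	    *flh = obj_link(op);	(unlink the first object)
 *	    obj_link(op) = 0;		(hide the link word from the client)
 *	    GC_incr_words_allocd(lw);
 *	}
 */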
GC_PTR GC_generic_or_special_malloc(lb,knd)
word lb;
int knd;
{
    switch(knd) {
#	ifdef STUBBORN_ALLOC
	case STUBBORN:
	    return(GC_malloc_stubborn((size_t)lb));
#	endif
	case PTRFREE:
	    return(GC_malloc_atomic((size_t)lb));
	case NORMAL:
	    return(GC_malloc((size_t)lb));
	case UNCOLLECTABLE:
	    return(GC_malloc_uncollectable((size_t)lb));
#	ifdef ATOMIC_UNCOLLECTABLE
	case AUNCOLLECTABLE:
	    return(GC_malloc_atomic_uncollectable((size_t)lb));
#	endif /* ATOMIC_UNCOLLECTABLE */
	default:
	    return(GC_generic_malloc(lb,knd));
    }
}
/* Change the size of the block pointed to by p to contain at least	*/
/* lb bytes.  The object may be (and quite likely will be) moved.	*/
/* The kind (e.g. atomic) is the same as that of the old.		*/
/* Shrinking of large blocks is not implemented well.			*/
# ifdef __STDC__
    GC_PTR GC_realloc(GC_PTR p, size_t lb)
# else
    GC_PTR GC_realloc(p,lb)
    GC_PTR p;
    size_t lb;
# endif
{
register struct hblk * h;
register hdr * hhdr;
register word sz;	 /* Current size in bytes	*/
register word orig_sz;	 /* Original sz in bytes	*/
int obj_kind;

    if (p == 0) return(GC_malloc(lb));	/* Required by ANSI */
    h = HBLKPTR(p);
    hhdr = HDR(h);
    sz = hhdr -> hb_sz;
    obj_kind = hhdr -> hb_obj_kind;
    sz = WORDS_TO_BYTES(sz);
    orig_sz = sz;

    if (sz > MAXOBJBYTES) {
	/* Round it up to the next whole heap block */
	  register word descr;

	  sz = (sz+HBLKSIZE-1) & (~HBLKMASK);
	  hhdr -> hb_sz = BYTES_TO_WORDS(sz);
	  descr = GC_obj_kinds[obj_kind].ok_descriptor;
	  if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
	  hhdr -> hb_descr = descr;
	  if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
	  /* Extra area is already cleared by GC_alloc_large_and_clear. */
    }
    if (ADD_SLOP(lb) <= sz) {
	if (lb >= (sz >> 1)) {
#	    ifdef STUBBORN_ALLOC
		if (obj_kind == STUBBORN) GC_change_stubborn(p);
#	    endif
	    if (orig_sz > lb) {
	      /* Clear unneeded part of object to avoid bogus pointer	*/
	      /* tracing.						*/
	      /* Safe for stubborn objects.				*/
		BZERO(((ptr_t)p) + lb, orig_sz - lb);
	    }
	    return(p);
	} else {
	    /* shrink */
	      GC_PTR result =
			GC_generic_or_special_malloc((word)lb, obj_kind);

	      if (result == 0) return(0);
		  /* Could also return original object.  But this	*/
		  /* gives the client warning of imminent disaster.	*/
	      BCOPY(p, result, lb);
#	      ifndef IGNORE_FREE
		GC_free(p);
#	      endif
	      return(result);
	}
    } else {
	/* grow */
	  GC_PTR result =
		GC_generic_or_special_malloc((word)lb, obj_kind);

	  if (result == 0) return(0);
	  BCOPY(p, result, sz);
#	  ifndef IGNORE_FREE
	    GC_free(p);
#	  endif
	  return(result);
    }
}
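/*
 * Illustrative client usage (a sketch, not part of the collector): grow a
 * collectable buffer with GC_realloc.  The object may be moved, so only
 * the returned pointer may be used afterwards; a zero return indicates
 * that the allocation failed.
 *
 *	char *buf = (char *)GC_malloc(100);
 *	...
 *	buf = (char *)GC_realloc(buf, 200);	(first 100 bytes preserved)
 *	if (buf == 0) exit(1);			(allocation failed)
 */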
# if defined(REDIRECT_MALLOC) && !defined(REDIRECT_REALLOC)
#   define REDIRECT_REALLOC GC_realloc
# endif

# ifdef REDIRECT_REALLOC

/* As with malloc, avoid two levels of extra calls here.	*/
# ifdef GC_ADD_CALLER
#   define RA GC_RETURN_ADDR,
# else
#   define RA
# endif
# define GC_debug_realloc_replacement(p, lb) \
	GC_debug_realloc(p, lb, RA "unknown", 0)

# ifdef __STDC__
    GC_PTR realloc(GC_PTR p, size_t lb)
# else
    GC_PTR realloc(p,lb)
    GC_PTR p;
    size_t lb;
# endif
{
    return(REDIRECT_REALLOC(p, lb));
}

# undef GC_debug_realloc_replacement
# endif /* REDIRECT_REALLOC */
/* The same thing, except caller does not hold allocation lock.	*/
/* We avoid holding allocation lock while we clear memory.		*/
ptr_t GC_generic_malloc_ignore_off_page(lb, k)
register size_t lb;
register int k;
{
    register ptr_t result;
    word lw;
    word n_blocks;
    GC_bool init;
    DCL_LOCK_STATE;

    if (SMALL_OBJ(lb))
        return(GC_generic_malloc((word)lb, k));
    lw = ROUNDED_UP_WORDS(lb);
    n_blocks = OBJ_SZ_TO_BLOCKS(lw);
    init = GC_obj_kinds[k].ok_init;
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    result = (ptr_t)GC_alloc_large(lw, k, IGNORE_OFF_PAGE);
    if (0 != result) {
        if (GC_debugging_started) {
	    BZERO(result, n_blocks * HBLKSIZE);
        } else {
#	    ifdef THREADS
	      /* Clear any memory that might be used for GC descriptors */
	      /* before we release the lock.				 */
	        ((word *)result)[0] = 0;
	        ((word *)result)[1] = 0;
	        ((word *)result)[lw-1] = 0;
	        ((word *)result)[lw-2] = 0;
#	    endif
        }
    }
    GC_words_allocd += lw;
    UNLOCK();
    ENABLE_SIGNALS();
    if (0 == result) {
        return((*GC_oom_fn)(lb));
    } else {
        if (init && !GC_debugging_started) {
	    BZERO(result, n_blocks * HBLKSIZE);
	}
	return(result);
    }
}
# if defined(__STDC__) || defined(__cplusplus)
  void * GC_malloc_ignore_off_page(size_t lb)
# else
  char * GC_malloc_ignore_off_page(lb)
  register size_t lb;
# endif
{
    return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, NORMAL));
}

# if defined(__STDC__) || defined(__cplusplus)
  void * GC_malloc_atomic_ignore_off_page(size_t lb)
# else
  char * GC_malloc_atomic_ignore_off_page(lb)
  register size_t lb;
# endif
{
    return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, PTRFREE));
}
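/*
 * Illustrative client usage (a sketch, not part of the collector): the
 * ignore_off_page variants are intended for large objects for which the
 * client keeps a pointer to (or near) the beginning of the object while
 * it is live, so the collector need not honor arbitrary interior pointers.
 *
 *	double *big = (double *)GC_malloc_atomic_ignore_off_page(
 *					1000000 * sizeof(double));
 *	if (big == 0) exit(1);
 *	(keep big itself reachable; do not retain only a pointer deep
 *	 into the middle of the array)
 */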
# ifdef __STDC__
/* Increment GC_words_allocd from code that doesn't have direct access	*/
/* to GC_arrays.							*/
void GC_incr_words_allocd(size_t n)
{
    GC_words_allocd += n;
}

/* The same for GC_mem_freed.						*/
void GC_incr_mem_freed(size_t n)
{
    GC_mem_freed += n;
}
# endif /* __STDC__ */
/* Analogous to the above, but assumes a small object size, and	*/
/* bypasses MERGE_SIZES mechanism.  Used by gc_inline.h.		*/
ptr_t GC_generic_malloc_words_small_inner(lw, k)
register word lw;
register int k;
{
register ptr_t op;
register ptr_t *opp;
register struct obj_kind * kind = GC_obj_kinds + k;

    opp = &(kind -> ok_freelist[lw]);
    if( (op = *opp) == 0 ) {
        if (!GC_is_initialized) {
            GC_init_inner();
        }
	if (kind -> ok_reclaim_list != 0 || GC_alloc_reclaim_list(kind)) {
	    op = GC_clear_stack(GC_allocobj((word)lw, k));
	}
	if (op == 0) {
	    UNLOCK();
	    ENABLE_SIGNALS();
	    return ((*GC_oom_fn)(WORDS_TO_BYTES(lw)));
	}
    }
    *opp = obj_link(op);
    obj_link(op) = 0;
    GC_words_allocd += lw;
    return((ptr_t)op);
}
/* Analogous to the above, but assumes a small object size, and	*/
/* bypasses MERGE_SIZES mechanism.  Used by gc_inline.h.		*/
#ifdef __STDC__
     ptr_t GC_generic_malloc_words_small(size_t lw, int k)
#else
     ptr_t GC_generic_malloc_words_small(lw, k)
     register word lw;
     register int k;
#endif
{
register ptr_t op;
DCL_LOCK_STATE;

    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    op = GC_generic_malloc_words_small_inner(lw, k);
    UNLOCK();
    ENABLE_SIGNALS();
    return((ptr_t)op);
}
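/*
 * Illustrative sketch (not part of the collector): unlike the byte-count
 * allocators, these routines take a word count, e.g. a four-word,
 * pointer-containing object of the NORMAL kind:
 *
 *	ptr_t p = GC_generic_malloc_words_small(4, NORMAL);
 */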
#if defined(THREADS) && !defined(SRC_M3)

extern signed_word GC_mem_found;   /* Protected by GC lock.  */

#ifdef PARALLEL_MARK
volatile signed_word GC_words_allocd_tmp = 0;
			/* Number of words of memory allocated since	 */
			/* we released the GC lock.  Instead of		 */
			/* reacquiring the GC lock just to add this in,	 */
			/* we add it in the next time we reacquire	 */
			/* the lock.  (Atomically adding it doesn't	 */
			/* work, since we would have to atomically	 */
			/* update it in GC_malloc, which is too		 */
			/* expensive.)					 */
#endif /* PARALLEL_MARK */

extern ptr_t GC_reclaim_generic();
/* Return a list of 1 or more objects of the indicated size, linked	*/
/* through the first word in the object.  This has the advantage that	*/
/* it acquires the allocation lock only once, and may greatly reduce	*/
/* time wasted contending for the allocation lock.  Typical usage would */
/* be in a thread that requires many items of the same size.  It would	*/
/* keep its own free list in thread-local storage, and call		*/
/* GC_malloc_many or friends to replenish it.  (We do not round up	*/
/* object sizes, since a call indicates the intention to consume many	*/
/* objects of exactly this size.)					*/
/* We return the free-list by assigning it to *result, since it is	*/
/* not safe to return, e.g. a linked list of pointer-free objects,	*/
/* since the collector would not retain the entire list if it were	*/
/* invoked just as we were returning.					*/
/* Note that the client should usually clear the link field.		*/
void GC_generic_malloc_many(lb, k, result)
register word lb;
register int k;
ptr_t *result;
{
ptr_t op;
ptr_t p;
ptr_t *opp;
word lw;
word my_words_allocd = 0;
struct obj_kind * ok = &(GC_obj_kinds[k]);
DCL_LOCK_STATE;

#   if defined(GATHERSTATS) || defined(PARALLEL_MARK)
#     define COUNT_ARG , &my_words_allocd
#   else
#     define COUNT_ARG
#     define NEED_TO_COUNT
#   endif
    if (!SMALL_OBJ(lb)) {
        op = GC_generic_malloc(lb, k);
        if(0 != op) obj_link(op) = 0;
        *result = op;
        return;
    }
    lw = ALIGNED_WORDS(lb);
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    if (!GC_is_initialized) GC_init_inner();
    /* Do our share of marking work */
      if (GC_incremental && !GC_dont_gc) {
	ENTER_GC();
	GC_collect_a_little_inner(1);
	EXIT_GC();
      }
    /* First see if we can reclaim a page of objects waiting to be	*/
    /* reclaimed.							*/
    {
	struct hblk ** rlh = ok -> ok_reclaim_list;
	struct hblk * hbp;
	hdr * hhdr;

	rlh += lw;
	while ((hbp = *rlh) != 0) {
	    hhdr = HDR(hbp);
	    *rlh = hhdr -> hb_next;
	    hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
#	    ifdef PARALLEL_MARK
		{
		  signed_word my_words_allocd_tmp = GC_words_allocd_tmp;

		  GC_ASSERT(my_words_allocd_tmp >= 0);
		  /* We only decrement it while holding the GC lock.	*/
		  /* Thus we can't accidentally adjust it down in more	*/
		  /* than one thread simultaneously.			*/
		  if (my_words_allocd_tmp != 0) {
		    (void)GC_atomic_add(
				(volatile GC_word *)(&GC_words_allocd_tmp),
				(GC_word)(-my_words_allocd_tmp));
		    GC_words_allocd += my_words_allocd_tmp;
		  }
		}
		GC_acquire_mark_lock();
		++ GC_fl_builder_count;
		UNLOCK();
		ENABLE_SIGNALS();
		GC_release_mark_lock();
#	    endif
	    op = GC_reclaim_generic(hbp, hhdr, lw,
				    ok -> ok_init, 0 COUNT_ARG);
	    if (op != 0) {
#	      ifdef NEED_TO_COUNT
		/* We are neither gathering statistics, nor marking in	*/
		/* parallel.  Thus GC_reclaim_generic doesn't count	*/
		/* for us.						*/
		for (p = op; p != 0; p = obj_link(p)) {
		  my_words_allocd += lw;
		}
#	      endif
#	      if defined(GATHERSTATS)
		/* We also reclaimed memory, so we need to adjust	*/
		/* that count.						*/
		/* This should be atomic, so the results may be		*/
		/* inaccurate.						*/
		GC_mem_found += my_words_allocd;
#	      endif
#	      ifdef PARALLEL_MARK
		*result = op;
		(void)GC_atomic_add(
				(volatile GC_word *)(&GC_words_allocd_tmp),
				(GC_word)(my_words_allocd));
		GC_acquire_mark_lock();
		-- GC_fl_builder_count;
		if (GC_fl_builder_count == 0) GC_notify_all_builder();
		GC_release_mark_lock();
		(void) GC_clear_stack(0);
		return;
#	      else
		GC_words_allocd += my_words_allocd;
		goto out;
#	      endif
	    }
#	    ifdef PARALLEL_MARK
	      GC_acquire_mark_lock();
	      -- GC_fl_builder_count;
	      if (GC_fl_builder_count == 0) GC_notify_all_builder();
	      GC_release_mark_lock();
	      DISABLE_SIGNALS();
	      LOCK();
	      /* GC lock is needed for reclaim list access.  We	*/
	      /* must decrement fl_builder_count before reacquiring GC	*/
	      /* lock.  Hopefully this path is rare.			*/
#	    endif
	}
    }
    /* Next try to use prefix of global free list if there is one.	*/
    /* We don't refill it, but we need to use it up before allocating	*/
    /* a new block ourselves.						*/
      opp = &(GC_obj_kinds[k].ok_freelist[lw]);
      if ( (op = *opp) != 0 ) {
	*opp = 0;
	my_words_allocd = 0;
	for (p = op; p != 0; p = obj_link(p)) {
	  my_words_allocd += lw;
	  if (my_words_allocd >= BODY_SZ) {
	    *opp = obj_link(p);
	    obj_link(p) = 0;
	    break;
	  }
	}
	GC_words_allocd += my_words_allocd;
	goto out;
      }
    /* Next try to allocate a new block worth of objects of this size.	*/
    {
	struct hblk *h = GC_allochblk(lw, k, 0);
	if (h != 0) {
	  if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
	  GC_words_allocd += BYTES_TO_WORDS(HBLKSIZE)
			       - BYTES_TO_WORDS(HBLKSIZE) % lw;
#	  ifdef PARALLEL_MARK
	    GC_acquire_mark_lock();
	    ++ GC_fl_builder_count;
	    UNLOCK();
	    ENABLE_SIGNALS();
	    GC_release_mark_lock();
#	  endif

	  op = GC_build_fl(h, lw, ok -> ok_init, 0);
#	  ifdef PARALLEL_MARK
	    *result = op;
	    GC_acquire_mark_lock();
	    -- GC_fl_builder_count;
	    if (GC_fl_builder_count == 0) GC_notify_all_builder();
	    GC_release_mark_lock();
	    (void) GC_clear_stack(0);
	    return;
#	  else
	    goto out;
#	  endif
	}
    }
    /* As a last attempt, try allocating a single object.  Note that	*/
    /* this may trigger a collection or expand the heap.		*/
      op = GC_generic_malloc_inner(lb, k);
      if (0 != op) obj_link(op) = 0;

  out:
    *result = op;
    UNLOCK();
    ENABLE_SIGNALS();
    (void) GC_clear_stack(0);
}
GC_PTR GC_malloc_many(size_t lb)
{
    ptr_t result;
    GC_generic_malloc_many(lb, NORMAL, &result);
    return result;
}

/* Note that the "atomic" version of this would be unsafe, since the	*/
/* links would not be seen by the collector.				*/
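/*
 * Illustrative client usage (a sketch, not part of the collector): a
 * thread keeps a private free list of same-sized objects, replenishing it
 * with GC_malloc_many and clearing the link word before handing an object
 * to the rest of the program.  GC_NEXT (from gc.h) reads the link field.
 *
 *	static void *my_free_list = 0;	(thread-local in real code)
 *
 *	void *my_alloc_16(void)
 *	{
 *	    void *p;
 *	    if (my_free_list == 0) {
 *	        my_free_list = GC_malloc_many(16);
 *	        if (my_free_list == 0) return 0;	(out of memory)
 *	    }
 *	    p = my_free_list;
 *	    my_free_list = GC_NEXT(p);
 *	    GC_NEXT(p) = 0;	(clear the link field, as recommended above)
 *	    return p;
 *	}
 */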
/* Allocate lb bytes of pointerful, traced, but not collectable data	*/
# ifdef __STDC__
    GC_PTR GC_malloc_uncollectable(size_t lb)
# else
    GC_PTR GC_malloc_uncollectable(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#	ifdef MERGE_SIZES
	  if (EXTRA_BYTES != 0 && lb != 0) lb--;
		  /* We don't need the extra byte, since this won't be	*/
		  /* collected anyway.					*/
	  lw = GC_size_map[lb];
#	else
	  lw = ALIGNED_WORDS(lb);
#	endif
	opp = &(GC_uobjfreelist[lw]);
	FASTLOCK();
	if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
	    /* See above comment on signals.	*/
	    *opp = obj_link(op);
	    obj_link(op) = 0;
	    GC_words_allocd += lw;
	    /* Mark bit was already set on free list.  It will be	*/
	    /* cleared only temporarily during a collection, as a	*/
	    /* result of the normal free list mark bit clearing.	*/
	    GC_non_gc_bytes += WORDS_TO_BYTES(lw);
	    FASTUNLOCK();
	    return((GC_PTR) op);
	}
	FASTUNLOCK();
	op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
    } else {
	op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
    }
    if (0 == op) return(0);
    /* We don't need the lock here, since we have an undisguised	*/
    /* pointer.  We do need to hold the lock while we adjust		*/
    /* mark bits.							*/
    {
	register struct hblk * h;

	h = HBLKPTR(op);
	lw = HDR(h) -> hb_sz;

	DISABLE_SIGNALS();
	LOCK();
	GC_set_mark_bit(op);
	GC_non_gc_bytes += WORDS_TO_BYTES(lw);
	UNLOCK();
	ENABLE_SIGNALS();
	return((GC_PTR) op);
    }
}
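/*
 * Illustrative client usage (a sketch, not part of the collector; the
 * struct name is made up): uncollectable objects are traced but never
 * reclaimed automatically, so they can anchor other GC data, and they
 * must be released explicitly with GC_free when no longer needed.
 *
 *	struct symtab *tab =
 *	    (struct symtab *)GC_malloc_uncollectable(sizeof(struct symtab));
 *	...
 *	GC_free(tab);
 */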
/* Not well tested nor integrated.	*/
/* Debug version is tricky and currently missing.	*/
#include <limits.h>

GC_PTR GC_memalign(size_t align, size_t lb)
{
    size_t new_lb;
    size_t offset;
    ptr_t result;

#   ifdef ALIGN_DOUBLE
	if (align <= WORDS_TO_BYTES(2) && lb > align) return GC_malloc(lb);
#   endif
    if (align <= WORDS_TO_BYTES(1)) return GC_malloc(lb);
    if (align >= HBLKSIZE/2 || lb >= HBLKSIZE/2) {
	if (align > HBLKSIZE) return GC_oom_fn(LONG_MAX-1024) /* Fail */;
	return GC_malloc(lb <= HBLKSIZE? HBLKSIZE : lb);
	    /* Will be HBLKSIZE aligned.	*/
    }
    /* We could also try to make sure that the real rounded-up object size */
    /* is a multiple of align.  That would be correct up to HBLKSIZE.	    */
    new_lb = lb + align - 1;
    result = GC_malloc(new_lb);
    offset = (word)result % align;
    if (offset != 0) {
	offset = align - offset;
	if (!GC_all_interior_pointers) {
	    if (offset >= VALID_OFFSET_SZ) return GC_malloc(HBLKSIZE);
	    GC_register_displacement(offset);
	}
    }
    result = (GC_PTR) ((ptr_t)result + offset);
    GC_ASSERT((word)result % align == 0);
    return result;
}
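/*
 * Illustrative client usage (a sketch, not part of the collector): request
 * collectable storage whose address is a multiple of a given alignment,
 * e.g. a 64-byte-aligned, 1 KB buffer:
 *
 *	void *buf = GC_memalign(64, 1024);
 *	(((GC_word)buf % 64) == 0 holds on success)
 */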
# ifdef ATOMIC_UNCOLLECTABLE
/* Allocate lb bytes of pointerfree, untraced, uncollectable data	*/
/* This is normally roughly equivalent to the system malloc.		*/
/* But it may be useful if malloc is redefined.				*/
# ifdef __STDC__
    GC_PTR GC_malloc_atomic_uncollectable(size_t lb)
# else
    GC_PTR GC_malloc_atomic_uncollectable(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#	ifdef MERGE_SIZES
	  if (EXTRA_BYTES != 0 && lb != 0) lb--;
		  /* We don't need the extra byte, since this won't be	*/
		  /* collected anyway.					*/
	  lw = GC_size_map[lb];
#	else
	  lw = ALIGNED_WORDS(lb);
#	endif
	opp = &(GC_auobjfreelist[lw]);
	FASTLOCK();
	if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
	    /* See above comment on signals.	*/
	    *opp = obj_link(op);
	    obj_link(op) = 0;
	    GC_words_allocd += lw;
	    /* Mark bit was already set while object was on free list. */
	    GC_non_gc_bytes += WORDS_TO_BYTES(lw);
	    FASTUNLOCK();
	    return((GC_PTR) op);
	}
	FASTUNLOCK();
	op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
    } else {
	op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
    }
    if (0 == op) return(0);
    /* We don't need the lock here, since we have an undisguised	*/
    /* pointer.  We do need to hold the lock while we adjust		*/
    /* mark bits.							*/
    {
	register struct hblk * h;

	h = HBLKPTR(op);
	lw = HDR(h) -> hb_sz;

	DISABLE_SIGNALS();
	LOCK();
	GC_set_mark_bit(op);
	GC_non_gc_bytes += WORDS_TO_BYTES(lw);
	UNLOCK();
	ENABLE_SIGNALS();
	return((GC_PTR) op);
    }
}

#endif /* ATOMIC_UNCOLLECTABLE */
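/*
 * Illustrative client usage (a sketch, not part of the collector): since
 * this memory is neither traced nor collected, it behaves much like the
 * system malloc and must be freed explicitly; it is appropriate for data
 * known to contain no pointers into the GC heap.
 *
 *	char *io_buf = (char *)GC_malloc_atomic_uncollectable(4096);
 *	...
 *	GC_free(io_buf);
 */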