/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/*
 * These are extra allocation routines which are likely to be less
 * frequently used than those in malloc.c.  They are separate in the
 * hope that the .o file will be excluded from statically linked
 * executables.  We should probably break this up further.
 */
#include "private/gc_priv.h"

extern ptr_t GC_clear_stack();    /* in misc.c, behaves like identity */
void GC_extend_size_map();        /* in misc.c. */
GC_bool GC_alloc_reclaim_list();  /* in malloc.c */
/* Some externally visible but unadvertised variables to allow access to */
/* free lists from inlined allocators without including gc_priv.h        */
/* or introducing dependencies on internal data structure layouts.       */
ptr_t * GC_CONST GC_objfreelist_ptr = GC_objfreelist;
ptr_t * GC_CONST GC_aobjfreelist_ptr = GC_aobjfreelist;
ptr_t * GC_CONST GC_uobjfreelist_ptr = GC_uobjfreelist;
# ifdef ATOMIC_UNCOLLECTABLE
    ptr_t * GC_CONST GC_auobjfreelist_ptr = GC_auobjfreelist;
# endif
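
/*
 * Illustrative sketch only (not part of the collector): an inlined
 * allocator can pop objects directly off one of these free lists, much
 * as the macros in gc_inline.h do.  The helper name my_alloc_words is
 * made up; lw is an object size in words, and callers must hold the
 * allocation lock (or be single-threaded) while touching the list.
 *
 *   ptr_t my_alloc_words(word lw)
 *   {
 *       ptr_t *flh = &GC_objfreelist_ptr[lw];  // per-size free-list head
 *       ptr_t op = *flh;
 *       if (op == 0) {
 *           // List empty: fall back on the out-of-line allocator.
 *           return GC_generic_malloc_words_small(lw, NORMAL);
 *       }
 *       *flh = obj_link(op);     // unlink the first object
 *       obj_link(op) = 0;        // clear the link word for the client
 *       GC_words_allocd += lw;   // keep allocation statistics consistent
 *       return op;
 *   }
 */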
GC_PTR GC_generic_or_special_malloc(lb, knd)
word lb;
int knd;
{
    switch(knd) {
#     ifdef STUBBORN_ALLOC
        case STUBBORN:
            return(GC_malloc_stubborn((size_t)lb));
#     endif
        case PTRFREE:
            return(GC_malloc_atomic((size_t)lb));
        case NORMAL:
            return(GC_malloc((size_t)lb));
        case UNCOLLECTABLE:
            return(GC_malloc_uncollectable((size_t)lb));
#       ifdef ATOMIC_UNCOLLECTABLE
          case AUNCOLLECTABLE:
            return(GC_malloc_atomic_uncollectable((size_t)lb));
#       endif /* ATOMIC_UNCOLLECTABLE */
        default:
            return(GC_generic_malloc(lb, knd));
    }
}
/* Change the size of the block pointed to by p to contain at least    */
/* lb bytes.  The object may be (and quite likely will be) moved.      */
/* The kind (e.g. atomic) is the same as that of the old.              */
/* Shrinking of large blocks is not implemented well.                  */
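/*
 * Illustrative sketch only: growing a collector-allocated buffer with
 * GC_realloc.  The helper name grow_buffer is made up for the example;
 * GC_malloc and GC_realloc are the real entry points.
 *
 *   word *grow_buffer(word *buf, size_t new_count)
 *   {
 *       // Passing a NULL buf behaves like GC_malloc (required by ANSI);
 *       // otherwise the kind of the old object is preserved and its
 *       // contents are copied if the object moves.
 *       return (word *)GC_realloc((GC_PTR)buf, new_count * sizeof(word));
 *   }
 */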
# ifdef __STDC__
    GC_PTR GC_realloc(GC_PTR p, size_t lb)
# else
    GC_PTR GC_realloc(p, lb)
    GC_PTR p;
    size_t lb;
# endif
{
register struct hblk * h;
register hdr * hhdr;
register word sz;        /* Current size in bytes        */
register word orig_sz;   /* Original sz in bytes         */
int obj_kind;

    if (p == 0) return(GC_malloc(lb));  /* Required by ANSI */
    h = HBLKPTR(p);
    hhdr = HDR(h);
    sz = hhdr -> hb_sz;
    obj_kind = hhdr -> hb_obj_kind;
    sz = WORDS_TO_BYTES(sz);
    orig_sz = sz;

    if (sz > MAXOBJBYTES) {
        /* Round it up to the next whole heap block */
        register word descr;

        sz = (sz + HBLKSIZE - 1) & (~HBLKMASK);
        hhdr -> hb_sz = BYTES_TO_WORDS(sz);
        descr = GC_obj_kinds[obj_kind].ok_descriptor;
        if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
        hhdr -> hb_descr = descr;
        if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
        /* Extra area is already cleared by GC_alloc_large_and_clear. */
    }
    if (ADD_SLOP(lb) <= sz) {
        if (lb >= (sz >> 1)) {
#           ifdef STUBBORN_ALLOC
                if (obj_kind == STUBBORN) GC_change_stubborn(p);
#           endif
            if (orig_sz > lb) {
                /* Clear unneeded part of object to avoid bogus pointer */
                /* tracing.                                             */
                /* Safe for stubborn objects.                           */
                BZERO(((ptr_t)p) + lb, orig_sz - lb);
            }
            return(p);
        } else {
            /* shrink */
            GC_PTR result =
                GC_generic_or_special_malloc((word)lb, obj_kind);

            if (result == 0) return(0);
                /* Could also return original object.  But this        */
                /* gives the client warning of imminent disaster.      */
            BCOPY(p, result, lb);
#           ifndef IGNORE_FREE
                GC_free(p);
#           endif
            return(result);
        }
    } else {
        /* grow */
        GC_PTR result =
            GC_generic_or_special_malloc((word)lb, obj_kind);

        if (result == 0) return(0);
        BCOPY(p, result, sz);
#       ifndef IGNORE_FREE
            GC_free(p);
#       endif
        return(result);
    }
}
# if defined(REDIRECT_MALLOC) && !defined(REDIRECT_REALLOC)
#   define REDIRECT_REALLOC GC_realloc
# endif

# ifdef REDIRECT_REALLOC
GC_PTR realloc(GC_PTR p, size_t lb)
{
    return(REDIRECT_REALLOC(p, lb));
}
# endif /* REDIRECT_REALLOC */
/* The same thing, except caller does not hold allocation lock.        */
/* We avoid holding allocation lock while we clear memory.             */
ptr_t GC_generic_malloc_ignore_off_page(lb, k)
register size_t lb;
register int k;
{
    register ptr_t result;
    word lw;
    word n_blocks;
    GC_bool init;
    DCL_LOCK_STATE;

    if (SMALL_OBJ(lb))
        return(GC_generic_malloc((word)lb, k));
    lw = ROUNDED_UP_WORDS(lb);
    n_blocks = OBJ_SZ_TO_BLOCKS(lw);
    init = GC_obj_kinds[k].ok_init;
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    result = (ptr_t)GC_alloc_large(lw, k, IGNORE_OFF_PAGE);
    if (0 != result) {
        if (GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
        } else {
#           ifdef THREADS
              /* Clear any memory that might be used for GC descriptors */
              /* before we release the lock.                            */
                ((word *)result)[0] = 0;
                ((word *)result)[1] = 0;
                ((word *)result)[lw-1] = 0;
                ((word *)result)[lw-2] = 0;
#           endif
        }
    }
    GC_words_allocd += lw;
    UNLOCK();
    ENABLE_SIGNALS();
    if (0 == result) {
        return((*GC_oom_fn)(lb));
    } else {
        if (init && !GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
        }
        return(result);
    }
}
# if defined(__STDC__) || defined(__cplusplus)
  void * GC_malloc_ignore_off_page(size_t lb)
# else
  char * GC_malloc_ignore_off_page(lb)
  register size_t lb;
# endif
{
    return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, NORMAL));
}

# if defined(__STDC__) || defined(__cplusplus)
  void * GC_malloc_atomic_ignore_off_page(size_t lb)
# else
  char * GC_malloc_atomic_ignore_off_page(lb)
  register size_t lb;
# endif
{
    return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, PTRFREE));
}
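
/*
 * Illustrative sketch only: these entry points are meant for large
 * objects (the small-object case above simply forwards to
 * GC_generic_malloc).  The usual contract, stated in gc.h, is that the
 * client keeps a pointer to somewhere near the beginning of the object,
 * so the collector need not honor pointers far into its interior.  The
 * variable names and sizes below are made up for the example.
 *
 *   word *big_vec = (word *)GC_malloc_ignore_off_page(8 * 1024 * 1024);
 *       // traced for pointers; keep big_vec (or a near-start pointer) live
 *   char *big_buf = (char *)GC_malloc_atomic_ignore_off_page(8 * 1024 * 1024);
 *       // pointer-free: contents are never scanned
 */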
/* Increment GC_words_allocd from code that doesn't have direct access */
/* to GC_arrays.                                                        */
# ifdef __STDC__
void GC_incr_words_allocd(size_t n)
{
    GC_words_allocd += n;
}

/* The same for GC_mem_freed. */
void GC_incr_mem_freed(size_t n)
{
    GC_mem_freed += n;
}
# endif /* __STDC__ */
/* Analogous to the above, but assumes a small object size, and        */
/* bypasses MERGE_SIZES mechanism.  Used by gc_inline.h.               */
ptr_t GC_generic_malloc_words_small_inner(lw, k)
register word lw;
register int k;
{
register ptr_t op;
register ptr_t *opp;
register struct obj_kind * kind = GC_obj_kinds + k;

    opp = &(kind -> ok_freelist[lw]);
    if( (op = *opp) == 0 ) {
        if (!GC_is_initialized) {
            GC_init_inner();
        }
        if (kind -> ok_reclaim_list != 0 || GC_alloc_reclaim_list(kind)) {
            op = GC_clear_stack(GC_allocobj((word)lw, k));
        }
        if (op == 0) {
            UNLOCK();
            ENABLE_SIGNALS();
            return ((*GC_oom_fn)(WORDS_TO_BYTES(lw)));
        }
    }
    *opp = obj_link(op);
    obj_link(op) = 0;
    GC_words_allocd += lw;
    return((ptr_t)op);
}
/* Analogous to the above, but assumes a small object size, and        */
/* bypasses MERGE_SIZES mechanism.  Used by gc_inline.h.               */
#ifdef __STDC__
     ptr_t GC_generic_malloc_words_small(size_t lw, int k)
#else
     ptr_t GC_generic_malloc_words_small(lw, k)
     register word lw;
     register int k;
#endif
{
register ptr_t op;
DCL_LOCK_STATE;

    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    op = GC_generic_malloc_words_small_inner(lw, k);
    UNLOCK();
    ENABLE_SIGNALS();
    return((ptr_t)op);
}
#if defined(THREADS) && !defined(SRC_M3)

extern signed_word GC_mem_found;   /* Protected by GC lock. */

#ifdef PARALLEL_MARK
volatile signed_word GC_words_allocd_tmp = 0;
                        /* Number of words of memory allocated since    */
                        /* we released the GC lock.  Instead of         */
                        /* reacquiring the GC lock just to add this in, */
                        /* we add it in the next time we reacquire      */
                        /* the lock.  (Atomically adding it doesn't     */
                        /* work, since we would have to atomically      */
                        /* update it in GC_malloc, which is too         */
                        /* expensive.)                                  */
#endif /* PARALLEL_MARK */

extern ptr_t GC_reclaim_generic();
/* Return a list of 1 or more objects of the indicated size, linked     */
/* through the first word in the object.  This has the advantage that   */
/* it acquires the allocation lock only once, and may greatly reduce    */
/* time wasted contending for the allocation lock.  Typical usage would */
/* be in a thread that requires many items of the same size.  It would  */
/* keep its own free list in thread-local storage, and call             */
/* GC_malloc_many or friends to replenish it.  (We do not round up      */
/* object sizes, since a call indicates the intention to consume many   */
/* objects of exactly this size.)                                       */
/* We return the free-list by assigning it to *result, since it is not  */
/* safe to return, e.g. a linked list of pointer-free objects, since    */
/* the collector would not retain the entire list if it were invoked    */
/* just as we were returning.                                           */
/* Note that the client should usually clear the link field.            */
void GC_generic_malloc_many(lb, k, result)
register word lb;
register int k;
ptr_t *result;
{
ptr_t op;
ptr_t p;
ptr_t *opp;
word lw;
word my_words_allocd = 0;
struct obj_kind * ok = &(GC_obj_kinds[k]);
DCL_LOCK_STATE;

#   if defined(GATHERSTATS) || defined(PARALLEL_MARK)
#     define COUNT_ARG , &my_words_allocd
#   else
#     define COUNT_ARG
#     define NEED_TO_COUNT
#   endif
    if (!SMALL_OBJ(lb)) {
        op = GC_generic_malloc(lb, k);
        if(0 != op) obj_link(op) = 0;
        *result = op;
        return;
    }
    lw = ALIGNED_WORDS(lb);
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    if (!GC_is_initialized) GC_init_inner();
    /* Do our share of marking work */
      if (GC_incremental && !GC_dont_gc) {
        ENTER_GC();
        GC_collect_a_little_inner(1);
        EXIT_GC();
      }
    /* First see if we can reclaim a page of objects waiting to be     */
    /* reclaimed.                                                      */
    {
        struct hblk ** rlh = ok -> ok_reclaim_list;
        struct hblk * hbp;
        hdr * hhdr;

        rlh += lw;
        while ((hbp = *rlh) != 0) {
            hhdr = HDR(hbp);
            *rlh = hhdr -> hb_next;
            hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
#           ifdef PARALLEL_MARK
              {
                signed_word my_words_allocd_tmp = GC_words_allocd_tmp;

                GC_ASSERT(my_words_allocd_tmp >= 0);
                /* We only decrement it while holding the GC lock.     */
                /* Thus we can't accidentally adjust it down in more   */
                /* than one thread simultaneously.                     */
                if (my_words_allocd_tmp != 0) {
                  (void)GC_atomic_add(
                              (volatile GC_word *)(&GC_words_allocd_tmp),
                              (GC_word)(-my_words_allocd_tmp));
                  GC_words_allocd += my_words_allocd_tmp;
                }
              }
              GC_acquire_mark_lock();
              ++ GC_fl_builder_count;
              UNLOCK();
              ENABLE_SIGNALS();
              GC_release_mark_lock();
#           endif
            op = GC_reclaim_generic(hbp, hhdr, lw,
                                    ok -> ok_init, 0 COUNT_ARG);
            if (op != 0) {
#             ifdef NEED_TO_COUNT
                /* We are neither gathering statistics, nor marking in */
                /* parallel.  Thus GC_reclaim_generic doesn't count    */
                /* for us.                                             */
                for (p = op; p != 0; p = obj_link(p)) {
                  my_words_allocd += lw;
                }
#             endif
#             if defined(GATHERSTATS)
                /* We also reclaimed memory, so we need to adjust      */
                /* that count.                                         */
                /* This should be atomic, so the results may be        */
                /* inaccurate.                                         */
                GC_mem_found += my_words_allocd;
#             endif
#             ifdef PARALLEL_MARK
                *result = op;
                (void)GC_atomic_add(
                            (volatile GC_word *)(&GC_words_allocd_tmp),
                            (GC_word)(my_words_allocd));
                GC_acquire_mark_lock();
                -- GC_fl_builder_count;
                if (GC_fl_builder_count == 0) GC_notify_all_builder();
                GC_release_mark_lock();
                (void) GC_clear_stack(0);
                return;
#             else
                GC_words_allocd += my_words_allocd;
                goto out;
#             endif
            }
#           ifdef PARALLEL_MARK
              GC_acquire_mark_lock();
              -- GC_fl_builder_count;
              if (GC_fl_builder_count == 0) GC_notify_all_builder();
              GC_release_mark_lock();
              DISABLE_SIGNALS();
              LOCK();
              /* GC lock is needed for reclaim list access.  We        */
              /* must decrement fl_builder_count before reacquiring GC */
              /* lock.  Hopefully this path is rare.                   */
#           endif
        }
    }
    /* Next try to use prefix of global free list if there is one.     */
    /* We don't refill it, but we need to use it up before allocating  */
    /* a new block ourselves.                                          */
      opp = &(GC_obj_kinds[k].ok_freelist[lw]);
      if ( (op = *opp) != 0 ) {
        *opp = 0;
        my_words_allocd = 0;
        for (p = op; p != 0; p = obj_link(p)) {
          my_words_allocd += lw;
          if (my_words_allocd >= BODY_SZ) {
            *opp = obj_link(p);
            obj_link(p) = 0;
            break;
          }
        }
        GC_words_allocd += my_words_allocd;
        goto out;
      }
    /* Next try to allocate a new block worth of objects of this size. */
    {
        struct hblk *h = GC_allochblk(lw, k, 0);
        if (h != 0) {
          if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
          GC_words_allocd += BYTES_TO_WORDS(HBLKSIZE)
                               - BYTES_TO_WORDS(HBLKSIZE) % lw;
#         ifdef PARALLEL_MARK
            GC_acquire_mark_lock();
            ++ GC_fl_builder_count;
            UNLOCK();
            ENABLE_SIGNALS();
            GC_release_mark_lock();
#         endif

          op = GC_build_fl(h, lw, ok -> ok_init, 0);
#         ifdef PARALLEL_MARK
            *result = op;
            GC_acquire_mark_lock();
            -- GC_fl_builder_count;
            if (GC_fl_builder_count == 0) GC_notify_all_builder();
            GC_release_mark_lock();
            (void) GC_clear_stack(0);
            return;
#         else
            goto out;
#         endif
        }
    }

    /* As a last attempt, try allocating a single object.  Note that   */
    /* this may trigger a collection or expand the heap.               */
      op = GC_generic_malloc_inner(lb, k);
      if (0 != op) obj_link(op) = 0;

  out:
    *result = op;
    UNLOCK();
    ENABLE_SIGNALS();
    (void) GC_clear_stack(0);
}
GC_PTR GC_malloc_many(size_t lb)
{
    ptr_t result;
    GC_generic_malloc_many(lb, NORMAL, &result);
    return result;
}
/* Note that the "atomic" version of this would be unsafe, since the   */
/* links would not be seen by the collector.                           */
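
/*
 * Illustrative sketch only: the typical client described above keeps a
 * per-thread free list and refills it with GC_malloc_many.  The names
 * my_freelist and my_alloc16 are made up for the example; obj_link is
 * the same first-word link used by the collector's own free lists.
 *
 *   static GC_PTR my_freelist = 0;   // ideally kept in thread-local storage
 *
 *   GC_PTR my_alloc16(void)
 *   {
 *       GC_PTR op = my_freelist;
 *       if (op == 0) {
 *           op = GC_malloc_many(16);   // replenish: list linked via first word
 *           if (op == 0) return 0;     // out of memory
 *       }
 *       my_freelist = obj_link(op);    // advance to the next object
 *       obj_link(op) = 0;              // client should clear the link field
 *       return op;
 *   }
 */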
/* Allocate lb bytes of pointerful, traced, but not collectable data. */
# ifdef __STDC__
    GC_PTR GC_malloc_uncollectable(size_t lb)
# else
    GC_PTR GC_malloc_uncollectable(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
          if (EXTRA_BYTES != 0 && lb != 0) lb--;
                /* We don't need the extra byte, since this won't be   */
                /* collected anyway.                                   */
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_uobjfreelist[lw]);
        FASTLOCK();
        if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
            /* See above comment on signals. */
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_words_allocd += lw;
            /* Mark bit was already set on free list.  It will be      */
            /* cleared only temporarily during a collection, as a      */
            /* result of the normal free list mark bit clearing.       */
            GC_non_gc_bytes += WORDS_TO_BYTES(lw);
            FASTUNLOCK();
            return((GC_PTR) op);
        }
        FASTUNLOCK();
        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
    } else {
        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
    }
    if (0 == op) return(0);
    /* We don't need the lock here, since we have an undisguised       */
    /* pointer.  We do need to hold the lock while we adjust           */
    /* mark bits.                                                      */
    {
        register struct hblk * h;

        h = HBLKPTR(op);
        lw = HDR(h) -> hb_sz;

        DISABLE_SIGNALS();
        LOCK();
        GC_set_mark_bit(op);
        GC_non_gc_bytes += WORDS_TO_BYTES(lw);
        UNLOCK();
        ENABLE_SIGNALS();
        return((GC_PTR) op);
    }
}
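
/*
 * Illustrative sketch only: GC_malloc_uncollectable suits objects that
 * must stay alive even when nothing the collector scans points to them,
 * but whose own contents should still be traced.  The struct and
 * variable names below are made up for the example.
 *
 *   struct registry {
 *       GC_PTR entries[64];    // pointers stored here keep their targets alive
 *   };
 *
 *   struct registry *r = (struct registry *)
 *           GC_malloc_uncollectable(sizeof(struct registry));
 *   // r itself is never collected (it must be released with GC_free),
 *   // but the collectable objects stored in r->entries stay live
 *   // because the block is traced.
 */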
/* Not well tested nor integrated.                */
/* Debug version is tricky and currently missing. */
#include <limits.h>        /* for LONG_MAX */

GC_PTR GC_memalign(size_t align, size_t lb)
{
    size_t new_lb;
    size_t offset;
    ptr_t result;

#   ifdef ALIGN_DOUBLE
        if (align <= WORDS_TO_BYTES(2) && lb > align) return GC_malloc(lb);
#   endif
    if (align <= WORDS_TO_BYTES(1)) return GC_malloc(lb);
    if (align >= HBLKSIZE/2 || lb >= HBLKSIZE/2) {
        if (align > HBLKSIZE) return GC_oom_fn(LONG_MAX-1024) /* Fail */;
        return GC_malloc(lb <= HBLKSIZE ? HBLKSIZE : lb);
            /* Will be HBLKSIZE aligned. */
    }
    /* We could also try to make sure that the real rounded-up object size */
    /* is a multiple of align.  That would be correct up to HBLKSIZE.      */
    new_lb = lb + align - 1;
    result = GC_malloc(new_lb);
    offset = (word)result % align;
    if (offset != 0) {
        offset = align - offset;
        if (!GC_all_interior_pointers) {
            if (offset >= VALID_OFFSET_SZ) return GC_malloc(HBLKSIZE);
            GC_register_displacement(offset);
        }
    }
    result = (GC_PTR) ((ptr_t)result + offset);
    GC_ASSERT((word)result % align == 0);
    return result;
}
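
/*
 * Worked example (illustrative numbers only): a call such as
 * GC_memalign(64, 100) over-allocates new_lb = 100 + 63 = 163 bytes.
 * If GC_malloc returns, say, an address ending in 0x28, then
 * offset = 0x28 % 64 = 40, so the result is bumped by 64 - 40 = 24
 * bytes, and 24 is registered as a valid displacement so the collector
 * still recognizes the bumped pointer as a reference to the underlying
 * object (unless GC_all_interior_pointers already covers it).
 */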
# ifdef ATOMIC_UNCOLLECTABLE
/* Allocate lb bytes of pointerfree, untraced, uncollectable data.     */
/* This is normally roughly equivalent to the system malloc.           */
/* But it may be useful if malloc is redefined.                        */
# ifdef __STDC__
    GC_PTR GC_malloc_atomic_uncollectable(size_t lb)
# else
    GC_PTR GC_malloc_atomic_uncollectable(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
          if (EXTRA_BYTES != 0 && lb != 0) lb--;
                /* We don't need the extra byte, since this won't be   */
                /* collected anyway.                                   */
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_auobjfreelist[lw]);
        FASTLOCK();
        if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
            /* See above comment on signals. */
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_words_allocd += lw;
            /* Mark bit was already set while object was on free list. */
            GC_non_gc_bytes += WORDS_TO_BYTES(lw);
            FASTUNLOCK();
            return((GC_PTR) op);
        }
        FASTUNLOCK();
        op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
    } else {
        op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
    }
    if (0 == op) return(0);
    /* We don't need the lock here, since we have an undisguised       */
    /* pointer.  We do need to hold the lock while we adjust           */
    /* mark bits.                                                      */
    {
        register struct hblk * h;

        h = HBLKPTR(op);
        lw = HDR(h) -> hb_sz;

        DISABLE_SIGNALS();
        LOCK();
        GC_set_mark_bit(op);
        GC_non_gc_bytes += WORDS_TO_BYTES(lw);
        UNLOCK();
        ENABLE_SIGNALS();
        return((GC_PTR) op);
    }
}

#endif /* ATOMIC_UNCOLLECTABLE */
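
/*
 * Illustrative sketch only: when malloc itself is redirected into the
 * collector (e.g. with REDIRECT_MALLOC, as for REDIRECT_REALLOC above),
 * a pointer-free, explicitly managed buffer can be obtained like this
 * instead (the variable name is made up):
 *
 *   char *io_buf = (char *)GC_malloc_atomic_uncollectable(4096);
 *   // never scanned for pointers, never collected; release with GC_free.
 */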