/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/*
 * These are extra allocation routines which are likely to be less
 * frequently used than those in malloc.c.  They are separate in the
 * hope that the .o file will be excluded from statically linked
 * executables.  We should probably break this up further.
 */
#include "private/gc_priv.h"
extern ptr_t GC_clear_stack();      /* in misc.c, behaves like identity */
void GC_extend_size_map();          /* in misc.c. */
GC_bool GC_alloc_reclaim_list();    /* in malloc.c */
/* Some externally visible but unadvertised variables to allow access to */
/* free lists from inlined allocators without including gc_priv.h        */
/* or introducing dependencies on internal data structure layouts.       */
ptr_t * GC_CONST GC_objfreelist_ptr = GC_objfreelist;
ptr_t * GC_CONST GC_aobjfreelist_ptr = GC_aobjfreelist;
ptr_t * GC_CONST GC_uobjfreelist_ptr = GC_uobjfreelist;
# ifdef ATOMIC_UNCOLLECTABLE
    ptr_t * GC_CONST GC_auobjfreelist_ptr = GC_auobjfreelist;
# endif
GC_PTR GC_generic_or_special_malloc(lb,knd)
word lb;
int knd;
{
    switch(knd) {
#       ifdef STUBBORN_ALLOC
          case STUBBORN:
            return(GC_malloc_stubborn((size_t)lb));
#       endif
        case PTRFREE:
            return(GC_malloc_atomic((size_t)lb));
        case NORMAL:
            return(GC_malloc((size_t)lb));
        case UNCOLLECTABLE:
            return(GC_malloc_uncollectable((size_t)lb));
#       ifdef ATOMIC_UNCOLLECTABLE
          case AUNCOLLECTABLE:
            return(GC_malloc_atomic_uncollectable((size_t)lb));
#       endif /* ATOMIC_UNCOLLECTABLE */
        default:
            return(GC_generic_malloc(lb,knd));
    }
}
/* Change the size of the block pointed to by p to contain at least    */
/* lb bytes.  The object may be (and quite likely will be) moved.      */
/* The kind (e.g. atomic) is the same as that of the old.              */
/* Shrinking of large blocks is not implemented well.                  */
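/* Illustrative usage sketch only (not compiled, hence the "#if 0"):    */
/* GC_MALLOC and GC_REALLOC are the gc.h wrappers for the entry points  */
/* defined here; "grow_buffer" is a hypothetical name.                  */
#if 0
  static char * grow_buffer(char *buf)
  {
      if (buf == 0) buf = (char *)GC_MALLOC(64);
      /* May move the object; the old contents are copied and the kind  */
      /* (e.g. atomic) of the original allocation is preserved.         */
      return (char *)GC_REALLOC(buf, 256);
  }
#endif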
# ifdef __STDC__
    GC_PTR GC_realloc(GC_PTR p, size_t lb)
# else
    GC_PTR GC_realloc(p,lb)
    GC_PTR p;
    size_t lb;
# endif
{
register struct hblk * h;
register hdr * hhdr;
register word sz;        /* Current size in bytes  */
register word orig_sz;   /* Original sz in bytes   */
int obj_kind;

    if (p == 0) return(GC_malloc(lb));  /* Required by ANSI */
    h = HBLKPTR(p);
    hhdr = HDR(h);
    sz = hhdr -> hb_sz;
    obj_kind = hhdr -> hb_obj_kind;
    sz = WORDS_TO_BYTES(sz);
    orig_sz = sz;
    if (sz > MAXOBJBYTES) {
        /* Round it up to the next whole heap block */
          register word descr;

          sz = (sz+HBLKSIZE-1) & (~HBLKMASK);
          hhdr -> hb_sz = BYTES_TO_WORDS(sz);
          descr = GC_obj_kinds[obj_kind].ok_descriptor;
          if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
          hhdr -> hb_descr = descr;
          if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
          /* Extra area is already cleared by GC_alloc_large_and_clear. */
    }
    if (ADD_SLOP(lb) <= sz) {
        if (lb >= (sz >> 1)) {
#           ifdef STUBBORN_ALLOC
                if (obj_kind == STUBBORN) GC_change_stubborn(p);
#           endif
            if (orig_sz > lb) {
              /* Clear unneeded part of object to avoid bogus pointer */
              /* tracing.                                             */
              /* Safe for stubborn objects.                           */
                BZERO(((ptr_t)p) + lb, orig_sz - lb);
            }
            return(p);
        } else {
            /* shrink */
              GC_PTR result =
                        GC_generic_or_special_malloc((word)lb, obj_kind);

              if (result == 0) return(0);
                  /* Could also return original object.  But this     */
                  /* gives the client warning of imminent disaster.   */
              BCOPY(p, result, lb);
#             ifndef IGNORE_FREE
                GC_free(p);
#             endif
              return(result);
        }
    } else {
        /* grow */
          GC_PTR result =
                GC_generic_or_special_malloc((word)lb, obj_kind);

          if (result == 0) return(0);
          BCOPY(p, result, sz);
#         ifndef IGNORE_FREE
            GC_free(p);
#         endif
          return(result);
    }
}
# if defined(REDIRECT_MALLOC) || defined(REDIRECT_REALLOC)
# ifdef __STDC__
    GC_PTR realloc(GC_PTR p, size_t lb)
# else
    GC_PTR realloc(p,lb)
    GC_PTR p;
    size_t lb;
# endif
  {
#   ifdef REDIRECT_REALLOC
      return(REDIRECT_REALLOC(p, lb));
#   else
      return(GC_realloc(p, lb));
#   endif
  }
# endif /* REDIRECT_MALLOC */
/* The same thing, except caller does not hold allocation lock.        */
/* We avoid holding allocation lock while we clear memory.             */
ptr_t GC_generic_malloc_ignore_off_page(lb, k)
register size_t lb;
register int k;
{
    register ptr_t result;
    word lw;
    word n_blocks;
    GC_bool init;
    DCL_LOCK_STATE;

    if (SMALL_OBJ(lb))
        return(GC_generic_malloc((word)lb, k));
    lw = ROUNDED_UP_WORDS(lb);
    n_blocks = OBJ_SZ_TO_BLOCKS(lw);
    init = GC_obj_kinds[k].ok_init;
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    result = (ptr_t)GC_alloc_large(lw, k, IGNORE_OFF_PAGE);
    if (0 != result) {
        if (GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
        } else {
#           ifdef THREADS
              /* Clear any memory that might be used for GC descriptors */
              /* before we release the lock.                            */
                ((word *)result)[0] = 0;
                ((word *)result)[1] = 0;
                ((word *)result)[lw-1] = 0;
                ((word *)result)[lw-2] = 0;
#           endif
        }
    }
    GC_words_allocd += lw;
    UNLOCK();
    ENABLE_SIGNALS();
    if (0 == result) {
        return((*GC_oom_fn)(lb));
    } else {
        if (init & !GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
        }
        return(result);
    }
}
# if defined(__STDC__) || defined(__cplusplus)
  void * GC_malloc_ignore_off_page(size_t lb)
# else
  char * GC_malloc_ignore_off_page(lb)
  register size_t lb;
# endif
{
    return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, NORMAL));
}
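/* Illustrative sketch only: GC_MALLOC_IGNORE_OFF_PAGE (a gc.h wrapper) */
/* is meant for large objects for which the client promises to keep a   */
/* pointer to (or near) the beginning of the object while it is live,   */
/* so the collector need not recognize pointers into the middle of the  */
/* block.  "big_array" and "make_big_array" are hypothetical names.     */
#if 0
  static GC_word *big_array;

  void make_big_array(void)
  {
      big_array = (GC_word *)GC_MALLOC_IGNORE_OFF_PAGE(100000 * sizeof(GC_word));
  }
#endif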
# if defined(__STDC__) || defined(__cplusplus)
  void * GC_malloc_atomic_ignore_off_page(size_t lb)
# else
  char * GC_malloc_atomic_ignore_off_page(lb)
  register size_t lb;
# endif
{
    return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, PTRFREE));
}
/* Increment GC_words_allocd from code that doesn't have direct access  */
/* to GC_arrays.                                                         */
# ifdef __STDC__
void GC_incr_words_allocd(size_t n)
{
    GC_words_allocd += n;
}

/* The same for GC_mem_freed. */
void GC_incr_mem_freed(size_t n)
{
    GC_mem_freed += n;
}
# endif /* __STDC__ */
/* Analogous to the above, but assumes a small object size, and         */
/* bypasses MERGE_SIZES mechanism.  Used by gc_inline.h.                */
ptr_t GC_generic_malloc_words_small_inner(lw, k)
register word lw;
register int k;
{
register ptr_t op;
register ptr_t *opp;
register struct obj_kind * kind = GC_obj_kinds + k;

    opp = &(kind -> ok_freelist[lw]);
    if( (op = *opp) == 0 ) {
        if (!GC_is_initialized) {
            GC_init_inner();
        }
        if (kind -> ok_reclaim_list != 0 || GC_alloc_reclaim_list(kind)) {
            op = GC_clear_stack(GC_allocobj((word)lw, k));
        }
        if (op == 0) {
            UNLOCK();
            ENABLE_SIGNALS();
            return ((*GC_oom_fn)(WORDS_TO_BYTES(lw)));
        }
    }
    *opp = obj_link(op);
    obj_link(op) = 0;
    GC_words_allocd += lw;
    return((ptr_t)op);
}
/* Analogous to the above, but assumes a small object size, and         */
/* bypasses MERGE_SIZES mechanism.  Used by gc_inline.h.                */
#ifdef __STDC__
    ptr_t GC_generic_malloc_words_small(size_t lw, int k)
#else
    ptr_t GC_generic_malloc_words_small(lw, k)
    register word lw;
    register int k;
#endif
{
register ptr_t op;
DCL_LOCK_STATE;

    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    op = GC_generic_malloc_words_small_inner(lw, k);
    UNLOCK();
    ENABLE_SIGNALS();
    return((ptr_t)op);
}
#if defined(THREADS) && !defined(SRC_M3)

extern signed_word GC_mem_found;   /* Protected by GC lock. */

#ifdef PARALLEL_MARK
volatile signed_word GC_words_allocd_tmp = 0;
                        /* Number of words of memory allocated since    */
                        /* we released the GC lock.  Instead of         */
                        /* reacquiring the GC lock just to add this in, */
                        /* we add it in the next time we reacquire      */
                        /* the lock.  (Atomically adding it doesn't     */
                        /* work, since we would have to atomically      */
                        /* update it in GC_malloc, which is too         */
                        /* expensive.)                                   */
#endif /* PARALLEL_MARK */

extern ptr_t GC_reclaim_generic();
/* Return a list of 1 or more objects of the indicated size, linked     */
/* through the first word in the object.  This has the advantage that   */
/* it acquires the allocation lock only once, and may greatly reduce    */
/* time wasted contending for the allocation lock.  Typical usage would */
/* be in a thread that requires many items of the same size.  It would  */
/* keep its own free list in thread-local storage, and call             */
/* GC_malloc_many or friends to replenish it.  (We do not round up      */
/* object sizes, since a call indicates the intention to consume many   */
/* objects of exactly this size.)                                        */
/* We return the free list by assigning it to *result, since it is      */
/* not safe to return, e.g. a linked list of pointer-free objects,      */
/* since the collector would not retain the entire list if it were      */
/* invoked just as we were returning.                                    */
/* Note that the client should usually clear the link field.            */
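/* Illustrative sketch only (not compiled): a client thread keeping a   */
/* private free list replenished from GC_malloc_many.  GC_NEXT is the   */
/* link-field accessor declared in gc.h for THREADS builds;             */
/* "my_free_list" and "my_alloc8" are hypothetical names, and the list  */
/* would normally live in thread-local storage rather than a static.    */
#if 0
  static GC_PTR my_free_list = 0;

  GC_PTR my_alloc8(void)
  {
      GC_PTR op;

      if (0 == my_free_list) {
          my_free_list = GC_malloc_many(8); /* returns 1 or more objects */
          if (0 == my_free_list) return(0); /* out of memory             */
      }
      op = my_free_list;
      my_free_list = GC_NEXT(op);   /* unlink the first object           */
      GC_NEXT(op) = 0;              /* clear the link field, as advised  */
      return(op);
  }
#endif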
void GC_generic_malloc_many(lb, k, result)
register word lb;
register int k;
ptr_t *result;
{
ptr_t op;
ptr_t p;
ptr_t *opp;
word lw;
word my_words_allocd = 0;
struct obj_kind * ok = &(GC_obj_kinds[k]);
DCL_LOCK_STATE;

#   if defined(GATHERSTATS) || defined(PARALLEL_MARK)
#     define COUNT_ARG , &my_words_allocd
#   else
#     define COUNT_ARG
#     define NEED_TO_COUNT
#   endif
    if (!SMALL_OBJ(lb)) {
        op = GC_generic_malloc(lb, k);
        if(0 != op) obj_link(op) = 0;
        *result = op;
        return;
    }
    lw = ALIGNED_WORDS(lb);
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    if (!GC_is_initialized) GC_init_inner();
    /* Do our share of marking work */
      if (GC_incremental && !GC_dont_gc) {
        GC_collect_a_little_inner(1);
      }
    /* First see if we can reclaim a page of objects waiting to be      */
    /* reclaimed.                                                        */
    {
        struct hblk ** rlh = ok -> ok_reclaim_list;
        struct hblk * hbp;
        hdr * hhdr;

        rlh += lw;
        while ((hbp = *rlh) != 0) {
            hhdr = HDR(hbp);
            *rlh = hhdr -> hb_next;
#           ifdef PARALLEL_MARK
                {
                  signed_word my_words_allocd_tmp = GC_words_allocd_tmp;

                  GC_ASSERT(my_words_allocd_tmp >= 0);
                  /* We only decrement it while holding the GC lock.    */
                  /* Thus we can't accidentally adjust it down in more  */
                  /* than one thread simultaneously.                    */
                  if (my_words_allocd_tmp != 0) {
                    (void)GC_atomic_add(
                                (volatile GC_word *)(&GC_words_allocd_tmp),
                                (GC_word)(-my_words_allocd_tmp));
                    GC_words_allocd += my_words_allocd_tmp;
                  }
                }
                GC_acquire_mark_lock();
                ++ GC_fl_builder_count;
                UNLOCK();
                ENABLE_SIGNALS();
                GC_release_mark_lock();
#           endif
            op = GC_reclaim_generic(hbp, hhdr, lw,
                                    ok -> ok_init, 0 COUNT_ARG);
            if (op != 0) {
#             ifdef NEED_TO_COUNT
                /* We are neither gathering statistics, nor marking in  */
                /* parallel.  Thus GC_reclaim_generic doesn't count     */
                /* for us.                                               */
                for (p = op; p != 0; p = obj_link(p)) {
                  my_words_allocd += lw;
                }
#             endif
#             if defined(GATHERSTATS)
                /* We also reclaimed memory, so we need to adjust       */
                /* that count.                                           */
                /* This should be atomic, so the results may be         */
                /* inaccurate.                                           */
                GC_mem_found += my_words_allocd;
#             endif
#             ifdef PARALLEL_MARK
                *result = op;
                (void)GC_atomic_add(
                                (volatile GC_word *)(&GC_words_allocd_tmp),
                                (GC_word)(my_words_allocd));
                GC_acquire_mark_lock();
                -- GC_fl_builder_count;
                if (GC_fl_builder_count == 0) GC_notify_all_builder();
                GC_release_mark_lock();
                (void) GC_clear_stack(0);
                return;
#             else
                GC_words_allocd += my_words_allocd;
                goto out;
#             endif
            }
        }
#       ifdef PARALLEL_MARK
            GC_acquire_mark_lock();
            -- GC_fl_builder_count;
            if (GC_fl_builder_count == 0) GC_notify_all_builder();
            GC_release_mark_lock();
            DISABLE_SIGNALS();
            LOCK();
            /* GC lock is needed for reclaim list access.  We           */
            /* must decrement fl_builder_count before reacquiring GC    */
            /* lock.  Hopefully this path is rare.                      */
#       endif
    }
    /* Next try to use prefix of global free list if there is one.      */
    /* We don't refill it, but we need to use it up before allocating   */
    /* a new block ourselves.                                            */
      opp = &(GC_obj_kinds[k].ok_freelist[lw]);
      if ( (op = *opp) != 0 ) {
        *opp = 0;
        my_words_allocd = 0;
        for (p = op; p != 0; p = obj_link(p)) {
          my_words_allocd += lw;
          if (my_words_allocd >= BODY_SZ) {
            *opp = obj_link(p);
            obj_link(p) = 0;
            break;
          }
        }
        GC_words_allocd += my_words_allocd;
        goto out;
      }
    /* Next try to allocate a new block worth of objects of this size.  */
    {
        struct hblk *h = GC_allochblk(lw, k, 0);
        if (h != 0) {
          if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
          GC_words_allocd += BYTES_TO_WORDS(HBLKSIZE)
                               - BYTES_TO_WORDS(HBLKSIZE) % lw;
#         ifdef PARALLEL_MARK
            GC_acquire_mark_lock();
            ++ GC_fl_builder_count;
            UNLOCK();
            ENABLE_SIGNALS();
            GC_release_mark_lock();
#         endif

          op = GC_build_fl(h, lw, ok -> ok_init, 0);
#         ifdef PARALLEL_MARK
            *result = op;
            GC_acquire_mark_lock();
            -- GC_fl_builder_count;
            if (GC_fl_builder_count == 0) GC_notify_all_builder();
            GC_release_mark_lock();
            (void) GC_clear_stack(0);
            return;
#         else
            goto out;
#         endif
        }
    }
    /* As a last attempt, try allocating a single object.  Note that    */
    /* this may trigger a collection or expand the heap.                */
      op = GC_generic_malloc_inner(lb, k);
      if (0 != op) obj_link(op) = 0;

  out:
    *result = op;
    UNLOCK();
    ENABLE_SIGNALS();
    (void) GC_clear_stack(0);
}
GC_PTR GC_malloc_many(size_t lb)
{
ptr_t result;
GC_generic_malloc_many(lb, NORMAL, &result);
return result;
}
/* Note that the "atomic" version of this would be unsafe, since the    */
/* links would not be seen by the collector.                            */
# endif /* THREADS && !SRC_M3 */

/* Allocate lb bytes of pointerful, traced, but not collectable data    */
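/* Illustrative sketch only: an uncollectable (explicitly managed) table */
/* whose slots are still traced, so they keep collectable objects alive. */
/* GC_MALLOC_UNCOLLECTABLE and GC_FREE are the gc.h wrappers;             */
/* "root_table" is a hypothetical name.                                   */
#if 0
  struct tab { void * slots[256]; };
  static struct tab * root_table;

  void init_table(void)
  {
      root_table = (struct tab *)GC_MALLOC_UNCOLLECTABLE(sizeof(struct tab));
  }

  void drop_table(void)
  {
      GC_FREE(root_table);  /* never collected, so must be freed explicitly */
      root_table = 0;
  }
#endif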
# ifdef __STDC__
    GC_PTR GC_malloc_uncollectable(size_t lb)
# else
    GC_PTR GC_malloc_uncollectable(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
          if (EXTRA_BYTES != 0 && lb != 0) lb--;
                  /* We don't need the extra byte, since this won't be  */
                  /* collected anyway.                                  */
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_uobjfreelist[lw]);
        FASTLOCK();
        if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
            /* See above comment on signals. */
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_words_allocd += lw;
            /* Mark bit was already set on free list.  It will be       */
            /* cleared only temporarily during a collection, as a       */
            /* result of the normal free list mark bit clearing.        */
            GC_non_gc_bytes += WORDS_TO_BYTES(lw);
            FASTUNLOCK();
            return((GC_PTR) op);
        }
        FASTUNLOCK();
        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
    } else {
        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
    }
    if (0 == op) return(0);
    /* We don't need the lock here, since we have an undisguised        */
    /* pointer.  We do need to hold the lock while we adjust            */
    /* mark bits.                                                        */
    {
        register struct hblk * h;

        h = HBLKPTR(op);
        lw = HDR(h) -> hb_sz;

        DISABLE_SIGNALS();
        LOCK();
        GC_set_mark_bit(op);
        GC_non_gc_bytes += WORDS_TO_BYTES(lw);
        UNLOCK();
        ENABLE_SIGNALS();
        return((GC_PTR) op);
    }
}
# ifdef ATOMIC_UNCOLLECTABLE
/* Allocate lb bytes of pointerfree, untraced, uncollectable data.      */
/* This is normally roughly equivalent to the system malloc.            */
/* But it may be useful if malloc is redefined.                          */
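/* Illustrative sketch only: a pointer-free, explicitly managed buffer,  */
/* handy when malloc itself has been redirected to the collector;        */
/* "make_scratch_buf" is a hypothetical name.                             */
#if 0
  void * make_scratch_buf(size_t n)
  {
      /* Contents are never scanned for pointers and never collected;    */
      /* release with GC_free() when done.                                */
      return GC_malloc_atomic_uncollectable(n);
  }
#endif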
# ifdef __STDC__
    GC_PTR GC_malloc_atomic_uncollectable(size_t lb)
# else
    GC_PTR GC_malloc_atomic_uncollectable(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
          if (EXTRA_BYTES != 0 && lb != 0) lb--;
                  /* We don't need the extra byte, since this won't be  */
                  /* collected anyway.                                  */
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_auobjfreelist[lw]);
        FASTLOCK();
        if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
            /* See above comment on signals. */
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_words_allocd += lw;
            /* Mark bit was already set while object was on free list. */
            GC_non_gc_bytes += WORDS_TO_BYTES(lw);
            FASTUNLOCK();
            return((GC_PTR) op);
        }
        FASTUNLOCK();
        op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
    } else {
        op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
    }
    if (0 == op) return(0);
    /* We don't need the lock here, since we have an undisguised        */
    /* pointer.  We do need to hold the lock while we adjust            */
    /* mark bits.                                                        */
    {
        register struct hblk * h;

        h = HBLKPTR(op);
        lw = HDR(h) -> hb_sz;

        DISABLE_SIGNALS();
        LOCK();
        GC_set_mark_bit(op);
        GC_non_gc_bytes += WORDS_TO_BYTES(lw);
        UNLOCK();
        ENABLE_SIGNALS();
        return((GC_PTR) op);
    }
}

#endif /* ATOMIC_UNCOLLECTABLE */