/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1996 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/* Boehm, February 1, 1996 1:19 pm PST */
# define I_HIDE_POINTERS
# include "gc_priv.h"
# include "gc_mark.h"
# ifdef FINALIZE_ON_DEMAND
    int GC_finalize_on_demand = 1;
# else
    int GC_finalize_on_demand = 0;
# endif

# ifdef JAVA_FINALIZATION
    int GC_java_finalization = 1;
# else
    int GC_java_finalization = 0;
# endif
/* Type of mark procedure used for marking from finalizable object.    */
/* This procedure normally does not mark the object, only its          */
/* descendants.                                                         */
typedef void finalization_mark_proc(/* ptr_t finalizable_obj_ptr */);
# define HASH3(addr,size,log_size) \
    ((((word)(addr) >> 3) ^ ((word)(addr) >> (3+(log_size)))) \
    & ((size) - 1))
#define HASH2(addr,log_size) HASH3(addr, 1 << log_size, log_size)
struct hash_chain_entry {
    word hidden_key;
    struct hash_chain_entry * next;
};

unsigned GC_finalization_failures = 0;
        /* Number of finalization requests that failed for lack of memory. */
static struct disappearing_link {
    struct hash_chain_entry prolog;
#   define dl_hidden_link prolog.hidden_key
                                /* Field to be cleared.         */
#   define dl_next(x) (struct disappearing_link *)((x) -> prolog.next)
#   define dl_set_next(x,y) (x) -> prolog.next = (struct hash_chain_entry *)(y)

    word dl_hidden_obj;         /* Pointer to object base       */
} **dl_head = 0;

static signed_word log_dl_table_size = -1;
                        /* Binary log of                                 */
                        /* current size of array pointed to by dl_head.  */
                        /* -1 ==> size is 0.                             */

word GC_dl_entries = 0; /* Number of entries currently in disappearing   */
                        /* link table.                                   */
static struct finalizable_object {
    struct hash_chain_entry prolog;
#   define fo_hidden_base prolog.hidden_key
                                /* Pointer to object base.      */
                                /* No longer hidden once object */
                                /* is on finalize_now queue.    */
#   define fo_next(x) (struct finalizable_object *)((x) -> prolog.next)
#   define fo_set_next(x,y) (x) -> prolog.next = (struct hash_chain_entry *)(y)
    GC_finalization_proc fo_fn;                 /* Finalizer.           */
    ptr_t fo_client_data;
    word fo_object_size;                        /* In bytes.            */
    finalization_mark_proc * fo_mark_proc;      /* Mark-through procedure */
} **fo_head = 0;

struct finalizable_object * GC_finalize_now = 0;
        /* List of objects that should be finalized now.        */

static signed_word log_fo_table_size = -1;

word GC_fo_entries = 0;
void GC_push_finalizer_structures()
{
    GC_push_all((ptr_t)(&dl_head), (ptr_t)(&dl_head) + sizeof(word));
    GC_push_all((ptr_t)(&fo_head), (ptr_t)(&fo_head) + sizeof(word));
}
/* Double the size of a hash table. *size_ptr is the log of its current */
/* size.  May be a noop.                                                 */
/* *table is a pointer to an array of hash headers.  If we succeed, we   */
/* update both *table and *log_size_ptr.                                 */
/* Lock is held.  Signals are disabled.                                  */
void GC_grow_table(table, log_size_ptr)
struct hash_chain_entry ***table;
signed_word * log_size_ptr;
{
    register word i;
    register struct hash_chain_entry *p;
    int log_old_size = *log_size_ptr;
    register int log_new_size = log_old_size + 1;
    word old_size = ((log_old_size == -1)? 0: (1 << log_old_size));
    register word new_size = 1 << log_new_size;
    struct hash_chain_entry **new_table = (struct hash_chain_entry **)
        GC_generic_malloc_inner_ignore_off_page(
                (size_t)new_size * sizeof(struct hash_chain_entry *), NORMAL);

    if (new_table == 0) {
        if (*table == 0) {
            ABORT("Insufficient space for initial table allocation");
        } else {
            return;
        }
    }
    for (i = 0; i < old_size; i++) {
      p = (*table)[i];
      while (p != 0) {
        register ptr_t real_key = (ptr_t)REVEAL_POINTER(p -> hidden_key);
        register struct hash_chain_entry *next = p -> next;
        register int new_hash = HASH3(real_key, new_size, log_new_size);

        p -> next = new_table[new_hash];
        new_table[new_hash] = p;
        p = next;
      }
    }
    *log_size_ptr = log_new_size;
    *table = new_table;
}
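
/* Illustrative sketch, not part of the original collector source: how   */
/* the open-hashing scheme above is typically traversed.  The bucket     */
/* index comes from HASH2 on the key address, and keys are stored in     */
/* hidden (disguised) form so the tables themselves do not keep entries' */
/* targets alive.  The helper name example_lookup is hypothetical.       */
#if 0
static struct disappearing_link * example_lookup(GC_PTR * link)
{
    register int index = HASH2(link, log_dl_table_size);
    register struct disappearing_link * curr;

    for (curr = dl_head[index]; curr != 0; curr = dl_next(curr)) {
        /* Compare against the hidden form of the key, as the tables   */
        /* below do.                                                    */
        if (curr -> dl_hidden_link == HIDE_POINTER(link)) return(curr);
    }
    return(0);
}
#endif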
# if defined(__STDC__) || defined(__cplusplus)
    int GC_register_disappearing_link(GC_PTR * link)
# else
    int GC_register_disappearing_link(link)
    GC_PTR * link;
# endif
{
    ptr_t base;

    base = (ptr_t)GC_base((GC_PTR)link);
    if (base == 0)
        ABORT("Bad arg to GC_register_disappearing_link");
    return(GC_general_register_disappearing_link(link, base));
}
# if defined(__STDC__) || defined(__cplusplus)
    int GC_general_register_disappearing_link(GC_PTR * link,
                                              GC_PTR obj)
# else
    int GC_general_register_disappearing_link(link, obj)
    GC_PTR * link;
    GC_PTR obj;
# endif
{
    struct disappearing_link *curr_dl;
    int index;
    struct disappearing_link * new_dl;
    DCL_LOCK_STATE;

    if ((word)link & (ALIGNMENT-1))
        ABORT("Bad arg to GC_general_register_disappearing_link");
#   ifdef THREADS
        DISABLE_SIGNALS();
        LOCK();
#   endif
    if (log_dl_table_size == -1
        || GC_dl_entries > ((word)1 << log_dl_table_size)) {
#       ifndef THREADS
            DISABLE_SIGNALS();
#       endif
        GC_grow_table((struct hash_chain_entry ***)(&dl_head),
                      &log_dl_table_size);
#       ifdef PRINTSTATS
            GC_printf1("Grew dl table to %lu entries\n",
                       (unsigned long)(1 << log_dl_table_size));
#       endif
#       ifndef THREADS
            ENABLE_SIGNALS();
#       endif
    }
    index = HASH2(link, log_dl_table_size);
    curr_dl = dl_head[index];
    for (curr_dl = dl_head[index]; curr_dl != 0; curr_dl = dl_next(curr_dl)) {
        if (curr_dl -> dl_hidden_link == HIDE_POINTER(link)) {
            curr_dl -> dl_hidden_obj = HIDE_POINTER(obj);
#           ifdef THREADS
                UNLOCK();
                ENABLE_SIGNALS();
#           endif
            return(1);
        }
    }
#   ifdef THREADS
      new_dl = (struct disappearing_link *)
        GC_generic_malloc_inner(sizeof(struct disappearing_link),NORMAL);
#   else
      new_dl = (struct disappearing_link *)
        GC_malloc(sizeof(struct disappearing_link));
#   endif
    if (new_dl != 0) {
        new_dl -> dl_hidden_obj = HIDE_POINTER(obj);
        new_dl -> dl_hidden_link = HIDE_POINTER(link);
        dl_set_next(new_dl, dl_head[index]);
        dl_head[index] = new_dl;
        GC_dl_entries++;
    } else {
        GC_finalization_failures++;
    }
#   ifdef THREADS
        UNLOCK();
        ENABLE_SIGNALS();
#   endif
    return(0);
}
# if defined(__STDC__) || defined(__cplusplus)
    int GC_unregister_disappearing_link(GC_PTR * link)
# else
    int GC_unregister_disappearing_link(link)
    GC_PTR * link;
# endif
{
    struct disappearing_link *curr_dl, *prev_dl;
    int index;
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    index = HASH2(link, log_dl_table_size);
    if (((unsigned long)link & (ALIGNMENT-1))) goto out;
    prev_dl = 0; curr_dl = dl_head[index];
    while (curr_dl != 0) {
        if (curr_dl -> dl_hidden_link == HIDE_POINTER(link)) {
            if (prev_dl == 0) {
                dl_head[index] = dl_next(curr_dl);
            } else {
                dl_set_next(prev_dl, dl_next(curr_dl));
            }
            GC_dl_entries--;
            UNLOCK();
            ENABLE_SIGNALS();
            GC_free((GC_PTR)curr_dl);
            return(1);
        }
        prev_dl = curr_dl;
        curr_dl = dl_next(curr_dl);
    }
out:
    UNLOCK();
    ENABLE_SIGNALS();
    return(0);
}
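
/* Illustrative usage sketch, not part of the original collector source:  */
/* a disappearing link behaves like a weak reference.  The helper names   */
/* make_weak_ref and drop_weak_ref are hypothetical; only the GC_* calls  */
/* are the interface declared in gc.h.  The slot lives in pointer-free    */
/* (atomic) memory so the stored pointer does not itself keep the target  */
/* reachable.                                                              */
#if 0
# include "gc.h"

  /* Create a weak reference to obj (obj must be the base address of a   */
  /* collected object).  The collector clears *slot once obj becomes     */
  /* unreachable.                                                         */
  GC_PTR * make_weak_ref(GC_PTR obj)
  {
      GC_PTR * slot = (GC_PTR *)GC_malloc_atomic(sizeof(GC_PTR));
      *slot = obj;
      GC_general_register_disappearing_link(slot, obj);
      return slot;
  }

  /* Drop the weak reference explicitly before reusing the slot.          */
  void drop_weak_ref(GC_PTR * slot)
  {
      GC_unregister_disappearing_link(slot);
      *slot = 0;
  }
#endif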
/* Possible finalization_marker procedures.  Note that mark stack      */
/* overflow is handled by the caller, and is not a disaster.           */
GC_API void GC_normal_finalize_mark_proc(p)
ptr_t p;
{
    hdr * hhdr = HDR(p);

    PUSH_OBJ((word *)p, hhdr, GC_mark_stack_top,
             &(GC_mark_stack[GC_mark_stack_size]));
}
/* This only pays very partial attention to the mark descriptor.       */
/* It does the right thing for normal and atomic objects, and treats   */
/* most others as normal.                                               */
GC_API void GC_ignore_self_finalize_mark_proc(p)
ptr_t p;
{
    hdr * hhdr = HDR(p);
    word descr = hhdr -> hb_descr;
    ptr_t q, r;
    ptr_t scan_limit;
    ptr_t target_limit = p + WORDS_TO_BYTES(hhdr -> hb_sz) - 1;

    if ((descr & DS_TAGS) == DS_LENGTH) {
        scan_limit = p + descr - sizeof(word);
    } else {
        scan_limit = target_limit + 1 - sizeof(word);
    }
    for (q = p; q <= scan_limit; q += ALIGNMENT) {
        r = *(ptr_t *)q;
        if (r < p || r > target_limit) {
            GC_PUSH_ONE_HEAP((word)r, q);
        }
    }
}
GC_API void GC_null_finalize_mark_proc(p)
ptr_t p;
{
}
/* Register a finalization function.  See gc.h for details.    */
/* In the nonthreads case, we try to avoid disabling signals,  */
/* since it can be expensive.  Threads packages typically      */
/* make it cheaper.                                             */
/* The last parameter is a procedure that determines           */
/* marking for finalization ordering.  Any objects marked      */
/* by that procedure will be guaranteed to not have been       */
/* finalized when this finalizer is invoked.                    */
GC_API void GC_register_finalizer_inner(obj, fn, cd, ofn, ocd, mp)
GC_PTR obj;
GC_finalization_proc fn;
GC_PTR cd;
GC_finalization_proc * ofn;
GC_PTR * ocd;
finalization_mark_proc * mp;
{
    ptr_t base;
    struct finalizable_object * curr_fo, * prev_fo;
    int index;
    struct finalizable_object *new_fo;
    DCL_LOCK_STATE;

#   ifdef THREADS
        DISABLE_SIGNALS();
        LOCK();
#   endif
    if (log_fo_table_size == -1
        || GC_fo_entries > ((word)1 << log_fo_table_size)) {
#       ifndef THREADS
            DISABLE_SIGNALS();
#       endif
        GC_grow_table((struct hash_chain_entry ***)(&fo_head),
                      &log_fo_table_size);
#       ifdef PRINTSTATS
            GC_printf1("Grew fo table to %lu entries\n",
                       (unsigned long)(1 << log_fo_table_size));
#       endif
#       ifndef THREADS
            ENABLE_SIGNALS();
#       endif
    }
    /* In the THREADS case signals are disabled and we hold allocation */
    /* lock; otherwise neither is true.  Proceed carefully.            */
    base = (ptr_t)obj;
    index = HASH2(base, log_fo_table_size);
    prev_fo = 0; curr_fo = fo_head[index];
    while (curr_fo != 0) {
        if (curr_fo -> fo_hidden_base == HIDE_POINTER(base)) {
            /* Interruption by a signal in the middle of this  */
            /* should be safe.  The client may see only *ocd   */
            /* updated, but we'll declare that to be his       */
            /* problem.                                         */
            if (ocd) *ocd = (GC_PTR) curr_fo -> fo_client_data;
            if (ofn) *ofn = curr_fo -> fo_fn;
            /* Delete the structure for base. */
            if (prev_fo == 0) {
                fo_head[index] = fo_next(curr_fo);
            } else {
                fo_set_next(prev_fo, fo_next(curr_fo));
            }
            if (fn == 0) {
                GC_fo_entries--;
                /* May not happen if we get a signal.  But a high  */
                /* estimate will only make the table larger than   */
                /* necessary.                                      */
#               ifndef THREADS
                    GC_free((GC_PTR)curr_fo);
#               endif
            } else {
                curr_fo -> fo_fn = fn;
                curr_fo -> fo_client_data = (ptr_t)cd;
                curr_fo -> fo_mark_proc = mp;
                /* Reinsert it.  We deleted it first to maintain   */
                /* consistency in the event of a signal.           */
                if (prev_fo == 0) {
                    fo_head[index] = curr_fo;
                } else {
                    fo_set_next(prev_fo, curr_fo);
                }
            }
#           ifdef THREADS
                UNLOCK();
                ENABLE_SIGNALS();
#           endif
            return;
        }
        prev_fo = curr_fo;
        curr_fo = fo_next(curr_fo);
    }
    if (ofn) *ofn = 0;
    if (ocd) *ocd = 0;
    if (fn == 0) {
#       ifdef THREADS
            UNLOCK();
            ENABLE_SIGNALS();
#       endif
        return;
    }
#   ifdef THREADS
      new_fo = (struct finalizable_object *)
        GC_generic_malloc_inner(sizeof(struct finalizable_object),NORMAL);
#   else
      new_fo = (struct finalizable_object *)
        GC_malloc(sizeof(struct finalizable_object));
#   endif
    if (new_fo != 0) {
        new_fo -> fo_hidden_base = (word)HIDE_POINTER(base);
        new_fo -> fo_fn = fn;
        new_fo -> fo_client_data = (ptr_t)cd;
        new_fo -> fo_object_size = GC_size(base);
        new_fo -> fo_mark_proc = mp;
        fo_set_next(new_fo, fo_head[index]);
        GC_fo_entries++;
        fo_head[index] = new_fo;
    } else {
        GC_finalization_failures++;
    }
#   ifdef THREADS
        UNLOCK();
        ENABLE_SIGNALS();
#   endif
}
# if defined(__STDC__)
    void GC_register_finalizer(void * obj,
                               GC_finalization_proc fn, void * cd,
                               GC_finalization_proc *ofn, void ** ocd)
# else
    void GC_register_finalizer(obj, fn, cd, ofn, ocd)
    GC_PTR obj;
    GC_finalization_proc fn;
    GC_PTR cd;
    GC_finalization_proc * ofn;
    GC_PTR * ocd;
# endif
{
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_normal_finalize_mark_proc);
}
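
/* Illustrative usage sketch, not part of the original collector source:  */
/* registering a finalizer through the public gc.h interface.  The names  */
/* struct conn, close_conn and make_conn are hypothetical.                 */
#if 0
# include "gc.h"

  struct conn { int fd; struct conn * peer; };

  /* Finalizer: called once a conn object becomes unreachable.  With the  */
  /* default ordering (GC_normal_finalize_mark_proc), peer has not been   */
  /* finalized yet when this runs.                                         */
  static void close_conn(GC_PTR obj, GC_PTR client_data)
  {
      struct conn * c = (struct conn *)obj;
      /* Release the external resource here; client_data is passed        */
      /* through unchanged from registration, e.g. close(c -> fd).        */
  }

  struct conn * make_conn(int fd)
  {
      struct conn * c = (struct conn *)GC_malloc(sizeof(struct conn));
      c -> fd = fd;
      /* No interest in any previously registered finalizer, so ofn/ocd   */
      /* are passed as 0.                                                  */
      GC_register_finalizer((GC_PTR)c, close_conn, 0, 0, 0);
      return c;
  }
#endif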
# if defined(__STDC__)
    void GC_register_finalizer_ignore_self(void * obj,
                               GC_finalization_proc fn, void * cd,
                               GC_finalization_proc *ofn, void ** ocd)
# else
    void GC_register_finalizer_ignore_self(obj, fn, cd, ofn, ocd)
    GC_PTR obj;
    GC_finalization_proc fn;
    GC_PTR cd;
    GC_finalization_proc * ofn;
    GC_PTR * ocd;
# endif
{
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_ignore_self_finalize_mark_proc);
}
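
/* Illustrative sketch, not part of the original collector source: the    */
/* ignore_self variant helps with objects that contain pointers to        */
/* themselves, which under the default ordering look like a finalization  */
/* cycle and are never enqueued.  The names below are hypothetical.       */
#if 0
# include "gc.h"

  struct node { struct node * self; int payload; };

  static void node_finalizer(GC_PTR obj, GC_PTR cd) { /* release resources */ }

  struct node * make_node(void)
  {
      struct node * n = (struct node *)GC_malloc(sizeof(struct node));
      n -> self = n;    /* self-pointer: ignored for ordering purposes */
      GC_register_finalizer_ignore_self((GC_PTR)n, node_finalizer, 0, 0, 0);
      return n;
  }
#endif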
# if defined(__STDC__)
    void GC_register_finalizer_no_order(void * obj,
                               GC_finalization_proc fn, void * cd,
                               GC_finalization_proc *ofn, void ** ocd)
# else
    void GC_register_finalizer_no_order(obj, fn, cd, ofn, ocd)
    GC_PTR obj;
    GC_finalization_proc fn;
    GC_PTR cd;
    GC_finalization_proc * ofn;
    GC_PTR * ocd;
# endif
{
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_null_finalize_mark_proc);
}
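
/* Illustrative sketch, not part of the original collector source: with   */
/* the no_order variant no ordering marks are made, so mutually           */
/* referencing objects in a cycle can all be enqueued for finalization in */
/* the same collection.  The finalizer therefore must not assume that     */
/* objects it references are still unfinalized.  Names are hypothetical.  */
#if 0
# include "gc.h"

  struct ring { struct ring * next; };

  static void ring_finalizer(GC_PTR obj, GC_PTR cd)
  {
      /* ((struct ring *)obj) -> next may already have been finalized;    */
      /* only touch memory, not external state owned by the neighbour.    */
  }

  void link_pair(struct ring * a, struct ring * b)
  {
      a -> next = b;  b -> next = a;    /* a cycle */
      GC_register_finalizer_no_order((GC_PTR)a, ring_finalizer, 0, 0, 0);
      GC_register_finalizer_no_order((GC_PTR)b, ring_finalizer, 0, 0, 0);
  }
#endif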
/* Called with world stopped.  Cause disappearing links to disappear,  */
/* and invoke finalizers.                                               */
void GC_finalize()
{
    struct disappearing_link * curr_dl, * prev_dl, * next_dl;
    struct finalizable_object * curr_fo, * prev_fo, * next_fo;
    ptr_t real_ptr, real_link;
    register int i;
    int dl_size = (log_dl_table_size == -1 ) ? 0 : (1 << log_dl_table_size);
    int fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);

  /* Make disappearing links disappear */
    for (i = 0; i < dl_size; i++) {
      curr_dl = dl_head[i];
      prev_dl = 0;
      while (curr_dl != 0) {
        real_ptr = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_obj);
        real_link = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link);
        if (!GC_is_marked(real_ptr)) {
            *(word *)real_link = 0;
            next_dl = dl_next(curr_dl);
            if (prev_dl == 0) {
                dl_head[i] = next_dl;
            } else {
                dl_set_next(prev_dl, next_dl);
            }
            GC_clear_mark_bit((ptr_t)curr_dl);
            GC_dl_entries--;
            curr_dl = next_dl;
        } else {
            prev_dl = curr_dl;
            curr_dl = dl_next(curr_dl);
        }
      }
    }
  /* Mark all objects reachable via chains of 1 or more pointers       */
  /* from finalizable objects.                                          */
    if (GC_mark_state != MS_NONE) ABORT("Bad mark state");
    for (i = 0; i < fo_size; i++) {
      for (curr_fo = fo_head[i]; curr_fo != 0; curr_fo = fo_next(curr_fo)) {
        real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            GC_MARKED_FOR_FINALIZATION(real_ptr);
            GC_MARK_FO(real_ptr, curr_fo -> fo_mark_proc);
            if (GC_is_marked(real_ptr)) {
                WARN("Finalization cycle involving %lx\n", real_ptr);
            }
        }
      }
    }
  /* Enqueue for finalization all objects that are still               */
  /* unreachable.                                                       */
    GC_words_finalized = 0;
    for (i = 0; i < fo_size; i++) {
      curr_fo = fo_head[i];
      prev_fo = 0;
      while (curr_fo != 0) {
        real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            if (!GC_java_finalization) {
                GC_set_mark_bit(real_ptr);
            }
            /* Delete from hash table */
            next_fo = fo_next(curr_fo);
            if (prev_fo == 0) {
                fo_head[i] = next_fo;
            } else {
                fo_set_next(prev_fo, next_fo);
            }
            GC_fo_entries--;
            /* Add to list of objects awaiting finalization.   */
            fo_set_next(curr_fo, GC_finalize_now);
            GC_finalize_now = curr_fo;
            /* unhide object pointer so any future collections will    */
            /* see it.                                                  */
            curr_fo -> fo_hidden_base =
                        (word) REVEAL_POINTER(curr_fo -> fo_hidden_base);
            GC_words_finalized +=
                        ALIGNED_WORDS(curr_fo -> fo_object_size)
                        + ALIGNED_WORDS(sizeof(struct finalizable_object));
            if (!GC_is_marked((ptr_t)curr_fo)) {
                ABORT("GC_finalize: found accessible unmarked object\n");
            }
            curr_fo = next_fo;
        } else {
            prev_fo = curr_fo;
            curr_fo = fo_next(curr_fo);
        }
      }
    }

  if (GC_java_finalization) {
    /* make sure we mark everything reachable from objects finalized
       using the no_order mark_proc */
    for (curr_fo = GC_finalize_now;
         curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
        real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
        if (!GC_is_marked(real_ptr)) {
            if (curr_fo -> fo_mark_proc == GC_null_finalize_mark_proc) {
                GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
            }
            GC_set_mark_bit(real_ptr);
        }
    }
  }

  /* Remove dangling disappearing links. */
    for (i = 0; i < dl_size; i++) {
      curr_dl = dl_head[i];
      prev_dl = 0;
      while (curr_dl != 0) {
        real_link = GC_base((ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link));
        if (real_link != 0 && !GC_is_marked(real_link)) {
            next_dl = dl_next(curr_dl);
            if (prev_dl == 0) {
                dl_head[i] = next_dl;
            } else {
                dl_set_next(prev_dl, next_dl);
            }
            GC_clear_mark_bit((ptr_t)curr_dl);
            GC_dl_entries--;
            curr_dl = next_dl;
        } else {
            prev_dl = curr_dl;
            curr_dl = dl_next(curr_dl);
        }
      }
    }
}
#ifndef JAVA_FINALIZATION_NOT_NEEDED

/* Enqueue all remaining finalizers to be run - Assumes lock is
 * held, and signals are disabled */
void GC_enqueue_all_finalizers()
{
    struct finalizable_object * curr_fo, * prev_fo, * next_fo;
    ptr_t real_ptr;
    register int i;
    int fo_size;

    fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);
    GC_words_finalized = 0;
    for (i = 0; i < fo_size; i++) {
        curr_fo = fo_head[i];
        prev_fo = 0;
        while (curr_fo != 0) {
            real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
            GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
            GC_set_mark_bit(real_ptr);

            /* Delete from hash table */
            next_fo = fo_next(curr_fo);
            if (prev_fo == 0) {
                fo_head[i] = next_fo;
            } else {
                fo_set_next(prev_fo, next_fo);
            }
            GC_fo_entries--;

            /* Add to list of objects awaiting finalization.   */
            fo_set_next(curr_fo, GC_finalize_now);
            GC_finalize_now = curr_fo;

            /* unhide object pointer so any future collections will    */
            /* see it.                                                  */
            curr_fo -> fo_hidden_base =
                        (word) REVEAL_POINTER(curr_fo -> fo_hidden_base);
            GC_words_finalized +=
                        ALIGNED_WORDS(curr_fo -> fo_object_size)
                        + ALIGNED_WORDS(sizeof(struct finalizable_object));
            curr_fo = next_fo;
        }
    }
}
/* Invoke all remaining finalizers that haven't yet been run.
 * This is needed for strict compliance with the Java standard,
 * which can make the runtime guarantee that all finalizers are run.
 * Unfortunately, the Java standard implies we have to keep running
 * finalizers until there are no more left, a potential infinite loop.
 * Note that this is even more dangerous than the usual Java
 * finalizers, in that objects reachable from static variables
 * may have been finalized when these finalizers are run.
 * Finalizers run at this point must be prepared to deal with a
 * mostly broken world.
 * This routine is externally callable, so is called without
 * the allocation lock.
 */
GC_API void GC_finalize_all()
{
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    while (GC_fo_entries > 0) {
        GC_enqueue_all_finalizers();
        UNLOCK();
        ENABLE_SIGNALS();
        GC_INVOKE_FINALIZERS();
        DISABLE_SIGNALS();
        LOCK();
    }
    UNLOCK();
    ENABLE_SIGNALS();
}
#endif /* !JAVA_FINALIZATION_NOT_NEEDED */
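
/* Illustrative sketch, not part of the original collector source: a      */
/* runtime built on this collector (when GC_finalize_all is compiled in,  */
/* i.e. JAVA_FINALIZATION_NOT_NEEDED is not defined) might call it during */
/* orderly shutdown.  The wrapper name runtime_shutdown is hypothetical.  */
#if 0
  void runtime_shutdown(void)
  {
      /* Run every registered finalizer, even for objects that are still  */
      /* reachable.  Finalizers invoked here must tolerate a mostly       */
      /* torn-down world, as the comment above warns.                     */
      GC_finalize_all();
  }
#endif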
/* Returns true if it is worth calling GC_invoke_finalizers. (Useful if */
/* finalizers can only be called from some kind of `safe state' and     */
/* getting into that safe state is expensive.)                           */
int GC_should_invoke_finalizers GC_PROTO((void))
{
    return GC_finalize_now != 0;
}
/* Invoke finalizers for all objects that are ready to be finalized.   */
/* Should be called without allocation lock.                            */
int GC_invoke_finalizers()
{
    register struct finalizable_object * curr_fo;
    register int count = 0;
    DCL_LOCK_STATE;

    while (GC_finalize_now != 0) {
#       ifdef THREADS
            DISABLE_SIGNALS();
            LOCK();
#       endif
        curr_fo = GC_finalize_now;
#       ifdef THREADS
            if (curr_fo != 0) GC_finalize_now = fo_next(curr_fo);
            UNLOCK();
            ENABLE_SIGNALS();
            if (curr_fo == 0) break;
#       else
            GC_finalize_now = fo_next(curr_fo);
#       endif
        fo_set_next(curr_fo, 0);
        (*(curr_fo -> fo_fn))((ptr_t)(curr_fo -> fo_hidden_base),
                              curr_fo -> fo_client_data);
        curr_fo -> fo_client_data = 0;
        ++count;
#       ifdef UNDEFINED
            /* This is probably a bad idea.  It throws off accounting if */
            /* nearly all objects are finalizable.  O.w. it shouldn't    */
            /* matter.                                                   */
            GC_free((GC_PTR)curr_fo);
#       endif
    }
    return count;
}
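
/* Illustrative sketch, not part of the original collector source: with   */
/* GC_finalize_on_demand set, finalizers are not run automatically, and   */
/* the client runs them at a safe point of its own choosing using the     */
/* entry points defined above.  The hook name is hypothetical.            */
#if 0
  void event_loop_idle_hook(void)
  {
      /* Cheap test first; GC_invoke_finalizers acquires the allocation   */
      /* lock repeatedly, so skip it when the queue is empty.             */
      if (GC_should_invoke_finalizers()) {
          int n = GC_invoke_finalizers();
          /* n is the number of finalizers that were just run; the queue  */
          /* is now empty until the next collection.                      */
          (void)n;
      }
  }
#endif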
# if defined(__STDC__)
    GC_PTR GC_call_with_alloc_lock(GC_fn_type fn,
                                   GC_PTR client_data)
# else
    GC_PTR GC_call_with_alloc_lock(fn, client_data)
    GC_fn_type fn;
    GC_PTR client_data;
# endif
{
    GC_PTR result;
    DCL_LOCK_STATE;

#   ifdef THREADS
        DISABLE_SIGNALS();
        LOCK();
#   endif
    result = (*fn)(client_data);
#   ifdef THREADS
        UNLOCK();
        ENABLE_SIGNALS();
#   endif
    return(result);
}