[official-gcc.git] / boehm-gc / finalize.c
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1996 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (C) 2007 Free Software Foundation, Inc
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/* Boehm, February 1, 1996 1:19 pm PST */
# define I_HIDE_POINTERS
# include "private/gc_pmark.h"
# ifdef FINALIZE_ON_DEMAND
  int GC_finalize_on_demand = 1;
# else
  int GC_finalize_on_demand = 0;
# endif

# ifdef JAVA_FINALIZATION
  int GC_java_finalization = 1;
# else
  int GC_java_finalization = 0;
# endif
/* Type of mark procedure used for marking from finalizable object.	*/
/* This procedure normally does not mark the object, only its		*/
/* descendants.								*/
typedef void finalization_mark_proc(/* ptr_t finalizable_obj_ptr */);
# define HASH3(addr,size,log_size) \
    ((((word)(addr) >> 3) ^ ((word)(addr) >> (3+(log_size)))) \
    & ((size) - 1))
#define HASH2(addr,log_size) HASH3(addr, 1 << log_size, log_size)
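
/* Worked example of the hashing scheme above (purely illustrative;	*/
/* the collector does not compute this anywhere):  with log_size = 4	*/
/* (a 16-bucket table) and addr = 0x1008,				*/
/*   HASH2(0x1008, 4) = ((0x1008 >> 3) ^ (0x1008 >> 7)) & 15		*/
/*                    = (0x201 ^ 0x20) & 0xF = 0x221 & 0xF = 1,		*/
/* so that entry would be chained into bucket 1.			*/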
struct hash_chain_entry {
    word hidden_key;
    struct hash_chain_entry * next;
};
unsigned GC_finalization_failures = 0;
	/* Number of finalization requests that failed for lack of memory. */
static struct disappearing_link {
    struct hash_chain_entry prolog;
#   define dl_hidden_link prolog.hidden_key
				/* Field to be cleared.		*/
#   define dl_next(x) (struct disappearing_link *)((x) -> prolog.next)
#   define dl_set_next(x,y) (x) -> prolog.next = (struct hash_chain_entry *)(y)

    word dl_hidden_obj;		/* Pointer to object base	*/
} **dl_head = 0;
static signed_word log_dl_table_size = -1;
			/* Binary log of				*/
			/* current size of array pointed to by dl_head.	*/
			/* -1 ==> size is 0.				*/

word GC_dl_entries = 0;	/* Number of entries currently in disappearing	*/
			/* link table.					*/
static struct finalizable_object {
    struct hash_chain_entry prolog;
#   define fo_hidden_base prolog.hidden_key
				/* Pointer to object base.	*/
				/* No longer hidden once object	*/
				/* is on finalize_now queue.	*/
#   define fo_next(x) (struct finalizable_object *)((x) -> prolog.next)
#   define fo_set_next(x,y) (x) -> prolog.next = (struct hash_chain_entry *)(y)
    GC_finalization_proc fo_fn;	/* Finalizer.			*/
    ptr_t fo_client_data;
    word fo_object_size;	/* In bytes.			*/
    finalization_mark_proc * fo_mark_proc;	/* Mark-through procedure */
} **fo_head = 0;
struct finalizable_object * GC_finalize_now = 0;
	/* List of objects that should be finalized now.	*/

static signed_word log_fo_table_size = -1;

word GC_fo_entries = 0;
void GC_push_finalizer_structures GC_PROTO((void))
{
    GC_push_all((ptr_t)(&dl_head), (ptr_t)(&dl_head) + sizeof(word));
    GC_push_all((ptr_t)(&fo_head), (ptr_t)(&fo_head) + sizeof(word));
    GC_push_all((ptr_t)(&GC_finalize_now),
		(ptr_t)(&GC_finalize_now) + sizeof(word));
}
/* Double the size of a hash table.  *log_size_ptr is the log of its	*/
/* current size.  May be a noop.					*/
/* *table is a pointer to an array of hash headers.  If we succeed, we	*/
/* update both *table and *log_size_ptr.				*/
/* Lock is held.  Signals are disabled.					*/
void GC_grow_table(table, log_size_ptr)
struct hash_chain_entry ***table;
signed_word * log_size_ptr;
{
    register word i;
    register struct hash_chain_entry *p;
    int log_old_size = *log_size_ptr;
    register int log_new_size = log_old_size + 1;
    word old_size = ((log_old_size == -1)? 0: (1 << log_old_size));
    register word new_size = 1 << log_new_size;
    struct hash_chain_entry **new_table = (struct hash_chain_entry **)
    	GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
    		(size_t)new_size * sizeof(struct hash_chain_entry *), NORMAL);

    if (new_table == 0) {
    	if (table == 0) {
    	    ABORT("Insufficient space for initial table allocation");
    	} else {
    	    return;
    	}
    }
    for (i = 0; i < old_size; i++) {
      p = (*table)[i];
      while (p != 0) {
        register ptr_t real_key = (ptr_t)REVEAL_POINTER(p -> hidden_key);
        register struct hash_chain_entry *next = p -> next;
        register int new_hash = HASH3(real_key, new_size, log_new_size);

        p -> next = new_table[new_hash];
        new_table[new_hash] = p;
        p = next;
      }
    }
    *log_size_ptr = log_new_size;
    *table = new_table;
}
# if defined(__STDC__) || defined(__cplusplus)
    int GC_register_disappearing_link(GC_PTR * link)
# else
    int GC_register_disappearing_link(link)
    GC_PTR * link;
# endif
{
    ptr_t base;

    base = (ptr_t)GC_base((GC_PTR)link);
    if (base == 0)
    	ABORT("Bad arg to GC_register_disappearing_link");
    return(GC_general_register_disappearing_link(link, base));
}
# if defined(__STDC__) || defined(__cplusplus)
    int GC_general_register_disappearing_link(GC_PTR * link,
					       GC_PTR obj)
# else
    int GC_general_register_disappearing_link(link, obj)
    GC_PTR * link;
    GC_PTR obj;
# endif
{
    struct disappearing_link *curr_dl;
    int index;
    struct disappearing_link * new_dl;
    DCL_LOCK_STATE;
    if ((word)link & (ALIGNMENT-1))
    	ABORT("Bad arg to GC_general_register_disappearing_link");
#   ifdef THREADS
    	DISABLE_SIGNALS();
    	LOCK();
#   endif
    if (log_dl_table_size == -1
        || GC_dl_entries > ((word)1 << log_dl_table_size)) {
#	ifndef THREADS
	    DISABLE_SIGNALS();
#	endif
	GC_grow_table((struct hash_chain_entry ***)(&dl_head),
		      &log_dl_table_size);
#	ifdef CONDPRINT
	  if (GC_print_stats) {
	    GC_printf1("Grew dl table to %lu entries\n",
	    		(unsigned long)(1 << log_dl_table_size));
	  }
#	endif
#	ifndef THREADS
	    ENABLE_SIGNALS();
#	endif
    }
    index = HASH2(link, log_dl_table_size);
    curr_dl = dl_head[index];
    for (curr_dl = dl_head[index]; curr_dl != 0; curr_dl = dl_next(curr_dl)) {
        if (curr_dl -> dl_hidden_link == HIDE_POINTER(link)) {
            curr_dl -> dl_hidden_obj = HIDE_POINTER(obj);
#	    ifdef THREADS
                UNLOCK();
    	        ENABLE_SIGNALS();
#	    endif
            return(1);
        }
    }
    new_dl = (struct disappearing_link *)
    	GC_INTERNAL_MALLOC(sizeof(struct disappearing_link),NORMAL);
    if (0 == new_dl) {
#     ifdef THREADS
	UNLOCK();
    	ENABLE_SIGNALS();
#     endif
      new_dl = (struct disappearing_link *)
	      GC_oom_fn(sizeof(struct disappearing_link));
      if (0 == new_dl) {
	GC_finalization_failures++;
	return(0);
      }
      /* It's not likely we'll make it here, but ... */
#     ifdef THREADS
	DISABLE_SIGNALS();
	LOCK();
#     endif
    }
    new_dl -> dl_hidden_obj = HIDE_POINTER(obj);
    new_dl -> dl_hidden_link = HIDE_POINTER(link);
    dl_set_next(new_dl, dl_head[index]);
    dl_head[index] = new_dl;
    GC_dl_entries++;
#   ifdef THREADS
        UNLOCK();
        ENABLE_SIGNALS();
#   endif
    return(0);
}
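
/*
 * Illustrative client-side sketch (the struct and variable names are
 * hypothetical, not part of the collector): a weak reference can be
 * registered so that the collector clears it once the referenced
 * object becomes unreachable.
 *
 *   struct node { struct node *weak_ref; };
 *   struct node *n = GC_MALLOC(sizeof(struct node));
 *   struct node *target = GC_MALLOC(sizeof(struct node));
 *   n -> weak_ref = target;
 *   GC_general_register_disappearing_link((GC_PTR *)&(n -> weak_ref),
 *                                         (GC_PTR)target);
 *
 * Once target is no longer otherwise reachable, a collection sets
 * n -> weak_ref to 0 (see GC_finalize below).
 */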
# if defined(__STDC__) || defined(__cplusplus)
    int GC_unregister_disappearing_link(GC_PTR * link)
# else
    int GC_unregister_disappearing_link(link)
    GC_PTR * link;
# endif
{
    struct disappearing_link *curr_dl, *prev_dl;
    int index;
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    index = HASH2(link, log_dl_table_size);
    if (((unsigned long)link & (ALIGNMENT-1))) goto out;
    prev_dl = 0; curr_dl = dl_head[index];
    while (curr_dl != 0) {
        if (curr_dl -> dl_hidden_link == HIDE_POINTER(link)) {
            if (prev_dl == 0) {
                dl_head[index] = dl_next(curr_dl);
            } else {
                dl_set_next(prev_dl, dl_next(curr_dl));
            }
            GC_dl_entries--;
            UNLOCK();
    	    ENABLE_SIGNALS();
#           ifdef DBG_HDRS_ALL
              dl_set_next(curr_dl, 0);
#           else
              GC_free((GC_PTR)curr_dl);
#           endif
            return(1);
        }
        prev_dl = curr_dl;
        curr_dl = dl_next(curr_dl);
    }
out:
    UNLOCK();
    ENABLE_SIGNALS();
    return(0);
}
/* Possible finalization_marker procedures.  Note that mark stack	*/
/* overflow is handled by the caller, and is not a disaster.		*/
GC_API void GC_normal_finalize_mark_proc(p)
ptr_t p;
{
    hdr * hhdr = HDR(p);

    PUSH_OBJ((word *)p, hhdr, GC_mark_stack_top,
	     &(GC_mark_stack[GC_mark_stack_size]));
}
/* This only pays very partial attention to the mark descriptor.	*/
/* It does the right thing for normal and atomic objects, and treats	*/
/* most others as normal.						*/
GC_API void GC_ignore_self_finalize_mark_proc(p)
ptr_t p;
{
    hdr * hhdr = HDR(p);
    word descr = hhdr -> hb_descr;
    ptr_t q, r;
    ptr_t scan_limit;
    ptr_t target_limit = p + WORDS_TO_BYTES(hhdr -> hb_sz) - 1;

    if ((descr & GC_DS_TAGS) == GC_DS_LENGTH) {
       scan_limit = p + descr - sizeof(word);
    } else {
       scan_limit = target_limit + 1 - sizeof(word);
    }
    for (q = p; q <= scan_limit; q += ALIGNMENT) {
    	r = *(ptr_t *)q;
	if (r < p || r > target_limit) {
	    GC_PUSH_ONE_HEAP((word)r, q);
	}
    }
}
/*ARGSUSED*/
GC_API void GC_null_finalize_mark_proc(p)
ptr_t p;
{
}
/* Possible finalization_marker procedures.  Note that mark stack	*/
/* overflow is handled by the caller, and is not a disaster.		*/
GC_API void GC_unreachable_finalize_mark_proc(p)
ptr_t p;
{
    return GC_normal_finalize_mark_proc(p);
}
/* Register a finalization function.  See gc.h for details.	*/
/* In the non-threads case, we try to avoid disabling signals,	*/
/* since it can be expensive.  Threads packages typically	*/
/* make it cheaper.						*/
/* The last parameter is a procedure that determines		*/
/* marking for finalization ordering.  Any objects marked	*/
/* by that procedure will be guaranteed to not have been	*/
/* finalized when this finalizer is invoked.			*/
GC_API void GC_register_finalizer_inner(obj, fn, cd, ofn, ocd, mp)
GC_PTR obj;
GC_finalization_proc fn;
GC_PTR cd;
GC_finalization_proc * ofn;
GC_PTR * ocd;
finalization_mark_proc * mp;
{
    ptr_t base;
    struct finalizable_object * curr_fo, * prev_fo;
    int index;
    struct finalizable_object *new_fo;
    hdr *hhdr;
    DCL_LOCK_STATE;
#   ifdef THREADS
	DISABLE_SIGNALS();
	LOCK();
#   endif
    if (log_fo_table_size == -1
        || GC_fo_entries > ((word)1 << log_fo_table_size)) {
#	ifndef THREADS
    	    DISABLE_SIGNALS();
#	endif
	GC_grow_table((struct hash_chain_entry ***)(&fo_head),
		      &log_fo_table_size);
#	ifdef CONDPRINT
	  if (GC_print_stats) {
	    GC_printf1("Grew fo table to %lu entries\n",
	        	(unsigned long)(1 << log_fo_table_size));
	  }
#	endif
#	ifndef THREADS
	    ENABLE_SIGNALS();
#	endif
    }
    /* in the THREADS case signals are disabled and we hold allocation	*/
    /* lock; otherwise neither is true.  Proceed carefully.		*/
    base = (ptr_t)obj;
    index = HASH2(base, log_fo_table_size);
    prev_fo = 0; curr_fo = fo_head[index];
    while (curr_fo != 0) {
        if (curr_fo -> fo_hidden_base == HIDE_POINTER(base)) {
            /* Interruption by a signal in the middle of this	*/
            /* should be safe.  The client may see only *ocd	*/
            /* updated, but we'll declare that to be his	*/
            /* problem.						*/
            if (ocd) *ocd = (GC_PTR) curr_fo -> fo_client_data;
            if (ofn) *ofn = curr_fo -> fo_fn;
            /* Delete the structure for base. */
            if (prev_fo == 0) {
              fo_head[index] = fo_next(curr_fo);
            } else {
              fo_set_next(prev_fo, fo_next(curr_fo));
            }
            if (fn == 0) {
                GC_fo_entries--;
                /* May not happen if we get a signal.  But a high	*/
                /* estimate will only make the table larger than	*/
                /* necessary.						*/
#		if !defined(THREADS) && !defined(DBG_HDRS_ALL)
                  GC_free((GC_PTR)curr_fo);
#		endif
            } else {
                curr_fo -> fo_fn = fn;
                curr_fo -> fo_client_data = (ptr_t)cd;
                curr_fo -> fo_mark_proc = mp;
                /* Reinsert it.  We deleted it first to maintain	*/
                /* consistency in the event of a signal.		*/
                if (prev_fo == 0) {
                  fo_head[index] = curr_fo;
                } else {
                  fo_set_next(prev_fo, curr_fo);
                }
            }
#	    ifdef THREADS
                UNLOCK();
    	        ENABLE_SIGNALS();
#	    endif
            return;
        }
        prev_fo = curr_fo;
        curr_fo = fo_next(curr_fo);
    }
    if (ofn) *ofn = 0;
    if (ocd) *ocd = 0;
    if (fn == 0) {
#	ifdef THREADS
	    UNLOCK();
	    ENABLE_SIGNALS();
#	endif
        return;
    }
    GET_HDR(base, hhdr);
    if (0 == hhdr) {
      /* We won't collect it, hence finalizer wouldn't be run. */
#     ifdef THREADS
          UNLOCK();
    	  ENABLE_SIGNALS();
#     endif
      return;
    }
    new_fo = (struct finalizable_object *)
    	GC_INTERNAL_MALLOC(sizeof(struct finalizable_object),NORMAL);
    if (0 == new_fo) {
#     ifdef THREADS
	UNLOCK();
    	ENABLE_SIGNALS();
#     endif
      new_fo = (struct finalizable_object *)
	      GC_oom_fn(sizeof(struct finalizable_object));
      if (0 == new_fo) {
	GC_finalization_failures++;
	return;
      }
      /* It's not likely we'll make it here, but ... */
#     ifdef THREADS
	DISABLE_SIGNALS();
	LOCK();
#     endif
    }
    new_fo -> fo_hidden_base = (word)HIDE_POINTER(base);
    new_fo -> fo_fn = fn;
    new_fo -> fo_client_data = (ptr_t)cd;
    new_fo -> fo_object_size = hhdr -> hb_sz;
    new_fo -> fo_mark_proc = mp;
    fo_set_next(new_fo, fo_head[index]);
    GC_fo_entries++;
    fo_head[index] = new_fo;
#   ifdef THREADS
        UNLOCK();
        ENABLE_SIGNALS();
#   endif
}
# if defined(__STDC__)
    void GC_register_finalizer(void * obj,
			       GC_finalization_proc fn, void * cd,
			       GC_finalization_proc *ofn, void ** ocd)
# else
    void GC_register_finalizer(obj, fn, cd, ofn, ocd)
    GC_PTR obj;
    GC_finalization_proc fn;
    GC_PTR cd;
    GC_finalization_proc * ofn;
    GC_PTR * ocd;
# endif
{
    GC_register_finalizer_inner(obj, fn, cd, ofn,
    				ocd, GC_normal_finalize_mark_proc);
}
# if defined(__STDC__)
    void GC_register_finalizer_ignore_self(void * obj,
			       GC_finalization_proc fn, void * cd,
			       GC_finalization_proc *ofn, void ** ocd)
# else
    void GC_register_finalizer_ignore_self(obj, fn, cd, ofn, ocd)
    GC_PTR obj;
    GC_finalization_proc fn;
    GC_PTR cd;
    GC_finalization_proc * ofn;
    GC_PTR * ocd;
# endif
{
    GC_register_finalizer_inner(obj, fn, cd, ofn,
    				ocd, GC_ignore_self_finalize_mark_proc);
}
# if defined(__STDC__)
    void GC_register_finalizer_no_order(void * obj,
			       GC_finalization_proc fn, void * cd,
			       GC_finalization_proc *ofn, void ** ocd)
# else
    void GC_register_finalizer_no_order(obj, fn, cd, ofn, ocd)
    GC_PTR obj;
    GC_finalization_proc fn;
    GC_PTR cd;
    GC_finalization_proc * ofn;
    GC_PTR * ocd;
# endif
{
    GC_register_finalizer_inner(obj, fn, cd, ofn,
    				ocd, GC_null_finalize_mark_proc);
}
# if defined(__STDC__)
    void GC_register_finalizer_unreachable(void * obj,
			       GC_finalization_proc fn, void * cd,
			       GC_finalization_proc *ofn, void ** ocd)
# else
    void GC_register_finalizer_unreachable(obj, fn, cd, ofn, ocd)
    GC_PTR obj;
    GC_finalization_proc fn;
    GC_PTR cd;
    GC_finalization_proc * ofn;
    GC_PTR * ocd;
# endif
{
    GC_register_finalizer_inner(obj, fn, cd, ofn,
    				ocd, GC_unreachable_finalize_mark_proc);
}
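
/*
 * Illustrative client-side sketch of the registration interface above
 * (names other than the GC_* entry points are hypothetical):
 *
 *   void my_finalizer(GC_PTR obj, GC_PTR client_data)
 *   {
 *       close_handle(((struct wrapper *)obj) -> handle);
 *   }
 *   ...
 *   struct wrapper *w = GC_MALLOC(sizeof(struct wrapper));
 *   GC_register_finalizer((GC_PTR)w, my_finalizer, 0, 0, 0);
 *
 * Passing fn == 0 unregisters a previously installed finalizer; the old
 * finalizer and client data can be retrieved through ofn and ocd if
 * those arguments are non-null.
 */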
#ifndef NO_DEBUGGING
void GC_dump_finalization()
{
    struct disappearing_link * curr_dl;
    struct finalizable_object * curr_fo;
    ptr_t real_ptr, real_link;
    int dl_size = (log_dl_table_size == -1 ) ? 0 : (1 << log_dl_table_size);
    int fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);
    int i;

    GC_printf0("Disappearing links:\n");
    for (i = 0; i < dl_size; i++) {
      for (curr_dl = dl_head[i]; curr_dl != 0; curr_dl = dl_next(curr_dl)) {
        real_ptr = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_obj);
        real_link = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link);
        GC_printf2("Object: 0x%lx, Link:0x%lx\n", real_ptr, real_link);
      }
    }
    GC_printf0("Finalizers:\n");
    for (i = 0; i < fo_size; i++) {
      for (curr_fo = fo_head[i]; curr_fo != 0; curr_fo = fo_next(curr_fo)) {
        real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
        GC_printf1("Finalizable object: 0x%lx\n", real_ptr);
      }
    }
}
#endif
/* Called with world stopped.  Cause disappearing links to disappear,	*/
/* and invoke finalizers.						*/
void GC_finalize()
{
    struct disappearing_link * curr_dl, * prev_dl, * next_dl;
    struct finalizable_object * curr_fo, * prev_fo, * next_fo;
    ptr_t real_ptr, real_link;
    register int i;
    int dl_size = (log_dl_table_size == -1 ) ? 0 : (1 << log_dl_table_size);
    int fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);

  /* Make disappearing links disappear */
    for (i = 0; i < dl_size; i++) {
      curr_dl = dl_head[i];
      prev_dl = 0;
      while (curr_dl != 0) {
        real_ptr = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_obj);
        real_link = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link);
        if (!GC_is_marked(real_ptr)) {
            *(word *)real_link = 0;
            next_dl = dl_next(curr_dl);
            if (prev_dl == 0) {
                dl_head[i] = next_dl;
            } else {
                dl_set_next(prev_dl, next_dl);
            }
            GC_clear_mark_bit((ptr_t)curr_dl);
            GC_dl_entries--;
            curr_dl = next_dl;
        } else {
            prev_dl = curr_dl;
            curr_dl = dl_next(curr_dl);
        }
      }
    }
  /* Mark all objects reachable via chains of 1 or more pointers	*/
  /* from finalizable objects.						*/
    GC_ASSERT(GC_mark_state == MS_NONE);
    for (i = 0; i < fo_size; i++) {
      for (curr_fo = fo_head[i]; curr_fo != 0; curr_fo = fo_next(curr_fo)) {
        real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            GC_MARKED_FOR_FINALIZATION(real_ptr);
            GC_MARK_FO(real_ptr, curr_fo -> fo_mark_proc);
            if (GC_is_marked(real_ptr)) {
                WARN("Finalization cycle involving %lx\n", real_ptr);
            }
        }
      }
    }
  /* Enqueue for finalization all objects that are still		*/
  /* unreachable.							*/
    GC_words_finalized = 0;
    for (i = 0; i < fo_size; i++) {
      curr_fo = fo_head[i];
      prev_fo = 0;
      while (curr_fo != 0) {
        real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            if (!GC_java_finalization) {
              GC_set_mark_bit(real_ptr);
            }
            /* Delete from hash table */
              next_fo = fo_next(curr_fo);
              if (prev_fo == 0) {
                fo_head[i] = next_fo;
              } else {
                fo_set_next(prev_fo, next_fo);
              }
              GC_fo_entries--;
            /* Add to list of objects awaiting finalization.	*/
              fo_set_next(curr_fo, GC_finalize_now);
              GC_finalize_now = curr_fo;
              /* unhide object pointer so any future collections will	*/
              /* see it.						*/
              curr_fo -> fo_hidden_base =
              		(word) REVEAL_POINTER(curr_fo -> fo_hidden_base);
              GC_words_finalized +=
                 	ALIGNED_WORDS(curr_fo -> fo_object_size)
              		+ ALIGNED_WORDS(sizeof(struct finalizable_object));
            GC_ASSERT(GC_is_marked(GC_base((ptr_t)curr_fo)));
            curr_fo = next_fo;
        } else {
            prev_fo = curr_fo;
            curr_fo = fo_next(curr_fo);
        }
      }
    }
  if (GC_java_finalization) {
    /* make sure we mark everything reachable from objects finalized
       using the no_order mark_proc */
      for (curr_fo = GC_finalize_now;
	   curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
	real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
	if (!GC_is_marked(real_ptr)) {
	    if (curr_fo -> fo_mark_proc == GC_null_finalize_mark_proc) {
	        GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
	    }
	    if (curr_fo -> fo_mark_proc != GC_unreachable_finalize_mark_proc) {
		GC_set_mark_bit(real_ptr);
	    }
	}
      }
    /* now revive finalize-when-unreachable objects reachable from
       other finalizable objects */
      curr_fo = GC_finalize_now;
      prev_fo = 0;
      while (curr_fo != 0) {
	next_fo = fo_next(curr_fo);
	if (curr_fo -> fo_mark_proc == GC_unreachable_finalize_mark_proc) {
	    real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
	    if (!GC_is_marked(real_ptr)) {
		GC_set_mark_bit(real_ptr);
	    } else {
		if (prev_fo == 0)
		    GC_finalize_now = next_fo;
		else
		    fo_set_next(prev_fo, next_fo);

                curr_fo -> fo_hidden_base =
                	(word) HIDE_POINTER(curr_fo -> fo_hidden_base);
                GC_words_finalized -=
                	ALIGNED_WORDS(curr_fo -> fo_object_size)
                	+ ALIGNED_WORDS(sizeof(struct finalizable_object));

		i = HASH2(real_ptr, log_fo_table_size);
		fo_set_next (curr_fo, fo_head[i]);
		GC_fo_entries++;
		fo_head[i] = curr_fo;
		curr_fo = prev_fo;
	    }
	}
	prev_fo = curr_fo;
	curr_fo = next_fo;
      }
  }
  /* Remove dangling disappearing links. */
    for (i = 0; i < dl_size; i++) {
      curr_dl = dl_head[i];
      prev_dl = 0;
      while (curr_dl != 0) {
        real_link = GC_base((ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link));
        if (real_link != 0 && !GC_is_marked(real_link)) {
            next_dl = dl_next(curr_dl);
            if (prev_dl == 0) {
                dl_head[i] = next_dl;
            } else {
                dl_set_next(prev_dl, next_dl);
            }
            GC_clear_mark_bit((ptr_t)curr_dl);
            GC_dl_entries--;
            curr_dl = next_dl;
        } else {
            prev_dl = curr_dl;
            curr_dl = dl_next(curr_dl);
        }
      }
    }
}
#ifndef JAVA_FINALIZATION_NOT_NEEDED

/* Enqueue all remaining finalizers to be run - Assumes lock is
 * held, and signals are disabled */
void GC_enqueue_all_finalizers()
{
    struct finalizable_object * curr_fo, * prev_fo, * next_fo;
    ptr_t real_ptr;
    register int i;
    int fo_size;

    fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);
    GC_words_finalized = 0;
    for (i = 0; i < fo_size; i++) {
        curr_fo = fo_head[i];
        prev_fo = 0;
      while (curr_fo != 0) {
          real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
          GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
          GC_set_mark_bit(real_ptr);

          /* Delete from hash table */
          next_fo = fo_next(curr_fo);
          if (prev_fo == 0) {
              fo_head[i] = next_fo;
          } else {
              fo_set_next(prev_fo, next_fo);
          }
          GC_fo_entries--;

          /* Add to list of objects awaiting finalization.	*/
          fo_set_next(curr_fo, GC_finalize_now);
          GC_finalize_now = curr_fo;

          /* unhide object pointer so any future collections will	*/
          /* see it.							*/
          curr_fo -> fo_hidden_base =
          		(word) REVEAL_POINTER(curr_fo -> fo_hidden_base);

          GC_words_finalized +=
              	ALIGNED_WORDS(curr_fo -> fo_object_size)
              		+ ALIGNED_WORDS(sizeof(struct finalizable_object));
          curr_fo = next_fo;
        }
    }

    return;
}
/* Invoke all remaining finalizers that haven't yet been run.
 * This is needed for strict compliance with the Java standard,
 * which can make the runtime guarantee that all finalizers are run.
 * Unfortunately, the Java standard implies we have to keep running
 * finalizers until there are no more left, a potential infinite loop.
 * YUCK.
 * Note that this is even more dangerous than the usual Java
 * finalizers, in that objects reachable from static variables
 * may have been finalized when these finalizers are run.
 * Finalizers run at this point must be prepared to deal with a
 * mostly broken world.
 * This routine is externally callable, so is called without
 * the allocation lock.
 */
GC_API void GC_finalize_all()
{
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    while (GC_fo_entries > 0) {
      GC_enqueue_all_finalizers();
      UNLOCK();
      ENABLE_SIGNALS();
      GC_INVOKE_FINALIZERS();
      DISABLE_SIGNALS();
      LOCK();
    }
    UNLOCK();
    ENABLE_SIGNALS();
}
#endif
/* Returns true if it is worth calling GC_invoke_finalizers. (Useful if	*/
/* finalizers can only be called from some kind of `safe state' and	*/
/* getting into that safe state is expensive.)				*/
int GC_should_invoke_finalizers GC_PROTO((void))
{
    return GC_finalize_now != 0;
}
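
/*
 * Illustrative sketch of a finalize-on-demand driver, as a client built
 * with FINALIZE_ON_DEMAND (or with GC_finalize_on_demand set to 1) might
 * run it from a point it considers safe; the event-loop names are
 * hypothetical:
 *
 *   while (event_loop_running) {
 *       handle_one_event();
 *       if (GC_should_invoke_finalizers())
 *           (void) GC_invoke_finalizers();
 *   }
 */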
/* Invoke finalizers for all objects that are ready to be finalized.	*/
/* Should be called without allocation lock.				*/
int GC_invoke_finalizers()
{
    struct finalizable_object * curr_fo;
    int count = 0;
    word mem_freed_before;
    DCL_LOCK_STATE;

    while (GC_finalize_now != 0) {
#	ifdef THREADS
	    DISABLE_SIGNALS();
	    LOCK();
#	endif
    	if (count == 0) {
	    mem_freed_before = GC_mem_freed;
	}
    	curr_fo = GC_finalize_now;
#	ifdef THREADS
 	    if (curr_fo != 0) GC_finalize_now = fo_next(curr_fo);
	    UNLOCK();
	    ENABLE_SIGNALS();
	    if (curr_fo == 0) break;
#	else
	    GC_finalize_now = fo_next(curr_fo);
#	endif
 	fo_set_next(curr_fo, 0);
    	(*(curr_fo -> fo_fn))((ptr_t)(curr_fo -> fo_hidden_base),
    			      curr_fo -> fo_client_data);
    	curr_fo -> fo_client_data = 0;
	++count;
#	ifdef UNDEFINED
	    /* This is probably a bad idea.  It throws off accounting if */
	    /* nearly all objects are finalizable.  O.w. it shouldn't	 */
	    /* matter.							 */
    	    GC_free((GC_PTR)curr_fo);
#	endif
    }
    if (count != 0 && mem_freed_before != GC_mem_freed) {
        LOCK();
	GC_finalizer_mem_freed += (GC_mem_freed - mem_freed_before);
	UNLOCK();
    }
    return count;
}
void (* GC_finalizer_notifier)() = (void (*) GC_PROTO((void)))0;

static GC_word last_finalizer_notification = 0;
void GC_notify_or_invoke_finalizers GC_PROTO((void))
{
    /* This is a convenient place to generate backtraces if appropriate, */
    /* since that code is not callable with the allocation lock.	  */
#   if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
      static word last_back_trace_gc_no = 1;	/* Skip first one. */

      if (GC_gc_no > last_back_trace_gc_no) {
	word i;

#	ifdef KEEP_BACK_PTRS
	  LOCK();
	  /* Stops when GC_gc_no wraps; that's OK.	*/
	  last_back_trace_gc_no = (word)(-1);  /* disable others. */
	  for (i = 0; i < GC_backtraces; ++i) {
	      /* FIXME: This tolerates concurrent heap mutation,	*/
	      /* which may cause occasional mysterious results.	*/
	      /* We need to release the GC lock, since GC_print_callers */
	      /* acquires it.  It probably shouldn't.			*/
	      UNLOCK();
	      GC_generate_random_backtrace_no_gc();
	      LOCK();
	  }
	  last_back_trace_gc_no = GC_gc_no;
	  UNLOCK();
#	endif
#       ifdef MAKE_BACK_GRAPH
	  if (GC_print_back_height)
	    GC_print_back_graph_stats();
#	endif
      }
#   endif
    if (GC_finalize_now == 0) return;
    if (!GC_finalize_on_demand) {
	(void) GC_invoke_finalizers();
#	ifndef THREADS
	  GC_ASSERT(GC_finalize_now == 0);
#	endif	/* Otherwise GC can run concurrently and add more */
	return;
    }
    if (GC_finalizer_notifier != (void (*) GC_PROTO((void)))0
	&& last_finalizer_notification != GC_gc_no) {
	last_finalizer_notification = GC_gc_no;
	GC_finalizer_notifier();
    }
}
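
/*
 * Illustrative sketch: with finalization on demand, a client can install
 * a notifier that is invoked at most once per collection when objects
 * become ready, and run the finalizers later from a context of its own
 * choosing.  The queue_idle_callback routine here is hypothetical:
 *
 *   static void on_finalizers_ready(void)
 *   {
 *       queue_idle_callback(GC_invoke_finalizers);
 *   }
 *   ...
 *   GC_finalize_on_demand = 1;
 *   GC_finalizer_notifier = on_finalizers_ready;
 */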
# ifdef __STDC__
    GC_PTR GC_call_with_alloc_lock(GC_fn_type fn,
    				   GC_PTR client_data)
# else
    GC_PTR GC_call_with_alloc_lock(fn, client_data)
    GC_fn_type fn;
    GC_PTR client_data;
# endif
{
    GC_PTR result;
    DCL_LOCK_STATE;

#   ifdef THREADS
      DISABLE_SIGNALS();
      LOCK();
      SET_LOCK_HOLDER();
#   endif
    result = (*fn)(client_data);
#   ifdef THREADS
#     ifndef GC_ASSERTIONS
        UNSET_LOCK_HOLDER();
#     endif /* o.w. UNLOCK() does it implicitly */
      UNLOCK();
      ENABLE_SIGNALS();
#   endif
    return(result);
}
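
/*
 * Illustrative sketch: a client routine that must not race with the
 * collector can be funneled through GC_call_with_alloc_lock.  The
 * do_locked_work function, struct state, and my_state are hypothetical:
 *
 *   static GC_PTR do_locked_work(GC_PTR data)
 *   {
 *       update_shared_state((struct state *)data);
 *       return 0;
 *   }
 *   ...
 *   (void) GC_call_with_alloc_lock(do_locked_work, (GC_PTR)&my_state);
 */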
#if !defined(NO_DEBUGGING)

void GC_print_finalization_stats()
{
    struct finalizable_object *fo = GC_finalize_now;
    size_t ready = 0;

    GC_printf2("%lu finalization table entries; %lu disappearing links\n",
    	       GC_fo_entries, GC_dl_entries);
    for (; 0 != fo; fo = fo_next(fo)) ++ready;
    GC_printf1("%lu objects are eligible for immediate finalization\n", ready);
}

#endif /* NO_DEBUGGING */