[official-gcc.git] / boehm-gc / mark_rts.c
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
# include <stdio.h>
# include "private/gc_priv.h"
/* Data structure for list of root sets.                                 */
/* We keep a hash table, so that we can filter out duplicate additions.  */
/* Under Win32, we need to do a better job of filtering overlaps, so     */
/* we resort to sequential search, and pay the price.                    */
/* This is really declared in gc_priv.h:
struct roots {
        ptr_t r_start;
        ptr_t r_end;
#       if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
          struct roots * r_next;
#       endif
        GC_bool r_tmp;
                -- Delete before registering new dynamic libraries
};

struct roots GC_static_roots[MAX_ROOT_SETS];
*/

int GC_no_dls = 0;      /* Register dynamic library data segments.       */

static int n_root_sets = 0;

        /* GC_static_roots[0..n_root_sets) contains the valid root sets. */
# if !defined(NO_DEBUGGING)
/* For debugging:       */
void GC_print_static_roots()
{
    register int i;
    size_t total = 0;

    for (i = 0; i < n_root_sets; i++) {
        GC_printf2("From 0x%lx to 0x%lx ",
                   (unsigned long) GC_static_roots[i].r_start,
                   (unsigned long) GC_static_roots[i].r_end);
        if (GC_static_roots[i].r_tmp) {
            GC_printf0(" (temporary)\n");
        } else {
            GC_printf0("\n");
        }
        total += GC_static_roots[i].r_end - GC_static_roots[i].r_start;
    }
    GC_printf1("Total size: %ld\n", (unsigned long) total);
    if (GC_root_size != total) {
        GC_printf1("GC_root_size incorrect: %ld!!\n",
                   (unsigned long) GC_root_size);
    }
}
# endif /* NO_DEBUGGING */
/* Primarily for debugging support:     */
/* Is the address p in one of the registered static                    */
/* root sections?                                                       */
GC_bool GC_is_static_root(p)
ptr_t p;
{
    static int last_root_set = MAX_ROOT_SETS;
    register int i;

    if (last_root_set < n_root_sets
        && p >= GC_static_roots[last_root_set].r_start
        && p < GC_static_roots[last_root_set].r_end) return(TRUE);
    for (i = 0; i < n_root_sets; i++) {
        if (p >= GC_static_roots[i].r_start
            && p < GC_static_roots[i].r_end) {
            last_root_set = i;
            return(TRUE);
        }
    }
    return(FALSE);
}
#if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
/*
#   define LOG_RT_SIZE 6
#   define RT_SIZE (1 << LOG_RT_SIZE)  -- Power of 2, may be != MAX_ROOT_SETS

    struct roots * GC_root_index[RT_SIZE];
        -- Hash table header.  Used only to check whether a range is
        -- already present.
        -- really defined in gc_priv.h
*/

static int rt_hash(addr)
char * addr;
{
    word result = (word) addr;
#   if CPP_WORDSZ > 8*LOG_RT_SIZE
        result ^= result >> 8*LOG_RT_SIZE;
#   endif
#   if CPP_WORDSZ > 4*LOG_RT_SIZE
        result ^= result >> 4*LOG_RT_SIZE;
#   endif
    result ^= result >> 2*LOG_RT_SIZE;
    result ^= result >> LOG_RT_SIZE;
    result &= (RT_SIZE-1);
    return(result);
}
/* Is a range starting at b already in the table? If so return a       */
/* pointer to it, else NIL.                                             */
struct roots * GC_roots_present(b)
char *b;
{
    register int h = rt_hash(b);
    register struct roots *p = GC_root_index[h];

    while (p != 0) {
        if (p -> r_start == (ptr_t)b) return(p);
        p = p -> r_next;
    }
    return(FALSE);
}
/* Add the given root structure to the index. */
static void add_roots_to_index(p)
struct roots *p;
{
    register int h = rt_hash(p -> r_start);

    p -> r_next = GC_root_index[h];
    GC_root_index[h] = p;
}

# else /* MSWIN32 || MSWINCE || CYGWIN32 */

#   define add_roots_to_index(p)

# endif
word GC_root_size = 0;

void GC_add_roots(b, e)
char * b; char * e;
{
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    GC_add_roots_inner(b, e, FALSE);
    UNLOCK();
    ENABLE_SIGNALS();
}
/* Add [b,e) to the root set.  Adding the same interval a second time  */
/* is a moderately fast noop, and hence benign.  We do not handle      */
/* different but overlapping intervals efficiently.  (We do handle     */
/* them correctly.)                                                     */
/* Tmp specifies that the interval may be deleted before               */
/* reregistering dynamic libraries.                                     */
void GC_add_roots_inner(b, e, tmp)
char * b; char * e;
GC_bool tmp;
{
    struct roots * old;

#   if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
      /* Spend the time to ensure that there are no overlapping        */
      /* or adjacent intervals.                                         */
      /* This could be done faster with e.g. a                          */
      /* balanced tree.  But the execution time here is                 */
      /* virtually guaranteed to be dominated by the time it            */
      /* takes to scan the roots.                                       */
      {
        register int i;

        for (i = 0; i < n_root_sets; i++) {
            old = GC_static_roots + i;
            if ((ptr_t)b <= old -> r_end && (ptr_t)e >= old -> r_start) {
                if ((ptr_t)b < old -> r_start) {
                    /* Account for the growth before moving the bound;  */
                    /* otherwise the size delta computes to zero.       */
                    GC_root_size += (old -> r_start - (ptr_t)b);
                    old -> r_start = (ptr_t)b;
                }
                if ((ptr_t)e > old -> r_end) {
                    GC_root_size += ((ptr_t)e - old -> r_end);
                    old -> r_end = (ptr_t)e;
                }
                old -> r_tmp &= tmp;
                break;
            }
        }
        if (i < n_root_sets) {
            /* merge other overlapping intervals */
            struct roots *other;

            for (i++; i < n_root_sets; i++) {
                other = GC_static_roots + i;
                b = (char *)(other -> r_start);
                e = (char *)(other -> r_end);
                if ((ptr_t)b <= old -> r_end && (ptr_t)e >= old -> r_start) {
                    if ((ptr_t)b < old -> r_start) {
                        GC_root_size += (old -> r_start - (ptr_t)b);
                        old -> r_start = (ptr_t)b;
                    }
                    if ((ptr_t)e > old -> r_end) {
                        GC_root_size += ((ptr_t)e - old -> r_end);
                        old -> r_end = (ptr_t)e;
                    }
                    old -> r_tmp &= other -> r_tmp;
                    /* Delete this entry. */
                    GC_root_size -= (other -> r_end - other -> r_start);
                    other -> r_start = GC_static_roots[n_root_sets-1].r_start;
                    other -> r_end = GC_static_roots[n_root_sets-1].r_end;
                    n_root_sets--;
                }
            }
            return;
        }
      }
#   else
      old = GC_roots_present(b);
      if (old != 0) {
        if ((ptr_t)e <= old -> r_end) /* already there */ return;
        /* else extend */
        GC_root_size += (ptr_t)e - old -> r_end;
        old -> r_end = (ptr_t)e;
        return;
      }
#   endif
    if (n_root_sets == MAX_ROOT_SETS) {
        ABORT("Too many root sets\n");
    }
    GC_static_roots[n_root_sets].r_start = (ptr_t)b;
    GC_static_roots[n_root_sets].r_end = (ptr_t)e;
    GC_static_roots[n_root_sets].r_tmp = tmp;
#   if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
      GC_static_roots[n_root_sets].r_next = 0;
#   endif
    add_roots_to_index(GC_static_roots + n_root_sets);
    GC_root_size += (ptr_t)e - (ptr_t)b;
    n_root_sets++;
}
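/*
 * Illustrative usage sketch (kept out of the build): how client code might
 * register extra static data with the collector through the public
 * GC_add_roots() entry point wrapped above.  The buffer and function names
 * below are hypothetical; only GC_INIT() and GC_add_roots() are assumed to
 * come from gc.h.
 */
#if 0
#include "gc.h"

/* A statically allocated table that stores pointers to collected objects */
/* but lives outside the data segments the collector finds on its own.    */
static void * my_root_table[1024];

void my_startup(void)
{
    GC_INIT();
    /* Register the half-open interval [start, end).  Registering the    */
    /* same interval again later is a cheap no-op, as documented above.  */
    GC_add_roots((char *)my_root_table,
                 (char *)(my_root_table + 1024));
}
#endif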
static GC_bool roots_were_cleared = FALSE;

void GC_clear_roots GC_PROTO((void))
{
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    roots_were_cleared = TRUE;
    n_root_sets = 0;
    GC_root_size = 0;
#   if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
    {
        register int i;

        for (i = 0; i < RT_SIZE; i++) GC_root_index[i] = 0;
    }
#   endif
    UNLOCK();
    ENABLE_SIGNALS();
}
/* Internal use only; lock held.        */
static void GC_remove_root_at_pos(i)
int i;
{
    GC_root_size -= (GC_static_roots[i].r_end - GC_static_roots[i].r_start);
    GC_static_roots[i].r_start = GC_static_roots[n_root_sets-1].r_start;
    GC_static_roots[i].r_end = GC_static_roots[n_root_sets-1].r_end;
    GC_static_roots[i].r_tmp = GC_static_roots[n_root_sets-1].r_tmp;
    n_root_sets--;
}
#if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
static void GC_rebuild_root_index()
{
    register int i;

    for (i = 0; i < RT_SIZE; i++) GC_root_index[i] = 0;
    for (i = 0; i < n_root_sets; i++)
        add_roots_to_index(GC_static_roots + i);
}
#endif
/* Internal use only; lock held.        */
void GC_remove_tmp_roots()
{
    register int i;

    for (i = 0; i < n_root_sets; ) {
        if (GC_static_roots[i].r_tmp) {
            GC_remove_root_at_pos(i);
        } else {
            i++;
        }
    }
#   if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
      GC_rebuild_root_index();
#   endif
}
#if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
void GC_remove_roots(b, e)
char * b; char * e;
{
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    GC_remove_roots_inner(b, e);
    UNLOCK();
    ENABLE_SIGNALS();
}

/* Should only be called when the lock is held */
void GC_remove_roots_inner(b, e)
char * b; char * e;
{
    int i;
    for (i = 0; i < n_root_sets; ) {
        if (GC_static_roots[i].r_start >= (ptr_t)b
            && GC_static_roots[i].r_end <= (ptr_t)e) {
            GC_remove_root_at_pos(i);
        } else {
            i++;
        }
    }
    GC_rebuild_root_index();
}
#endif /* !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32) */
#if defined(MSWIN32) || defined(_WIN32_WCE_EMULATION) || defined(CYGWIN32)
/* Workaround for the OS mapping and unmapping behind our back:        */
/* Is the address p in one of the temporary static root sections?      */
GC_bool GC_is_tmp_root(p)
ptr_t p;
{
    static int last_root_set = MAX_ROOT_SETS;
    register int i;

    if (last_root_set < n_root_sets
        && p >= GC_static_roots[last_root_set].r_start
        && p < GC_static_roots[last_root_set].r_end)
        return GC_static_roots[last_root_set].r_tmp;
    for (i = 0; i < n_root_sets; i++) {
        if (p >= GC_static_roots[i].r_start
            && p < GC_static_roots[i].r_end) {
            last_root_set = i;
            return GC_static_roots[i].r_tmp;
        }
    }
    return(FALSE);
}
#endif /* MSWIN32 || _WIN32_WCE_EMULATION || CYGWIN32 */
ptr_t GC_approx_sp()
{
    VOLATILE word dummy;

    dummy = 42; /* Force stack to grow if necessary.  Otherwise the     */
                /* later accesses might cause the kernel to think we're */
                /* doing something wrong.                               */
#   ifdef _MSC_VER
#     pragma warning(disable:4172)
#   endif
#ifdef __GNUC__
    /* Eliminate a warning from GCC about taking the address of a
       local variable.  */
    return __builtin_frame_address (0);
#else
    return ((ptr_t)(&dummy));
#endif /* __GNUC__ */
#   ifdef _MSC_VER
#     pragma warning(default:4172)
#   endif
}
/*
 * Data structure for excluded static roots.
 * Real declaration is in gc_priv.h.

struct exclusion {
    ptr_t e_start;
    ptr_t e_end;
};

struct exclusion GC_excl_table[MAX_EXCLUSIONS];
                                        -- Array of exclusions, ascending
                                        -- address order.
*/

size_t GC_excl_table_entries = 0;       /* Number of entries in use.    */
/* Return the first exclusion range that includes an address >= start_addr */
/* Assumes the exclusion table contains at least one entry (namely the     */
/* GC data structures).                                                     */
struct exclusion * GC_next_exclusion(start_addr)
ptr_t start_addr;
{
    size_t low = 0;
    size_t high = GC_excl_table_entries - 1;
    size_t mid;

    while (high > low) {
        mid = (low + high) >> 1;
        /* low <= mid < high    */
        if ((word) GC_excl_table[mid].e_end <= (word) start_addr) {
            low = mid + 1;
        } else {
            high = mid;
        }
    }
    if ((word) GC_excl_table[low].e_end <= (word) start_addr) return 0;
    return GC_excl_table + low;
}
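/*
 * Standalone sketch (kept out of the build) of the lower-bound search used
 * above: over ranges sorted by address, find the first range whose end lies
 * above a query address.  The types and names here are local to the sketch;
 * nothing below comes from the collector itself.
 */
#if 0
#include <assert.h>
#include <stddef.h>

struct range { unsigned long start, end; };

/* Same loop shape as GC_next_exclusion: binary search for the first   */
/* entry with end > query, assuming tbl[] is sorted and n >= 1.        */
static struct range * first_range_ending_after(struct range *tbl, size_t n,
                                               unsigned long query)
{
    size_t low = 0, high = n - 1, mid;

    while (high > low) {
        mid = (low + high) >> 1;
        if (tbl[mid].end <= query) {
            low = mid + 1;
        } else {
            high = mid;
        }
    }
    return tbl[low].end <= query ? 0 : tbl + low;
}

int main(void)
{
    struct range tbl[] = { {100, 200}, {300, 400}, {500, 600} };

    assert(first_range_ending_after(tbl, 3,  50) == &tbl[0]);
    assert(first_range_ending_after(tbl, 3, 250) == &tbl[1]);  /* skips [100,200)  */
    assert(first_range_ending_after(tbl, 3, 350) == &tbl[1]);  /* inside [300,400) */
    assert(first_range_ending_after(tbl, 3, 600) == 0);        /* past the last end */
    return 0;
}
#endif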
void GC_exclude_static_roots(start, finish)
GC_PTR start;
GC_PTR finish;
{
    struct exclusion * next;
    size_t next_index, i;

    if (0 == GC_excl_table_entries) {
        next = 0;
    } else {
        next = GC_next_exclusion(start);
    }
    if (0 != next) {
      if ((word)(next -> e_start) < (word) finish) {
        /* incomplete error check. */
        ABORT("exclusion ranges overlap");
      }
      if ((word)(next -> e_start) == (word) finish) {
        /* extend old range backwards   */
        next -> e_start = (ptr_t)start;
        return;
      }
      next_index = next - GC_excl_table;
      for (i = GC_excl_table_entries; i > next_index; --i) {
        GC_excl_table[i] = GC_excl_table[i-1];
      }
    } else {
      next_index = GC_excl_table_entries;
    }
    if (GC_excl_table_entries == MAX_EXCLUSIONS) ABORT("Too many exclusions");
    GC_excl_table[next_index].e_start = (ptr_t)start;
    GC_excl_table[next_index].e_end = (ptr_t)finish;
    ++GC_excl_table_entries;
}
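/*
 * Illustrative usage sketch (kept out of the build): client code can carve a
 * large, pointer-free region out of the scanned roots through the public
 * GC_exclude_static_roots() entry point above.  The buffer and function
 * names are hypothetical; GC_INIT(), GC_PTR and GC_exclude_static_roots()
 * are the gc.h interfaces assumed here.
 */
#if 0
#include "gc.h"

/* A big static buffer that never holds pointers to collected objects. */
static char my_bitmap_cache[8 * 1024 * 1024];

void my_gc_setup(void)
{
    GC_INIT();
    /* Exclude [start, finish) so the collector neither scans it nor   */
    /* retains objects because of integers inside it that happen to    */
    /* look like pointers.                                             */
    GC_exclude_static_roots((GC_PTR)my_bitmap_cache,
                            (GC_PTR)(my_bitmap_cache + sizeof(my_bitmap_cache)));
}
#endif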
/* Invoke push_conditional on ranges that are not excluded. */
void GC_push_conditional_with_exclusions(bottom, top, all)
ptr_t bottom;
ptr_t top;
int all;
{
    struct exclusion * next;
    ptr_t excl_start;

    while (bottom < top) {
        next = GC_next_exclusion(bottom);
        if (0 == next || (excl_start = next -> e_start) >= top) {
            GC_push_conditional(bottom, top, all);
            return;
        }
        if (excl_start > bottom) GC_push_conditional(bottom, excl_start, all);
        bottom = next -> e_end;
    }
}
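/*
 * Standalone sketch (kept out of the build) of the splitting performed above:
 * walking a root range and emitting only the pieces that fall outside the
 * exclusions.  Everything below is local to the sketch; it mirrors the loop
 * of GC_push_conditional_with_exclusions, with a linear scan standing in for
 * GC_next_exclusion and printf standing in for GC_push_conditional.
 */
#if 0
#include <stdio.h>

struct excl { unsigned long start, end; };

/* Print the sub-ranges of [bottom, top) that are not covered by the   */
/* sorted exclusion list excl[0..n).                                   */
static void push_with_exclusions(unsigned long bottom, unsigned long top,
                                 const struct excl *excl, int n)
{
    int i = 0;

    while (bottom < top) {
        /* Advance to the first exclusion ending above bottom.         */
        while (i < n && excl[i].end <= bottom) i++;
        if (i == n || excl[i].start >= top) {
            printf("push [%lu, %lu)\n", bottom, top);
            return;
        }
        if (excl[i].start > bottom)
            printf("push [%lu, %lu)\n", bottom, excl[i].start);
        bottom = excl[i].end;
    }
}

int main(void)
{
    struct excl table[] = { {120, 140}, {200, 260} };

    /* Expected output: [100,120), [140,200), [260,300).               */
    push_with_exclusions(100, 300, table, 2);
    return 0;
}
#endif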
/*
 * In the absence of threads, push the stack contents.
 * In the presence of threads, push enough of the current stack
 * to ensure that callee-save registers saved in collector frames have been
 * seen.
 */
void GC_push_current_stack(cold_gc_frame)
ptr_t cold_gc_frame;
{
#   if defined(THREADS)
        if (0 == cold_gc_frame) return;
#       ifdef STACK_GROWS_DOWN
          GC_push_all_eager(GC_approx_sp(), cold_gc_frame);
          /* For IA64, the register stack backing store is handled     */
          /* in the thread-specific code.                              */
#       else
          GC_push_all_eager( cold_gc_frame, GC_approx_sp() );
#       endif
#   else
#       ifdef STACK_GROWS_DOWN
          GC_push_all_stack_partially_eager( GC_approx_sp(), GC_stackbottom,
                                             cold_gc_frame );
#         ifdef IA64
            /* We also need to push the register stack backing store.  */
            /* This should really be done in the same way as the       */
            /* regular stack.  For now we fudge it a bit.              */
            /* Note that the backing store grows up, so we can't use   */
            /* GC_push_all_stack_partially_eager.                      */
            {
              extern word GC_save_regs_ret_val;
                        /* Previously set to backing store pointer.    */
              ptr_t bsp = (ptr_t) GC_save_regs_ret_val;
              ptr_t cold_gc_bs_pointer;
              if (GC_all_interior_pointers) {
                cold_gc_bs_pointer = bsp - 2048;
                if (cold_gc_bs_pointer < BACKING_STORE_BASE) {
                  cold_gc_bs_pointer = BACKING_STORE_BASE;
                } else {
                  GC_push_all_stack(BACKING_STORE_BASE, cold_gc_bs_pointer);
                }
              } else {
                cold_gc_bs_pointer = BACKING_STORE_BASE;
              }
              GC_push_all_eager(cold_gc_bs_pointer, bsp);
              /* All values should be sufficiently aligned that we     */
              /* don't have to worry about the boundary.               */
            }
#         endif
#       else
          GC_push_all_stack_partially_eager( GC_stackbottom, GC_approx_sp(),
                                             cold_gc_frame );
#       endif
#   endif /* !THREADS */
}
/*
 * Push GC internal roots.  Only called if there is some reason to believe
 * these would not otherwise get registered.
 */
void GC_push_gc_structures GC_PROTO((void))
{
    GC_push_finalizer_structures();
    GC_push_stubborn_structures();
#   if defined(THREADS)
      GC_push_thread_structures();
#   endif
}

#ifdef THREAD_LOCAL_ALLOC
  void GC_mark_thread_local_free_lists();
#endif
void GC_cond_register_dynamic_libraries()
{
#   if (defined(DYNAMIC_LOADING) || defined(MSWIN32) || defined(MSWINCE) \
        || defined(CYGWIN32) || defined(PCR)) && !defined(SRC_M3)
      GC_remove_tmp_roots();
      if (!GC_no_dls) GC_register_dynamic_libraries();
#   else
      GC_no_dls = TRUE;
#   endif
}
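/*
 * Illustrative usage sketch (kept out of the build): a client that wants full
 * control over which data segments are scanned can set the GC_no_dls flag
 * before collector initialization, so that the branch above skips
 * GC_register_dynamic_libraries(), and then register roots explicitly.
 * The names my_roots/my_setup are hypothetical; GC_no_dls, GC_INIT() and
 * GC_add_roots() are the gc.h interfaces assumed here.
 */
#if 0
#include "gc.h"

static void * my_roots[256];

void my_setup(void)
{
    GC_no_dls = 1;      /* Don't register dynamic library data segments. */
    GC_INIT();
    GC_add_roots((char *)my_roots, (char *)(my_roots + 256));
}
#endif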
/*
 * Call the mark routines (GC_tl_push for a single pointer, GC_push_conditional
 * on groups of pointers) on every top level accessible pointer.
 * If all is FALSE, arrange to push only possibly altered values.
 * Cold_gc_frame is an address inside a GC frame that
 * remains valid until all marking is complete.
 * A zero value indicates that it's OK to miss some
 * register values.
 */
void GC_push_roots(all, cold_gc_frame)
GC_bool all;
ptr_t cold_gc_frame;
{
    int i;
    int kind;

    /*
     * Next push static data.  This must happen early on, since it's
     * not robust against mark stack overflow.
     */
    /* Reregister dynamic libraries, in case one got added.            */
    /* There is some argument for doing this as late as possible,      */
    /* especially on win32, where it can change asynchronously.        */
    /* In those cases, we do it here.  But on other platforms, it's    */
    /* not safe with the world stopped, so we do it earlier.           */
#   if !defined(REGISTER_LIBRARIES_EARLY)
      GC_cond_register_dynamic_libraries();
#   endif

    /* Mark everything in static data areas                            */
      for (i = 0; i < n_root_sets; i++) {
          GC_push_conditional_with_exclusions(
                GC_static_roots[i].r_start,
                GC_static_roots[i].r_end, all);
      }

    /* Mark all free list header blocks, if those were allocated from  */
    /* the garbage collected heap.  This makes sure they don't         */
    /* disappear if we are not marking from static data.  It also      */
    /* saves us the trouble of scanning them, and possibly that of     */
    /* marking the freelists.                                          */
      for (kind = 0; kind < GC_n_kinds; kind++) {
          GC_PTR base = GC_base(GC_obj_kinds[kind].ok_freelist);
          if (0 != base) {
              GC_set_mark_bit(base);
          }
      }

    /* Mark from GC internal roots if those might otherwise have       */
    /* been excluded.                                                   */
      if (GC_no_dls || roots_were_cleared) {
          GC_push_gc_structures();
      }

    /* Mark thread local free lists, even if their mark                */
    /* descriptor excludes the link field.                              */
    /* If the world is not stopped, this is unsafe.  It is              */
    /* also unnecessary, since we will do this again with the           */
    /* world stopped.                                                    */
#   ifdef THREAD_LOCAL_ALLOC
      if (GC_world_stopped) GC_mark_thread_local_free_lists();
#   endif

    /*
     * Now traverse stacks, and mark from register contents.
     * These must be done last, since they can legitimately overflow
     * the mark stack.
     */
#   ifdef USE_GENERIC_PUSH_REGS
      GC_generic_push_regs(cold_gc_frame);
      /* Also pushes stack, so that we catch callee-save registers     */
      /* saved inside the GC_push_regs frame.                          */
#   else
      /*
       * push registers - i.e., call GC_push_one(r) for each
       * register contents r.
       */
      GC_push_regs(); /* usually defined in machine_dep.c */
      GC_push_current_stack(cold_gc_frame);
      /* In the threads case, this only pushes collector frames.       */
      /* In the case of linux threads on IA64, the hot section of      */
      /* the main stack is marked here, but the register stack         */
      /* backing store is handled in the threads-specific code.        */
#   endif
    if (GC_push_other_roots != 0) (*GC_push_other_roots)();
        /* In the threads case, this also pushes thread stacks.        */
        /* Note that without interior pointer recognition lots         */
        /* of stuff may have been pushed already, and this             */
        /* should be careful about mark stack overflows.               */
}