/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/* Boehm, October 9, 1995 1:06 pm PDT */

# include <stdio.h>
# include "gc_priv.h"
/* MAX_ROOT_SETS is the maximum number of ranges that can be	*/
/* registered as static roots.					*/
# ifdef LARGE_CONFIG
#   define MAX_ROOT_SETS 4096
# else
#   ifdef PCR
#     define MAX_ROOT_SETS 1024
#   else
#     ifdef MSWIN32
#	define MAX_ROOT_SETS 512
	    /* Under NT, we add only written pages, which can result	*/
	    /* in many small root sets.					*/
#     else
#       define MAX_ROOT_SETS 64
#     endif
#   endif
# endif
# define MAX_EXCLUSIONS (MAX_ROOT_SETS/4)
	/* Maximum number of segments that can be excluded from root sets. */
/* Data structure for list of root sets.				*/
/* We keep a hash table, so that we can filter out duplicate additions. */
/* Under Win32, we need to do a better job of filtering overlaps, so	*/
/* we resort to sequential search, and pay the price.			*/
struct roots {
	ptr_t r_start;
	ptr_t r_end;
#	ifndef MSWIN32
	  struct roots * r_next;
#	endif
	GC_bool r_tmp;
		/* Delete before registering new dynamic libraries. */
};

static struct roots static_roots[MAX_ROOT_SETS];
static int n_root_sets = 0;

	/* static_roots[0..n_root_sets) contains the valid root sets. */
# if !defined(NO_DEBUGGING)
void GC_print_static_roots()
{
    register int i;
    size_t total = 0;

    for (i = 0; i < n_root_sets; i++) {
        GC_printf2("From 0x%lx to 0x%lx ",
                   (unsigned long) static_roots[i].r_start,
                   (unsigned long) static_roots[i].r_end);
        if (static_roots[i].r_tmp) {
            GC_printf0(" (temporary)\n");
        } else {
            GC_printf0("\n");
        }
        total += static_roots[i].r_end - static_roots[i].r_start;
    }
    GC_printf1("Total size: %ld\n", (unsigned long) total);
    if (GC_root_size != total) {
        GC_printf1("GC_root_size incorrect: %ld!!\n",
                   (unsigned long) GC_root_size);
    }
}
# endif /* NO_DEBUGGING */
/* Primarily for debugging support:			*/
/* Is the address p in one of the registered static	*/
/* root sections?					*/
GC_bool GC_is_static_root(p)
ptr_t p;
{
    static int last_root_set = 0;
    register int i;

    /* Check the most recently matched root set first. */
    if (p >= static_roots[last_root_set].r_start
        && p < static_roots[last_root_set].r_end) return(TRUE);
    for (i = 0; i < n_root_sets; i++) {
        if (p >= static_roots[i].r_start
            && p < static_roots[i].r_end) {
            last_root_set = i;
            return(TRUE);
        }
    }
    return(FALSE);
}
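/*
 * Example (illustrative; "obj" is a hypothetical client global, not
 * part of the collector):
 *
 *	static word obj[10];
 *	... GC_is_static_root((ptr_t)obj) ...
 *
 * This returns TRUE once a registered root range covers obj, e.g.
 * after the enclosing data segment has been added via GC_add_roots.
 */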
#ifndef MSWIN32
# define LOG_RT_SIZE 6
# define RT_SIZE (1 << LOG_RT_SIZE)  /* Power of 2, may be != MAX_ROOT_SETS */

static struct roots * root_index[RT_SIZE];
	/* Hash table header.  Used only to check whether a range is	*/
	/* already present.						*/
static int rt_hash(addr)
char * addr;
{
    word result = (word) addr;

#   if CPP_WORDSZ > 8*LOG_RT_SIZE
	result ^= result >> 8*LOG_RT_SIZE;
#   endif
#   if CPP_WORDSZ > 4*LOG_RT_SIZE
	result ^= result >> 4*LOG_RT_SIZE;
#   endif
    result ^= result >> 2*LOG_RT_SIZE;
    result ^= result >> LOG_RT_SIZE;
    result &= (RT_SIZE-1);
    return(result);
}
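/*
 * Worked example (illustrative): with LOG_RT_SIZE == 6 and
 * CPP_WORDSZ == 32, the first fold is compiled out (32 <= 8*6), and
 * the remaining folds are
 *
 *	result ^= result >> 24;		(since 32 > 4*6)
 *	result ^= result >> 12;
 *	result ^= result >> 6;
 *	result &= 63;
 *
 * so every bit of the original address can influence the final 6-bit
 * bucket index.
 */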
/* Is a range starting at b already in the table? If so return a	*/
/* pointer to it, else NIL.						*/
struct roots * GC_roots_present(b)
char *b;
{
    register int h = rt_hash(b);
    register struct roots *p = root_index[h];

    while (p != 0) {
        if (p -> r_start == (ptr_t)b) return(p);
        p = p -> r_next;
    }
    return(0);
}
/* Add the given root structure to the index. */
static void add_roots_to_index(p)
struct roots *p;
{
    register int h = rt_hash(p -> r_start);

    p -> r_next = root_index[h];
    root_index[h] = p;
}

# else /* MSWIN32 */

#   define add_roots_to_index(p)

# endif
word GC_root_size = 0;
void GC_add_roots(b, e)
char * b; char * e;
{
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    GC_add_roots_inner(b, e, FALSE);
    UNLOCK();
    ENABLE_SIGNALS();
}
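/*
 * Example (illustrative; "cache" is a hypothetical client object that
 * the collector cannot otherwise see but that holds pointers to
 * collectable objects):
 *
 *	static struct { char * slot[256]; } cache;
 *	GC_add_roots((char *)&cache, (char *)(&cache + 1));
 */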
/* Add [b,e) to the root set.  Adding the same interval a second time	*/
/* is a moderately fast noop, and hence benign.  We do not handle	*/
/* different but overlapping intervals efficiently.  (We do handle	*/
/* them correctly.)							*/
/* Tmp specifies that the interval may be deleted before		*/
/* reregistering dynamic libraries.					*/
void GC_add_roots_inner(b, e, tmp)
char * b; char * e;
GC_bool tmp;
{
    struct roots * old;

# ifdef MSWIN32
    /* Spend the time to ensure that there are no overlapping	*/
    /* or adjacent intervals.					*/
    /* This could be done faster with e.g. a			*/
    /* balanced tree.  But the execution time here is		*/
    /* virtually guaranteed to be dominated by the time it	*/
    /* takes to scan the roots.					*/
    {
      register int i;

      for (i = 0; i < n_root_sets; i++) {
        old = static_roots + i;
        if ((ptr_t)b <= old -> r_end && (ptr_t)e >= old -> r_start) {
            if ((ptr_t)b < old -> r_start) {
                /* Account for the added bytes before overwriting	*/
                /* the old bound.					*/
                GC_root_size += (old -> r_start - (ptr_t)b);
                old -> r_start = (ptr_t)b;
            }
            if ((ptr_t)e > old -> r_end) {
                GC_root_size += ((ptr_t)e - old -> r_end);
                old -> r_end = (ptr_t)e;
            }
            old -> r_tmp &= tmp;
            break;
        }
      }
      if (i < n_root_sets) {
        /* merge other overlapping intervals */
        struct roots *other;

        for (i++; i < n_root_sets; i++) {
          other = static_roots + i;
          b = (char *)(other -> r_start);
          e = (char *)(other -> r_end);
          if ((ptr_t)b <= old -> r_end && (ptr_t)e >= old -> r_start) {
            if ((ptr_t)b < old -> r_start) {
                GC_root_size += (old -> r_start - (ptr_t)b);
                old -> r_start = (ptr_t)b;
            }
            if ((ptr_t)e > old -> r_end) {
                GC_root_size += ((ptr_t)e - old -> r_end);
                old -> r_end = (ptr_t)e;
            }
            old -> r_tmp &= other -> r_tmp;
            /* Delete this entry by moving the last one into its place. */
            GC_root_size -= (other -> r_end - other -> r_start);
            other -> r_start = static_roots[n_root_sets-1].r_start;
            other -> r_end = static_roots[n_root_sets-1].r_end;
            other -> r_tmp = static_roots[n_root_sets-1].r_tmp;
            n_root_sets--;
            i--;	/* The moved entry must be examined too. */
          }
        }
        return;
      }
    }
# else
    old = GC_roots_present(b);
    if (old != 0) {
        if ((ptr_t)e <= old -> r_end) /* already there */ return;
        /* else extend */
        GC_root_size += (ptr_t)e - old -> r_end;
        old -> r_end = (ptr_t)e;
        return;
    }
# endif
    if (n_root_sets == MAX_ROOT_SETS) {
        ABORT("Too many root sets\n");
    }
    static_roots[n_root_sets].r_start = (ptr_t)b;
    static_roots[n_root_sets].r_end = (ptr_t)e;
    static_roots[n_root_sets].r_tmp = tmp;
#   ifndef MSWIN32
      static_roots[n_root_sets].r_next = 0;
#   endif
    add_roots_to_index(static_roots + n_root_sets);
    GC_root_size += (ptr_t)e - (ptr_t)b;
    n_root_sets++;
}
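/*
 * Example (illustrative): registering [0x1000,0x2000) and then
 * [0x1800,0x3000) leaves a single merged root set [0x1000,0x3000)
 * under Win32.  Elsewhere the two overlapping entries coexist, which
 * is correct but scans the overlap twice.
 */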
void GC_clear_roots GC_PROTO((void))
{
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    n_root_sets = 0;
    GC_root_size = 0;
#   ifndef MSWIN32
    {
        register int i;

        for (i = 0; i < RT_SIZE; i++) root_index[i] = 0;
    }
#   endif
    UNLOCK();
    ENABLE_SIGNALS();
}
/* Internal use only; lock held. */
void GC_remove_tmp_roots()
{
    register int i;

    for (i = 0; i < n_root_sets; ) {
        if (static_roots[i].r_tmp) {
            GC_root_size -= (static_roots[i].r_end - static_roots[i].r_start);
            static_roots[i].r_start = static_roots[n_root_sets-1].r_start;
            static_roots[i].r_end = static_roots[n_root_sets-1].r_end;
            static_roots[i].r_tmp = static_roots[n_root_sets-1].r_tmp;
            n_root_sets--;
        } else {
            i++;
        }
    }
#   ifndef MSWIN32
    {
        register int i;

        /* Rebuild the hash index, since entries have moved. */
        for (i = 0; i < RT_SIZE; i++) root_index[i] = 0;
        for (i = 0; i < n_root_sets; i++) add_roots_to_index(static_roots + i);
    }
#   endif
}
ptr_t GC_approx_sp()
{
    word dummy;

    /* The address of a fresh local is a conservative	*/
    /* approximation to the current stack pointer.	*/
    return((ptr_t)(&dummy));
}
/*
 * Data structure for excluded static roots.
 */
struct exclusion {
    ptr_t e_start;
    ptr_t e_end;
};

struct exclusion excl_table[MAX_EXCLUSIONS];
				/* Array of exclusions, ascending	*/
				/* address order.			*/
size_t excl_table_entries = 0;	/* Number of entries in use.		*/
/* Return the first exclusion range that includes an address >= start_addr */
/* Assumes the exclusion table contains at least one entry (namely the	    */
/* GC data structures).							    */
struct exclusion * GC_next_exclusion(start_addr)
ptr_t start_addr;
{
    size_t low = 0;
    size_t high = excl_table_entries - 1;
    size_t mid;

    while (high > low) {
        mid = (low + high) >> 1;
        /* low <= mid < high */
        if ((word) excl_table[mid].e_end <= (word) start_addr) {
            low = mid + 1;
        } else {
            high = mid;
        }
    }
    if ((word) excl_table[low].e_end <= (word) start_addr) return 0;
    return excl_table + low;
}
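/*
 * Worked example (illustrative): with exclusions [0x100,0x200) and
 * [0x800,0x900), GC_next_exclusion(0x300) narrows low to the second
 * entry, whose e_end (0x900) exceeds 0x300, so that entry is
 * returned.  GC_next_exclusion(0x900) finds no e_end above the
 * argument and returns 0.
 */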
void GC_exclude_static_roots(start, finish)
GC_PTR start;
GC_PTR finish;
{
    struct exclusion * next;
    size_t next_index, i;

    if (0 == excl_table_entries) {
        next = 0;
    } else {
        next = GC_next_exclusion((ptr_t)start);
    }
    if (0 != next) {
      if ((word)(next -> e_start) < (word) finish) {
        /* incomplete error check. */
        ABORT("exclusion ranges overlap");
      }
      if ((word)(next -> e_start) == (word) finish) {
        /* extend old range backwards */
        next -> e_start = (ptr_t)start;
        return;
      }
    }
    /* A new entry will be inserted: check for room before shifting, */
    /* so the shift below cannot write past the end of the table.    */
    if (excl_table_entries == MAX_EXCLUSIONS) ABORT("Too many exclusions");
    if (0 != next) {
      next_index = next - excl_table;
      for (i = excl_table_entries; i > next_index; --i) {
        excl_table[i] = excl_table[i-1];
      }
    } else {
      next_index = excl_table_entries;
    }
    excl_table[next_index].e_start = (ptr_t)start;
    excl_table[next_index].e_end = (ptr_t)finish;
    ++excl_table_entries;
}
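/*
 * Example (illustrative; "big_buffer" is a hypothetical pointer-free
 * client buffer that would otherwise be scanned as part of the data
 * segment):
 *
 *	static char big_buffer[1 << 20];
 *	GC_exclude_static_roots((GC_PTR)big_buffer,
 *				(GC_PTR)(big_buffer + sizeof big_buffer));
 */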
/* Invoke push_conditional on ranges that are not excluded. */
void GC_push_conditional_with_exclusions(bottom, top, all)
ptr_t bottom;
ptr_t top;
int all;
{
    struct exclusion * next;
    ptr_t excl_start;

    while (bottom < top) {
        next = GC_next_exclusion(bottom);
        if (0 == next || (excl_start = next -> e_start) >= top) {
            GC_push_conditional(bottom, top, all);
            return;
        }
        if (excl_start > bottom) GC_push_conditional(bottom, excl_start, all);
        bottom = next -> e_end;
    }
}
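/*
 * Worked example (illustrative): pushing [0x1000,0x3000) with a single
 * exclusion [0x1800,0x2000) results in two calls,
 * GC_push_conditional(0x1000, 0x1800, all) and
 * GC_push_conditional(0x2000, 0x3000, all); the excluded bytes are
 * never scanned.
 */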
/*
 * Call the mark routines (GC_tl_push for a single pointer, GC_push_conditional
 * on groups of pointers) on every top level accessible pointer.
 * If all is FALSE, arrange to push only possibly altered values.
 */

void GC_push_roots(all)
GC_bool all;
{
    register int i;

    /*
     * Push registers - i.e., call GC_push_one(r) for the contents r
     * of each register.
     */
    GC_push_regs(); /* usually defined in machine_dep.c */

    /*
     * Next push static data.  This must happen early on, since it's
     * not robust against mark stack overflow.
     */
    /* Reregister dynamic libraries, in case one got added. */
#   if (defined(DYNAMIC_LOADING) || defined(MSWIN32) || defined(PCR)) \
       && !defined(SRC_M3)
      GC_remove_tmp_roots();
      GC_register_dynamic_libraries();
#   endif
    /* Mark everything in static data areas. */
    for (i = 0; i < n_root_sets; i++) {
        GC_push_conditional_with_exclusions(
            static_roots[i].r_start, static_roots[i].r_end, all);
    }

    /*
     * Now traverse stacks.
     */
#   ifndef THREADS
      /* Mark everything on the stack. */
#     ifdef STACK_GROWS_DOWN
        GC_push_all_stack( GC_approx_sp(), GC_stackbottom );
#     else
        GC_push_all_stack( GC_stackbottom, GC_approx_sp() );
#     endif
#   endif
    if (GC_push_other_roots != 0) (*GC_push_other_roots)();
        /* In the threads case, this also pushes thread stacks. */
}