/* boehm-gc/stubborn.c (from official-gcc.git) */
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/* Boehm, July 31, 1995 5:02 pm PDT */

#include "gc_priv.h"
# ifdef STUBBORN_ALLOC
/* Stubborn object (hard to change, nearly immutable) allocation. */

extern ptr_t GC_clear_stack();  /* in misc.c, behaves like identity */

#define GENERAL_MALLOC(lb,k) \
    (GC_PTR)GC_clear_stack(GC_generic_malloc((word)lb, k))

/* Data structure representing immutable objects that  */
/* are still being initialized.                         */
/* This is a bit baroque in order to avoid acquiring    */
/* the lock twice for a typical allocation.             */

GC_PTR * GC_changing_list_start;

# ifdef THREADS
  VOLATILE GC_PTR * VOLATILE GC_changing_list_current;
# else
  GC_PTR * GC_changing_list_current;
# endif
        /* Points at last added element.  Also (ab)used for            */
        /* synchronization.  Updates and reads are assumed atomic.     */

GC_PTR * GC_changing_list_limit;
        /* Points at the last word of the buffer, which is always 0.   */
        /* All entries in (GC_changing_list_current,                   */
        /* GC_changing_list_limit] are 0.                              */
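
/* Lifecycle of an entry (summary of the code below): ADD_CHANGING,    */
/* invoked from GC_malloc_stubborn and GC_change_stubborn, records an  */
/* object here; GC_end_stubborn_change clears its slot again.          */
/* GC_read_changed snapshots GC_changed_pages and rebuilds it from the */
/* list, and GC_clean_changing_list drops entries for objects that are */
/* no longer reachable.                                                 */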
void GC_stubborn_init()
{
#   define INIT_SIZE 10

    GC_changing_list_start = (GC_PTR *)
                        GC_generic_malloc_inner(
                                (word)(INIT_SIZE * sizeof(GC_PTR)),
                                PTRFREE);
    if (GC_changing_list_start == 0) {
        GC_err_printf0("Insufficient space to start up\n");
        ABORT("GC_stubborn_init: out of space");
    }
    BZERO(GC_changing_list_start,
          INIT_SIZE * sizeof(GC_PTR));
    GC_changing_list_current = GC_changing_list_start;
    GC_changing_list_limit = GC_changing_list_start + INIT_SIZE - 1;
    * GC_changing_list_limit = 0;
}
/* Compact and possibly grow the changing list.  The old copy is       */
/* left alone.  Lock must be held.                                      */
/* When called, GC_changing_list_current == GC_changing_list_limit,     */
/* which is one past the current element.                               */
/* When we finish, GC_changing_list_current again points one past the   */
/* last element.                                                        */
/* Invariant while this is running: GC_changing_list_current            */
/* points at a word containing 0.                                       */
/* Returns FALSE on failure.                                             */
GC_bool GC_compact_changing_list()
{
    register GC_PTR *p, *q;
    register word count = 0;
    word old_size = (char **)GC_changing_list_limit
                    - (char **)GC_changing_list_start + 1;
                    /* The casts are needed as a workaround for an Amiga bug. */
    register word new_size = old_size;
    GC_PTR * new_list;

    for (p = GC_changing_list_start; p < GC_changing_list_limit; p++) {
        if (*p != 0) count++;
    }
    if (2 * count > old_size) new_size = 2 * count;
    new_list = (GC_PTR *)
                GC_generic_malloc_inner(
                        new_size * sizeof(GC_PTR), PTRFREE);
                /* PTRFREE is a lie.  But we don't want the collector to */
                /* consider these.  We do want the list itself to be     */
                /* collectable.                                          */
    if (new_list == 0) return(FALSE);
    BZERO(new_list, new_size * sizeof(GC_PTR));
    q = new_list;
    for (p = GC_changing_list_start; p < GC_changing_list_limit; p++) {
        if (*p != 0) *q++ = *p;
    }
    GC_changing_list_start = new_list;
    GC_changing_list_limit = new_list + new_size - 1;
    GC_changing_list_current = q;
    return(TRUE);
}
/* Add p to the changing list.  Clear p on failure.    */
# define ADD_CHANGING(p) \
        {       \
            register struct hblk * h = HBLKPTR(p);     \
            register word index = PHT_HASH(h);  \
            \
            set_pht_entry_from_index(GC_changed_pages, index);  \
        }       \
        if (*GC_changing_list_current != 0 \
            && ++GC_changing_list_current == GC_changing_list_limit) { \
            if (!GC_compact_changing_list()) (p) = 0; \
        } \
        *GC_changing_list_current = p;
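
/* ADD_CHANGING does two things: it marks the page containing p as     */
/* changed in GC_changed_pages, and it stores p in the changing list,  */
/* advancing the cursor past an occupied slot and compacting (and      */
/* possibly growing) the list when the cursor would reach              */
/* GC_changing_list_limit.  If that compaction fails, p is cleared     */
/* to 0, as noted above.                                               */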
void GC_change_stubborn(p)
GC_PTR p;
{
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    ADD_CHANGING(p);
    UNLOCK();
    ENABLE_SIGNALS();
}
void GC_end_stubborn_change(p)
GC_PTR p;
{
#   ifdef THREADS
      register VOLATILE GC_PTR * my_current = GC_changing_list_current;
#   else
      register GC_PTR * my_current = GC_changing_list_current;
#   endif
    register GC_bool tried_quick;
    DCL_LOCK_STATE;

    if (*my_current == p) {
        /* Hopefully the normal case.                                   */
        /* Compaction could not have been running when we started.      */
        *my_current = 0;
#       ifdef THREADS
          if (my_current == GC_changing_list_current) {
              /* Compaction can't have run in the interim.              */
              /* We got away with the quick and dirty approach.         */
              return;
          }
          tried_quick = TRUE;
#       else
          return;
#       endif
    } else {
        tried_quick = FALSE;
    }
    DISABLE_SIGNALS();
    LOCK();
    my_current = GC_changing_list_current;
    for (; my_current >= GC_changing_list_start; my_current--) {
        if (*my_current == p) {
            *my_current = 0;
            UNLOCK();
            ENABLE_SIGNALS();
            return;
        }
    }
    if (!tried_quick) {
        GC_err_printf1("Bad arg to GC_end_stubborn_change: 0x%lx\n",
                       (unsigned long)p);
        ABORT("Bad arg to GC_end_stubborn_change");
    }
    UNLOCK();
    ENABLE_SIGNALS();
}
/* Allocate lb bytes of composite (pointerful) data.   */
/* No pointer fields may be changed after a call to    */
/* GC_end_stubborn_change(p), where p is the value     */
/* returned by GC_malloc_stubborn.                     */
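/*
 * Illustrative usage sketch (not part of the collector).  The struct,
 * field names, and other_node below are hypothetical; only
 * GC_malloc_stubborn, GC_end_stubborn_change and GC_change_stubborn are
 * the real entry points.
 *
 *     struct node { struct node *left, *right; int key; };
 *
 *     struct node *n = (struct node *)GC_malloc_stubborn(sizeof(struct node));
 *     n->left = n->right = 0;
 *     n->key = 42;
 *     GC_end_stubborn_change(n);    // pointer fields are now fixed
 *
 *     GC_change_stubborn(n);        // announce that pointers will change
 *     n->left = other_node;         // hypothetical update
 *     GC_end_stubborn_change(n);    // fixed again
 */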
# ifdef __STDC__
    GC_PTR GC_malloc_stubborn(size_t lb)
# else
    GC_PTR GC_malloc_stubborn(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
ptr_t result;
DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_sobjfreelist[lw]);
        FASTLOCK();
        if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {
            FASTUNLOCK();
            result = GC_generic_malloc((word)lb, STUBBORN);
            goto record;
        }
        *opp = obj_link(op);
        obj_link(op) = 0;
        GC_words_allocd += lw;
        result = (GC_PTR) op;
        ADD_CHANGING(result);
        FASTUNLOCK();
        return((GC_PTR)result);
    } else {
        result = (GC_PTR)
                GC_generic_malloc((word)lb, STUBBORN);
    }
record:
    DISABLE_SIGNALS();
    LOCK();
    ADD_CHANGING(result);
    UNLOCK();
    ENABLE_SIGNALS();
    return((GC_PTR)GC_clear_stack(result));
}
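
/* Note on the allocation above: small objects are taken from          */
/* GC_sobjfreelist under FASTLOCK, and ADD_CHANGING runs while that    */
/* lock is held; when the fast path fails, the object comes from       */
/* GC_generic_malloc and is registered on the "record:" path under     */
/* the full allocation lock.                                           */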
/* Functions analogous to GC_read_dirty and GC_page_was_dirty.  */
/* Report pages on which stubborn objects were changed.         */
void GC_read_changed()
{
    register GC_PTR * p = GC_changing_list_start;
    register GC_PTR q;
    register struct hblk * h;
    register word index;

    if (p == 0) /* initializing */ return;
    BCOPY(GC_changed_pages, GC_prev_changed_pages,
          (sizeof GC_changed_pages));
    BZERO(GC_changed_pages, (sizeof GC_changed_pages));
    for (; p <= GC_changing_list_current; p++) {
        if ((q = *p) != 0) {
            h = HBLKPTR(q);
            index = PHT_HASH(h);
            set_pht_entry_from_index(GC_changed_pages, index);
        }
    }
}
GC_bool GC_page_was_changed(h)
struct hblk * h;
{
    register word index = PHT_HASH(h);

    return(get_pht_entry_from_index(GC_prev_changed_pages, index));
}
/* Remove unreachable entries from the changed list.  Should only be   */
/* called with mark bits consistent and lock held.                     */
void GC_clean_changing_list()
{
    register GC_PTR * p = GC_changing_list_start;
    register GC_PTR q;
    register ptr_t r;
    register unsigned long count = 0;
    register unsigned long dropped_count = 0;

    if (p == 0) /* initializing */ return;
    for (; p <= GC_changing_list_current; p++) {
        if ((q = *p) != 0) {
            count++;
            r = (ptr_t)GC_base(q);
            if (r == 0 || !GC_is_marked(r)) {
                *p = 0;
                dropped_count++;
            }
        }
    }
#   ifdef PRINTSTATS
      if (count > 0) {
        GC_printf2("%lu entries in changing list: reclaimed %lu\n",
                   (unsigned long)count, (unsigned long)dropped_count);
      }
#   endif
}
#else /* !STUBBORN_ALLOC */

# ifdef __STDC__
    GC_PTR GC_malloc_stubborn(size_t lb)
# else
    GC_PTR GC_malloc_stubborn(lb)
    size_t lb;
# endif
{
    return(GC_malloc(lb));
}

/*ARGSUSED*/
void GC_end_stubborn_change(p)
GC_PTR p;
{
}

/*ARGSUSED*/
void GC_change_stubborn(p)
GC_PTR p;
{
}

#endif