/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/* Boehm, July 31, 1995 5:02 pm PDT */
#include "gc_priv.h"

# ifdef STUBBORN_ALLOC
/* Stubborn object (hard to change, nearly immutable) allocation. */

extern ptr_t GC_clear_stack();  /* in misc.c, behaves like identity */

#define GENERAL_MALLOC(lb,k) \
    (GC_PTR)GC_clear_stack(GC_generic_malloc((word)lb, k))
/* Data structure representing immutable objects that   */
/* are still being initialized.                          */
/* This is a bit baroque in order to avoid acquiring     */
/* the lock twice for a typical allocation.              */

GC_PTR * GC_changing_list_start;

# ifdef THREADS
  VOLATILE GC_PTR * VOLATILE GC_changing_list_current;
# else
  GC_PTR * GC_changing_list_current;
# endif
        /* Points at last added element.  Also (ab)used for        */
        /* synchronization.  Updates and reads are assumed atomic. */

GC_PTR * GC_changing_list_limit;
        /* Points at the last word of the buffer, which is always 0. */
        /* All entries in (GC_changing_list_current,                 */
        /* GC_changing_list_limit] are 0.                            */
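
/*
 * Illustrative example only: assuming INIT_SIZE == 10 (the value used in
 * GC_stubborn_init below), the buffer might look like this after stubborn
 * objects p0, p1, and p2 have been added and p1 has already been finished
 * with GC_end_stubborn_change (finished entries are simply cleared to 0):
 *
 *   slot:      0      1      2      3    ...     9
 *   entry:  [ p0 ] [  0 ] [ p2 ] [  0 ]  ...  [  0 ]
 *              ^             ^                   ^
 *            start        current              limit
 */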
void GC_stubborn_init()
{
#   define INIT_SIZE 10

    GC_changing_list_start = (GC_PTR *)
                        GC_generic_malloc_inner(
                                (word)(INIT_SIZE * sizeof(GC_PTR)),
                                PTRFREE);
    if (GC_changing_list_start == 0) {
        GC_err_printf0("Insufficient space to start up\n");
        ABORT("GC_stubborn_init: out of space");
    }
    BZERO(GC_changing_list_start,
          INIT_SIZE * sizeof(GC_PTR));
    GC_changing_list_current = GC_changing_list_start;
    GC_changing_list_limit = GC_changing_list_start + INIT_SIZE - 1;
    * GC_changing_list_limit = 0;
}
/* Compact and possibly grow the changing list.  The old copy is       */
/* left alone.  Lock must be held.                                      */
/* When called, GC_changing_list_current == GC_changing_list_limit,    */
/* which is one past the current element.                               */
/* When we finish, GC_changing_list_current again points one past the  */
/* last element.                                                        */
/* Invariant while this is running: GC_changing_list_current           */
/* points at a word containing 0.                                       */
/* Returns FALSE on failure.                                            */
GC_bool GC_compact_changing_list()
{
    register GC_PTR *p, *q;
    register word count = 0;
    word old_size = (char **)GC_changing_list_limit
                    - (char **)GC_changing_list_start + 1;
                    /* The casts are needed as a workaround for an Amiga bug */
    register word new_size = old_size;
    GC_PTR * new_list;

    /* Count the surviving (still uninitialized) entries. */
    for (p = GC_changing_list_start; p < GC_changing_list_limit; p++) {
        if (*p != 0) count++;
    }
    if (2 * count > old_size) new_size = 2 * count;
    new_list = (GC_PTR *)
                GC_generic_malloc_inner(
                        new_size * sizeof(GC_PTR), PTRFREE);
                /* PTRFREE is a lie.  But we don't want the collector to */
                /* consider these.  We do want the list itself to be     */
                /* collectable.                                           */
    if (new_list == 0) return(FALSE);
    BZERO(new_list, new_size * sizeof(GC_PTR));
    q = new_list;
    for (p = GC_changing_list_start; p < GC_changing_list_limit; p++) {
        if (*p != 0) *q++ = *p;
    }
    GC_changing_list_start = new_list;
    GC_changing_list_limit = new_list + new_size - 1;
    GC_changing_list_current = q;
    return(TRUE);
}
/* Add p to changing list.  Clear p on failure. */
# define ADD_CHANGING(p) \
        {       \
            register struct hblk * h = HBLKPTR(p);     \
            register word index = PHT_HASH(h);         \
            \
            set_pht_entry_from_index(GC_changed_pages, index);  \
        }       \
        if (*GC_changing_list_current != 0 \
            && ++GC_changing_list_current == GC_changing_list_limit) { \
            if (!GC_compact_changing_list()) (p) = 0; \
            /* Points at a word containing 0 on failure */ \
        } \
        *GC_changing_list_current = p;
void GC_change_stubborn(p)
GC_PTR p;
{
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    ADD_CHANGING(p);
    UNLOCK();
    ENABLE_SIGNALS();
}
void GC_end_stubborn_change(p)
GC_PTR p;
{
#   ifdef THREADS
      register VOLATILE GC_PTR * my_current = GC_changing_list_current;
#   else
      register GC_PTR * my_current = GC_changing_list_current;
#   endif
    register GC_bool tried_quick;
    DCL_LOCK_STATE;

    if (*my_current == p) {
        /* Hopefully the normal case.                               */
        /* Compaction could not have been running when we started.  */
        *my_current = 0;
#       ifdef THREADS
          if (my_current == GC_changing_list_current) {
            /* Compaction can't have run in the interim.       */
            /* We got away with the quick and dirty approach.   */
            return;
          }
          tried_quick = TRUE;
#       else
          return;
#       endif
    } else {
        tried_quick = FALSE;
    }
    /* Slow path: scan the whole list under the allocation lock. */
    DISABLE_SIGNALS();
    LOCK();
    my_current = GC_changing_list_current;
    for (; my_current >= GC_changing_list_start; my_current--) {
        if (*my_current == p) {
            *my_current = 0;
            UNLOCK();
            ENABLE_SIGNALS();
            return;
        }
    }
    if (!tried_quick) {
        GC_err_printf1("Bad arg to GC_end_stubborn_change: 0x%lx\n",
                       (unsigned long)p);
        ABORT("Bad arg to GC_end_stubborn_change");
    }
    UNLOCK();
    ENABLE_SIGNALS();
}
/* Allocate lb bytes of composite (pointerful) data.    */
/* No pointer fields may be changed after a call to     */
/* GC_end_stubborn_change(p) where p is the value       */
/* returned by GC_malloc_stubborn.                      */
# ifdef __STDC__
    GC_PTR GC_malloc_stubborn(size_t lb)
# else
    GC_PTR GC_malloc_stubborn(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
GC_PTR result;
DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_sobjfreelist[lw]);
        FASTLOCK();
        if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {
            FASTUNLOCK();
            result = (GC_PTR)GC_generic_malloc((word)lb, STUBBORN);
            goto record;
        }
        *opp = obj_link(op);
        obj_link(op) = 0;
        GC_words_allocd += lw;
        result = (GC_PTR) op;
        ADD_CHANGING(result);
        FASTUNLOCK();
        return((GC_PTR)result);
    }
    result = (GC_PTR)
        GC_generic_malloc((word)lb, STUBBORN);
record:
    DISABLE_SIGNALS();
    LOCK();
    ADD_CHANGING(result);
    UNLOCK();
    ENABLE_SIGNALS();
    return((GC_PTR)GC_clear_stack(result));
}
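
/*
 * A minimal usage sketch of the interface above (illustration only;
 * "node" and its field names are made-up client-side names, not part
 * of the collector):
 */
#if 0
    struct node { struct node * left; struct node * right; int key; };

    struct node * n = (struct node *)GC_malloc_stubborn(sizeof(struct node));
    struct node * other = (struct node *)GC_malloc(sizeof(struct node));

    /* The object may be written freely until the change is "ended". */
    n -> left = 0;
    n -> right = 0;
    n -> key = 42;
    GC_end_stubborn_change((GC_PTR)n);

    /* If a pointer field must change after all, announce it first.  */
    GC_change_stubborn((GC_PTR)n);
    n -> left = other;
    GC_end_stubborn_change((GC_PTR)n);
#endif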
/* Functions analogous to GC_read_dirty and GC_page_was_dirty. */
/* Report pages on which stubborn objects were changed.        */
void GC_read_changed()
{
    register GC_PTR * p = GC_changing_list_start;
    register GC_PTR q;
    register struct hblk * h;
    register word index;

    if (p == 0) /* initializing */ return;
    BCOPY(GC_changed_pages, GC_prev_changed_pages,
          (sizeof GC_changed_pages));
    BZERO(GC_changed_pages, (sizeof GC_changed_pages));
    for (; p <= GC_changing_list_current; p++) {
        if ((q = *p) != 0) {
            h = HBLKPTR(q);
            index = PHT_HASH(h);
            set_pht_entry_from_index(GC_changed_pages, index);
        }
    }
}
GC_bool GC_page_was_changed(h)
struct hblk * h;
{
    register word index = PHT_HASH(h);

    return(get_pht_entry_from_index(GC_prev_changed_pages, index));
}
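
/*
 * Sketch of how the two routines above fit together (illustration only;
 * the collector's actual use of this information lives elsewhere, and
 * "h" is assumed to be a heap block pointer obtained from other code):
 * take a snapshot with GC_read_changed(), then query individual blocks.
 */
#if 0
    GC_read_changed();          /* snapshot GC_changed_pages, then clear it */
    if (GC_page_was_changed(h)) {
        /* Some stubborn object on h's page was registered as changed  */
        /* since the previous snapshot; treat the page as dirty.        */
    }
#endif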
/* Remove unreachable entries from the changing list.  Should only be */
/* called with mark bits consistent and lock held.                    */
void GC_clean_changing_list()
{
    register GC_PTR * p = GC_changing_list_start;
    register GC_PTR q;
    register ptr_t r;
    register unsigned long count = 0;
    register unsigned long dropped_count = 0;

    if (p == 0) /* initializing */ return;
    for (; p <= GC_changing_list_current; p++) {
        if (*p != 0) {
            count++;
            q = *p;
            r = (ptr_t)GC_base(q);
            if (r == 0 || !GC_is_marked(r)) {
                /* The object is no longer reachable; drop its entry. */
                *p = 0;
                dropped_count++;
            }
        }
    }
#   ifdef PRINTSTATS
      GC_printf2("%lu entries in changing list: reclaimed %lu\n",
                 (unsigned long)count, (unsigned long)dropped_count);
#   endif
}
#else /* !STUBBORN_ALLOC */

/* Stubborn allocation disabled: provide trivial versions of the entry points. */
# ifdef __STDC__
    GC_PTR GC_malloc_stubborn(size_t lb)
# else
    GC_PTR GC_malloc_stubborn(lb)
    size_t lb;
# endif
{
    return(GC_malloc(lb));
}

/*ARGSUSED*/
void GC_end_stubborn_change(p)
GC_PTR p;
{
}

/*ARGSUSED*/
void GC_change_stubborn(p)
GC_PTR p;
{
}

#endif /* !STUBBORN_ALLOC */