2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
6 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
7 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
9 * Permission is hereby granted to use or copy this program
10 * for any purpose, provided the above notices are retained on all copies.
11 * Permission to modify the code and to distribute modified code is granted,
12 * provided the above notices are retained, and a notice that the code was
13 * modified is included with the above copyright notice.
16 #include "private/gc_priv.h"
20 * 1. allocation of heap block headers
21 * 2. A map from addresses to heap block addresses to heap block headers
23 * Access speed is crucial. We implement an index structure based on a 2-level tree.
27 STATIC bottom_index
* GC_all_bottom_indices
= 0;
28 /* Pointer to first (lowest addr) */
31 STATIC bottom_index
* GC_all_bottom_indices_end
= 0;
32 /* Pointer to last (highest addr) */
35 /* Non-macro version of header location routine */
36 GC_INNER hdr
* GC_find_header(ptr_t h
)
47 /* Handle a header cache miss. Returns a pointer to the */
48 /* header corresponding to p, if p can possibly be a valid */
49 /* object pointer, and 0 otherwise. */
50 /* GUARANTEED to return 0 for a pointer past the first page */
51 /* of an object unless both GC_all_interior_pointers is set */
52 /* and p is in fact a valid object pointer. */
53 /* Never returns a pointer to a free hblk. */
55 #ifdef PRINT_BLACK_LIST
56 GC_header_cache_miss(ptr_t p
, hdr_cache_entry
*hce
, ptr_t source
)
58 GC_header_cache_miss(ptr_t p
, hdr_cache_entry
*hce
)
64 if (IS_FORWARDING_ADDR_OR_NIL(hhdr
)) {
65 if (GC_all_interior_pointers
) {
69 current
= (ptr_t
)HBLKPTR(current
);
71 current
= current
- HBLKSIZE
*(word
)hhdr
;
73 } while(IS_FORWARDING_ADDR_OR_NIL(hhdr
));
74 /* current points to near the start of the large object */
75 if (hhdr
-> hb_flags
& IGNORE_OFF_PAGE
)
77 if (HBLK_IS_FREE(hhdr
)
78 || p
- current
>= (ptrdiff_t)(hhdr
->hb_sz
)) {
79 GC_ADD_TO_BLACK_LIST_NORMAL(p
, source
);
80 /* Pointer past the end of the block */
84 GC_ADD_TO_BLACK_LIST_NORMAL(p
, source
);
85 /* And return zero: */
87 GC_ASSERT(hhdr
== 0 || !HBLK_IS_FREE(hhdr
));
89 /* Pointers past the first page are probably too rare */
90 /* to add them to the cache. We don't. */
91 /* And correctness relies on the fact that we don't. */
94 GC_ADD_TO_BLACK_LIST_NORMAL(p
, source
);
99 if (HBLK_IS_FREE(hhdr
)) {
100 GC_ADD_TO_BLACK_LIST_NORMAL(p
, source
);
103 hce
-> block_addr
= (word
)(p
) >> LOG_HBLKSIZE
;
104 hce
-> hce_hdr
= hhdr
;
110 /* Routines to dynamically allocate collector data structures that will */
111 /* never be freed. */
113 static ptr_t scratch_free_ptr
= 0;
115 /* GC_scratch_last_end_ptr is end point of last obtained scratch area. */
116 /* GC_scratch_end_ptr is end point of current scratch area. */
118 GC_INNER ptr_t
GC_scratch_alloc(size_t bytes
)
120 register ptr_t result
= scratch_free_ptr
;
122 bytes
+= GRANULE_BYTES
-1;
123 bytes
&= ~(GRANULE_BYTES
-1);
124 scratch_free_ptr
+= bytes
;
125 if ((word
)scratch_free_ptr
<= (word
)GC_scratch_end_ptr
) {
129 word bytes_to_get
= MINHINCR
* HBLKSIZE
;
131 if (bytes_to_get
<= bytes
) {
132 /* Undo the damage, and get memory directly */
133 bytes_to_get
= bytes
;
135 bytes_to_get
+= GC_page_size
- 1;
136 bytes_to_get
&= ~(GC_page_size
- 1);
138 result
= (ptr_t
)GET_MEM(bytes_to_get
);
139 GC_add_to_our_memory(result
, bytes_to_get
);
140 scratch_free_ptr
-= bytes
;
141 GC_scratch_last_end_ptr
= result
+ bytes
;
144 result
= (ptr_t
)GET_MEM(bytes_to_get
);
145 GC_add_to_our_memory(result
, bytes_to_get
);
147 GC_COND_LOG_PRINTF("Out of memory - trying to allocate less\n");
148 scratch_free_ptr
-= bytes
;
149 bytes_to_get
= bytes
;
151 bytes_to_get
+= GC_page_size
- 1;
152 bytes_to_get
&= ~(GC_page_size
- 1);
154 result
= (ptr_t
)GET_MEM(bytes_to_get
);
155 GC_add_to_our_memory(result
, bytes_to_get
);
158 scratch_free_ptr
= result
;
159 GC_scratch_end_ptr
= scratch_free_ptr
+ bytes_to_get
;
160 GC_scratch_last_end_ptr
= GC_scratch_end_ptr
;
161 return(GC_scratch_alloc(bytes
));
165 static hdr
* hdr_free_list
= 0;
167 /* Return an uninitialized header */
168 static hdr
* alloc_hdr(void)
170 register hdr
* result
;
172 if (hdr_free_list
== 0) {
173 result
= (hdr
*) GC_scratch_alloc((word
)(sizeof(hdr
)));
175 result
= hdr_free_list
;
176 hdr_free_list
= (hdr
*) (result
-> hb_next
);
181 GC_INLINE
void free_hdr(hdr
* hhdr
)
183 hhdr
-> hb_next
= (struct hblk
*) hdr_free_list
;
184 hdr_free_list
= hhdr
;
187 #ifdef COUNT_HDR_CACHE_HITS
188 /* Used for debugging/profiling (the symbols are externally visible). */
189 word GC_hdr_cache_hits
= 0;
190 word GC_hdr_cache_misses
= 0;
193 GC_INNER
void GC_init_headers(void)
197 GC_all_nils
= (bottom_index
*)GC_scratch_alloc((word
)sizeof(bottom_index
));
198 if (GC_all_nils
== NULL
) {
199 GC_err_printf("Insufficient memory for GC_all_nils\n");
202 BZERO(GC_all_nils
, sizeof(bottom_index
));
203 for (i
= 0; i
< TOP_SZ
; i
++) {
204 GC_top_index
[i
] = GC_all_nils
;
208 /* Make sure that there is a bottom level index block for address addr */
209 /* Return FALSE on failure. */
210 static GC_bool
get_index(word addr
)
212 word hi
= (word
)(addr
) >> (LOG_BOTTOM_SZ
+ LOG_HBLKSIZE
);
215 bottom_index
** prev
;
219 word i
= TL_HASH(hi
);
222 old
= p
= GC_top_index
[i
];
223 while(p
!= GC_all_nils
) {
224 if (p
-> key
== hi
) return(TRUE
);
227 r
= (bottom_index
*)GC_scratch_alloc((word
)(sizeof (bottom_index
)));
228 if (r
== 0) return(FALSE
);
229 BZERO(r
, sizeof (bottom_index
));
230 r
-> hash_link
= old
;
233 if (GC_top_index
[hi
] != GC_all_nils
) return(TRUE
);
234 r
= (bottom_index
*)GC_scratch_alloc((word
)(sizeof (bottom_index
)));
235 if (r
== 0) return(FALSE
);
236 GC_top_index
[hi
] = r
;
237 BZERO(r
, sizeof (bottom_index
));
240 /* Add it to the list of bottom indices */
241 prev
= &GC_all_bottom_indices
; /* pointer to p */
242 pi
= 0; /* bottom_index preceding p */
243 while ((p
= *prev
) != 0 && p
-> key
< hi
) {
245 prev
= &(p
-> asc_link
);
249 GC_all_bottom_indices_end
= r
;
258 /* Install a header for block h. */
259 /* The header is uninitialized. */
260 /* Returns the header or 0 on failure. */
261 GC_INNER
struct hblkhdr
* GC_install_header(struct hblk
*h
)
265 if (!get_index((word
) h
)) return(0);
266 result
= alloc_hdr();
270 result
-> hb_last_reclaimed
= (unsigned short)GC_gc_no
;
276 /* Set up forwarding counts for block h of size sz */
277 GC_INNER GC_bool
GC_install_counts(struct hblk
*h
, size_t sz
/* bytes */)
282 for (hbp
= h
; (word
)hbp
< (word
)h
+ sz
; hbp
+= BOTTOM_SZ
) {
283 if (!get_index((word
) hbp
)) return(FALSE
);
285 if (!get_index((word
)h
+ sz
- 1)) return(FALSE
);
286 for (hbp
= h
+ 1; (word
)hbp
< (word
)h
+ sz
; hbp
+= 1) {
287 i
= HBLK_PTR_DIFF(hbp
, h
);
288 SET_HDR(hbp
, (hdr
*)(i
> MAX_JUMP
? MAX_JUMP
: i
));
293 /* Remove the header for block h */
294 GC_INNER
void GC_remove_header(struct hblk
*h
)
302 /* Remove forwarding counts for h */
303 GC_INNER
void GC_remove_counts(struct hblk
*h
, size_t sz
/* bytes */)
305 register struct hblk
* hbp
;
306 for (hbp
= h
+1; (word
)hbp
< (word
)h
+ sz
; hbp
+= 1) {
311 /* Apply fn to all allocated blocks */
313 void GC_apply_to_all_blocks(void (*fn
)(struct hblk
*h
, word client_data
),
317 bottom_index
* index_p
;
319 for (index_p
= GC_all_bottom_indices
; index_p
!= 0;
320 index_p
= index_p
-> asc_link
) {
321 for (j
= BOTTOM_SZ
-1; j
>= 0;) {
322 if (!IS_FORWARDING_ADDR_OR_NIL(index_p
->index
[j
])) {
323 if (!HBLK_IS_FREE(index_p
->index
[j
])) {
324 (*fn
)(((struct hblk
*)
325 (((index_p
->key
<< LOG_BOTTOM_SZ
) + (word
)j
)
330 } else if (index_p
->index
[j
] == 0) {
333 j
-= (signed_word
)(index_p
->index
[j
]);
339 /* Get the next valid block whose address is at least h */
340 /* Return 0 if there is none. */
341 GC_INNER
struct hblk
* GC_next_used_block(struct hblk
*h
)
343 register bottom_index
* bi
;
344 register word j
= ((word
)h
>> LOG_HBLKSIZE
) & (BOTTOM_SZ
-1);
347 if (bi
== GC_all_nils
) {
348 register word hi
= (word
)h
>> (LOG_BOTTOM_SZ
+ LOG_HBLKSIZE
);
349 bi
= GC_all_bottom_indices
;
350 while (bi
!= 0 && bi
-> key
< hi
) bi
= bi
-> asc_link
;
354 while (j
< BOTTOM_SZ
) {
355 hdr
* hhdr
= bi
-> index
[j
];
356 if (IS_FORWARDING_ADDR_OR_NIL(hhdr
)) {
359 if (!HBLK_IS_FREE(hhdr
)) {
360 return((struct hblk
*)
361 (((bi
-> key
<< LOG_BOTTOM_SZ
) + j
)
364 j
+= divHBLKSZ(hhdr
-> hb_sz
);
374 /* Get the last (highest address) block whose address is */
375 /* at most h. Return 0 if there is none. */
376 /* Unlike the above, this may return a free block. */
377 GC_INNER
struct hblk
* GC_prev_block(struct hblk
*h
)
379 register bottom_index
* bi
;
380 register signed_word j
= ((word
)h
>> LOG_HBLKSIZE
) & (BOTTOM_SZ
-1);
383 if (bi
== GC_all_nils
) {
384 register word hi
= (word
)h
>> (LOG_BOTTOM_SZ
+ LOG_HBLKSIZE
);
385 bi
= GC_all_bottom_indices_end
;
386 while (bi
!= 0 && bi
-> key
> hi
) bi
= bi
-> desc_link
;
391 hdr
* hhdr
= bi
-> index
[j
];
394 } else if (IS_FORWARDING_ADDR_OR_NIL(hhdr
)) {
395 j
-= (signed_word
)hhdr
;
397 return((struct hblk
*)
398 (((bi
-> key
<< LOG_BOTTOM_SZ
) + j
)
403 bi
= bi
-> desc_link
;