2 * Generational Conservative Garbage Collector for SBCL x86
4 * inline functions that gc-common.c needs sight of
9 * This software is part of the SBCL system. See the README file for
12 * This software is derived from the CMU CL system, which was
13 * written at Carnegie Mellon University and released into the
14 * public domain. The software is in the public domain and is
15 * provided with absolutely no warranty. See the COPYING and CREDITS
16 * files for more information.
19 #ifndef _GENCGC_INTERNAL_H_
20 #define _GENCGC_INTERNAL_H_
24 #include "gencgc-alloc-region.h"
25 #include "genesis/code.h"
26 #include "hopscotch.h"
28 void gc_free_heap(void);
29 extern char *page_address(page_index_t
);
30 int gencgc_handle_wp_violation(void *);
34 // It's more economical to store scan_start_offset using 4 bytes than 8.
35 // Doing so makes struct page fit in 8 bytes if bytes_used takes 2 bytes.
36 // scan_start_offset = 4
40 // If bytes_used takes 4 bytes, then the above is 10 bytes which is padded to
41 // 12, which is still an improvement over the 16 that it would have been.
42 # define CONDENSED_PAGE_TABLE
/* Pick the narrowest unsigned type able to hold a per-page byte count.
 * Two bytes when GENCGC_CARD_BYTES permits keeps struct page small
 * (see the size discussion above for CONDENSED_PAGE_TABLE). */
#if GENCGC_CARD_BYTES > USHRT_MAX
# if GENCGC_CARD_BYTES > UINT_MAX
#   error "GENCGC_CARD_BYTES unexpectedly large."
# else
typedef unsigned int page_bytes_t;
# endif
#else
typedef unsigned short page_bytes_t;
#endif
55 #if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
56 /* It is possible to enable fine-grained object pinning
57 * (versus page-level pinning) for any backend using gengc.
58 * The only "cost" is an extra test in from_space_p()
59 * which may or may not be worth it.
60 * But partial evacuation of pages is a generally nice feature */
61 # define PIN_GRANULARITY_LISPOBJ
64 /* Note that this structure is also used from Lisp-side in
65 * src/code/room.lisp, and the Lisp-side structure layout is currently
66 * not groveled from C code but hardcoded. Any changes to the
67 * structure layout need to be also made there.
69 * FIXME: We should probably just define this structure in Lisp, and
70 * output the C version in genesis. -- JES, 2006-12-30.
73 /* This is the offset from the first byte of some object in memory
74 * prior to and no closer than the start of the page to the start
75 * of the page. Lower values here are better, 0 is ideal. This
76 * is useful for determining where to start when scanning forward
77 * through a heap page (either for conservative root validation or
80 #ifdef CONDENSED_PAGE_TABLE
81 // The low bit of the offset indicates the scale factor:
82 // 0 = double-lispwords, 1 = gc cards. Large objects are card-aligned,
83 // and this representation allows for a 32TB contiguous block using 32K
84 // card size. Larger allocations will have pages that can't directly
85 // store the full offset. That has to be dealt with by the accessor.
86 unsigned int scan_start_offset_
;
88 os_vm_size_t scan_start_offset_
;
91 /* the number of bytes of this page that are used. This may be less
92 * than the actual bytes used for pages within the current
93 * allocation regions. It should be 0 for all unallocated pages (not
95 * When read, the low bit has to be masked off.
97 page_bytes_t bytes_used_
;
106 * Constants for this field are defined in gc-internal.h, the
107 * xxx_PAGE_FLAG definitions.
109 * If the page is free the following slots are invalid, except
110 * for the bytes_used which must be zero. */
112 /* This is set when the page is write-protected. This should
113 * always reflect the actual write_protect status of a page.
114 * (If the page is written into, we catch the exception, make
115 * the page writable, and clear this flag.) */
117 /* This flag is set when the above write_protected flag is
118 * cleared by the SIGBUS handler (or SIGSEGV handler, for some
119 * OSes). This is useful for re-scavenging pages that are
120 * written during a GC. */
121 write_protected_cleared
:1,
122 /* If this page should not be moved during a GC then this flag
123 * is set. It's only valid during a GC for allocated pages. */
125 // FIXME: this should be identical to (dont_move & !large_object),
126 // so we don't need to store it as a bit unto itself.
127 /* If this page is not a large object page and contains
128 * any objects which are pinned */
130 /* If the page is part of a large object then this flag is
131 * set. No other objects should be allocated to these pages.
132 * This is only valid when the page is allocated. */
135 /* the generation that this page belongs to. This should be valid
136 * for all pages that may have objects allocated, even current
137 * allocation region pages - this allows the space of an object to
138 * be easily determined. */
139 generation_index_t gen
;
141 extern struct page
*page_table
;
143 struct __attribute__((packed
)) corefile_pte
{
144 uword_t sso
; // scan start offset
145 page_bytes_t bytes_used
;
#ifndef CONDENSED_PAGE_TABLE

// 32-bit doesn't need magic to reduce the size of scan_start_offset.
#define set_page_scan_start_offset(index,val) \
  page_table[index].scan_start_offset_ = val
#define page_scan_start_offset(index) page_table[index].scan_start_offset_

#else

/// A "condensed" offset reduces page table size, which improves scan locality.
/// As stored, the offset is scaled down either by card size or double-lispwords.
/// If the offset is the maximum, then we must check if the page pointed to by
/// that offset is actually the start of a region, and retry if not.
/// For debugging the iterative algorithm it helps to use a max value
/// that is less than UINT_MAX to get a pass/fail more quickly.

//#define SCAN_START_OFS_MAX 0x3fff
#define SCAN_START_OFS_MAX UINT_MAX

/// Decode a stored offset: bit 0 is the scale selector (0 = double-lispwords,
/// 1 = GC cards), the remaining bits are the scaled offset. A stored value of
/// SCAN_START_OFS_MAX means the true offset saturated the field and must be
/// recovered by walking backwards through the page table.
#define page_scan_start_offset(index) \
  (page_table[index].scan_start_offset_ != SCAN_START_OFS_MAX \
   ? (os_vm_size_t)(page_table[index].scan_start_offset_ & ~1) \
     << ((page_table[index].scan_start_offset_ & 1)?(GENCGC_CARD_SHIFT-1):WORD_SHIFT) \
   : scan_start_offset_iterated(index))

/* Recover the scan-start offset (in bytes) for a page whose stored offset
 * saturated. Hops backwards page by page, accumulating each intermediate
 * page's claimed offset, until reaching a page whose stored value did not
 * saturate. */
static os_vm_size_t __attribute__((unused))
scan_start_offset_iterated(page_index_t index)
{
    // The low bit of the MAX is the 'scale' bit. The max pages we can look
    // backwards is therefore the max shifted right by 1 bit.
    page_index_t tot_offset_in_pages = 0;
    unsigned int offset;
    do {
        page_index_t lookback_page = index - tot_offset_in_pages;
        offset = page_table[lookback_page].scan_start_offset_;
        tot_offset_in_pages += offset >> 1;
    } while (offset == SCAN_START_OFS_MAX);
    return (os_vm_size_t)tot_offset_in_pages << GENCGC_CARD_SHIFT;
}

/// This is a macro, but it could/should be an inline function.
/// Problem is that we need gc_assert() which is in gc-internal,
/// and it's easy enough for GC to flip around some stuff, but then
/// you have a different problem that more things get messed up,
/// such as {foo}-os.c. Basically we have inclusion order issues
/// that nobody ever bothers to deal with, in addition to the fact
/// that a something-internal header is *directly* included by others.
/// (Indirect inclusion should be allowed, direct should not be)
#define set_page_scan_start_offset(index, ofs) \
  { os_vm_size_t ofs_ = ofs; \
    unsigned int lsb_ = ofs_!=0 && !(ofs_ & (GENCGC_CARD_BYTES-1)); \
    os_vm_size_t scaled_ = \
        (ofs_ >> (lsb_ ? GENCGC_CARD_SHIFT-1 : WORD_SHIFT)) | lsb_; \
    if (scaled_ > SCAN_START_OFS_MAX) \
        { gc_assert(lsb_ == 1); scaled_ = SCAN_START_OFS_MAX; } \
    page_table[index].scan_start_offset_ = scaled_; }

#endif
/// There is some additional cleverness that could potentially be had -
/// the "need_to_zero" bit (a/k/a "page dirty") is obviously 1 if the page
/// contains objects. Only for an empty page must we distinguish between pages
/// not needing be zero-filled before next use and those which must be.
/// Thus, masking off the dirty bit could be avoided by not storing it for
/// any in-use page. But since that's not what we do - we set the bit to 1
/// as soon as a page is used - we do have to mask off the bit.
#define page_bytes_used(index) (page_table[index].bytes_used_ & ~1)
#define page_need_to_zero(index) (page_table[index].bytes_used_ & 1)
#define set_page_bytes_used(index,val) \
  page_table[index].bytes_used_ = (val) | page_need_to_zero(index)
// NOTE: 'val' is parenthesized here (it was not previously) so that an
// expression argument such as 'a & b' cannot mis-associate with the '|'.
#define set_page_need_to_zero(index,val) \
  page_table[index].bytes_used_ = page_bytes_used(index) | (val)
221 /* values for the page.allocated field */
224 extern page_index_t page_table_pages
;
227 /* forward declarations */
229 sword_t
update_dynamic_space_free_pointer(void);
230 void gc_alloc_update_page_tables(int page_type_flag
, struct alloc_region
*alloc_region
);
231 void gc_alloc_update_all_page_tables(int);
232 void gc_set_region_empty(struct alloc_region
*region
);
238 /* Find the page index within the page_table for the given
239 * address. Return -1 on failure. */
240 static inline page_index_t
241 find_page_index(void *addr
)
243 if (addr
>= (void*)DYNAMIC_SPACE_START
) {
244 page_index_t index
= ((uintptr_t)addr
-
245 (uintptr_t)DYNAMIC_SPACE_START
) / GENCGC_CARD_BYTES
;
246 if (index
< page_table_pages
)
#ifdef PIN_GRANULARITY_LISPOBJ
/* Return true if 'obj' (residing on 'page') was individually pinned.
 * Only meaningful during GC, once pinned_objects has been populated. */
static inline boolean pinned_p(lispobj obj, page_index_t page)
{
    extern struct hopscotch_table pinned_objects;
    // Test the cheap per-page flag first; probe the hash table only if
    // the page is known to contain at least one pinned object.
    return page_table[page].has_pins
        && hopscotch_containsp(&pinned_objects, obj);
}
#else
/* Without fine-grained pinning, no individual object is ever pinned. */
#  define pinned_p(obj, page) (0)
#endif
263 // Return true only if 'obj' must be *physically* transported to survive gc.
264 // Return false if obj is in the immobile space regardless of its generation.
265 // Pretend pinned objects are not in oldspace so that they don't get moved.
266 static boolean
__attribute__((unused
))
267 from_space_p(lispobj obj
)
269 page_index_t page_index
= find_page_index((void*)obj
);
270 return page_index
>= 0
271 && page_table
[page_index
].gen
== from_space
272 && !pinned_p(obj
, page_index
);
275 extern page_index_t last_free_page
;
276 extern boolean gencgc_partial_pickup
;
281 walk_generation(uword_t (*proc
)(lispobj
*,lispobj
*,uword_t
),
282 generation_index_t generation
, uword_t extra
);