Return char* from page_address(), not void*
src/runtime/gencgc-internal.h
/*
 * Generational Conservative Garbage Collector for SBCL x86
 *
 * inline functions that gc-common.c needs sight of
 */

/*
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 */

#ifndef _GENCGC_INTERNAL_H_
#define _GENCGC_INTERNAL_H_

#include <limits.h>
#include "gc.h"
#include "gencgc-alloc-region.h"
#include "genesis/code.h"
#include "hopscotch.h"

void gc_free_heap(void);
extern char *page_address(page_index_t);
int gencgc_handle_wp_violation(void *);

#if N_WORD_BITS == 64
// It's more economical to store scan_start_offset using 4 bytes than 8.
// Doing so makes struct page fit in 8 bytes if bytes_used takes 2 bytes.
//   scan_start_offset = 4
//   bytes_used        = 2
//   flags             = 1
//   gen               = 1
// If bytes_used takes 4 bytes, then the above is 10 bytes which is padded to
// 12, which is still an improvement over the 16 that it would have been.
# define CONDENSED_PAGE_TABLE
#endif

#if GENCGC_CARD_BYTES > USHRT_MAX
# if GENCGC_CARD_BYTES > UINT_MAX
#   error "GENCGC_CARD_BYTES unexpectedly large."
# else
    typedef unsigned int page_bytes_t;
# endif
#else
typedef unsigned short page_bytes_t;
#endif
/* Note that this structure is also used from Lisp-side in
 * src/code/room.lisp, and the Lisp-side structure layout is currently
 * not groveled from C code but hardcoded. Any changes to the
 * structure layout need to be also made there.
 *
 * FIXME: We should probably just define this structure in Lisp, and
 * output the C version in genesis. -- JES, 2006-12-30.
 */
struct page {
    /* This is the offset from the first byte of some object in memory
     * prior to and no closer than the start of the page to the start
     * of the page. Lower values here are better, 0 is ideal. This
     * is useful for determining where to start when scanning forward
     * through a heap page (either for conservative root validation or
     * for scavenging).
     */
#ifdef CONDENSED_PAGE_TABLE
    // The low bit of the offset indicates the scale factor:
    // 0 = double-lispwords, 1 = gc cards. Large objects are card-aligned,
    // and this representation allows for a 32TB contiguous block using 32K
    // card size. Larger allocations will have pages that can't directly
    // store the full offset. That has to be dealt with by the accessor.
    unsigned int scan_start_offset_;
#else
    os_vm_size_t scan_start_offset_;
#endif

    /* the number of bytes of this page that are used. This may be less
     * than the actual bytes used for pages within the current
     * allocation regions. It should be 0 for all unallocated pages (not
     * hard to achieve).
     * When read, the low bit has to be masked off.
     */
    page_bytes_t bytes_used_;

    unsigned char
        /* 000 free
         * ?01 boxed data
         * ?10 unboxed data
         * ?11 code
         * 1?? open region
         *
         * Constants for this field are defined in gc-internal.h, the
         * xxx_PAGE_FLAG definitions.
         *
         * If the page is free the following slots are invalid, except
         * for the bytes_used which must be zero. */
        allocated :3,
        /* This is set when the page is write-protected. This should
         * always reflect the actual write_protect status of a page.
         * (If the page is written into, we catch the exception, make
         * the page writable, and clear this flag.) */
        write_protected :1,
        /* This flag is set when the above write_protected flag is
         * cleared by the SIGBUS handler (or SIGSEGV handler, for some
         * OSes). This is useful for re-scavenging pages that are
         * written during a GC. */
        write_protected_cleared :1,
        /* If this page should not be moved during a GC then this flag
         * is set. It's only valid during a GC for allocated pages. */
        dont_move :1,
        // FIXME: this should be identical to (dont_move & !large_object),
        // so we don't need to store it as a bit unto itself.
        /* If this page is not a large object page and contains
         * any objects which are pinned */
        has_pins :1,
        /* If the page is part of a large object then this flag is
         * set. No other objects should be allocated to these pages.
         * This is only valid when the page is allocated. */
        large_object :1;

    /* the generation that this page belongs to. This should be valid
     * for all pages that may have objects allocated, even current
     * allocation region pages - this allows the space of an object to
     * be easily determined. */
    generation_index_t gen;
};
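
/* A rough size check could make the layout comment near CONDENSED_PAGE_TABLE
 * concrete. This is only a sketch - the exact figure depends on the
 * platform's padding rules and on whether a 2-byte page_bytes_t is in
 * effect - so it is left as a comment rather than compiled in:
 *
 *   // expect 8 bytes when condensed with a 2-byte bytes_used_,
 *   // and no more than 12 in the other supported layouts
 *   _Static_assert(sizeof (struct page) <= 12, "struct page grew unexpectedly");
 */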
extern struct page *page_table;

#ifndef CONDENSED_PAGE_TABLE

// 32-bit doesn't need magic to reduce the size of scan_start_offset.
#define set_page_scan_start_offset(index,val) \
    page_table[index].scan_start_offset_ = val
#define page_scan_start_offset(index) page_table[index].scan_start_offset_
#else

/// A "condensed" offset reduces page table size, which improves scan locality.
/// As stored, the offset is scaled down either by card size or double-lispwords.
/// If the offset is the maximum, then we must check if the page pointed to by
/// that offset is actually the start of a region, and retry if not.
/// For debugging the iterative algorithm it helps to use a max value
/// that is less than UINT_MAX to get a pass/fail more quickly.

//#define SCAN_START_OFS_MAX 0x3fff
#define SCAN_START_OFS_MAX UINT_MAX

#define page_scan_start_offset(index) \
    (page_table[index].scan_start_offset_ != SCAN_START_OFS_MAX \
     ? (os_vm_size_t)(page_table[index].scan_start_offset_ & ~1) \
       << ((page_table[index].scan_start_offset_ & 1) ? (GENCGC_CARD_SHIFT-1) : WORD_SHIFT) \
     : scan_start_offset_iterated(index))
static os_vm_size_t __attribute__((unused))
scan_start_offset_iterated(page_index_t index)
{
    // The low bit of the MAX is the 'scale' bit. The max pages we can look
    // backwards is therefore the max shifted right by 1 bit.
    page_index_t tot_offset_in_pages = 0;
    unsigned int offset;
    do {
        page_index_t lookback_page = index - tot_offset_in_pages;
        offset = page_table[lookback_page].scan_start_offset_;
        tot_offset_in_pages += offset >> 1;
    } while (offset == SCAN_START_OFS_MAX);
    return (os_vm_size_t)tot_offset_in_pages << GENCGC_CARD_SHIFT;
}
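
/* A worked example of the lookback, purely illustrative and assuming the
 * debug value SCAN_START_OFS_MAX == 0x3fff: a page 10000 pages past the
 * start of a huge object stores 0x3fff, because (2*10000)|1 does not fit.
 * Decoding hops back 0x3fff>>1 == 8191 pages to a page 1809 pages from the
 * object start, which stores the representable (2*1809)|1 == 3619, so the
 * loop terminates with 8191 + 1809 == 10000 pages, then scales the total
 * by GENCGC_CARD_SHIFT to get the byte offset. */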
/// This is a macro, but it could/should be an inline function.
/// Problem is that we need gc_assert() which is in gc-internal,
/// and it's easy enough for GC to flip around some stuff, but then
/// you have a different problem that more things get messed up,
/// such as {foo}-os.c. Basically we have inclusion order issues
/// that nobody ever bothers to deal with, in addition to the fact
/// that a something-internal header is *directly* included by others.
/// (Indirect inclusion should be allowed, direct should not be)
#define set_page_scan_start_offset(index, ofs) \
    { os_vm_size_t ofs_ = ofs; \
      unsigned int lsb_ = ofs_ != 0 && !(ofs_ & (GENCGC_CARD_BYTES-1)); \
      os_vm_size_t scaled_ = \
          (ofs_ >> (lsb_ ? GENCGC_CARD_SHIFT-1 : WORD_SHIFT)) | lsb_; \
      if (scaled_ > SCAN_START_OFS_MAX) \
          { gc_assert(lsb_ == 1); scaled_ = SCAN_START_OFS_MAX; } \
      page_table[index].scan_start_offset_ = scaled_; }
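
/* An encode/decode round trip, purely illustrative and assuming 64-bit
 * lispwords (WORD_SHIFT == 3) and GENCGC_CARD_BYTES == 32768
 * (GENCGC_CARD_SHIFT == 15), for some page p:
 *
 *   set_page_scan_start_offset(p, 98304);  // 3 cards back, card-aligned
 *     // stores (98304 >> 14) | 1 == 7     -- low bit 1 selects card scale
 *   page_scan_start_offset(p);
 *     // yields (7 & ~1) << 14 == 98304    -- round-trips exactly
 *
 *   set_page_scan_start_offset(p, 48);     // small, not card-aligned
 *     // stores 48 >> 3 == 6               -- low bit 0 selects word scale
 *   page_scan_start_offset(p);
 *     // yields (6 & ~1) << 3 == 48
 */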
#endif

/// There is some additional cleverness that could potentially be had -
/// the "need_to_zero" bit (a/k/a "page dirty") is obviously 1 if the page
/// contains objects. Only for an empty page must we distinguish between pages
/// not needing to be zero-filled before next use and those which must be.
/// Thus, masking off the dirty bit could be avoided by not storing it for
/// any in-use page. But since that's not what we do - we set the bit to 1
/// as soon as a page is used - we do have to mask off the bit.
#define page_bytes_used(index) (page_table[index].bytes_used_ & ~1)
#define page_need_to_zero(index) (page_table[index].bytes_used_ & 1)
#define set_page_bytes_used(index,val) \
    page_table[index].bytes_used_ = (val) | page_need_to_zero(index)
#define set_page_need_to_zero(index,val) \
    page_table[index].bytes_used_ = page_bytes_used(index) | val
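
/* Illustrative only: the byte count and the dirty bit share one field, and
 * the accessors keep them from clobbering each other. For some page p:
 *
 *   set_page_need_to_zero(p, 1);  // mark the page dirty
 *   set_page_bytes_used(p, 512);  // bytes_used_ becomes 512 | 1 == 513
 *   page_bytes_used(p);           // => 512, dirty bit masked off
 *   page_need_to_zero(p);         // => 1, still dirty
 *
 * This works because 'val' is always even - object allocations are at least
 * dual-word granular - so the low bit is free to carry the dirty flag. */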
/* values for the page.allocated field */

extern page_index_t page_table_pages;

/* forward declarations */
#ifdef LISP_FEATURE_X86
void sniff_code_object(struct code *code, os_vm_size_t displacement);
void gencgc_apply_code_fixups(struct code *old_code, struct code *new_code);
#endif

sword_t update_dynamic_space_free_pointer(void);
void gc_alloc_update_page_tables(int page_type_flag, struct alloc_region *alloc_region);
void gc_alloc_update_all_page_tables(int);
void gc_set_region_empty(struct alloc_region *region);

/*
 * predicates
 */

/* Find the page index within the page_table for the given
 * address. Return -1 on failure. */
static inline page_index_t
find_page_index(void *addr)
{
    if (addr >= (void*)DYNAMIC_SPACE_START) {
        page_index_t index = ((uintptr_t)addr -
                              (uintptr_t)DYNAMIC_SPACE_START) / GENCGC_CARD_BYTES;
        if (index < page_table_pages)
            return (index);
    }
    return (-1);
}
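
/* Usage note (not asserted anywhere in this header): for any index i in
 * [0, page_table_pages), find_page_index(page_address(i)) == i, since pages
 * are laid out contiguously from DYNAMIC_SPACE_START at GENCGC_CARD_BYTES
 * intervals. Any pointer outside that range, including one below
 * DYNAMIC_SPACE_START, maps to -1. */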
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
static inline boolean pinned_p(lispobj obj, page_index_t page)
{
    extern struct hopscotch_table pinned_objects;
    return page_table[page].has_pins
        && hopscotch_containsp(&pinned_objects, obj);
}
#else
#  define pinned_p(obj, page) (0)
#endif

// Return true only if 'obj' must be *physically* transported to survive gc.
// Return false if obj is in the immobile space regardless of its generation.
// Pretend pinned objects are not in oldspace so that they don't get moved.
static boolean __attribute__((unused))
from_space_p(lispobj obj)
{
    page_index_t page_index = find_page_index((void*)obj);
    return page_index >= 0
        && page_table[page_index].gen == from_space
        && !pinned_p(obj, page_index);
}
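
/* Sketch of a typical caller, hypothetical and not part of this header:
 * a scavenger consults from_space_p() before deciding to transport, roughly
 *
 *   if (from_space_p(ptr))
 *       ;  // copy the object and leave a forwarding pointer behind
 *   else
 *       ;  // leave it in place (newspace, pinned, static, or immobile)
 *
 * so pinned pages and immobile-space objects are never moved. */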
extern page_index_t last_free_page;
extern boolean gencgc_partial_pickup;

#endif

extern uword_t
walk_generation(uword_t (*proc)(lispobj*,lispobj*,uword_t),
                generation_index_t generation, uword_t extra);