Define fun_code_header in C for symmetry with Lisp
[sbcl.git] / src / runtime / gencgc-internal.h
/*
 * Generational Conservative Garbage Collector for SBCL x86
 *
 * inline functions that gc-common.c needs sight of
 */

/*
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 */
#ifndef _GENCGC_INTERNAL_H_
#define _GENCGC_INTERNAL_H_

#include <limits.h>
#include "gc.h"
#include "gencgc-alloc-region.h"
#include "genesis/code.h"

void gc_free_heap(void);
extern void *page_address(page_index_t);
int gencgc_handle_wp_violation(void *);
#if N_WORD_BITS == 64
// It's more economical to store scan_start_offset using 4 bytes than 8.
// Doing so makes struct page fit in 8 bytes if bytes_used takes 2 bytes.
//   scan_start_offset = 4
//   bytes_used        = 2
//   flags             = 1
//   gen               = 1
// If bytes_used takes 4 bytes, then the above is 10 bytes which is padded to
// 12, which is still an improvement over the 16 that it would have been.
# define CONDENSED_PAGE_TABLE
#endif
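// Illustration only: a self-contained sketch (hypothetical struct name, not
// part of this header) of the condensed 64-bit layout described above. With a
// 4-byte offset, a 2-byte byte count, one byte of flag bits and one byte of
// generation, the entry packs into 8 bytes with no padding.
#if 0
struct condensed_page_example {
    unsigned int   scan_start_offset_;  /* 4 bytes */
    unsigned short bytes_used_;         /* 2 bytes */
    unsigned char  flags;               /* 1 byte: write_protected, allocated, ... */
    signed char    gen;                 /* 1 byte: generation index */
};
_Static_assert(sizeof(struct condensed_page_example) == 8,
               "condensed page entry packs into 8 bytes");
#endif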
#if GENCGC_CARD_BYTES > USHRT_MAX
# if GENCGC_CARD_BYTES > UINT_MAX
#  error "GENCGC_CARD_BYTES unexpectedly large."
# else
typedef unsigned int page_bytes_t;
# endif
#else
typedef unsigned short page_bytes_t;
#endif
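// For example, assuming a 32K card (GENCGC_CARD_BYTES == 32768, which is
// below USHRT_MAX), page_bytes_t above is an unsigned short and the per-page
// byte count fits in 2 bytes, matching the 8-byte layout sketched earlier.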
/* Note that this structure is also used from Lisp-side in
 * src/code/room.lisp, and the Lisp-side structure layout is currently
 * not groveled from C code but hardcoded. Any changes to the
 * structure layout need to be also made there.
 *
 * FIXME: We should probably just define this structure in Lisp, and
 * output the C version in genesis. -- JES, 2006-12-30.
 */
struct page {
    /* This is the offset, counted back from the start of this page, to
     * the first byte of some object that starts at or before the start
     * of the page. Lower values here are better, 0 is ideal. This
     * is useful for determining where to start when scanning forward
     * through a heap page (either for conservative root validation or
     * for scavenging).
     */
#ifdef CONDENSED_PAGE_TABLE
    // The low bit of the offset indicates the scale factor:
    // 0 = double-lispwords, 1 = gc cards. Large objects are card-aligned,
    // and this representation allows for a 32TB contiguous block using 32K
    // card size. Larger allocations will have pages that can't directly
    // store the full offset. That has to be dealt with by the accessor.
    unsigned int scan_start_offset_;
#else
    os_vm_size_t scan_start_offset_;
#endif
    /* the number of bytes of this page that are used. This may be less
     * than the actual bytes used for pages within the current
     * allocation regions. It should be 0 for all unallocated pages (not
     * hard to achieve).
     * When read, the low bit has to be masked off.
     */
    page_bytes_t bytes_used_;
    unsigned char
        /* This is set when the page is write-protected. This should
         * always reflect the actual write_protect status of a page.
         * (If the page is written into, we catch the exception, make
         * the page writable, and clear this flag.) */
        write_protected :1,
        /* This flag is set when the above write_protected flag is
         * cleared by the SIGBUS handler (or SIGSEGV handler, for some
         * OSes). This is useful for re-scavenging pages that are
         * written during a GC. */
        write_protected_cleared :1,
        /*  000 free
         *  ?01 boxed data
         *  ?10 unboxed data
         *  ?11 code
         *  1?? open region
         *
         * Constants for this field are defined in gc-internal.h, the
         * xxx_PAGE_FLAG definitions.
         *
         * If the page is free the following slots are invalid, except
         * for the bytes_used which must be zero. */
        allocated :3,
        /* If this page should not be moved during a GC then this flag
         * is set. It's only valid during a GC for allocated pages. */
        dont_move :1,
        /* If this page is not a large object page and contains
         * any objects which are pinned, this flag is set and the page
         * has a pin bitmap in page_table_pinned_dwords. */
        has_pin_map :1,
        /* If the page is part of a large object then this flag is
         * set. No other objects should be allocated to these pages.
         * This is only valid when the page is allocated. */
        large_object :1;

    /* the generation that this page belongs to. This should be valid
     * for all pages that may have objects allocated, even current
     * allocation region pages - this allows the space of an object to
     * be easily determined. */
    generation_index_t gen;
};

extern struct page *page_table;
#ifndef CONDENSED_PAGE_TABLE

// 32-bit doesn't need magic to reduce the size of scan_start_offset.
#define set_page_scan_start_offset(index,val) \
  page_table[index].scan_start_offset_ = val
#define page_scan_start_offset(index) page_table[index].scan_start_offset_

#else

/// A "condensed" offset reduces page table size, which improves scan locality.
/// As stored, the offset is scaled down either by card size or double-lispwords.
/// If the offset is the maximum, then we must check if the page pointed to by
/// that offset is actually the start of a region, and retry if not.
/// For debugging the iterative algorithm it helps to use a max value
/// that is less than UINT_MAX to get a pass/fail more quickly.

//#define SCAN_START_OFS_MAX 0x3fff
#define SCAN_START_OFS_MAX UINT_MAX

#define page_scan_start_offset(index) \
  (page_table[index].scan_start_offset_ != SCAN_START_OFS_MAX \
   ? (os_vm_size_t)(page_table[index].scan_start_offset_ & ~1) \
     << ((page_table[index].scan_start_offset_ & 1) ? (GENCGC_CARD_SHIFT-1) : WORD_SHIFT) \
   : scan_start_offset_iterated(index))
static os_vm_size_t __attribute__((unused))
scan_start_offset_iterated(page_index_t index)
{
    // The low bit of the MAX is the 'scale' bit. The max pages we can look
    // backwards is therefore the max shifted right by 1 bit.
    page_index_t tot_offset_in_pages = 0;
    unsigned int offset;
    do {
        page_index_t lookback_page = index - tot_offset_in_pages;
        offset = page_table[lookback_page].scan_start_offset_;
        tot_offset_in_pages += offset >> 1;
    } while (offset == SCAN_START_OFS_MAX);
    return (os_vm_size_t)tot_offset_in_pages << GENCGC_CARD_SHIFT;
}
#define set_page_scan_start_offset(index, ofs) \
  { unsigned int lsb_ = ((ofs) & (GENCGC_CARD_BYTES-1)) == 0; \
    os_vm_size_t scaled_ = ((ofs) >> (lsb_ ? GENCGC_CARD_SHIFT-1 : WORD_SHIFT)) | lsb_; \
    if (scaled_ > SCAN_START_OFS_MAX) gc_assert(lsb_ == 1); \
    page_table[index].scan_start_offset_ = \
      scaled_ > SCAN_START_OFS_MAX ? SCAN_START_OFS_MAX : scaled_; }
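/// Worked example (illustrative values, assuming GENCGC_CARD_SHIFT == 15 and
/// WORD_SHIFT == 3): storing a card-aligned offset of 32 MiB (0x2000000)
/// computes lsb_ = 1 and scaled_ = (0x2000000 >> 14) | 1 = 0x801; reading it
/// back, page_scan_start_offset() sees the low bit set and returns
/// (0x801 & ~1) << 14 = 0x2000000. A small, non-card-aligned offset such as
/// 16 bytes is stored as (16 >> 3) | 0 = 2 and decodes as 2 << 3 = 16.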
#endif
/// There is some additional cleverness that could potentially be had -
/// the "need_to_zero" bit (a/k/a "page dirty") is obviously 1 if the page
/// contains objects. Only for an empty page must we distinguish between pages
/// not needing to be zero-filled before next use and those which must be.
/// Thus, masking off the dirty bit could be avoided by not storing it for
/// any in-use page. But since that's not what we do - we set the bit to 1
/// as soon as a page is used - we do have to mask off the bit.
#define page_bytes_used(index) (page_table[index].bytes_used_ & ~1)
#define page_need_to_zero(index) (page_table[index].bytes_used_ & 1)
#define set_page_bytes_used(index,val) \
  page_table[index].bytes_used_ = (val) | page_need_to_zero(index)
#define set_page_need_to_zero(index,val) \
  page_table[index].bytes_used_ = page_bytes_used(index) | val
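// For example, a page with bytes_used_ == 0x1001 reports page_bytes_used() of
// 0x1000 and page_need_to_zero() of 1; set_page_bytes_used(i, 0x2000) keeps
// the dirty bit, leaving bytes_used_ == 0x2001. Byte counts are always even
// (allocation granularity is at least one word), so the low bit is free to
// hold the flag.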
/* values for the page.allocated field */

extern page_index_t page_table_pages;
/* forward declarations */
#ifdef LISP_FEATURE_X86
void sniff_code_object(struct code *code, os_vm_size_t displacement);
void gencgc_apply_code_fixups(struct code *old_code, struct code *new_code);
#endif

sword_t update_dynamic_space_free_pointer(void);
void gc_alloc_update_page_tables(int page_type_flag, struct alloc_region *alloc_region);
void gc_alloc_update_all_page_tables(int);
void gc_set_region_empty(struct alloc_region *region);
/*
 * predicates
 */
/* Find the page index within the page_table for the given
 * address. Return -1 on failure. */
static inline page_index_t
find_page_index(void *addr)
{
    if (addr >= (void*)DYNAMIC_SPACE_START) {
        page_index_t index = ((pointer_sized_uint_t)addr -
                              (pointer_sized_uint_t)DYNAMIC_SPACE_START) / GENCGC_CARD_BYTES;
        if (index < page_table_pages)
            return (index);
    }
    return (-1);
}
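/* For example, an address 5*GENCGC_CARD_BYTES + 12 bytes above
 * DYNAMIC_SPACE_START maps to page index 5 (provided the table has more than
 * 5 pages); any address below DYNAMIC_SPACE_START, or beyond the end of the
 * page table, yields -1. */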
static const int n_dwords_in_card = GENCGC_CARD_BYTES / N_WORD_BYTES / 2;
extern uword_t *page_table_pinned_dwords;

static inline boolean pinned_p(lispobj obj, page_index_t page)
{
    if (!page_table[page].has_pin_map) return 0;
    int dword_num = (obj & (GENCGC_CARD_BYTES-1)) >> (1+WORD_SHIFT);
    uword_t *bits = &page_table_pinned_dwords[page * (n_dwords_in_card/N_WORD_BITS)];
    return (bits[dword_num / N_WORD_BITS] >> (dword_num % N_WORD_BITS)) & 1;
}
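/* Worked example (assuming, for illustration, GENCGC_CARD_BYTES == 32768 and
 * a 64-bit build with N_WORD_BYTES == 8): n_dwords_in_card is 32768/8/2 = 2048
 * dualwords, so each page's pin map occupies 2048/64 = 32 uwords in
 * page_table_pinned_dwords. An object 256 bytes into its card falls in
 * dualword 256 >> 4 = 16, i.e. bit 16 of the page's first bitmap word. */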
// Return true only if 'obj' must be *physically* transported to survive gc.
// Return false if obj is in the immobile space regardless of its generation.
// Pretend pinned objects are not in oldspace so that they don't get moved.
// Any lowtag bits on 'obj' are ignored.
static boolean __attribute__((unused))
from_space_p(lispobj obj)
{
    page_index_t page_index = find_page_index((void*)obj);
    return page_index >= 0
        && page_table[page_index].gen == from_space
        && !pinned_p(obj, page_index);
}

static inline boolean
new_space_p(lispobj obj)
{
    page_index_t page_index = find_page_index((void*)obj);
    return page_index >= 0 && page_table[page_index].gen == new_space;
}
extern page_index_t last_free_page;
extern boolean gencgc_partial_pickup;

#endif