/* Extracted from sbcl.git: src/runtime/marknsweepgc.c
   at commit c4091d48dbb1d2f92047a35d63cac360985a5b3c
   ("Add :IMMOBILE-CODE feature."). */
1 /*
2 * Extension to GENCGC which provides for pages of objects
3 * that are static in placement but subject to reclamation.
4 */
6 /*
7 * This software is part of the SBCL system. See the README file for
8 * more information.
10 * This software is derived from the CMU CL system, which was
11 * written at Carnegie Mellon University and released into the
12 * public domain. The software is in the public domain and is
13 * provided with absolutely no warranty. See the COPYING and CREDITS
14 * files for more information.
18 * TODO:
19 * 1. Space accounting (GET-BYTES-CONSED etc)
20 * 2. Heuristic for auto-trigger. (Can't yet because no space accounting)
21 * Currently happens with regular GC trigger mechanism.
22 * 3. Specify space size on startup
23 * 4. De-fragment the space on save-lisp-and-die,
24 * but possibly also in out-of-space conditions.
25 * Fragmentation is typically not more than 5%, so this is not a huge issue.
28 #include "gc.h"
29 #include "gc-internal.h"
30 #include "genesis/vector.h"
32 #include <stdlib.h>
33 #include <stdio.h>
// Page index of the first variable-object page: the fixed-object subspace
// occupies the low part of immobile space, one card per page.
#define FIRST_VARYOBJ_PAGE (IMMOBILE_FIXEDOBJ_SUBSPACE_SIZE/(int)IMMOBILE_CARD_BYTES)
#define WORDS_PER_PAGE ((int)IMMOBILE_CARD_BYTES/N_WORD_BYTES)
#define DOUBLEWORDS_PER_PAGE (WORDS_PER_PAGE/2)

// Debug knobs: define DEBUG for dprintf tracing and extra assertions,
// VERIFY_PAGE_GENS for per-page generation-mask consistency checks.
#undef DEBUG
#undef VERIFY_PAGE_GENS

#ifdef DEBUG
# define dprintf(arg) fprintf arg
FILE * logfile;
#else
# define dprintf(arg)
#endif
// Highest page index in use in each subspace, refreshed at the start of
// each GC by update_immobile_nursery_bits().
low_page_index_t max_used_fixedobj_page, max_used_varyobj_page;

// This table is for objects fixed in size, as opposed to variable-sized.
// (Immobile objects are naturally fixed in placement)
struct fixedobj_page { // 12 bytes per page
    // Packed attributes: 'packed' aliases the four bytes of 'parts' so the
    // whole attribute word can be claimed with a single compare-and-swap.
    union immobile_page_attr {
      int packed;
      struct {
        unsigned char flags;
        /* space per object in Lisp words. Can exceed obj_size
           to align on a larger boundary */
        unsigned char obj_align;
        unsigned char obj_size; /* in Lisp words, incl. header */
        /* Which generations have data on this page */
        unsigned char gens_; // a bitmap
      } parts;
    } attr;
    int free_index; // index is in bytes. 4 bytes
    short int prior_gc_free_word_index; // index is in words. 2 bytes
    /* page index of next page with same attributes */
    short int page_link; // 2 bytes
} *fixedobj_pages;

// Re-scan queue of newly-promoted objects, placed just past the in-use
// varyobj pages (see update_immobile_nursery_bits).
unsigned int* immobile_scav_queue;
int immobile_scav_queue_head;
// Number of items enqueued; can exceed QCAPACITY on overflow.
// If overflowed, the queue is unusable until reset.
unsigned int immobile_scav_queue_count;
#define QCAPACITY (IMMOBILE_CARD_BYTES/sizeof(int))
// Shorthand for the generation bitmap inside the packed attributes.
#define gens attr.parts.gens_

// These are the high 2 bits of 'flags'
#define WRITE_PROTECT 0x80
#define WRITE_PROTECT_CLEARED 0x40

// Packing and unpacking attributes
// the low two flag bits are for write-protect status
#define MAKE_ATTR(spacing,size,flags) (((spacing)<<8)|((size)<<16)|flags)
#define OBJ_SPACING(attr) ((attr>>8) & 0xFF)

// Ignore the write-protect bits and the generations when comparing attributes
#define ATTRIBUTES_MATCH_P(page_attr,specified_attr) \
  ((page_attr & 0xFFFF3F) == specified_attr)
// Replace only the WP bits of 'flags', preserving the low 6 bits.
#define SET_WP_FLAG(index,flag) \
  fixedobj_pages[index].attr.parts.flags = (fixedobj_pages[index].attr.parts.flags & 0x3F) | flag

#define page_obj_align(i) fixedobj_pages[i].attr.parts.obj_align
#define page_obj_size(i) fixedobj_pages[i].attr.parts.obj_size
#define set_page_full(i) fixedobj_pages[i].free_index = IMMOBILE_CARD_BYTES
#define page_full_p(i) (fixedobj_pages[i].free_index >= (int)IMMOBILE_CARD_BYTES)
#define fixedobj_page_wp(i) (fixedobj_pages[i].attr.parts.flags & WRITE_PROTECT)
/// Variable-length pages:

// Array of inverted write-protect flags, 1 bit per page.
unsigned int* varyobj_page_touched_bits;
static int n_bitmap_elts; // length of array measured in 'int's

// Array of offsets backwards in double-lispwords from the page end
// to the lowest-addressed object touching the page. This offset can
// point to a hole, but we prefer that it not. If the offset is zero,
// the page has no object other than possibly a hole resulting
// from a freed object.
unsigned short* varyobj_page_scan_start_offset;

// Array of page generation masks
unsigned char* varyobj_page_header_gens;
// Holes to be stuffed back into the managed free list.
lispobj varyobj_holes;

// NOTE(review): both macros index with x relative to FIRST_VARYOBJ_PAGE for
// the array subscript, but varyobj_page_touched shifts by (x&31) rather than
// ((x-FIRST_VARYOBJ_PAGE)&31) — correct only if FIRST_VARYOBJ_PAGE is a
// multiple of 32; presumably guaranteed by the subspace size. Verify.
#define VARYOBJ_PAGE_GENS(x) varyobj_page_header_gens[x-FIRST_VARYOBJ_PAGE]
#define varyobj_page_touched(x) \
  ((varyobj_page_touched_bits[(x-FIRST_VARYOBJ_PAGE)/32] >> (x&31)) & 1)

#ifdef VERIFY_PAGE_GENS
void check_fixedobj_page(low_page_index_t);
void check_varyobj_pages();
#endif
129 // Object header: generation byte --| |-- widetag
130 // v v
131 // 0xzzzzzzzz GGzzzzww
132 // arbitrary data -------- ---- length in words
134 // There is a hard constraint on NUM_GENERATIONS, which is currently 8.
135 // (0..5=normal, 6=pseudostatic, 7=scratch)
136 // It could be as high as 16 for 32-bit words (wherein scratch=gen15)
137 // or 32 for 64-bits words (wherein scratch=gen31).
138 // In each case, the VISITED flag bit weight would need to be modified.
139 // Shifting a 1 bit left by the contents of the generation byte
140 // must not overflow a register.
142 #ifdef LISP_FEATURE_LITTLE_ENDIAN
143 static inline void assign_generation(lispobj* obj, generation_index_t gen)
145 ((generation_index_t*)obj)[3] = gen;
// Turn a grey node black.
// Sets the VISITED flag in the generation byte to record that the object
// has been scavenged; only valid for objects already in new_space.
static inline void set_visited(lispobj* obj)
{
#ifdef DEBUG
    gc_assert(__immobile_obj_gen_bits(obj) == new_space);
#endif
    ((generation_index_t*)obj)[3] |= IMMOBILE_OBJ_VISITED_FLAG;
}
155 #else
156 #error "Need to define assign_generation() for big-endian"
157 #endif
159 static inline void *
160 low_page_address(low_page_index_t page_num)
162 return ((void*)IMMOBILE_SPACE_START + (page_num * IMMOBILE_CARD_BYTES));
165 //// Variable-length utilities
167 /* Calculate the address where the first object touching this page starts. */
168 static inline lispobj*
169 varyobj_scan_start(low_page_index_t page_index)
171 return (lispobj*)((char*)low_page_address(page_index+1)
172 - varyobj_page_scan_start_offset[page_index - FIRST_VARYOBJ_PAGE]
173 * (2 * N_WORD_BYTES));
176 /* Return the generation mask for objects headers on 'page_index'
177 including at most one object that starts before the page but ends on
178 or after it.
179 If the scan start is within the page, i.e. less than DOUBLEWORDS_PER_PAGE
180 (note that the scan start is measured relative to the page end) then
181 we don't need to OR in the generation byte from an extra object,
182 as all headers on the page are accounted for in the page generation mask.
183 Also an empty page (where scan start is zero) avoids looking
184 at the next page's first object by accident via the same test. */
185 unsigned char varyobj_page_gens_augmented(low_page_index_t page_index)
187 return (varyobj_page_scan_start_offset[page_index - FIRST_VARYOBJ_PAGE] <= DOUBLEWORDS_PER_PAGE
188 ? 0 : (1<<__immobile_obj_generation(varyobj_scan_start(page_index))))
189 | VARYOBJ_PAGE_GENS(page_index);
192 //// Fixed-length object allocator
194 /* Return the index of an immobile page that is probably not totally full,
195 starting with 'hint_page' and wrapping around.
196 'attributes' determine an eligible page.
197 *IMMOBILE-SPACE-FREE-POINTER* is updated to point beyond the found page
198 if it previously did not. */
// Lock-free search for a fixedobj page compatible with 'attributes' that
// likely has free space. May claim a brand-new (attr==0) page via CAS,
// in which case the Lisp-visible free pointer is advanced past it.
// Falls back to the least-populated matching page; loses() if none exist.
static int get_freeish_page(int hint_page, int attributes)
{
  int page = hint_page;
  lispobj new_free_pointer, old_free_pointer, actual_old;
  struct symbol * free_pointer_sym;
  int page_attr_packed;
  unsigned char best_genmask = 0xff;
  int best_page = -1;

  // Speed of this could be improved by keeping a linked list of pages
  // with any space available, headed by a field in the page struct.
  // This is totally lock-free / wait-free though, so it's really not
  // too shabby, because it never has to deal with a page-table mutex.
  do {
      page_attr_packed = fixedobj_pages[page].attr.packed;
      if (page_attr_packed == 0)
          // Virgin page: try to claim it by installing our attributes.
          if ((page_attr_packed =
               __sync_val_compare_and_swap(&fixedobj_pages[page].attr.packed,
                                           0, attributes)) == 0) {
              // Atomically assign MAX(old_free_pointer, new_free_pointer)
              // into the free pointer.
              free_pointer_sym = SYMBOL(IMMOBILE_FIXEDOBJ_FREE_POINTER);
              new_free_pointer = (lispobj)low_page_address(page+1);
              old_free_pointer = free_pointer_sym->value;
              while (new_free_pointer > old_free_pointer) {
                  actual_old =
                      __sync_val_compare_and_swap(&free_pointer_sym->value,
                                                  old_free_pointer,
                                                  new_free_pointer);
                  if (actual_old == old_free_pointer)
                      break;
                  old_free_pointer = actual_old;
              }
              return page;
          }
      if (ATTRIBUTES_MATCH_P(page_attr_packed, attributes)
          && !page_full_p(page)) {
          // gens <= 1 means data only in generation 0 (or empty).
          if (fixedobj_pages[page].gens <= 1) { // instant win
              return page;
          } else if (fixedobj_pages[page].gens < best_genmask) {
              best_genmask = fixedobj_pages[page].gens;
              best_page = page;
          }
      }
      if (++page >= FIRST_VARYOBJ_PAGE) page = 0;
  } while (page != hint_page);
  if (best_page >= 0)
      return best_page;
  lose("No more immobile pages available");
}
// Unused, but possibly will be for some kind of collision-avoidance scheme
// on claiming of new free pages.
long immobile_alloc_collisions;
255 /* Beginning at page index *hint, attempt to find space
256 for an object on a page with page_attributes. Write its header word
257 and return a C (native) pointer. The start page MUST have the proper
258 characteristisc, but might be totally full.
260 Precondition: Lisp has established a pseudo-atomic section. */
262 /* There is a slightly different algorithm that would probably be faster
263 than what is currently implemented:
264 - hint should be the address of a word that you try to claim
265 as an object header; it moves from high-to-low instead of low-to-high.
266 It's easier to compute the page base than the last valid object start
267 if there are some wasted words at the end due to page size not being
268 a perfect multiple of object size.
269 - you do a CAS into that word, and either suceed or fail
270 - if you succeed, subtract the object spacing and compare
271 to the page's base address, which can be computed by
272 masking. if the next address is above or equal to the page start,
273 store it in the hint, otherwise mark the page full */
// Allocate one fixed-size immobile object with the given attributes,
// writing 'header' into its header word, and return a native pointer.
// '*hint' is the page to try first and is updated when a new page is
// chosen. Lock-free: free cells are claimed by CAS on the header word.
// Precondition: caller is inside a pseudo-atomic section (see above).
lispobj alloc_immobile_obj(int page_attributes, lispobj header, int* hint)
{
  int page;
  lispobj word;
  char * page_data, * obj_ptr, * next_obj_ptr, * limit, * next_free;
  int spacing_in_bytes = OBJ_SPACING(page_attributes) << WORD_SHIFT;

  page = *hint;
#ifdef DEBUG
  gc_assert(low_page_address(page) < (void*)SYMBOL(IMMOBILE_FIXEDOBJ_FREE_POINTER)->value);
#endif
  do {
      page_data = low_page_address(page);
      obj_ptr = page_data + fixedobj_pages[page].free_index;
      limit = page_data + IMMOBILE_CARD_BYTES - spacing_in_bytes;
      while (obj_ptr <= limit) {
          word = *(lispobj*)obj_ptr;
          next_obj_ptr = obj_ptr + spacing_in_bytes;
          if (fixnump(word) // a fixnum marks free space
              && __sync_bool_compare_and_swap((lispobj*)obj_ptr,
                                              word, header)) {
              // The value formerly in the header word was the offset to
              // the next hole. Use it to update the freelist pointer.
              // Just slam it in.
              fixedobj_pages[page].free_index = next_obj_ptr + word - page_data;
              return (lispobj)obj_ptr;
          }
          // If some other thread updated the free_index
          // to a larger value, use that. (See example below)
          next_free = page_data + fixedobj_pages[page].free_index;
          obj_ptr = next_free > next_obj_ptr ? next_free : next_obj_ptr;
      }
      // Page exhausted: mark it full and move on to another candidate.
      set_page_full(page);
      page = get_freeish_page(page+1 >= FIRST_VARYOBJ_PAGE ? 0 : page+1,
                              page_attributes);
      *hint = page;
  } while (1);
}
315 Example: Conside the freelist initially pointing to word index 6
316 Threads A, and B, and C each want to claim index 6.
317 - Thread A wins and then is switched out immediately after the CAS.
318 - Thread B fails to claim cell 6, claims cell 12 instead.
319 - Thread C fails to claim a cell and is switched out immediately
320 after the CAS.
321 - Thread B writes the index of the next hole, cell 18 into the
322 page's freelist cell.
323 - Thread A wakes up and writes 12 into the freelist cell.
324 - Thread C wakes up sees 12 for next_offset. 12 is greater than 6,
325 so it sets its next probe location to 12.
326 It fails the fixnump(header) test.
327 - Thread C sees that next_offset is still 12,
328 so it skips by the page's object spacing instead, and will continue
329 to do so until hitting the end of the page.
332 //// The collector
// Pre-GC setup for immobile space: compute the in-use high-water marks,
// position the re-scan queue just past the last used varyobj page,
// unprotect all in-use pages, and mark pages that received allocations
// since the last GC as containing generation-0 (nursery) objects.
void update_immobile_nursery_bits()
{
  low_page_index_t page;
  lispobj fixedobj_free_ptr = SYMBOL(IMMOBILE_FIXEDOBJ_FREE_POINTER)->value;
  lispobj varyobj_free_ptr = SYMBOL(IMMOBILE_SPACE_FREE_POINTER)->value;

  // Find the high water marks for this GC scavenge phase
  // [avoid passing exactly IMMOBILE_SPACE_END, which has no page index]
  max_used_fixedobj_page = find_immobile_page_index((void*)(fixedobj_free_ptr-1));
  max_used_varyobj_page = find_immobile_page_index((void*)(varyobj_free_ptr-1));

  immobile_scav_queue = (unsigned int*)low_page_address(max_used_varyobj_page+1);
  gc_assert((IMMOBILE_SPACE_END - (uword_t)immobile_scav_queue) / sizeof(int)
            >= QCAPACITY);

  // Unprotect the in-use ranges. Any page could be written during scavenge
  os_protect((os_vm_address_t)IMMOBILE_SPACE_START,
             fixedobj_free_ptr - IMMOBILE_SPACE_START,
             OS_VM_PROT_ALL);

  // varyobj_free_ptr is typically not page-aligned - only by random chance
  // might it be. Additionally we need a page beyond that for the re-scan queue.
  os_vm_address_t limit = (char*)immobile_scav_queue + IMMOBILE_CARD_BYTES;
  os_protect((os_vm_address_t)(IMMOBILE_VARYOBJ_SUBSPACE_START),
             limit - (os_vm_address_t)IMMOBILE_VARYOBJ_SUBSPACE_START,
             OS_VM_PROT_ALL);

  for (page=0; page <= max_used_fixedobj_page ; ++page) {
      // any page whose free index changed contains nursery objects
      if (fixedobj_pages[page].free_index >> WORD_SHIFT !=
          fixedobj_pages[page].prior_gc_free_word_index)
          fixedobj_pages[page].gens |= 1;
#ifdef VERIFY_PAGE_GENS
      check_fixedobj_page(page);
#endif
  }
#ifdef VERIFY_PAGE_GENS
  check_varyobj_pages();
#endif
}
// Highest widetag that denotes a string; SIMPLE_CHARACTER_STRING_WIDETAG
// exists only on builds with full Unicode strings.
#ifdef SIMPLE_CHARACTER_STRING_WIDETAG
#define MAXIMUM_STRING_WIDETAG SIMPLE_CHARACTER_STRING_WIDETAG
#else
#define MAXIMUM_STRING_WIDETAG SIMPLE_BASE_STRING_WIDETAG
#endif
381 static inline boolean unboxed_obj_p(int widetag)
383 // This is not an exhaustive test for unboxed objects,
384 // but it's enough to avoid some unnecessary scavenging.
385 return (widetag >= SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG
386 && widetag <= MAXIMUM_STRING_WIDETAG
387 && widetag != SIMPLE_VECTOR_WIDETAG);
390 /* Turn a white object grey. Also enqueue the object for re-scan if required */
// Turn a white object grey: assign it to new_space, record new_space in
// its page's generation mask, and either clear the page's WP status
// (rescan=0, called before root scan) or enqueue the object for re-scan
// (rescan=1). Simple-funs are promoted via their enclosing code object.
void
promote_immobile_obj(lispobj *ptr, int rescan) // a native pointer
{
  if (widetag_of(*ptr) == SIMPLE_FUN_HEADER_WIDETAG)
    ptr = (lispobj*)code_obj_from_simple_fun((struct simple_fun*)ptr);
  gc_assert(__immobile_obj_gen_bits(ptr) == from_space);
  int pointerish = !unboxed_obj_p(widetag_of(*ptr));
  // Unboxed objects are marked VISITED immediately - nothing to scan.
  assign_generation(ptr, (pointerish ? 0 : IMMOBILE_OBJ_VISITED_FLAG) | new_space);
  low_page_index_t page_index = find_immobile_page_index(ptr);

  if (page_index >= FIRST_VARYOBJ_PAGE) {
      VARYOBJ_PAGE_GENS(page_index) |= 1<<new_space;
  } else {
      fixedobj_pages[page_index].gens |= 1<<new_space;
  }
  // If called from preserve_pointer(), then we haven't scanned immobile
  // roots yet, so we only need ensure that this object's page's WP bit
  // is cleared so that the page is not skipped during root scan.
  if (!rescan) {
      if (pointerish) {
          if (page_index >= FIRST_VARYOBJ_PAGE)
              varyobj_page_touched_bits[(page_index-FIRST_VARYOBJ_PAGE)/32]
                  |= 1 << (page_index & 31);
          else
              SET_WP_FLAG(page_index, WRITE_PROTECT_CLEARED);
      }
      return; // No need to enqueue.
  }

  // Do nothing if either we don't need to look for pointers in this object,
  // or the work queue has already overflowed, causing a full scan.
  if (!pointerish || immobile_scav_queue_count > QCAPACITY) return;

  // count is either less than or equal to QCAPACITY.
  // If equal, just bump the count to signify overflow.
  if (immobile_scav_queue_count < QCAPACITY) {
      immobile_scav_queue[immobile_scav_queue_head] =
          (uword_t)ptr & 0xFFFFFFFF; // Drop the high bits
      immobile_scav_queue_head = (immobile_scav_queue_head + 1) & (QCAPACITY - 1);
  }
  ++immobile_scav_queue_count;
}
434 /* If 'addr' points to an immobile object, then make the object
435 live by promotion. But if the object is not in the generation
436 being collected, do nothing */
/* If 'addr' points to (or into) an immobile object in from_space,
   conservatively keep that object alive by promoting it.
   For varyobj pages the header is found by searching from the page's scan
   start; for fixedobj pages it is computed directly from the page's
   object spacing. Fillers and non-from_space objects are ignored. */
void immobile_space_preserve_pointer(void* addr)
{
    low_page_index_t page_index = find_immobile_page_index(addr);
    if (page_index < 0)
        return;

    lispobj* header_addr;
    if (page_index >= FIRST_VARYOBJ_PAGE) {
        // Restrict addr to lie below IMMOBILE_SPACE_FREE_POINTER.
        // This way, if the gens byte is nonzero but there is
        // a final array acting as filler on the remainder of the
        // final page, we won't accidentally find that.
        lispobj* start;
        if ((lispobj)addr >= SYMBOL(IMMOBILE_SPACE_FREE_POINTER)->value
            || !(varyobj_page_gens_augmented(page_index) & (1<<from_space))
            || (start = varyobj_scan_start(page_index)) > (lispobj*)addr)
            return;
        header_addr = gc_search_space(start,
                                      native_pointer((lispobj)addr)+2 - start,
                                      (lispobj*)addr);
        if (!header_addr || immobile_filler_p(header_addr))
            return;
        gc_assert(other_immediate_lowtag_p(*header_addr));
    } else if (fixedobj_pages[page_index].gens & (1<<from_space)) {
        // Fixed-size pages: the object index follows directly from the
        // offset within the page and the page's object spacing.
        int obj_spacing = (page_obj_align(page_index) << WORD_SHIFT);
        int obj_index = ((uword_t)addr & (IMMOBILE_CARD_BYTES-1)) / obj_spacing;
        dprintf((logfile,"Pointer %p is to immobile page %d, object %d\n",
                 addr, page_index, obj_index));
        char* page_start_addr = (char*)((uword_t)addr & ~(IMMOBILE_CARD_BYTES-1));
        header_addr = (lispobj*)(page_start_addr + obj_index * obj_spacing);
        if (fixnump(*header_addr)) // a fixnum header marks free space
            return;
    } else {
        return;
    }
    if (__immobile_obj_gen_bits(header_addr) == from_space) {
        dprintf((logfile,"immobile obj @ %p (<- %p) is conservatively live\n",
                 header_addr, addr));
        promote_immobile_obj(header_addr, 0);
    }
}
479 // Loop over the newly-live objects, scavenging them for pointers.
480 // As with the ordinary gencgc algorithm, this uses almost no stack.
// Loop over the newly-live objects, scavenging them for pointers.
// As with the ordinary gencgc algorithm, this uses almost no stack.
// Worst-case fallback used when the re-scan queue overflowed: walk every
// page of both subspaces and scavenge each unvisited new_space object.
static void full_scavenge_immobile_newspace()
{
    page_index_t page;
    unsigned char bit = 1<<new_space;

    // Fixed-size object pages.

    for (page = 0; page <= max_used_fixedobj_page; ++page) {
        if (!(fixedobj_pages[page].gens & bit)) continue;
        // Skip amount within the loop is in bytes.
        int obj_spacing = page_obj_align(page) << WORD_SHIFT;
        int n_words = page_obj_size(page);
        lispobj* obj = low_page_address(page);
        lispobj* limit = (lispobj*)((char*)obj +
                                    IMMOBILE_CARD_BYTES - obj_spacing);
        for ( ; obj <= limit ; obj = (lispobj*)((char*)obj + obj_spacing) ) {
            if (!fixnump(*obj) && __immobile_obj_gen_bits(obj) == new_space) {
                set_visited(obj);
                scavenge(obj, n_words);
            }
        }
    }

    // Variable-size object pages

    page = FIRST_VARYOBJ_PAGE - 1; // Subtract 1 because of pre-increment
    while (1) {
        // Find the next page with anything in newspace.
        do {
            if (++page > max_used_varyobj_page) return;
        } while ((VARYOBJ_PAGE_GENS(page) & bit) == 0);
        lispobj* obj = varyobj_scan_start(page);
        do {
            lispobj* limit = (lispobj*)low_page_address(page) + WORDS_PER_PAGE;
            int widetag, n_words;
            for ( ; obj < limit ; obj += n_words ) {
                n_words = sizetab[widetag = widetag_of(*obj)](obj);
                if (__immobile_obj_gen_bits(obj) == new_space) {
                    set_visited(obj);
                    scavenge(obj, n_words);
                }
            }
            page = find_immobile_page_index(obj);
            // Bail out if exact absolute end of immobile space was reached.
            if (page < 0) return;
            // If 'page' should be scanned, then pick up where we left off,
            // without recomputing 'obj' but setting a higher 'limit'.
        } while (VARYOBJ_PAGE_GENS(page) & bit);
    }
}
532 /// Repeatedly scavenge immobile newspace work queue until we find no more
533 /// reachable objects within. (They might be in dynamic space though).
534 /// If queue overflow already happened, then a worst-case full scan is needed.
535 /// If it didn't, we try to drain the queue, hoping that overflow does
536 /// not happen while doing so.
537 /// The approach taken is more subtle than just dequeuing each item,
538 /// scavenging, and letting the outer 'while' loop take over.
539 /// That would be ok, but could cause more full scans than necessary.
540 /// Instead, since each entry in the queue is useful information
541 /// in the non-overflow condition, perform all the work indicated thereby,
542 /// rather than considering the queue discardable as soon as overflow happens.
543 /// Essentially we just have to capture the valid span of enqueued items,
544 /// because the queue state is inconsistent when 'count' exceeds 'capacity'.
// Drain the re-scan queue (see the long comment above): repeatedly either
// perform a full scan (on overflow) or process the valid span of enqueued
// objects, until no newly-reachable immobile objects remain.
void scavenge_immobile_newspace()
{
  while (immobile_scav_queue_count) {
      if (immobile_scav_queue_count > QCAPACITY) {
          // Overflowed: queue contents are inconsistent; do a full scan.
          immobile_scav_queue_count = 0;
          full_scavenge_immobile_newspace();
      } else {
          int queue_index_from = (immobile_scav_queue_head - immobile_scav_queue_count)
                               & (QCAPACITY - 1);
          int queue_index_to = immobile_scav_queue_head;
          int i = queue_index_from;
          // The termination condition can't be expressed as an inequality,
          // since the indices might be reversed due to wraparound.
          // To express as equality entails forcing at least one iteration
          // since the ending index might be the starting index.
          do {
              lispobj* obj = (lispobj*)(uword_t)immobile_scav_queue[i];
              i = (1 + i) & (QCAPACITY-1);
              // Only decrement the count if overflow did not happen.
              // The first iteration of this loop will decrement for sure,
              // but subsequent iterations might not.
              if (immobile_scav_queue_count <= QCAPACITY)
                  --immobile_scav_queue_count;
              if (!(__immobile_obj_gen_bits(obj) & IMMOBILE_OBJ_VISITED_FLAG)) {
                  set_visited(obj);
                  scavenge(obj, sizetab[widetag_of(*obj)](obj));
              }
          } while (i != queue_index_to);
      }
  }
}
577 // Return a page >= page_index having potential old->young pointers,
578 // or -1 if there isn't one.
// Return a page >= page_index having potential old->young pointers
// (i.e. a touched page whose augmented generation mask intersects
// 'genmask'), or -1 if there isn't one. Scans the touched-bits bitmap
// one 32-bit word at a time.
static int next_varyobj_root_page(unsigned int page_index,
                                  unsigned int end_bitmap_index,
                                  unsigned char genmask)
{
    unsigned int map_index = (page_index - FIRST_VARYOBJ_PAGE) / 32;
    if (map_index >= end_bitmap_index) return -1;
    int bit_index = page_index & 31;
    // Look only at bits of equal or greater weight than bit_index.
    unsigned int word = (0xFFFFFFFFU << bit_index) & varyobj_page_touched_bits[map_index];
    while (1) {
        if (word) {
            // ffs() returns the 1-based position of the lowest set bit.
            bit_index = ffs(word) - 1;
            page_index = FIRST_VARYOBJ_PAGE + map_index * 32 + bit_index;
            if (varyobj_page_gens_augmented(page_index) & genmask)
                return page_index;
            else {
                // Touched, but no generations of interest: clear the bit
                // in our local copy and keep searching this word.
                word ^= (1<<bit_index);
                continue;
            }
        }
        if (++map_index >= end_bitmap_index) return -1;
        word = varyobj_page_touched_bits[map_index];
    }
}
// Scavenge all immobile-space objects in generations min_gen..max_gen
// (inclusive) as roots, skipping write-protected/untouched pages, then
// drain the resulting newspace work queue.
void
scavenge_immobile_roots(generation_index_t min_gen, generation_index_t max_gen)
{
    // example: scavenging gens 2..6, the mask of root gens is #b1111100
    int genmask = ((1 << (max_gen - min_gen + 1)) - 1) << min_gen;

    low_page_index_t page;
    for (page = 0; page <= max_used_fixedobj_page ; ++page) {
        if (fixedobj_page_wp(page) || !(fixedobj_pages[page].gens & genmask))
            continue;
        int obj_spacing = page_obj_align(page) << WORD_SHIFT;
        int n_words = page_obj_size(page);
        lispobj* obj = low_page_address(page);
        lispobj* limit = (lispobj*)((char*)obj +
                                    IMMOBILE_CARD_BYTES - obj_spacing);
        int gen;
        // Immobile space can only contain objects with a header word,
        // no conses, so any fixnum where a header could be is not a live
        // object.
        do {
            if (!fixnump(*obj) && (genmask >> (gen=__immobile_obj_gen_bits(obj)) & 1)) {
                if (gen == new_space) { set_visited(obj); }
                scavenge(obj, n_words);
            }
        } while ((obj = (lispobj*)((char*)obj + obj_spacing)) <= limit);
    }

    // Variable-length object pages
    unsigned n_varyobj_pages = 1+max_used_varyobj_page-FIRST_VARYOBJ_PAGE;
    unsigned end_bitmap_index = (n_varyobj_pages+31)/32;
    page = next_varyobj_root_page(FIRST_VARYOBJ_PAGE, end_bitmap_index, genmask);
    while (page >= 0) {
        lispobj* obj = varyobj_scan_start(page);
        do {
            lispobj* limit = (lispobj*)low_page_address(page) + WORDS_PER_PAGE;
            int widetag, n_words, gen;
            for ( ; obj < limit ; obj += n_words ) {
                n_words = sizetab[widetag = widetag_of(*obj)](obj);
                if (genmask >> (gen=__immobile_obj_gen_bits(obj)) & 1) {
                    if (gen == new_space) { set_visited(obj); }
                    scavenge(obj, n_words);
                }
            }
            // Continue across page boundaries while the next page also
            // has interesting generations and is not write-protected.
            page = find_immobile_page_index(obj);
        } while (page > 0
                 && (VARYOBJ_PAGE_GENS(page) & genmask)
                 && varyobj_page_touched(page));
        page = next_varyobj_root_page(1+page, end_bitmap_index, genmask);
    }
    scavenge_immobile_newspace();
}
656 #include "genesis/layout.h"
// Size of a LAYOUT object in Lisp words.
#define LAYOUT_SIZE (sizeof (struct layout)/N_WORD_BYTES)

// As long as Lisp doesn't have any native allocators (vops and whatnot)
// it doesn't need to access these values.
int layout_page_hint, symbol_page_hint, fdefn_page_hint;
663 // For the three different page characteristics that we need,
664 // claim a page that works for those characteristics.
// For the three different page characteristics that we need (layouts,
// symbols, fdefns), claim one page per characteristic and record it as
// the starting hint for alloc_immobile_obj. Order matters: each call
// may claim a fresh page, so the three hints end up distinct.
void set_immobile_space_hints()
{
  // The allocator doesn't check whether each 'hint' points to an
  // expected kind of page, so we have to ensure up front that
  // allocations start on different pages. i.e. You can point to
  // a totally full page, but you can't point to a wrong page.
  // It doesn't work to just assign these to consecutive integers
  // without also updating the page attributes.

  // Object sizes must be multiples of 2 because the n_words value we pass
  // to scavenge() is gotten from the page attributes, and scavenge asserts
  // that the ending address is aligned to a doubleword boundary as expected.

  // LAYOUTs are 256-byte-aligned so that the low byte contains no information.
  // This makes it possible to recover a layout pointer from an instance header
  // by simply changing the low byte to instance-pointer-lowtag.
  // As a test of objects using larger-than-required alignment,
  // the 64-bit implementation uses 256-byte alignment for layouts,
  // even though the header can store all bits of the layout pointer.
  // The 32-bit implementation would also need somewhere different to store
  // the generation byte of each layout, which is a minor annoyance.
  layout_page_hint = get_freeish_page(0, MAKE_ATTR(256/N_WORD_BYTES, // spacing
                                                   CEILING(LAYOUT_SIZE,2),
                                                   0));
  symbol_page_hint = get_freeish_page(0, MAKE_ATTR(CEILING(SYMBOL_SIZE,2),
                                                   CEILING(SYMBOL_SIZE,2),
                                                   0));
  fdefn_page_hint = get_freeish_page(0, MAKE_ATTR(CEILING(FDEFN_SIZE,2),
                                                  CEILING(FDEFN_SIZE,2),
                                                  0));
}
// Post-GC: write-protect every page whose WP status allows it, coalescing
// adjacent protectable pages into single os_protect() calls. Also resets
// the re-scan queue and refreshes the allocation hints.
void write_protect_immobile_space()
{
    immobile_scav_queue = NULL;
    immobile_scav_queue_head = 0;

    set_immobile_space_hints();

    // Now find contiguous ranges of pages that are protectable,
    // minimizing the number of system calls as much as possible.
    int i, start = -1, end = -1; // inclusive bounds on page indices
    // Iterate downward so 'start' tracks the low end of the current run.
    for (i = max_used_fixedobj_page ; i >= 0 ; --i) {
        if (fixedobj_page_wp(i)) {
            if (end < 0) end = i;
            start = i;
        }
        if (end >= 0 && (!fixedobj_page_wp(i) || i == 0)) {
            os_protect(low_page_address(start),
                       IMMOBILE_CARD_BYTES * (1 + end - start),
                       OS_VM_PROT_READ|OS_VM_PROT_EXECUTE);
            start = end = -1;
        }
    }
#define varyobj_page_wp(x) !varyobj_page_touched(x)
    for (i = max_used_varyobj_page ; i >= FIRST_VARYOBJ_PAGE ; --i) {
        if (varyobj_page_wp(i)) {
            if (end < 0) end = i;
            start = i;
        }
        if (end >= 0 && (!varyobj_page_wp(i) || i == FIRST_VARYOBJ_PAGE)) {
            os_protect(low_page_address(start),
                       IMMOBILE_CARD_BYTES * (1 + end - start),
                       OS_VM_PROT_READ|OS_VM_PROT_EXECUTE);
            start = end = -1;
        }
    }
#undef varyobj_page_wp
}
735 // Scan range between start and end (exclusive) for old-to-young pointers.
736 // 'keep_gen' is the value of the generation byte of objects that were
737 // candidates to become garbage, but remain live after this gc.
738 // It will necessarily have the VISITED flag on.
739 // 'new_gen' is the generation number that those objects will have
740 // after collection, which is either the same generation or one higher,
741 // depending on the 'raise' flag for this GC cycle.
// Return 1 if any word in [obj, end) is a tagged pointer to an object
// younger than 'gen'. Objects in the scratch generation or carrying
// 'keep_gen' are treated as having generation 'new_gen', their
// post-collection generation. (See the comment block above.)
static int
range_points_to_younger_p(lispobj* obj, lispobj* end,
                          int gen, int keep_gen, int new_gen)
{
#ifdef DEBUG
  lispobj* __attribute__((unused)) saved_obj = obj, __attribute__((unused)) header = *obj;
#endif
    do {
        lispobj thing = *obj;
        if (is_lisp_pointer(thing)) {
            int to_page = find_page_index((void*)thing),
                to_gen = 255; // default: older than any real generation
            if (to_page >= 0) { // points to ordinary dynamic space
                to_gen = page_table[to_page].gen;
                if (to_gen == PSEUDO_STATIC_GENERATION+1) // scratch gen
                    to_gen = new_gen; // is actually this
            } else if (immobile_space_p(thing)) {
                // Processing the code-entry-points slot of a code component
                // requires the general variant of immobile_obj_gen_bits
                // because the pointed-to object is a simple-fun.
                to_gen = immobile_obj_gen_bits(native_pointer(thing));
                if (to_gen == keep_gen) // keep gen
                    to_gen = new_gen; // is actually this
            }
            if (to_gen < gen) {
                return 1; // yes, points to younger
            }
        }
    } while (++obj < end);
    return 0; // no, does not point to younger
}
774 // Scan a fixed-size object for old-to-young pointers.
775 // Since fixed-size objects are boxed and on known boundaries,
776 // we never start in the middle of random bytes, so the answer is exact.
777 static inline boolean
778 fixedobj_points_to_younger_p(lispobj* obj, int n_words,
779 int gen, int keep_gen, int new_gen)
781 return range_points_to_younger_p(obj+1, obj+n_words,
782 gen, keep_gen, new_gen);
// Return 1 if the variable-sized object at 'obj' contains a pointer to a
// younger object, restricting the scan to [page_begin, page_end) so the
// result reflects only the page of interest. Code components scan their
// boxed header plus each simple-fun's boxed region; unboxed arrays and
// strings trivially return 0.
static boolean
varyobj_points_to_younger_p(lispobj* obj, int gen, int keep_gen, int new_gen,
                            os_vm_address_t page_begin,
                            os_vm_address_t page_end) // upper (exclusive) bound
{
    lispobj *begin, *end, word = *obj;
    unsigned char widetag = widetag_of(word);
    if (widetag == CODE_HEADER_WIDETAG) { // usual case. Like scav_code_header()
        lispobj entry_point; /* tagged pointer to entry point */
        struct simple_fun *function_ptr; /* untagged pointer to entry point */
        for (entry_point = ((struct code*)obj)->entry_points;
             entry_point != NIL;
             entry_point = function_ptr->next) {
            function_ptr = (struct simple_fun *) native_pointer(entry_point);
            begin = SIMPLE_FUN_SCAV_START(function_ptr);
            end = begin + SIMPLE_FUN_SCAV_NWORDS(function_ptr);
            // Clip the simple-fun's scan range to the page of interest.
            if (page_begin > (os_vm_address_t)begin) begin = (lispobj*)page_begin;
            if (page_end < (os_vm_address_t)end) end = (lispobj*)page_end;
            if (end > begin
                && range_points_to_younger_p(begin, end, gen, keep_gen, new_gen))
                return 1;
        }
        begin = obj;
        end = obj + code_header_words(word); // exclusive bound
    } else if (widetag == SIMPLE_VECTOR_WIDETAG) {
        sword_t length = fixnum_value(((struct vector *)obj)->length);
        begin = obj;
        end = obj + CEILING(length + 2, 2); // +2 for header and length words
    } else if (widetag >= SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG &&
               widetag <= MAXIMUM_STRING_WIDETAG) {
        return 0; // unboxed data: cannot contain pointers
    } else {
        lose("Unexpected widetag @ %p", obj);
    }
    // Fallthrough: scan words from begin to end
    if (page_begin > (os_vm_address_t)begin) begin = (lispobj*)page_begin;
    if (page_end < (os_vm_address_t)end) end = (lispobj*)page_end;
    if (end > begin && range_points_to_younger_p(begin, end, gen, keep_gen, new_gen))
        return 1;
    return 0;
}
827 /// The next two functions are analogous to 'update_page_write_prot()'
828 /// but they differ in that they are "precise" - random code bytes that look
829 /// like pointers are not accidentally treated as pointers.
// If 'page' does not contain any object that points to an object
// younger than itself, then return true.
833 // This is called on pages that do not themselves contain objects of
834 // the generation being collected, but might contain pointers
835 // to younger generations, which we detect by a cleared WP status bit.
836 // The bit is cleared on any write, though, even of a non-pointer,
837 // so this unfortunately has to be tested much more often than we'd like.
838 static inline boolean can_wp_fixedobj_page(page_index_t page, int keep_gen, int new_gen)
840 int obj_spacing = page_obj_align(page) << WORD_SHIFT;
841 int obj_size_words = page_obj_size(page);
842 lispobj* obj = low_page_address(page);
843 lispobj* limit = (lispobj*)((char*)obj + IMMOBILE_CARD_BYTES - obj_spacing);
844 for ( ; obj <= limit ; obj = (lispobj*)((char*)obj + obj_spacing) )
845 if (!fixnump(*obj) && // an object header
846 fixedobj_points_to_younger_p(obj, obj_size_words,
847 __immobile_obj_generation(obj),
848 keep_gen, new_gen))
849 return 0;
850 return 1;
853 // To scan _only_ 'page' is impossible in general, but we can act like only
854 // one page was scanned by backing up to the first object whose end is on
855 // or after it, and then restricting points_to_younger within the boundaries.
856 // Doing it this way is probably much better than conservatively assuming
857 // that any word satisfying is_lisp_pointer() is a pointer.
858 static inline boolean can_wp_varyobj_page(page_index_t page, int keep_gen, int new_gen)
860 lispobj *begin = (lispobj*)low_page_address(page);
861 lispobj *end = begin + WORDS_PER_PAGE;
862 lispobj *obj = varyobj_scan_start(page);
863 for ( ; obj < end ; obj += sizetab[widetag_of(*obj)](obj) ) {
864 gc_assert(other_immediate_lowtag_p(*obj));
865 if (!immobile_filler_p(obj) &&
866 varyobj_points_to_younger_p(obj,
867 __immobile_obj_generation(obj),
868 keep_gen, new_gen,
869 (os_vm_address_t)begin,
870 (os_vm_address_t)end))
871 return 0;
873 return 1;
877 Sweep immobile space by zeroing the memory of trashed objects
878 and linking them into the freelist.
880 Possible improvements:
881 - If an entire page becomes nothing but holes, we could bzero it
882 instead of object-at-a-time clearing. But it's not known to be
883 so until after the sweep, so it would entail two passes per page,
884 one to mark holes and one to zero them.
885 - And perhaps bzero could be used on ranges of holes, because
886 in that case each hole's pointer to the next hole is zero as well.
/* Shared prologue for the two sweep phases. Binds, from the globals
   'from_space'/'new_space' and the enclosing function's 'raise' argument:
   the page-relevance mask, the gen byte meaning "alive" (keep_gen), the
   gen byte meaning "trash" (discard_gen), and the generation survivors
   are moved into (new_gen). */
#define SETUP_GENS() \
  /* Only care about pages with something in old or new space. */ \
  int relevant_genmask = (1 << from_space) | (1 << new_space); \
  /* Objects whose gen byte is 'keep_gen' are alive. */ \
  int keep_gen = IMMOBILE_OBJ_VISITED_FLAG | new_space; \
  /* Objects whose gen byte is 'from_space' are trash. */ \
  int discard_gen = from_space; \
  /* Moving non-garbage into either 'from_space' or 'from_space+1' */ \
  generation_index_t new_gen = from_space + (raise!=0)
// The new value of the page generation mask is computed as follows:
// If 'raise' = 1 then:
//     Nothing resides in 'from_space', and 'from_space+1' gains new objects
//     if and only if any objects on the page were retained.
// If 'raise' = 0 then:
//     Nothing resides in the scratch generation, and 'from_space'
//     has objects if and only if any objects were retained.
// N.B.: relies on 'any_kept' being either 0 or -1 (all one bits),
// so ANDing it with a generation bit yields either that bit or nothing.
#define COMPUTE_NEW_MASK(var, old) \
  int var = old & ~(1<<from_space); \
  if ( raise ) \
    var |= 1<<(from_space+1) & any_kept; \
  else \
    var = (var & ~(1<<new_space)) | (1<<from_space & any_kept)
// Sweep all fixed-size-object pages: zero the payloads of dead objects,
// relink holes into the per-page freelist, move survivors into 'new_gen',
// and recompute each page's generation mask and write-protect status.
static void
sweep_fixedobj_pages(int raise)
{
    char *page_base;
    lispobj *obj, *limit, *hole;
    // This will be needed for space accounting.
    // threads might fail to consume all the space on a page.
    // By storing in the page table the count of holes that really existed
    // at the start of the prior GC, and subtracting from that the number
    // that exist now, we know how much usable space was obtained (per page).
    int n_holes = 0;
    int word_idx;

    SETUP_GENS();

    low_page_index_t page;
    for (page = 0; page <= max_used_fixedobj_page; ++page) {
        // On pages that won't need manipulation of the freelist,
        // we try to do less work than for pages that need it.
        if (!(fixedobj_pages[page].gens & relevant_genmask)) {
            // Scan for old->young pointers, and WP if there are none.
            if (!fixedobj_page_wp(page) && fixedobj_pages[page].gens > 1
                && can_wp_fixedobj_page(page, keep_gen, new_gen))
                SET_WP_FLAG(page, WRITE_PROTECT);
            continue;
        }
        int obj_spacing = page_obj_align(page) << WORD_SHIFT;
        int obj_size_words = page_obj_size(page);
        page_base = low_page_address(page);
        limit = (lispobj*)(page_base + IMMOBILE_CARD_BYTES - obj_spacing);
        obj = (lispobj*)page_base;
        hole = NULL;  // most recently seen hole on this page, if any
        int any_kept = 0; // was anything moved to the kept generation
        n_holes = 0;

        // wp_it is 1 if we should try to write-protect it now.
        // If already write-protected, skip the tests.
        int wp_it = !fixedobj_page_wp(page);
        int gen;
        for ( ; obj <= limit ; obj = (lispobj*)((char*)obj + obj_spacing) ) {
            if (fixnump(*obj)) { // was already a hole
            trash_it:
                // re-link it into the new freelist
                if (hole)
                    // store the displacement from the end of the object
                    // at prev_hole to the start of this object.
                    *hole = (lispobj)((char*)obj - ((char*)hole + obj_spacing));
                else // this is the first seen hole on the page
                    // record the byte offset to that hole
                    fixedobj_pages[page].free_index = (char*)obj - page_base;
                hole = obj;
                n_holes ++;
            } else if ((gen = __immobile_obj_gen_bits(obj)) == discard_gen) { // trash
                // Zero the payload (all but the header word), then relink.
                for (word_idx=obj_size_words-1 ; word_idx > 0 ; --word_idx)
                    obj[word_idx] = 0;
                goto trash_it;
            } else if (gen == keep_gen) {
                // Survivor: move it into the target generation.
                assign_generation(obj, gen = new_gen);
#ifdef DEBUG
                gc_assert(!fixedobj_points_to_younger_p(obj, obj_size_words,
                                                        gen, keep_gen, new_gen));
#endif
                any_kept = -1;
            } else if (wp_it && fixedobj_points_to_younger_p(obj, obj_size_words,
                                                             gen, keep_gen, new_gen))
                wp_it = 0;  // found an old->young pointer: can't WP
        }
        if ( hole ) // terminate the chain of holes
            *hole = (lispobj)((char*)obj - ((char*)hole + obj_spacing));
        fixedobj_pages[page].prior_gc_free_word_index =
            fixedobj_pages[page].free_index >> WORD_SHIFT;

        COMPUTE_NEW_MASK(mask, fixedobj_pages[page].gens);
        if ( mask ) {
            fixedobj_pages[page].gens = mask;
            if (wp_it) {
                SET_WP_FLAG(page, WRITE_PROTECT);
                dprintf((logfile, "Lowspace: set WP on page %d\n", page));
            }
        } else {
            // Page is entirely garbage: reset its attributes wholesale.
            dprintf((logfile,"page %d is all garbage\n", page));
            fixedobj_pages[page].attr.packed = 0;
        }
#ifdef DEBUG
        check_fixedobj_page(page);
#endif
        dprintf((logfile,"page %d: %d holes\n", page, n_holes));
    }
}
1003 void verify_immobile_page_protection(int,int);
// Scan for freshly trashed objects and turn them into filler.
// Lisp is responsible for consuming the free space
// when it next allocates a variable-size object.
static void
sweep_varyobj_pages(int raise)
{
    SETUP_GENS();

    lispobj* free_pointer = (lispobj*)SYMBOL(IMMOBILE_SPACE_FREE_POINTER)->value;
    low_page_index_t page;
    for (page = FIRST_VARYOBJ_PAGE; page <= max_used_varyobj_page; ++page) {
        int genmask = VARYOBJ_PAGE_GENS(page);
        if (!(genmask & relevant_genmask)) { // Has nothing in oldspace or newspace.
            // Scan for old->young pointers, and WP if there are none.
            if (varyobj_page_touched(page)
                && varyobj_page_gens_augmented(page) > 1
                && can_wp_varyobj_page(page, keep_gen, new_gen))
                varyobj_page_touched_bits[(page - FIRST_VARYOBJ_PAGE)/32] &= ~(1<<(page & 31));
            continue;
        }
        lispobj* page_base = (lispobj*)low_page_address(page);
        lispobj* limit = page_base + WORDS_PER_PAGE;
        if (limit > free_pointer) limit = free_pointer;
        int any_kept = 0; // was anything moved to the kept generation
        // wp_it is 1 if we should try to write-protect it now.
        // If already write-protected, skip the tests.
        int wp_it = varyobj_page_touched(page);
        lispobj* obj = varyobj_scan_start(page);
        int size, gen;

        if (obj < page_base) {
            // An object whose tail is on this page, or which spans this page,
            // would have been promoted/kept while dealing with the page with
            // the object header. Therefore we don't need to consider that object,
            // * except * that we do need to consider whether it is an old object
            // pointing to a young object.
            if (wp_it // If we wanted to try write-protecting this page,
                // and the object starting before this page is strictly older
                // than the generation that we're moving retained objects into
                && (gen = __immobile_obj_gen_bits(obj)) > new_gen
                // and it contains an old->young pointer
                && varyobj_points_to_younger_p(obj, gen, keep_gen, new_gen,
                                               (os_vm_address_t)page_base,
                                               (os_vm_address_t)limit)) {
                wp_it = 0;
            }
            // We MUST skip this object in the sweep, because in the case of
            // non-promotion (raise=0), we could see an object in from_space
            // and believe it to be dead.
            obj += sizetab[widetag_of(*obj)](obj);
            // obj can't hop over this page. If it did, there would be no
            // headers on the page, and genmask would have been zero.
            gc_assert(obj < limit);
        }
        for ( ; obj < limit ; obj += size ) {
            lispobj word = *obj;
            size = sizetab[widetag_of(word)](obj);
            if (immobile_filler_p(obj)) { // do nothing
            } else if ((gen = __immobile_obj_gen_bits(obj)) == discard_gen) {
                if (size < 4)
                    lose("immobile object @ %p too small to free", obj);
                else { // Create a filler object.
                    // Fillers masquerade as code objects; debug_info chains
                    // them into the 'varyobj_holes' list.
                    struct code* code = (struct code*)obj;
                    code->header = 2<<N_WIDETAG_BITS | CODE_HEADER_WIDETAG;
                    code->code_size = make_fixnum((size - 2) * N_WORD_BYTES);
                    code->entry_points = NIL;
                    code->debug_info = varyobj_holes;
                    varyobj_holes = (lispobj)code;
                }
            } else if (gen == keep_gen) {
                assign_generation(obj, gen = new_gen);
#ifdef DEBUG
                gc_assert(!varyobj_points_to_younger_p(obj, gen, keep_gen, new_gen,
                                                       (os_vm_address_t)page_base,
                                                       (os_vm_address_t)limit));
#endif
                any_kept = -1;
            } else if (wp_it &&
                       varyobj_points_to_younger_p(obj, gen, keep_gen, new_gen,
                                                   (os_vm_address_t)page_base,
                                                   (os_vm_address_t)limit))
                wp_it = 0;
        }
        COMPUTE_NEW_MASK(mask, VARYOBJ_PAGE_GENS(page));
        VARYOBJ_PAGE_GENS(page) = mask;
        if ( mask && wp_it )
            varyobj_page_touched_bits[(page - FIRST_VARYOBJ_PAGE)/32] &= ~(1 << (page & 31));
    }
#ifdef DEBUG
    verify_immobile_page_protection(keep_gen, new_gen);
#endif
}
1098 static void compute_immobile_space_bound()
1100 int max;
1101 // find the highest page in use
1102 for (max = FIRST_VARYOBJ_PAGE-1 ; max >= 0 ; --max)
1103 if (fixedobj_pages[max].attr.parts.obj_size)
1104 break;
1105 max_used_fixedobj_page = max; // this is a page index, not the number of pages.
1106 SYMBOL(IMMOBILE_FIXEDOBJ_FREE_POINTER)->value =
1107 IMMOBILE_SPACE_START + IMMOBILE_CARD_BYTES*(1+max);
1109 for (max = (IMMOBILE_SPACE_SIZE/IMMOBILE_CARD_BYTES)-1 ;
1110 max >= FIRST_VARYOBJ_PAGE ; --max)
1111 if (VARYOBJ_PAGE_GENS(max))
1112 break;
1113 max_used_varyobj_page = max; // this is a page index, not the number of pages.
1116 // TODO: (Maybe this won't work. Not sure yet.) rather than use the
1117 // same 'raise' concept as in gencgc, each immobile object can store bits
1118 // indicating whether it has survived any GC at its current generation.
1119 // If it has, then it gets promoted next time, rather than all or nothing
1120 // being promoted from the generation getting collected.
// Sweep both immobile subspaces, then recompute the space bounds.
// 'raise' nonzero moves survivors into from_space+1; zero keeps them
// in from_space (see SETUP_GENS).
void
sweep_immobile_space(int raise)
{
    // The scavenge queue must be fully drained before sweeping.
    gc_assert(immobile_scav_queue_count == 0);
    sweep_fixedobj_pages(raise);
    sweep_varyobj_pages(raise);
    compute_immobile_space_bound();
}
1130 void gc_init_immobile()
1132 #ifdef DEBUG
1133 logfile = stderr;
1134 #endif
1135 int n_fixedobj_pages = FIRST_VARYOBJ_PAGE;
1136 int n_varyobj_pages = (IMMOBILE_SPACE_SIZE - IMMOBILE_FIXEDOBJ_SUBSPACE_SIZE)
1137 / IMMOBILE_CARD_BYTES;
1138 fixedobj_pages = calloc(n_fixedobj_pages, sizeof(struct fixedobj_page));
1139 gc_assert(fixedobj_pages);
1141 n_bitmap_elts = (n_varyobj_pages + 31) / 32;
1142 int request = n_bitmap_elts * sizeof (int)
1143 + n_varyobj_pages * (sizeof (short)+sizeof (char));
1144 char* varyobj_page_tables = malloc(request);
1145 gc_assert(varyobj_page_tables);
1146 memset(varyobj_page_tables, 0, request);
1147 varyobj_page_touched_bits = (unsigned int*)varyobj_page_tables;
1148 // The conservative value for 'touched' is 1.
1149 memset(varyobj_page_touched_bits, 0xff, n_bitmap_elts * sizeof (int));
1150 varyobj_page_scan_start_offset = (unsigned short*)(varyobj_page_touched_bits + n_bitmap_elts);
1151 varyobj_page_header_gens = (unsigned char*)(varyobj_page_scan_start_offset + n_varyobj_pages);
1154 /* Because the immobile page table is not dumped into a core image,
1155 we have to reverse-engineer the characteristics of each page,
1156 which means figuring out what the object spacing should be.
1157 This is not difficult, but is a bit of a kludge */
1159 static inline int immobile_obj_spacing(lispobj header_word, lispobj *obj,
1160 int actual_size)
1162 lispobj this_layout, layout_layout;
1164 // 64-bit build does not need to align layouts on 256-byte boundary.
1165 // But this is a proof-of-concept that should work on 32-bit build,
1166 // which would need the alignment if compact instance headers are used.
1167 if (widetag_of(header_word)==INSTANCE_HEADER_WIDETAG) {
1168 this_layout = instance_layout(obj);
1169 layout_layout = instance_layout(native_pointer(this_layout));
1170 // If this object's layout is layout-of-layout, then this is a layout,
1171 // hence this page must have object spacing of 256 bytes.
1172 if (this_layout == layout_layout)
1173 return 256 / N_WORD_BYTES;
1175 return actual_size; // in words
// Set the characteristics of each used page at image startup time.
// Called once per immobile subspace as the core file is mapped in;
// 'address' selects which subspace's metadata to rebuild.
void immobile_space_coreparse(uword_t address, uword_t len)
{
    int n_pages, word_idx, page;

    n_pages = (len + IMMOBILE_CARD_BYTES - 1) / IMMOBILE_CARD_BYTES;
    if (address == IMMOBILE_SPACE_START) {
        // Fixed-size subspace: derive each page's object size/spacing from
        // the first object header found on the page.
        for (page = 0 ; page < n_pages ; ++page) {
            lispobj* page_data = low_page_address(page);
            for (word_idx = 0 ; word_idx < WORDS_PER_PAGE ; ++word_idx) {
                lispobj* obj = page_data + word_idx;
                lispobj header = *obj;
                if (!fixnump(header)) {
                    gc_assert(other_immediate_lowtag_p(*obj));
                    fixedobj_pages[page].attr.parts.obj_size
                        = sizetab[widetag_of(header)](obj);
                    fixedobj_pages[page].attr.parts.obj_align
                        = immobile_obj_spacing(header, obj,
                                               fixedobj_pages[page].attr.parts.obj_size);
                    fixedobj_pages[page].attr.parts.flags = WRITE_PROTECT;
                    fixedobj_pages[page].gens |= 1 << __immobile_obj_gen_bits(obj);
                    break; // one header suffices to characterize the page
                }
            }
        }
    } else if (address == IMMOBILE_VARYOBJ_SUBSPACE_START) {
        // Variable-size subspace: linear scan, rebuilding gen masks,
        // scan-start offsets, and the chain of holes.
        lispobj* obj = (lispobj*)address;
        lispobj* limit = (lispobj*)(address + len);
        int n_words;
        low_page_index_t last_page = 0;
        for ( ; obj < limit ; obj += n_words ) {
            n_words = sizetab[widetag_of(*obj)](obj);
            // A zero second word flags the end-of-core padding region.
            if (obj[1] == 0 && (obj[0] == INSTANCE_HEADER_WIDETAG ||
                                obj[0] == 0)) {
                if (obj[0]) {
                    // Round up to the next immobile page.
                    lispobj page_end = CEILING((lispobj)obj, IMMOBILE_CARD_BYTES);
                    n_words = (lispobj*)page_end - obj;
                    obj[0] = SIMPLE_ARRAY_FIXNUM_WIDETAG;
                    obj[1] = make_fixnum(n_words - 2);
                } else {
                    // There are trailing zeros to fill the core file page.
                    // This happens when the next object is exactly aligned
                    // to an immobile page. There is no padding object.
                    gc_assert(((lispobj)obj & (IMMOBILE_CARD_BYTES-1)) == 0);
                }
                limit = obj;
                break;
            }
            if (immobile_filler_p(obj)) {
                // Holes were chained through the debug_info slot at save.
                // Just update the head of the chain.
                varyobj_holes = (lispobj)obj;
                continue;
            }
            low_page_index_t first_page = find_immobile_page_index(obj);
            last_page = find_immobile_page_index(obj+n_words-1);
            // Only the page with this object header gets a bit in its gen mask.
            VARYOBJ_PAGE_GENS(first_page) |= 1<<__immobile_obj_gen_bits(obj);
            // For each page touched by this object, set the page's
            // scan_start_offset, unless it was already set.
            int page;
            for (page = first_page ; page <= last_page ; ++page) {
                if (!varyobj_page_scan_start_offset[page - FIRST_VARYOBJ_PAGE]) {
                    long offset = (char*)low_page_address(page+1) - (char*)obj;
                    varyobj_page_scan_start_offset[page - FIRST_VARYOBJ_PAGE]
                        = offset >> (WORD_SHIFT + 1);
                }
            }
        }
        // Write-protect the pages occupied by the core file.
        // (There can be no inter-generation pointers.)
        int page;
        for (page = FIRST_VARYOBJ_PAGE ; page <= last_page ; ++page)
            varyobj_page_touched_bits[(page-FIRST_VARYOBJ_PAGE)/32] &= ~(1<<(page & 31));
        SYMBOL(IMMOBILE_SPACE_FREE_POINTER)->value = (lispobj)limit;
        compute_immobile_space_bound();
        write_protect_immobile_space();
    } else {
        lose("unknown immobile subspace");
    }
}
// Demote pseudo-static to highest normal generation
// so that all objects become eligible for collection.
void prepare_immobile_space_for_final_gc()
{
    int page;
    char* page_base;
    char* page_end = (char*)SYMBOL(IMMOBILE_FIXEDOBJ_FREE_POINTER)->value;

    // The list of holes need not be saved.
    SYMBOL(IMMOBILE_FREELIST)->value = NIL;

    // Fixed-size subspace: demote each pseudo-static object and update
    // the per-page generation masks to match.
    for (page_base = (char*)IMMOBILE_SPACE_START, page = 0 ;
         page_base < page_end ;
         page_base += IMMOBILE_CARD_BYTES, ++page) {
        unsigned char mask = fixedobj_pages[page].gens;
        if (mask & 1<<PSEUDO_STATIC_GENERATION) {
            int obj_spacing = page_obj_align(page) << WORD_SHIFT;
            lispobj* obj = (lispobj*)page_base;
            lispobj* limit = (lispobj*)(page_base + IMMOBILE_CARD_BYTES - obj_spacing);
            for ( ; obj <= limit ; obj = (lispobj*)((char*)obj + obj_spacing) ) {
                if (!fixnump(*obj)
                    && __immobile_obj_gen_bits(obj) == PSEUDO_STATIC_GENERATION)
                    assign_generation(obj, HIGHEST_NORMAL_GENERATION);
            }
            fixedobj_pages[page].gens = (mask & ~(1<<PSEUDO_STATIC_GENERATION))
                                        | 1<<HIGHEST_NORMAL_GENERATION;
        }
    }

    // Variable-size subspace: same demotion, object-by-object, then
    // patch up the page gen masks.
    lispobj* obj = (lispobj*)IMMOBILE_VARYOBJ_SUBSPACE_START;
    lispobj* limit = (lispobj*)SYMBOL(IMMOBILE_SPACE_FREE_POINTER)->value;
    for ( ; obj < limit ; obj += sizetab[widetag_of(*obj)](obj) ) {
        if (__immobile_obj_gen_bits(obj) == PSEUDO_STATIC_GENERATION)
            assign_generation(obj, HIGHEST_NORMAL_GENERATION);
    }
    int max_page = find_immobile_page_index(limit-1);
    for ( page = FIRST_VARYOBJ_PAGE ; page <= max_page ; ++page ) {
        int mask = VARYOBJ_PAGE_GENS(page);
        if (mask & (1<<PSEUDO_STATIC_GENERATION)) {
            VARYOBJ_PAGE_GENS(page) = (mask & ~(1<<PSEUDO_STATIC_GENERATION))
                                      | 1<<HIGHEST_NORMAL_GENERATION;
        }
    }
}
// Now once again promote all objects to pseudo-static just prior to save.
// 'coreparse' makes all pages in regular dynamic space pseudo-static.
// But since immobile objects store their generation, it must be done at save,
// or else it would have to be done on image restart
// which would require writing to a lot of pages for no reason.
void prepare_immobile_space_for_save()
{
    int page;
    char *page_base;
    char* page_end = (char*)SYMBOL(IMMOBILE_FIXEDOBJ_FREE_POINTER)->value;

    // Fixed-size subspace: promote every object header in place.
    for (page_base = (char*)IMMOBILE_SPACE_START, page = 0 ;
         page_base < page_end ;
         page_base += IMMOBILE_CARD_BYTES, ++page) {
        int obj_spacing = page_obj_align(page) << WORD_SHIFT;
        if (obj_spacing) { // zero spacing means the page was never used
            lispobj* obj = (lispobj*)page_base;
            lispobj* limit = (lispobj*)(page_base + IMMOBILE_CARD_BYTES - obj_spacing);
            for ( ; obj <= limit ; obj = (lispobj*)((char*)obj + obj_spacing) ) {
                if (!fixnump(*obj))
                    assign_generation(obj, PSEUDO_STATIC_GENERATION);
            }
        }
    }
    // Variable-size subspace: promote live objects; rebuild the hole chain
    // and scrub the unused space inside each filler.
    lispobj* obj = (lispobj*)IMMOBILE_VARYOBJ_SUBSPACE_START;
    lispobj* limit = (lispobj*)SYMBOL(IMMOBILE_SPACE_FREE_POINTER)->value;
    for ( varyobj_holes = 0 ; obj < limit ; obj += sizetab[widetag_of(*obj)](obj) ) {
        if (immobile_filler_p(obj)) {
            struct code* code = (struct code*)obj;
            code->debug_info = varyobj_holes;
            varyobj_holes = (lispobj)code;
            // 0-fill the unused space.
            int nwords = sizetab[widetag_of(*obj)](obj);
            memset(code->constants, 0,
                   (nwords * N_WORD_BYTES) - offsetof(struct code, constants));
        } else
            assign_generation(obj, PSEUDO_STATIC_GENERATION);
    }
    if ((lispobj)limit & (IMMOBILE_CARD_BYTES-1)) { // Last page is partially used.
        gc_assert(*limit == SIMPLE_ARRAY_FIXNUM_WIDETAG);
        // Write an otherwise illegal object at the free pointer.
        limit[0] = INSTANCE_HEADER_WIDETAG; // 0 payload length
        limit[1] = 0; // no layout
    }
}
1352 //// Interface
// SIGSEGV handler hook: if 'fault_addr' lies in immobile space, record
// the write (touched bit or WP flag), unprotect the card, and return 1.
// Returns 0 when the fault is not ours to handle.
int immobile_space_handle_wp_violation(void* fault_addr)
{
    low_page_index_t page_index = find_immobile_page_index(fault_addr);
    if (page_index < 0) return 0; // not in immobile space

    if (page_index >= FIRST_VARYOBJ_PAGE) {
        // The free pointer can move up or down. Attempting to insist that a WP
        // fault not occur above the free pointer (plus some slack) is not
        // threadsafe, so allow it anywhere. More strictness could be imparted
        // by tracking the max value attained by the free pointer.
        __sync_or_and_fetch(&varyobj_page_touched_bits[(page_index-FIRST_VARYOBJ_PAGE)/32],
                            1 << (page_index & 31));
    } else {
        // FIXME: a single bitmap of touched bits would make more sense,
        // and the _CLEARED flag doesn't achieve much if anything.
        if (!(fixedobj_pages[page_index].attr.parts.flags
              & (WRITE_PROTECT|WRITE_PROTECT_CLEARED)))
            return 0;
        SET_WP_FLAG(page_index, WRITE_PROTECT_CLEARED);
    }
    // Unprotect the whole card so the faulting write can proceed.
    os_protect((os_vm_address_t)((lispobj)fault_addr & ~(IMMOBILE_CARD_BYTES-1)),
               IMMOBILE_CARD_BYTES, OS_VM_PROT_ALL);
    return 1;
}
// Find the object that encloses pointer.
// Returns the object's base address, or NULL if 'pointer' is not within
// any allocated immobile object.
lispobj *
search_immobile_space(void *pointer)
{
    lispobj *start;

    if ((lispobj)pointer >= IMMOBILE_SPACE_START
        && (lispobj)pointer < SYMBOL(IMMOBILE_SPACE_FREE_POINTER)->value) {
        low_page_index_t page_index = find_immobile_page_index(pointer);
        if ((lispobj)pointer >= IMMOBILE_VARYOBJ_SUBSPACE_START) {
            // Variable-size subspace: linear search starting at the
            // page's scan-start object.
            start = (lispobj*)varyobj_scan_start(page_index);
            if (start > (lispobj*)pointer) return NULL;
            return (gc_search_space(start,
                                    (((lispobj*)pointer)+2)-start,
                                    (lispobj*)pointer));
        } else if ((lispobj)pointer < SYMBOL(IMMOBILE_FIXEDOBJ_FREE_POINTER)->value) {
            // Fixed-size subspace: the enclosing cell is computable
            // directly from the page's object spacing.
            char *page_base = (char*)((lispobj)pointer & ~(IMMOBILE_CARD_BYTES-1));
            int spacing = page_obj_align(page_index) << WORD_SHIFT;
            int index = ((char*)pointer - page_base) / spacing;
            char *begin = page_base + spacing * index;
            char *end = begin + (page_obj_size(page_index) << WORD_SHIFT);
            // Reject pointers into the wasted tail between size and spacing.
            if ((char*)pointer < end) return (lispobj*)begin;
        }
    }
    return NULL;
}
1407 // For coalescing holes, we need to scan backwards, which is done by
1408 // looking backwards for a page that contains the start of a
1409 // block of objects one of which must abut 'obj'.
1410 lispobj* find_preceding_object(lispobj* obj)
1412 int page = find_immobile_page_index(obj);
1413 while (1) {
1414 int offset = varyobj_page_scan_start_offset[page - FIRST_VARYOBJ_PAGE];
1415 if (offset) { // 0 means the page is empty.
1416 lispobj* start = varyobj_scan_start(page);
1417 if (start < obj) { // Scan from here forward
1418 while (1) {
1419 lispobj* end = start + sizetab[widetag_of(*start)](start);
1420 if (end == obj) return start;
1421 gc_assert(end < obj);
1422 start = end;
1426 if (page == FIRST_VARYOBJ_PAGE) {
1427 gc_assert(obj == low_page_address(FIRST_VARYOBJ_PAGE));
1428 return 0; // Predecessor does not exist
1430 --page;
1434 #include "genesis/vector.h"
1435 #include "genesis/instance.h"
// Allocate an immobile LAYOUT instance whose slot vector is copied from
// the Lisp vector 'slots'. Returns a tagged instance pointer.
lispobj alloc_layout(lispobj layout_layout, lispobj slots)
{
    struct vector* v = (struct vector*)native_pointer(slots);
    // If INSTANCE_DATA_START is 0, subtract 1 word for the header.
    // If 1, subtract 2 words: 1 for the header and 1 for the layout.
    if (fixnum_value(v->length) != (LAYOUT_SIZE - INSTANCE_DATA_START - 1))
        lose("bad arguments to alloc_layout");
    struct instance* l = (struct instance*)
      alloc_immobile_obj(MAKE_ATTR(256 / N_WORD_BYTES,
                                   CEILING(LAYOUT_SIZE,2),
                                   0),
#ifdef LISP_FEATURE_COMPACT_INSTANCE_HEADER
                         (layout_layout << 32) |
#endif
                         (LAYOUT_SIZE-1)<<8 | INSTANCE_HEADER_WIDETAG,
                         &layout_page_hint);
#ifndef LISP_FEATURE_COMPACT_INSTANCE_HEADER
    l->slots[0] = layout_layout;
#endif
    memcpy(&l->slots[INSTANCE_DATA_START], v->data,
           (LAYOUT_SIZE - INSTANCE_DATA_START - 1)*N_WORD_BYTES);

    // Possible efficiency win: make the "wasted" bytes after the layout into a
    // simple unboxed array so that heap-walking can skip in one step.
    // Probably only a performance issue for MAP-ALLOCATED-OBJECTS,
    // since scavenging knows to skip by the object alignment anyway.
    return (lispobj)l | INSTANCE_POINTER_LOWTAG;
}
1465 #include "genesis/symbol.h"
1466 lispobj alloc_sym(lispobj name, int kind)
1468 // In case we want different kinds of symbol pages (as was the hope)
1469 // to keep special variables apart from random trash.
1470 // i.e. symbols whose page is often written versus symbols
1471 // that exist only as monikers. This would minimize the number
1472 // of different pages that become touched between GC cycles.
1473 int* hint = &symbol_page_hint;
1474 struct symbol* s = (struct symbol*)
1475 alloc_immobile_obj(MAKE_ATTR(CEILING(SYMBOL_SIZE,2), // spacing
1476 CEILING(SYMBOL_SIZE,2), // size
1477 kind),
1478 (SYMBOL_SIZE-1)<<8 | SYMBOL_HEADER_WIDETAG,
1479 hint);
1480 s->value = UNBOUND_MARKER_WIDETAG;
1481 s->hash = 0;
1482 s->info = NIL;
1483 s->name = name;
1484 s->package = NIL;
1485 return (lispobj)s | OTHER_POINTER_LOWTAG;
1488 #include "genesis/fdefn.h"
// Allocate an immobile fdefn for 'name' with no function bound.
// Returns a tagged other-pointer.
lispobj alloc_fdefn(lispobj name)
{
    struct fdefn* f = (struct fdefn*)
      alloc_immobile_obj(MAKE_ATTR(CEILING(FDEFN_SIZE,2), // spacing
                                   CEILING(FDEFN_SIZE,2), // size
                                   0),
                         (FDEFN_SIZE-1)<<8 | FDEFN_WIDETAG,
                         &fdefn_page_hint);
    f->name = name;
    f->fun = NIL;
    f->raw_addr = 0;
    return (lispobj)f | OTHER_POINTER_LOWTAG;
}
// Debug check: every varyobj page whose touched bit is clear must truly
// contain no old->young pointers; lose() if the invariant is violated.
void verify_immobile_page_protection(int keep_gen, int new_gen)
{
    low_page_index_t page;
    lispobj* end = (lispobj*)SYMBOL(IMMOBILE_SPACE_FREE_POINTER)->value;
    low_page_index_t end_page = find_immobile_page_index((char*)end-1);
    lispobj* obj;

    for (page = FIRST_VARYOBJ_PAGE; page <= end_page; ++page) {
        if (!varyobj_page_touched(page)) {
            lispobj* page_begin = low_page_address(page);
            lispobj* page_end = page_begin + WORDS_PER_PAGE;
            // Assert that there are no old->young pointers.
            obj = varyobj_scan_start(page);
            // Never scan past the free pointer.
            // FIXME: It is supposed to work to scan past the free pointer
            // on the last page, but the allocator needs to plop an array header there,
            // and sometimes it doesn't.
            lispobj* varyobj_free_ptr = (lispobj*)(SYMBOL(IMMOBILE_SPACE_FREE_POINTER)->value);
            if (page_end > varyobj_free_ptr) page_end = varyobj_free_ptr;
            for ( ; obj < page_end ; obj += sizetab[widetag_of(*obj)](obj) ) {
                if (!immobile_filler_p(obj)
                    && varyobj_points_to_younger_p(obj, __immobile_obj_gen_bits(obj),
                                                   keep_gen, new_gen,
                                                   (char*)page_begin, (char*)page_end))
                    lose("page WP bit on page %d is wrong\n", page);
            }
        }
    }
}
1533 #ifdef VERIFY_PAGE_GENS
void check_fixedobj_page(int page)
{
    // Every page should have a 'gens' mask which exactly reflects
    // the aggregate over all objects on that page. Verify that invariant,
    // checking all pages, not just the ones below the free pointer.
    int genmask, obj_size, obj_spacing, i, all_ok = 1;
    lispobj *obj, *limit, header;
    int sees_younger = 0;

    obj_size = page_obj_size(page);
    obj_spacing = page_obj_align(page);
    obj = low_page_address(page);
    limit = obj + WORDS_PER_PAGE - obj_spacing;
    genmask = 0;
    if (obj_size == 0) {
        // An unused page must be entirely zero, with an empty gen mask.
        for (i=0; i<WORDS_PER_PAGE; ++i)
            gc_assert(obj[i]==0);
        gc_assert(fixedobj_pages[page].gens ==0);
        return;
    }
    // Accumulate the actual generation mask over all object headers,
    // noting whether any object holds an old->young pointer.
    for ( ; obj <= limit ; obj += obj_spacing ) {
        header = *obj;
        if (!fixnump(header)) {
            int gen = __immobile_obj_gen_bits(obj);
            gc_assert(0 <= gen && gen <= PSEUDO_STATIC_GENERATION);
            genmask |= 1<<gen;
            // keep_gen/new_gen of 0xff match nothing, so this asks only
            // "does obj point to anything strictly younger than itself?"
            if (fixedobj_points_to_younger_p(obj, obj_size, gen, 0xff, 0xff))
                sees_younger = 1;
        }
    }
    // It's not wrong if the gen0 bit is set spuriously, but it should only
    // happen at most once, on the first GC after image startup.
    // At all other times, the invariant should hold that if the freelist
    // indicated that space was available, and the new pointer differs,
    // then some gen0 object exists on the page.
    // The converse is true because of pseudo-atomicity of the allocator:
    // if some thread claimed a hole, then it also updated the freelist.
    // If it died before doing the latter, then the object allegedly created
    // was never really live, so won't contain any pointers.
    if (fixedobj_pages[page].gens != genmask
        && fixedobj_pages[page].gens != (genmask|1)) {
        fprintf(stderr, "Page #x%x @ %p: stored mask=%x actual=%x\n",
                page, low_page_address(page),
                fixedobj_pages[page].gens, genmask);
        all_ok = 0;
    }
    if (fixedobj_page_wp(page) && sees_younger) {
        fprintf(stderr, "Page #x%x @ %p: WP is wrong\n",
                page, low_page_address(page));
        all_ok = 0;
    }
    gc_assert(all_ok);
}
1588 int n_immobile_objects;
1589 int *immobile_objects, *immobile_objects_limit;
// Exact-match comparator for bsearch over the sorted object-address table.
// Compare by relational operators rather than subtraction: `*a - *b` can
// overflow int when the operands have opposite signs (undefined behavior,
// and a wrong sign even where it wraps). bsearch only examines the sign.
int comparator_eq(const void* a, const void* b) {
    int key = *(const int*)a, elt = *(const int*)b;
    return (key > elt) - (key < elt);
}
// Find the largest item less than or equal.
// (useful for finding the object that contains a given pointer)
// NOTE(review): the subtraction can overflow int for operands of opposite
// sign; addresses stored in this debug table presumably keep it in range —
// verify on 64-bit builds.
int comparator_le(const void* a, const void* b) {
    int diff = *(int*)a - *(int*)b;
    if (diff <= 0) return diff;
    // If looking to the right would see an item strictly greater
    // than the sought key, or there is nothing to the right,
    // then deem this an exact match.
    if (b == (void*)immobile_objects_limit || ((int*)b)[1] > *(int*)a) return 0;
    return 1;
}
// Find the smallest item greater than or equal.
// useful for finding the lowest item at or after a page base address.
// NOTE(review): same signed-subtraction overflow caveat as comparator_le.
int comparator_ge(const void* a, const void* b) {
    int diff = *(int*)a - *(int*)b;
    if (diff >= 0) return diff;
    // If looking to the left would see an item strictly less
    // than the sought key, or there is nothing to the left
    // then deem this an exact match.
    if (b == (void*)immobile_objects || ((int*)b)[-1] < *(int*)a) return 0;
    return -1;
}
/* Exhaustive consistency check of the variable-sized-object subspace:
 *  1. a linear walk from IMMOBILE_VARYOBJ_SUBSPACE_START sees only valid
 *     object headers and terminates exactly at the space free pointer,
 *  2. every page's stored scan-start resolves to a real object boundary
 *     (modulo filler "holes", whose skipping is tolerated), and
 *  3. every page's generation mask equals the union of the generation
 *     bits of the objects overlapping that page.
 * Fails hard via gc_assert()/lose() on any inconsistency. Debug-only:
 * allocates and frees a temporary array of all object header addresses. */
void check_varyobj_pages()
    // 1. Check that a linear scan sees only valid object headers,
    //    and that it terminates exactly at the space free pointer
    //    (the value of the symbol IMMOBILE_SPACE_FREE_POINTER).
    lispobj* obj = (lispobj*)IMMOBILE_VARYOBJ_SUBSPACE_START;
    lispobj* end = (lispobj*)SYMBOL(IMMOBILE_SPACE_FREE_POINTER)->value;
    low_page_index_t end_page = find_immobile_page_index((char*)end-1);
    // Count objects first; the count sizes the scratch array built below.
    n_immobile_objects = 0;
    while (obj < end) {
        lispobj word = *obj;
        gc_assert(other_immediate_lowtag_p(word));
        int n_words = sizetab[widetag_of(word)](obj);
        obj += n_words;
        ++n_immobile_objects;
    // The walk must land exactly on the free pointer, not overshoot it.
    gc_assert(obj == end);
    // 2. Check that all scan_start_offsets are plausible.
    // Begin by collecting all object header locations into an array;
    // each entry is an object address narrowed to an int (the bsearch
    // comparators above operate on these int-sized values).
    immobile_objects = calloc(n_immobile_objects, sizeof (lispobj));
    immobile_objects_limit = immobile_objects + n_immobile_objects - 1;
    obj = (lispobj*)IMMOBILE_VARYOBJ_SUBSPACE_START;
    int i = 0;
    while (obj < end) {
        immobile_objects[i++] = (lispobj)obj;
        lispobj word = *obj;
        int n_words = sizetab[widetag_of(word)](obj);
        obj += n_words;
    // Check that each page's scan start is a known immobile object
    // and that it is the right object.
    low_page_index_t page;
    for (page = FIRST_VARYOBJ_PAGE; page <= end_page; ++page) {
        lispobj page_addr = (lispobj)low_page_address(page);
        // Largest object header at or below the page base ...
        int* found_below = bsearch(&page_addr, immobile_objects, n_immobile_objects,
                                   sizeof (int), comparator_le);
        // ... and smallest object header at or above it.
        int* found_above = bsearch(&page_addr, immobile_objects, n_immobile_objects,
                                   sizeof (int), comparator_ge);
        int stored_scan_start = (int)(long)varyobj_scan_start(page);
        lispobj* scan_start_obj = (lispobj*)(long)*found_below;
        if (scan_start_obj != (lispobj*)(long)stored_scan_start) {
            //printf("page %d: found-below=%p stored=%p\n", page, scan_start_obj, stored_scan_start);
            while (immobile_filler_p(scan_start_obj)) {
                int nwords = sizetab[widetag_of(*scan_start_obj)](scan_start_obj);
                //            printf("skipping %d words to %p\n", nwords, scan_start_obj + nwords);
                scan_start_obj += nwords;
                // the stored scan start does not guarantee that it points
                // to a non-hole; we only assert that it *probably* does not.
                // As such, when computing the "correct" value, we allow
                // any value in between the legal bounding values for it.
                if ((int)(long)scan_start_obj == stored_scan_start)
                    break;
                // If you hit the free pointer, or run off the page,
                // then the page is completely empty.
                if (scan_start_obj == (lispobj*)SYMBOL(IMMOBILE_SPACE_FREE_POINTER)->value
                    || scan_start_obj >= (lispobj*)low_page_address(page+1)) {
                    scan_start_obj = low_page_address(page+1);
                    break;
            // After hole-skipping, the computed boundary must agree with
            // the stored one; otherwise the page metadata is corrupt.
            if (scan_start_obj != (lispobj*)(long)stored_scan_start)
                lose("page %d: stored_scan_start=%p does not match found %p\n",
                     page, stored_scan_start, *found_below);
        if (found_below != found_above) {
            // the object below must touch this page.
            // if it didn't, there should be a higher object below.
            lispobj* below = (lispobj*)(long)*found_below;
            int n_words = sizetab[widetag_of(*below)](below);
            lispobj* end = below + n_words;
            gc_assert(end > (lispobj*)page_addr);
    free(immobile_objects);
    // 3. The generation mask for each page is exactly the union
    //    of generation numbers of object headers on the page.
    for (page = FIRST_VARYOBJ_PAGE; page <= end_page; ++page) {
        if (!varyobj_page_scan_start_offset[page - FIRST_VARYOBJ_PAGE])
            continue; // page is all holes or never used
        obj = varyobj_scan_start(page);
        lispobj word = *obj;
        int n_words = sizetab[widetag_of(word)](obj);
        // Skip the first object if it doesn't start on this page.
        if (obj < (lispobj*)low_page_address(page)) obj += n_words;
        // Scan only up to the page end or the space free pointer,
        // whichever comes first.
        lispobj* limit = (lispobj*)low_page_address(page) + WORDS_PER_PAGE;
        lispobj* freeptr = (lispobj*)SYMBOL(IMMOBILE_SPACE_FREE_POINTER)->value;
        if (limit > freeptr) limit = freeptr;
        int mask = 0;
        for ( ; obj < limit ; obj += sizetab[widetag_of(*obj)](obj) ) {
            int gen = __immobile_obj_gen_bits(obj);
            if (immobile_filler_p(obj)) {
                // Filler (hole) objects must always be generation 0.
                gc_assert(gen == 0);
            } else {
                gc_assert(0 <= gen && gen <= PSEUDO_STATIC_GENERATION);
                mask |= 1 << gen;
            // For code objects, additionally verify the entry-point chain:
            // each entry point must be a tagged Lisp pointer to a simple-fun.
            if (widetag_of(*obj) == CODE_HEADER_WIDETAG) {
                lispobj entry_point; /* tagged pointer to entry point */
                struct simple_fun *function_ptr; /* untagged pointer to entry point */
                for (entry_point = ((struct code*)obj)->entry_points;
                     entry_point != NIL;
                     entry_point = function_ptr->next) {
                    function_ptr = (struct simple_fun *) native_pointer(entry_point);
                    gc_assert_verbose(is_lisp_pointer(entry_point),
                                      "Code %p entry point %p is not a lisp pointer.",
                                      obj, (void*)entry_point);
                    gc_assert(widetag_of(function_ptr->header)==SIMPLE_FUN_HEADER_WIDETAG);
        gc_assert(mask == VARYOBJ_PAGE_GENS(page));
1734 #endif