/*
 * Extension to GENCGC which provides for pages of objects
 * that are static in placement but subject to reclamation.
 */

/*
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 */

/*
 * TODO:
 * 1. Space accounting (GET-BYTES-CONSED etc)
 * 2. Heuristic for auto-trigger. (Can't yet because no space accounting)
 *    Currently happens with regular GC trigger mechanism.
 * 3. Specify space size on startup
 */
// Work around a bug in some llvm/clang versions affecting the memcpy
// call in defrag_immobile_space:
//
// When compiled with _FORTIFY_SOURCE nonzero, as seems to be the
// default, memcpy is a macro that expands to
// __builtin_memcpy_chk(dst, source, size, __builtin_object_size(...)).
//
// Now usually if the compiler knows that it does not know
// __builtin_object_size for the source of the copy, the
// __builtin_memcpy_chk call becomes plain old memcpy. But in the
// buggy case, the compiler is convinced that it does know the
// size. This shows up clearly in the disassembly, where the library
// routine receives a source size that was erroneously determined to
// be a compile-time constant 0. Thus the assertion failure is that
// you are reading from a range with 0 bytes in it.
//
// Defining _FORTIFY_LEVEL 0 disables the above macro and thus works
// around the problem. Since it is unclear which clang versions are
// affected, only apply the workaround for the known-bad version.
// Apply the workaround described above only to the known-bad clang 6.0.
// NOTE(review): the closing #endif was missing, leaving the conditional
// unterminated; restored it.
#if (defined(__clang__) && (__clang_major__ == 6) && (__clang_minor__ == 0))
#define _FORTIFY_SOURCE 0
#endif
49 #include "gc-internal.h"
50 #include "genesis/gc-tables.h"
51 #include "genesis/vector.h"
52 #include "forwarding-ptr.h"
// Index of the first page of the variable-length-object subspace, which
// immediately follows the fixed-size-object subspace in immobile space.
#define FIRST_VARYOBJ_PAGE (IMMOBILE_FIXEDOBJ_SUBSPACE_SIZE/(int)IMMOBILE_CARD_BYTES)
// Lisp words (resp. doublewords) per immobile-space card.
#define WORDS_PER_PAGE ((int)IMMOBILE_CARD_BYTES/N_WORD_BYTES)
#define DOUBLEWORDS_PER_PAGE (WORDS_PER_PAGE/2)

// In case of problems while debugging, this is selectable.
#define DEFRAGMENT_FIXEDOBJ_SUBSPACE 1

#undef VERIFY_PAGE_GENS

// NOTE(review): dprintf is unconditionally fprintf here; the debug on/off
// wrapper (and the declaration of 'logfile' used by dprintf callers) appears
// to have been lost from this copy — verify against upstream.
# define dprintf(arg) fprintf arg

// Inclusive bounds on highest in-use pages per subspace.
low_page_index_t max_used_fixedobj_page, max_used_varyobj_page;
// This table is for objects fixed in size, as opposed to variable-sized.
// (Immobile objects are naturally fixed in placement)
// NOTE(review): the union wrapper ('packed'/'parts'), the 'flags' field, and
// the trailing '} *fixedobj_pages;' declarator were elided in this copy; they
// are restored here because the accessor macros below reference
// attr.packed, attr.parts.flags, attr.parts.gens_, and fixedobj_pages[i].
struct fixedobj_page { // 12 bytes per page
    union immobile_page_attr {
      // All four attribute bytes viewed as one word, for atomic CAS.
      int packed;
      struct {
        // Write-protect status in the high 2 bits (see WRITE_PROTECT below).
        unsigned char flags;
        /* space per object in Lisp words. Can exceed obj_size
           to align on a larger boundary */
        unsigned char obj_align;
        unsigned char obj_size; /* in Lisp words, incl. header */
        /* Which generations have data on this page */
        unsigned char gens_; // a bitmap
      } parts;
    } attr;
    int free_index; // index is in bytes. 4 bytes
    short int prior_gc_free_word_index; // index is in words. 2 bytes
    /* page index of next page with same attributes */
    short int page_link; // 2 bytes
} *fixedobj_pages;
// Work queue of promoted immobile objects that still need scanning,
// placed just past the in-use portion of the varyobj subspace
// (see update_immobile_nursery_bits).
unsigned int* immobile_scav_queue;
// Next slot to write in the circular queue.
int immobile_scav_queue_head;
// Number of items enqueued; can exceed QCAPACITY on overflow.
// If overflowed, the queue is unusable until reset.
unsigned int immobile_scav_queue_count;
// Queue capacity in entries: one card's worth of 32-bit entries.
#define QCAPACITY (IMMOBILE_CARD_BYTES/sizeof(int))
// Shorthand: the per-page generation bitmap lives inside packed attributes.
#define gens attr.parts.gens_

// These are the high 2 bits of 'flags'
#define WRITE_PROTECT         0x80
#define WRITE_PROTECT_CLEARED 0x40

// Packing and unpacking attributes
// the low two flag bits are for write-protect status
#define MAKE_ATTR(spacing,size,flags) (((spacing)<<8)|((size)<<16)|flags)
#define OBJ_SPACING(attr) ((attr>>8) & 0xFF)

// Ignore the write-protect bits and the generations when comparing attributes
#define ATTRIBUTES_MATCH_P(page_attr,specified_attr) \
  ((page_attr & 0xFFFF3F) == specified_attr)
// Replace only the WP bits of 'flags', preserving the low 6 bits.
#define SET_WP_FLAG(index,flag) \
  fixedobj_pages[index].attr.parts.flags = (fixedobj_pages[index].attr.parts.flags & 0x3F) | flag

#define page_obj_align(i) fixedobj_pages[i].attr.parts.obj_align
#define page_obj_size(i) fixedobj_pages[i].attr.parts.obj_size
#define set_page_full(i) fixedobj_pages[i].free_index = IMMOBILE_CARD_BYTES
#define page_full_p(i) (fixedobj_pages[i].free_index >= (int)IMMOBILE_CARD_BYTES)
#define fixedobj_page_wp(i) (fixedobj_pages[i].attr.parts.flags & WRITE_PROTECT)
/// Variable-length pages:

// Array of inverted write-protect flags, 1 bit per page.
unsigned int* varyobj_page_touched_bits;
static int n_bitmap_elts; // length of array measured in 'int's

// Array of offsets backwards in double-lispwords from the page end
// to the lowest-addressed object touching the page. This offset can
// point to a hole, but we prefer that it not. If the offset is zero,
// the page has no object other than possibly a hole resulting
// from a freed object.
unsigned short* varyobj_page_scan_start_offset;

// Array of page generation masks
unsigned char* varyobj_page_header_gens;
// Holes to be stuffed back into the managed free list.
lispobj varyobj_holes;

// Both tables are indexed relative to the start of the varyobj subspace.
#define VARYOBJ_PAGE_GENS(x) varyobj_page_header_gens[x-FIRST_VARYOBJ_PAGE]
#define varyobj_page_touched(x) \
  ((varyobj_page_touched_bits[(x-FIRST_VARYOBJ_PAGE)/32] >> (x&31)) & 1)
// Debug-only consistency checkers, defined later in this file.
// NOTE(review): the closing #endif was missing in this copy; restored.
#ifdef VERIFY_PAGE_GENS
void check_fixedobj_page(low_page_index_t);
void check_varyobj_pages();
#endif
// Object header:  generation byte --|    |-- widetag
//                                   v    v
//                      0xzzzzzzzz GGzzzzww
//          arbitrary data  --------   ---- length in words
//
// There is a hard constraint on NUM_GENERATIONS, which is currently 8.
// (0..5=normal, 6=pseudostatic, 7=scratch)
// It could be as high as 16 for 32-bit words (wherein scratch=gen15)
// or 32 for 64-bits words (wherein scratch=gen31).
// In each case, the VISITED flag bit weight would need to be modified.
// Shifting a 1 bit left by the contents of the generation byte
// must not overflow a register.
169 #ifdef LISP_FEATURE_LITTLE_ENDIAN
170 static inline void assign_generation(lispobj
* obj
, generation_index_t gen
)
172 ((generation_index_t
*)obj
)[3] = gen
;
174 // Turn a grey node black.
175 static inline void set_visited(lispobj
* obj
)
177 gc_dcheck(__immobile_obj_gen_bits(obj
) == new_space
);
178 ((generation_index_t
*)obj
)[3] |= IMMOBILE_OBJ_VISITED_FLAG
;
181 #error "Need to define assign_generation() for big-endian"
185 low_page_address(low_page_index_t page_num
)
187 return ((void*)IMMOBILE_SPACE_START
+ (page_num
* IMMOBILE_CARD_BYTES
));
190 //// Variable-length utilities
192 /* Calculate the address where the first object touching this page starts. */
193 static inline lispobj
*
194 varyobj_scan_start(low_page_index_t page_index
)
196 return (lispobj
*)((char*)low_page_address(page_index
+1)
197 - varyobj_page_scan_start_offset
[page_index
- FIRST_VARYOBJ_PAGE
]
198 * (2 * N_WORD_BYTES
));
201 /* Return the generation mask for objects headers on 'page_index'
202 including at most one object that starts before the page but ends on
204 If the scan start is within the page, i.e. less than DOUBLEWORDS_PER_PAGE
205 (note that the scan start is measured relative to the page end) then
206 we don't need to OR in the generation byte from an extra object,
207 as all headers on the page are accounted for in the page generation mask.
208 Also an empty page (where scan start is zero) avoids looking
209 at the next page's first object by accident via the same test. */
210 unsigned char varyobj_page_gens_augmented(low_page_index_t page_index
)
212 return (varyobj_page_scan_start_offset
[page_index
- FIRST_VARYOBJ_PAGE
] <= DOUBLEWORDS_PER_PAGE
213 ? 0 : (1<<__immobile_obj_generation(varyobj_scan_start(page_index
))))
214 | VARYOBJ_PAGE_GENS(page_index
);
217 //// Fixed-length object allocator
219 /* Return the index of an immobile page that is probably not totally full,
220 starting with 'hint_page' and wrapping around.
221 'attributes' determine an eligible page.
222 *IMMOBILE-SPACE-FREE-POINTER* is updated to point beyond the found page
223 if it previously did not. */
225 static int get_freeish_page(int hint_page
, int attributes
)
227 int page
= hint_page
;
228 lispobj new_free_pointer
, old_free_pointer
, actual_old
;
229 struct symbol
* free_pointer_sym
;
230 int page_attr_packed
;
231 unsigned char best_genmask
= 0xff;
234 // Speed of this could be improved by keeping a linked list of pages
235 // with any space available, headed by a field in the page struct.
236 // This is totally lock-free / wait-free though, so it's really not
237 // too shabby, because it never has to deal with a page-table mutex.
239 page_attr_packed
= fixedobj_pages
[page
].attr
.packed
;
240 if (page_attr_packed
== 0)
241 if ((page_attr_packed
=
242 __sync_val_compare_and_swap(&fixedobj_pages
[page
].attr
.packed
,
243 0, attributes
)) == 0) {
244 // Atomically assign MAX(old_free_pointer, new_free_pointer)
245 // into the free pointer.
246 free_pointer_sym
= SYMBOL(IMMOBILE_FIXEDOBJ_FREE_POINTER
);
247 new_free_pointer
= (lispobj
)low_page_address(page
+1);
248 old_free_pointer
= free_pointer_sym
->value
;
249 while (new_free_pointer
> old_free_pointer
) {
251 __sync_val_compare_and_swap(&free_pointer_sym
->value
,
254 if (actual_old
== old_free_pointer
)
256 old_free_pointer
= actual_old
;
260 if (ATTRIBUTES_MATCH_P(page_attr_packed
, attributes
)
261 && !page_full_p(page
)) {
262 // Try to avoid new objects on pages with any pseudo-static objects,
263 // because then touching the young object forces scanning the page,
264 // which is unfortunate if most things on it were untouched.
265 if (fixedobj_pages
[page
].gens
< (1<<PSEUDO_STATIC_GENERATION
)) {
268 } else if (fixedobj_pages
[page
].gens
< best_genmask
) {
269 best_genmask
= fixedobj_pages
[page
].gens
;
273 if (++page
>= FIRST_VARYOBJ_PAGE
) page
= 0;
274 } while (page
!= hint_page
);
277 lose("No more immobile pages available");
// Unused, but possibly will be for some kind of collision-avoidance scheme
// on claiming of new free pages.
long immobile_alloc_collisions;
284 /* Beginning at page index *hint, attempt to find space
285 for an object on a page with page_attributes. Write its header word
286 and return a C (native) pointer. The start page MUST have the proper
287 characteristisc, but might be totally full.
289 Precondition: Lisp has established a pseudo-atomic section. */
291 /* There is a slightly different algorithm that would probably be faster
292 than what is currently implemented:
293 - hint should be the address of a word that you try to claim
294 as an object header; it moves from high-to-low instead of low-to-high.
295 It's easier to compute the page base than the last valid object start
296 if there are some wasted words at the end due to page size not being
297 a perfect multiple of object size.
298 - you do a CAS into that word, and either suceed or fail
299 - if you succeed, subtract the object spacing and compare
300 to the page's base address, which can be computed by
301 masking. if the next address is above or equal to the page start,
302 store it in the hint, otherwise mark the page full */
304 lispobj
alloc_immobile_obj(int page_attributes
, lispobj header
, int* hint
)
308 char * page_data
, * obj_ptr
, * next_obj_ptr
, * limit
, * next_free
;
309 int spacing_in_bytes
= OBJ_SPACING(page_attributes
) << WORD_SHIFT
;
312 gc_dcheck(low_page_address(page
) < (void*)SYMBOL(IMMOBILE_FIXEDOBJ_FREE_POINTER
)->value
);
314 page_data
= low_page_address(page
);
315 obj_ptr
= page_data
+ fixedobj_pages
[page
].free_index
;
316 limit
= page_data
+ IMMOBILE_CARD_BYTES
- spacing_in_bytes
;
317 while (obj_ptr
<= limit
) {
318 word
= *(lispobj
*)obj_ptr
;
319 next_obj_ptr
= obj_ptr
+ spacing_in_bytes
;
320 if (fixnump(word
) // a fixnum marks free space
321 && __sync_bool_compare_and_swap((lispobj
*)obj_ptr
,
323 // The value formerly in the header word was the offset to
324 // the next hole. Use it to update the freelist pointer.
326 fixedobj_pages
[page
].free_index
= next_obj_ptr
+ word
- page_data
;
327 return (lispobj
)obj_ptr
;
329 // If some other thread updated the free_index
330 // to a larger value, use that. (See example below)
331 next_free
= page_data
+ fixedobj_pages
[page
].free_index
;
332 obj_ptr
= next_free
> next_obj_ptr
? next_free
: next_obj_ptr
;
335 page
= get_freeish_page(page
+1 >= FIRST_VARYOBJ_PAGE
? 0 : page
+1,
/*
Example: Consider the freelist initially pointing to word index 6.
Threads A, B, and C each want to claim index 6.
- Thread A wins and then is switched out immediately after the CAS.
- Thread B fails to claim cell 6, claims cell 12 instead.
- Thread C fails to claim a cell and is switched out immediately
  after the CAS.
- Thread B writes the index of the next hole, cell 18, into the
  page's freelist cell.
- Thread A wakes up and writes 12 into the freelist cell.
- Thread C wakes up sees 12 for next_offset. 12 is greater than 6,
  so it sets its next probe location to 12.
  It fails the fixnump(header) test.
- Thread C sees that next_offset is still 12,
  so it skips by the page's object spacing instead, and will continue
  to do so until hitting the end of the page.
*/
361 void update_immobile_nursery_bits()
363 low_page_index_t page
;
364 lispobj fixedobj_free_ptr
= SYMBOL(IMMOBILE_FIXEDOBJ_FREE_POINTER
)->value
;
365 lispobj varyobj_free_ptr
= SYMBOL(IMMOBILE_SPACE_FREE_POINTER
)->value
;
367 // Find the high water marks for this GC scavenge phase
368 // [avoid passing exactly IMMOBILE_SPACE_END, which has no page index]
369 max_used_fixedobj_page
= find_immobile_page_index((void*)(fixedobj_free_ptr
-1));
370 max_used_varyobj_page
= find_immobile_page_index((void*)(varyobj_free_ptr
-1));
372 immobile_scav_queue
= (unsigned int*)low_page_address(max_used_varyobj_page
+1);
373 gc_assert((IMMOBILE_SPACE_END
- (uword_t
)immobile_scav_queue
) / sizeof(int)
376 // Unprotect the in-use ranges. Any page could be written during scavenge
377 os_protect((os_vm_address_t
)IMMOBILE_SPACE_START
,
378 fixedobj_free_ptr
- IMMOBILE_SPACE_START
,
381 // varyobj_free_ptr is typically not page-aligned - only by random chance
382 // might it be. Additionally we need a page beyond that for the re-scan queue.
383 os_vm_address_t limit
= (char*)immobile_scav_queue
+ IMMOBILE_CARD_BYTES
;
384 os_protect((os_vm_address_t
)(IMMOBILE_VARYOBJ_SUBSPACE_START
),
385 limit
- (os_vm_address_t
)IMMOBILE_VARYOBJ_SUBSPACE_START
,
388 for (page
=0; page
<= max_used_fixedobj_page
; ++page
) {
389 // any page whose free index changed contains nursery objects
390 if (fixedobj_pages
[page
].free_index
>> WORD_SHIFT
!=
391 fixedobj_pages
[page
].prior_gc_free_word_index
)
392 fixedobj_pages
[page
].gens
|= 1;
393 #ifdef VERIFY_PAGE_GENS
394 check_fixedobj_page(page
);
397 #ifdef VERIFY_PAGE_GENS
398 check_varyobj_pages();
402 /* Turn a white object grey. Also enqueue the object for re-scan if required */
404 promote_immobile_obj(lispobj
*ptr
, int rescan
) // a native pointer
406 if (widetag_of(*ptr
) == SIMPLE_FUN_HEADER_WIDETAG
)
407 ptr
= fun_code_header(ptr
);
408 gc_assert(__immobile_obj_gen_bits(ptr
) == from_space
);
409 int pointerish
= !unboxed_obj_widetag_p(widetag_of(*ptr
));
410 assign_generation(ptr
, (pointerish
? 0 : IMMOBILE_OBJ_VISITED_FLAG
) | new_space
);
411 low_page_index_t page_index
= find_immobile_page_index(ptr
);
413 if (page_index
>= FIRST_VARYOBJ_PAGE
) {
414 VARYOBJ_PAGE_GENS(page_index
) |= 1<<new_space
;
416 fixedobj_pages
[page_index
].gens
|= 1<<new_space
;
418 // If called from preserve_pointer(), then we haven't scanned immobile
419 // roots yet, so we only need ensure that this object's page's WP bit
420 // is cleared so that the page is not skipped during root scan.
423 if (page_index
>= FIRST_VARYOBJ_PAGE
)
424 varyobj_page_touched_bits
[(page_index
-FIRST_VARYOBJ_PAGE
)/32]
425 |= 1 << (page_index
& 31);
427 SET_WP_FLAG(page_index
, WRITE_PROTECT_CLEARED
);
429 return; // No need to enqueue.
432 // Do nothing if either we don't need to look for pointers in this object,
433 // or the work queue has already overflowed, causing a full scan.
434 if (!pointerish
|| immobile_scav_queue_count
> QCAPACITY
) return;
436 // count is either less than or equal to QCAPACITY.
437 // If equal, just bump the count to signify overflow.
438 if (immobile_scav_queue_count
< QCAPACITY
) {
439 immobile_scav_queue
[immobile_scav_queue_head
] =
440 (uword_t
)ptr
& 0xFFFFFFFF; // Drop the high bits
441 immobile_scav_queue_head
= (immobile_scav_queue_head
+ 1) & (QCAPACITY
- 1);
443 ++immobile_scav_queue_count
;
446 /* If 'addr' points to an immobile object, then make the object
447 live by promotion. But if the object is not in the generation
448 being collected, do nothing */
449 void immobile_space_preserve_pointer(void* addr
)
451 low_page_index_t page_index
= find_immobile_page_index(addr
);
455 lispobj
* object_start
;
457 if (page_index
>= FIRST_VARYOBJ_PAGE
) {
458 // Restrict addr to lie below IMMOBILE_SPACE_FREE_POINTER.
459 // This way, if the gens byte is nonzero but there is
460 // a final array acting as filler on the remainder of the
461 // final page, we won't accidentally find that.
463 promote
= (lispobj
)addr
< SYMBOL(IMMOBILE_SPACE_FREE_POINTER
)->value
464 && (varyobj_page_gens_augmented(page_index
) & (1<<from_space
)) != 0
465 && (scan_start
= varyobj_scan_start(page_index
)) <= (lispobj
*)addr
466 && (object_start
= gc_search_space(scan_start
, addr
)) != 0
467 && !immobile_filler_p(object_start
)
468 && (instruction_ptr_p(addr
, object_start
)
469 || properly_tagged_descriptor_p(addr
, object_start
));
470 } else if (fixedobj_pages
[page_index
].gens
& (1<<from_space
)) {
471 int obj_spacing
= (page_obj_align(page_index
) << WORD_SHIFT
);
472 int obj_index
= ((uword_t
)addr
& (IMMOBILE_CARD_BYTES
-1)) / obj_spacing
;
473 dprintf((logfile
,"Pointer %p is to immobile page %d, object %d\n",
474 addr
, page_index
, obj_index
));
475 char* page_start_addr
= (char*)((uword_t
)addr
& ~(IMMOBILE_CARD_BYTES
-1));
476 object_start
= (lispobj
*)(page_start_addr
+ obj_index
* obj_spacing
);
477 promote
= !fixnump(*object_start
)
478 && (lispobj
*)addr
< object_start
+ page_obj_size(page_index
)
479 && properly_tagged_descriptor_p(addr
, object_start
);
481 if (promote
&& __immobile_obj_gen_bits(object_start
) == from_space
) {
482 dprintf((logfile
,"immobile obj @ %p (<- %p) is conservatively live\n",
484 promote_immobile_obj(object_start
, 0);
488 // Loop over the newly-live objects, scavenging them for pointers.
489 // As with the ordinary gencgc algorithm, this uses almost no stack.
490 static void full_scavenge_immobile_newspace()
493 unsigned char bit
= 1<<new_space
;
495 // Fixed-size object pages.
497 for (page
= 0; page
<= max_used_fixedobj_page
; ++page
) {
498 if (!(fixedobj_pages
[page
].gens
& bit
)) continue;
499 // Skip amount within the loop is in bytes.
500 int obj_spacing
= page_obj_align(page
) << WORD_SHIFT
;
501 lispobj
* obj
= low_page_address(page
);
502 // Use an inclusive, not exclusive, limit. On pages with dense packing
503 // (i.e. non-LAYOUT), if the object size does not evenly divide the page
504 // size, it is wrong to examine memory at an address which could be
505 // an object start, but for the fact that it runs off the page boundary.
506 // On the other hand, unused words hold 0, so it's kind of ok to read them.
507 lispobj
* limit
= (lispobj
*)((char*)obj
+
508 IMMOBILE_CARD_BYTES
- obj_spacing
);
509 for ( ; obj
<= limit
; obj
= (lispobj
*)((char*)obj
+ obj_spacing
) ) {
510 if (!fixnump(*obj
) && __immobile_obj_gen_bits(obj
) == new_space
) {
512 lispobj header
= *obj
;
513 scavtab
[widetag_of(header
)](obj
, header
);
518 // Variable-size object pages
520 page
= FIRST_VARYOBJ_PAGE
- 1; // Subtract 1 because of pre-increment
522 // Find the next page with anything in newspace.
524 if (++page
> max_used_varyobj_page
) return;
525 } while ((VARYOBJ_PAGE_GENS(page
) & bit
) == 0);
526 lispobj
* obj
= varyobj_scan_start(page
);
528 lispobj
* limit
= (lispobj
*)low_page_address(page
) + WORDS_PER_PAGE
;
530 for ( ; obj
< limit
; obj
+= n_words
) {
531 lispobj header
= *obj
;
532 if (__immobile_obj_gen_bits(obj
) == new_space
) {
534 n_words
= scavtab
[widetag_of(header
)](obj
, header
);
536 n_words
= sizetab
[widetag_of(header
)](obj
);
539 page
= find_immobile_page_index(obj
);
540 // Bail out if exact absolute end of immobile space was reached.
541 if (page
< 0) return;
542 // If 'page' should be scanned, then pick up where we left off,
543 // without recomputing 'obj' but setting a higher 'limit'.
544 } while (VARYOBJ_PAGE_GENS(page
) & bit
);
548 /// Repeatedly scavenge immobile newspace work queue until we find no more
549 /// reachable objects within. (They might be in dynamic space though).
550 /// If queue overflow already happened, then a worst-case full scan is needed.
551 /// If it didn't, we try to drain the queue, hoping that overflow does
552 /// not happen while doing so.
553 /// The approach taken is more subtle than just dequeuing each item,
554 /// scavenging, and letting the outer 'while' loop take over.
555 /// That would be ok, but could cause more full scans than necessary.
556 /// Instead, since each entry in the queue is useful information
557 /// in the non-overflow condition, perform all the work indicated thereby,
558 /// rather than considering the queue discardable as soon as overflow happens.
559 /// Essentially we just have to capture the valid span of enqueued items,
560 /// because the queue state is inconsistent when 'count' exceeds 'capacity'.
561 void scavenge_immobile_newspace()
563 while (immobile_scav_queue_count
) {
564 if (immobile_scav_queue_count
> QCAPACITY
) {
565 immobile_scav_queue_count
= 0;
566 full_scavenge_immobile_newspace();
568 int queue_index_from
= (immobile_scav_queue_head
- immobile_scav_queue_count
)
570 int queue_index_to
= immobile_scav_queue_head
;
571 int i
= queue_index_from
;
572 // The termination condition can't be expressed as an inequality,
573 // since the indices might be reversed due to wraparound.
574 // To express as equality entails forcing at least one iteration
575 // since the ending index might be the starting index.
577 lispobj
* obj
= (lispobj
*)(uword_t
)immobile_scav_queue
[i
];
578 i
= (1 + i
) & (QCAPACITY
-1);
579 // Only decrement the count if overflow did not happen.
580 // The first iteration of this loop will decrement for sure,
581 // but subsequent iterations might not.
582 if (immobile_scav_queue_count
<= QCAPACITY
)
583 --immobile_scav_queue_count
;
584 if (!(__immobile_obj_gen_bits(obj
) & IMMOBILE_OBJ_VISITED_FLAG
)) {
586 lispobj header
= *obj
;
587 scavtab
[widetag_of(header
)](obj
, header
);
589 } while (i
!= queue_index_to
);
594 // Return a page >= page_index having potential old->young pointers,
595 // or -1 if there isn't one.
596 static int next_varyobj_root_page(unsigned int page_index
,
597 unsigned int end_bitmap_index
,
598 unsigned char genmask
)
600 unsigned int map_index
= (page_index
- FIRST_VARYOBJ_PAGE
) / 32;
601 if (map_index
>= end_bitmap_index
) return -1;
602 int bit_index
= page_index
& 31;
603 // Look only at bits of equal or greater weight than bit_index.
604 unsigned int word
= (0xFFFFFFFFU
<< bit_index
) & varyobj_page_touched_bits
[map_index
];
607 bit_index
= ffs(word
) - 1;
608 page_index
= FIRST_VARYOBJ_PAGE
+ map_index
* 32 + bit_index
;
609 if (varyobj_page_gens_augmented(page_index
) & genmask
)
612 word
^= (1<<bit_index
);
616 if (++map_index
>= end_bitmap_index
) return -1;
617 word
= varyobj_page_touched_bits
[map_index
];
622 scavenge_immobile_roots(generation_index_t min_gen
, generation_index_t max_gen
)
624 // example: scavenging gens 2..6, the mask of root gens is #b1111100
625 int genmask
= ((1 << (max_gen
- min_gen
+ 1)) - 1) << min_gen
;
627 low_page_index_t page
;
628 for (page
= 0; page
<= max_used_fixedobj_page
; ++page
) {
629 if (fixedobj_page_wp(page
) || !(fixedobj_pages
[page
].gens
& genmask
))
631 int obj_spacing
= page_obj_align(page
) << WORD_SHIFT
;
632 lispobj
* obj
= low_page_address(page
);
633 lispobj
* limit
= (lispobj
*)((char*)obj
+
634 IMMOBILE_CARD_BYTES
- obj_spacing
);
636 // Immobile space can only contain objects with a header word,
637 // no conses, so any fixnum where a header could be is not a live
640 if (!fixnump(*obj
) && (genmask
>> (gen
=__immobile_obj_gen_bits(obj
)) & 1)) {
641 if (gen
== new_space
) { set_visited(obj
); }
642 lispobj header
= *obj
;
643 scavtab
[widetag_of(header
)](obj
, header
);
645 } while ((obj
= (lispobj
*)((char*)obj
+ obj_spacing
)) <= limit
);
648 // Variable-length object pages
649 unsigned n_varyobj_pages
= 1+max_used_varyobj_page
-FIRST_VARYOBJ_PAGE
;
650 unsigned end_bitmap_index
= (n_varyobj_pages
+31)/32;
651 page
= next_varyobj_root_page(FIRST_VARYOBJ_PAGE
, end_bitmap_index
, genmask
);
653 lispobj
* obj
= varyobj_scan_start(page
);
655 lispobj
* limit
= (lispobj
*)low_page_address(page
) + WORDS_PER_PAGE
;
657 for ( ; obj
< limit
; obj
+= n_words
) {
658 lispobj header
= *obj
;
659 if (genmask
>> (gen
=__immobile_obj_gen_bits(obj
)) & 1) {
660 if (gen
== new_space
) { set_visited(obj
); }
661 n_words
= scavtab
[widetag_of(header
)](obj
, header
);
663 n_words
= sizetab
[widetag_of(header
)](obj
);
666 page
= find_immobile_page_index(obj
);
668 && (VARYOBJ_PAGE_GENS(page
) & genmask
)
669 && varyobj_page_touched(page
));
670 page
= next_varyobj_root_page(1+page
, end_bitmap_index
, genmask
);
672 scavenge_immobile_newspace();
#include "genesis/layout.h"
// Size of a LAYOUT instance in Lisp words.
#define LAYOUT_SIZE (sizeof (struct layout)/N_WORD_BYTES)
// LAYOUTs are 256-byte-aligned so the low byte of a layout pointer
// carries no information (see set_immobile_space_hints below).
#define LAYOUT_ALIGN 256 /*bytes*/
// Tagged pointers to the bootstrap layouts at their fixed addresses
// (the 3rd and 4th LAYOUT_ALIGN slots of immobile space).
#define LAYOUT_OF_LAYOUT  ((IMMOBILE_SPACE_START+2*LAYOUT_ALIGN)|INSTANCE_POINTER_LOWTAG)
#define LAYOUT_OF_PACKAGE ((IMMOBILE_SPACE_START+3*LAYOUT_ALIGN)|INSTANCE_POINTER_LOWTAG)
// As long as Lisp doesn't have any native allocators (vops and whatnot)
// it doesn't need to access these values.
// Per-object-kind page hints consumed by alloc_immobile_obj.
int layout_page_hint, symbol_page_hint, fdefn_page_hint;
685 // For the three different page characteristics that we need,
686 // claim a page that works for those characteristics.
687 void set_immobile_space_hints()
689 // The allocator doesn't check whether each 'hint' points to an
690 // expected kind of page, so we have to ensure up front that
691 // allocations start on different pages. i.e. You can point to
692 // a totally full page, but you can't point to a wrong page.
693 // It doesn't work to just assign these to consecutive integers
694 // without also updating the page attributes.
696 // Object sizes must be multiples of 2 because the n_words value we pass
697 // to scavenge() is gotten from the page attributes, and scavenge asserts
698 // that the ending address is aligned to a doubleword boundary as expected.
700 // LAYOUTs are 256-byte-aligned so that the low byte contains no information.
701 // This makes it possible to recover a layout pointer from an instance header
702 // by simply changing the low byte to instance-pointer-lowtag.
703 // As a test of objects using larger-than-required alignment,
704 // the 64-bit implementation uses 256-byte alignment for layouts,
705 // even though the header can store all bits of the layout pointer.
706 // The 32-bit implementation would also need somewhere different to store
707 // the generation byte of each layout, which is a minor annoyance.
708 layout_page_hint
= get_freeish_page(0, MAKE_ATTR(LAYOUT_ALIGN
/ N_WORD_BYTES
, // spacing
709 CEILING(LAYOUT_SIZE
,2),
711 symbol_page_hint
= get_freeish_page(0, MAKE_ATTR(CEILING(SYMBOL_SIZE
,2),
712 CEILING(SYMBOL_SIZE
,2),
714 fdefn_page_hint
= get_freeish_page(0, MAKE_ATTR(CEILING(FDEFN_SIZE
,2),
715 CEILING(FDEFN_SIZE
,2),
719 void write_protect_immobile_space()
721 immobile_scav_queue
= NULL
;
722 immobile_scav_queue_head
= 0;
724 set_immobile_space_hints();
726 // Now find contiguous ranges of pages that are protectable,
727 // minimizing the number of system calls as much as possible.
728 int i
, start
= -1, end
= -1; // inclusive bounds on page indices
729 for (i
= max_used_fixedobj_page
; i
>= 0 ; --i
) {
730 if (fixedobj_page_wp(i
)) {
731 if (end
< 0) end
= i
;
734 if (end
>= 0 && (!fixedobj_page_wp(i
) || i
== 0)) {
735 os_protect(low_page_address(start
),
736 IMMOBILE_CARD_BYTES
* (1 + end
- start
),
737 OS_VM_PROT_READ
|OS_VM_PROT_EXECUTE
);
741 #define varyobj_page_wp(x) !varyobj_page_touched(x)
742 for (i
= max_used_varyobj_page
; i
>= FIRST_VARYOBJ_PAGE
; --i
) {
743 if (varyobj_page_wp(i
)) {
744 if (end
< 0) end
= i
;
747 if (end
>= 0 && (!varyobj_page_wp(i
) || i
== FIRST_VARYOBJ_PAGE
)) {
748 os_protect(low_page_address(start
),
749 IMMOBILE_CARD_BYTES
* (1 + end
- start
),
750 OS_VM_PROT_READ
|OS_VM_PROT_EXECUTE
);
754 #undef varyobj_page_wp
757 // Scan range between start and end (exclusive) for old-to-young pointers.
758 // 'keep_gen' is the value of the generation byte of objects that were
759 // candidates to become garbage, but remain live after this gc.
760 // It will necessarily have the VISITED flag on.
761 // 'new_gen' is the generation number that those objects will have
762 // after collection, which is either the same generation or one higher,
763 // depending on the 'raise' flag for this GC cycle.
765 range_points_to_younger_p(lispobj
* obj
, lispobj
* end
,
766 int gen
, int keep_gen
, int new_gen
)
769 lispobj
* __attribute__((unused
)) saved_obj
= obj
, __attribute__((unused
)) header
= *obj
;
772 lispobj thing
= *obj
;
773 if (is_lisp_pointer(thing
)) {
774 int to_page
= find_page_index((void*)thing
),
776 if (to_page
>= 0) { // points to ordinary dynamic space
777 to_gen
= page_table
[to_page
].gen
;
778 if (to_gen
== PSEUDO_STATIC_GENERATION
+1) // scratch gen
779 to_gen
= new_gen
; // is actually this
780 } else if (immobile_space_p(thing
)) {
781 // Processing the code-entry-points slot of a code component
782 // requires the general variant of immobile_obj_gen_bits
783 // because the pointed-to object is a simple-fun.
784 to_gen
= immobile_obj_gen_bits(native_pointer(thing
));
785 if (to_gen
== keep_gen
) // keep gen
786 to_gen
= new_gen
; // is actually this
789 return 1; // yes, points to younger
792 } while (++obj
< end
);
793 return 0; // no, does not point to younger
796 // Scan a fixed-size object for old-to-young pointers.
797 // Since fixed-size objects are boxed and on known boundaries,
798 // we never start in the middle of random bytes, so the answer is exact.
799 static inline boolean
800 fixedobj_points_to_younger_p(lispobj
* obj
, int n_words
,
801 int gen
, int keep_gen
, int new_gen
)
803 unsigned char widetag
= widetag_of(*obj
);
804 lispobj
__attribute__((unused
)) funobj
[1], layout
[1];
808 #ifdef LISP_FEATURE_IMMOBILE_CODE
810 funobj
[0] = fdefn_raw_referent((struct fdefn
*)obj
);
811 return range_points_to_younger_p(funobj
, funobj
+1, gen
, keep_gen
, new_gen
)
812 || range_points_to_younger_p(obj
+1, obj
+3, gen
, keep_gen
, new_gen
);
814 #ifdef LISP_FEATURE_COMPACT_INSTANCE_HEADER
815 case INSTANCE_HEADER_WIDETAG
:
816 case FUNCALLABLE_INSTANCE_HEADER_WIDETAG
:
817 layout
[0] = instance_layout(obj
);
818 if (range_points_to_younger_p(layout
, layout
+1, gen
, keep_gen
, new_gen
))
820 lbitmap
= ((struct layout
*)native_pointer(layout
[0]))->bitmap
;
821 if (lbitmap
!= make_fixnum(-1)) {
822 gc_assert(fixnump(lbitmap
)); // No bignums (yet)
823 sword_t bitmap
= (sword_t
)lbitmap
>> N_FIXNUM_TAG_BITS
;
824 lispobj
* where
= obj
+ 1;
825 for ( ; --n_words
; ++where
, bitmap
>>= 1 )
826 if ((bitmap
& 1) != 0 &&
827 range_points_to_younger_p(where
, where
+1, gen
, keep_gen
, new_gen
))
831 // FALLTHROUGH_INTENDED
834 return range_points_to_younger_p(obj
+1, obj
+n_words
, gen
, keep_gen
, new_gen
);
838 varyobj_points_to_younger_p(lispobj
* obj
, int gen
, int keep_gen
, int new_gen
,
839 os_vm_address_t page_begin
,
840 os_vm_address_t page_end
) // upper (exclusive) bound
842 lispobj
*begin
, *end
, word
= *obj
;
843 unsigned char widetag
= widetag_of(word
);
844 if (widetag
== CODE_HEADER_WIDETAG
) { // usual case. Like scav_code_header()
845 for_each_simple_fun(i
, function_ptr
, (struct code
*)obj
, 0, {
846 begin
= SIMPLE_FUN_SCAV_START(function_ptr
);
847 end
= begin
+ SIMPLE_FUN_SCAV_NWORDS(function_ptr
);
848 if (page_begin
> (os_vm_address_t
)begin
) begin
= (lispobj
*)page_begin
;
849 if (page_end
< (os_vm_address_t
)end
) end
= (lispobj
*)page_end
;
851 && range_points_to_younger_p(begin
, end
, gen
, keep_gen
, new_gen
))
854 begin
= obj
+ 1; // skip the header
855 end
= obj
+ code_header_words(word
); // exclusive bound on boxed slots
856 } else if (widetag
== SIMPLE_VECTOR_WIDETAG
) {
857 sword_t length
= fixnum_value(((struct vector
*)obj
)->length
);
858 begin
= obj
+ 2; // skip the header and length
859 end
= obj
+ CEILING(length
+ 2, 2);
860 } else if (unboxed_obj_widetag_p(widetag
)) {
863 lose("Unexpected widetag @ %p", obj
);
865 // Fallthrough: scan words from begin to end
866 if (page_begin
> (os_vm_address_t
)begin
) begin
= (lispobj
*)page_begin
;
867 if (page_end
< (os_vm_address_t
)end
) end
= (lispobj
*)page_end
;
868 if (end
> begin
&& range_points_to_younger_p(begin
, end
, gen
, keep_gen
, new_gen
))
873 /// The next two functions are analogous to 'update_page_write_prot()'
874 /// but they differ in that they are "precise" - random code bytes that look
875 /// like pointers are not accidentally treated as pointers.
877 // If 'page' does not contain any objects that points to an object
878 // younger than themselves, then return true.
879 // This is called on pages that do not themselves contain objects of
880 // the generation being collected, but might contain pointers
881 // to younger generations, which we detect by a cleared WP status bit.
882 // The bit is cleared on any write, though, even of a non-pointer,
883 // so this unfortunately has to be tested much more often than we'd like.
// Return true if 'page' (a fixedobj page) contains no object holding a
// pointer to an object younger than itself, i.e. the page may be
// write-protected again. Called on pages that hold nothing of the
// generation being collected but whose WP bit was cleared by a write —
// possibly a write of a non-pointer, hence the re-scan.
// NOTE(review): interior lines were missing from this extraction and have
// been restored from context; verify against upstream.
static inline boolean
can_wp_fixedobj_page(page_index_t page, int keep_gen, int new_gen)
{
    // Object size and alignment are uniform across a fixedobj page.
    int obj_spacing = page_obj_align(page) << WORD_SHIFT;
    int obj_size_words = page_obj_size(page);
    lispobj* obj = low_page_address(page);
    // 'limit' is the start of the last object slot that fits on the page.
    lispobj* limit = (lispobj*)((char*)obj + IMMOBILE_CARD_BYTES - obj_spacing);
    for ( ; obj <= limit ; obj = (lispobj*)((char*)obj + obj_spacing) )
        if (!fixnump(*obj) && // an object header (a fixnum marks a hole)
            fixedobj_points_to_younger_p(obj, obj_size_words,
                                         __immobile_obj_generation(obj),
                                         keep_gen, new_gen))
            return 0; // found an old->young pointer; can't protect
    return 1;
}
899 // To scan _only_ 'page' is impossible in general, but we can act like only
900 // one page was scanned by backing up to the first object whose end is on
901 // or after it, and then restricting points_to_younger within the boundaries.
902 // Doing it this way is probably much better than conservatively assuming
903 // that any word satisfying is_lisp_pointer() is a pointer.
// Return true if 'page' (a varyobj page) contains no old->young pointer,
// so that it may be write-protected again.
// To scan _only_ 'page' we back up to the first object whose end is on or
// after the page start (varyobj_scan_start) and restrict the pointer scan
// to the page's own address range.
// NOTE(review): interior lines were missing from this extraction and have
// been restored from context; verify against upstream.
static inline boolean
can_wp_varyobj_page(page_index_t page, int keep_gen, int new_gen)
{
    lispobj *begin = (lispobj*)low_page_address(page);
    lispobj *end = begin + WORDS_PER_PAGE; // exclusive upper bound
    lispobj *obj = varyobj_scan_start(page);
    for ( ; obj < end ; obj += sizetab[widetag_of(*obj)](obj) ) {
        gc_assert(other_immediate_lowtag_p(*obj));
        if (!immobile_filler_p(obj) && // holes can't point anywhere
            varyobj_points_to_younger_p(obj,
                                        __immobile_obj_generation(obj),
                                        keep_gen, new_gen,
                                        // restrict the scan to this page
                                        (os_vm_address_t)begin,
                                        (os_vm_address_t)end))
            return 0;
    }
    return 1;
}
923 Sweep immobile space by zeroing the memory of trashed objects
924 and linking them into the freelist.
926 Possible improvements:
927 - If an entire page becomes nothing but holes, we could bzero it
928 instead of object-at-a-time clearing. But it's not known to be
929 so until after the sweep, so it would entail two passes per page,
930 one to mark holes and one to zero them.
931 - And perhaps bzero could be used on ranges of holes, because
932 in that case each hole's pointer to the next hole is zero as well.
935 #define SETUP_GENS() \
936 /* Only care about pages with something in old or new space. */ \
937 int relevant_genmask = (1 << from_space) | (1 << new_space); \
938 /* Objects whose gen byte is 'keep_gen' are alive. */ \
939 int keep_gen = IMMOBILE_OBJ_VISITED_FLAG | new_space; \
940 /* Objects whose gen byte is 'from_space' are trash. */ \
941 int discard_gen = from_space; \
942 /* Moving non-garbage into either 'from_space' or 'from_space+1' */ \
943 generation_index_t new_gen = from_space + (raise!=0)
945 // The new value of the page generation mask is computed as follows:
946 // If 'raise' = 1 then:
947 // Nothing resides in 'from_space', and 'from_space+1' gains new objects
948 // if and only if any objects on the page were retained.
949 // If 'raise' = 0 then:
950 // Nothing resides in the scratch generation, and 'from_space'
951 // has objects if and only if any objects were retained.
952 #define COMPUTE_NEW_MASK(var, old) \
953 int var = old & ~(1<<from_space); \
955 var |= 1<<(from_space+1) & any_kept; \
957 var = (var & ~(1<<new_space)) | (1<<from_space & any_kept)
960 sweep_fixedobj_pages(int raise
)
963 lispobj
*obj
, *limit
, *hole
;
964 // This will be needed for space accounting.
965 // threads might fail to consume all the space on a page.
966 // By storing in the page table the count of holes that really existed
967 // at the start of the prior GC, and subtracting from that the number
968 // that exist now, we know how much usable space was obtained (per page).
974 low_page_index_t page
;
975 for (page
= 0; page
<= max_used_fixedobj_page
; ++page
) {
976 // On pages that won't need manipulation of the freelist,
977 // we try to do less work than for pages that need it.
978 if (!(fixedobj_pages
[page
].gens
& relevant_genmask
)) {
979 // Scan for old->young pointers, and WP if there are none.
980 if (!fixedobj_page_wp(page
) && fixedobj_pages
[page
].gens
> 1
981 && can_wp_fixedobj_page(page
, keep_gen
, new_gen
))
982 SET_WP_FLAG(page
, WRITE_PROTECT
);
985 int obj_spacing
= page_obj_align(page
) << WORD_SHIFT
;
986 int obj_size_words
= page_obj_size(page
);
987 page_base
= low_page_address(page
);
988 limit
= (lispobj
*)(page_base
+ IMMOBILE_CARD_BYTES
- obj_spacing
);
989 obj
= (lispobj
*)page_base
;
991 int any_kept
= 0; // was anything moved to the kept generation
994 // wp_it is 1 if we should try to write-protect it now.
995 // If already write-protected, skip the tests.
996 int wp_it
= !fixedobj_page_wp(page
);
998 for ( ; obj
<= limit
; obj
= (lispobj
*)((char*)obj
+ obj_spacing
) ) {
999 if (fixnump(*obj
)) { // was already a hole
1001 // re-link it into the new freelist
1003 // store the displacement from the end of the object
1004 // at prev_hole to the start of this object.
1005 *hole
= (lispobj
)((char*)obj
- ((char*)hole
+ obj_spacing
));
1006 else // this is the first seen hole on the page
1007 // record the byte offset to that hole
1008 fixedobj_pages
[page
].free_index
= (char*)obj
- page_base
;
1011 } else if ((gen
= __immobile_obj_gen_bits(obj
)) == discard_gen
) { // trash
1012 for (word_idx
=obj_size_words
-1 ; word_idx
> 0 ; --word_idx
)
1015 } else if (gen
== keep_gen
) {
1016 assign_generation(obj
, gen
= new_gen
);
1018 gc_assert(!fixedobj_points_to_younger_p(obj
, obj_size_words
,
1019 gen
, keep_gen
, new_gen
));
1022 } else if (wp_it
&& fixedobj_points_to_younger_p(obj
, obj_size_words
,
1023 gen
, keep_gen
, new_gen
))
1026 if ( hole
) // terminate the chain of holes
1027 *hole
= (lispobj
)((char*)obj
- ((char*)hole
+ obj_spacing
));
1028 fixedobj_pages
[page
].prior_gc_free_word_index
=
1029 fixedobj_pages
[page
].free_index
>> WORD_SHIFT
;
1031 COMPUTE_NEW_MASK(mask
, fixedobj_pages
[page
].gens
);
1033 fixedobj_pages
[page
].gens
= mask
;
1035 SET_WP_FLAG(page
, WRITE_PROTECT
);
1036 dprintf((logfile
, "Lowspace: set WP on page %d\n", page
));
1039 dprintf((logfile
,"page %d is all garbage\n", page
));
1040 fixedobj_pages
[page
].attr
.packed
= 0;
1043 check_fixedobj_page(page
);
1045 dprintf((logfile
,"page %d: %d holes\n", page
, n_holes
));
1049 void verify_immobile_page_protection(int,int);
1051 // Scan for freshly trashed objects and turn them into filler.
1052 // Lisp is responsible for consuming the free space
1053 // when it next allocates a variable-size object.
1055 sweep_varyobj_pages(int raise
)
1059 lispobj
* free_pointer
= (lispobj
*)SYMBOL(IMMOBILE_SPACE_FREE_POINTER
)->value
;
1060 low_page_index_t page
;
1061 for (page
= FIRST_VARYOBJ_PAGE
; page
<= max_used_varyobj_page
; ++page
) {
1062 int genmask
= VARYOBJ_PAGE_GENS(page
);
1063 if (!(genmask
& relevant_genmask
)) { // Has nothing in oldspace or newspace.
1064 // Scan for old->young pointers, and WP if there are none.
1065 if (varyobj_page_touched(page
)
1066 && varyobj_page_gens_augmented(page
) > 1
1067 && can_wp_varyobj_page(page
, keep_gen
, new_gen
))
1068 varyobj_page_touched_bits
[(page
- FIRST_VARYOBJ_PAGE
)/32] &= ~(1<<(page
& 31));
1071 lispobj
* page_base
= (lispobj
*)low_page_address(page
);
1072 lispobj
* limit
= page_base
+ WORDS_PER_PAGE
;
1073 if (limit
> free_pointer
) limit
= free_pointer
;
1074 int any_kept
= 0; // was anything moved to the kept generation
1075 // wp_it is 1 if we should try to write-protect it now.
1076 // If already write-protected, skip the tests.
1077 int wp_it
= varyobj_page_touched(page
);
1078 lispobj
* obj
= varyobj_scan_start(page
);
1081 if (obj
< page_base
) {
1082 // An object whose tail is on this page, or which spans this page,
1083 // would have been promoted/kept while dealing with the page with
1084 // the object header. Therefore we don't need to consider that object,
1085 // * except * that we do need to consider whether it is an old object
1086 // pointing to a young object.
1087 if (wp_it
// If we wanted to try write-protecting this page,
1088 // and the object starting before this page is strictly older
1089 // than the generation that we're moving retained objects into
1090 && (gen
= __immobile_obj_gen_bits(obj
)) > new_gen
1091 // and it contains an old->young pointer
1092 && varyobj_points_to_younger_p(obj
, gen
, keep_gen
, new_gen
,
1093 (os_vm_address_t
)page_base
,
1094 (os_vm_address_t
)limit
)) {
1097 // We MUST skip this object in the sweep, because in the case of
1098 // non-promotion (raise=0), we could see an object in from_space
1099 // and believe it to be dead.
1100 obj
+= sizetab
[widetag_of(*obj
)](obj
);
1101 // obj can't hop over this page. If it did, there would be no
1102 // headers on the page, and genmask would have been zero.
1103 gc_assert(obj
< limit
);
1105 for ( ; obj
< limit
; obj
+= size
) {
1106 lispobj word
= *obj
;
1107 size
= sizetab
[widetag_of(word
)](obj
);
1108 if (immobile_filler_p(obj
)) { // do nothing
1109 } else if ((gen
= __immobile_obj_gen_bits(obj
)) == discard_gen
) {
1111 lose("immobile object @ %p too small to free", obj
);
1112 else { // Create a filler object.
1113 struct code
* code
= (struct code
*)obj
;
1114 code
->header
= 2<<N_WIDETAG_BITS
| CODE_HEADER_WIDETAG
;
1115 code
->code_size
= make_fixnum((size
- 2) * N_WORD_BYTES
);
1116 code
->debug_info
= varyobj_holes
;
1117 varyobj_holes
= (lispobj
)code
;
1119 } else if (gen
== keep_gen
) {
1120 assign_generation(obj
, gen
= new_gen
);
1122 gc_assert(!varyobj_points_to_younger_p(obj
, gen
, keep_gen
, new_gen
,
1123 (os_vm_address_t
)page_base
,
1124 (os_vm_address_t
)limit
));
1128 varyobj_points_to_younger_p(obj
, gen
, keep_gen
, new_gen
,
1129 (os_vm_address_t
)page_base
,
1130 (os_vm_address_t
)limit
))
1133 COMPUTE_NEW_MASK(mask
, VARYOBJ_PAGE_GENS(page
));
1134 VARYOBJ_PAGE_GENS(page
) = mask
;
1135 if ( mask
&& wp_it
)
1136 varyobj_page_touched_bits
[(page
- FIRST_VARYOBJ_PAGE
)/32] &= ~(1 << (page
& 31));
1139 verify_immobile_page_protection(keep_gen
, new_gen
);
// Recompute the high-water marks of both immobile subspaces:
// the highest in-use fixedobj page (and the corresponding Lisp-visible
// free pointer) and the highest in-use varyobj page.
// NOTE(review): interior lines were missing from this extraction and have
// been restored from context; verify against upstream.
static void compute_immobile_space_bound()
{
    low_page_index_t max;
    // find the highest page in use
    for (max = FIRST_VARYOBJ_PAGE-1 ; max >= 0 ; --max)
        if (fixedobj_pages[max].attr.parts.obj_size)
            break;
    max_used_fixedobj_page = max; // this is a page index, not the number of pages.
    SYMBOL(IMMOBILE_FIXEDOBJ_FREE_POINTER)->value =
        IMMOBILE_SPACE_START + IMMOBILE_CARD_BYTES*(1+max);

    for (max = (IMMOBILE_SPACE_SIZE/IMMOBILE_CARD_BYTES)-1 ;
         max >= FIRST_VARYOBJ_PAGE ; --max)
        if (varyobj_page_gens_augmented(max))
            break;
    max_used_varyobj_page = max; // this is a page index, not the number of pages.
}
1161 // TODO: (Maybe this won't work. Not sure yet.) rather than use the
1162 // same 'raise' concept as in gencgc, each immobile object can store bits
1163 // indicating whether it has survived any GC at its current generation.
1164 // If it has, then it gets promoted next time, rather than all or nothing
1165 // being promoted from the generation getting collected.
// Perform one GC sweep over both immobile subspaces: fixed-size-object
// pages first, then variable-size-object pages, and finally recompute
// the subspace bounds. 'raise' nonzero moves survivors into
// from_space+1; zero keeps them in from_space.
// NOTE(review): the return type and braces were missing from this
// extraction and have been restored from context; verify against upstream.
void
sweep_immobile_space(int raise)
{
    // All queued old->young references must have been scavenged already.
    gc_assert(immobile_scav_queue_count == 0);
    sweep_fixedobj_pages(raise);
    sweep_varyobj_pages(raise);
    compute_immobile_space_bound();
}
1175 void gc_init_immobile()
1180 int n_fixedobj_pages
= FIRST_VARYOBJ_PAGE
;
1181 int n_varyobj_pages
= (IMMOBILE_SPACE_SIZE
- IMMOBILE_FIXEDOBJ_SUBSPACE_SIZE
)
1182 / IMMOBILE_CARD_BYTES
;
1183 fixedobj_pages
= calloc(n_fixedobj_pages
, sizeof(struct fixedobj_page
));
1184 gc_assert(fixedobj_pages
);
1186 n_bitmap_elts
= (n_varyobj_pages
+ 31) / 32;
1187 int request
= n_bitmap_elts
* sizeof (int)
1188 + n_varyobj_pages
* (sizeof (short)+sizeof (char));
1189 char* varyobj_page_tables
= malloc(request
);
1190 gc_assert(varyobj_page_tables
);
1191 memset(varyobj_page_tables
, 0, request
);
1192 varyobj_page_touched_bits
= (unsigned int*)varyobj_page_tables
;
1193 // The conservative value for 'touched' is 1.
1194 memset(varyobj_page_touched_bits
, 0xff, n_bitmap_elts
* sizeof (int));
1195 varyobj_page_scan_start_offset
= (unsigned short*)(varyobj_page_touched_bits
+ n_bitmap_elts
);
1196 varyobj_page_header_gens
= (unsigned char*)(varyobj_page_scan_start_offset
+ n_varyobj_pages
);
1199 /* Because the immobile page table is not dumped into a core image,
1200 we have to reverse-engineer the characteristics of each page,
1201 which means figuring out what the object spacing should be.
1202 This is not difficult, but is a bit of a kludge */
// Return the object-to-object spacing, in words, implied by 'obj'.
// LAYOUT instances are allocated on LAYOUT_ALIGN boundaries; every other
// object is packed at its natural (actual) size.
// NOTE(review): the trailing parameter line was missing from this
// extraction and has been restored from context; verify against upstream.
static inline int immobile_obj_spacing(lispobj header_word, lispobj *obj,
                                       int actual_size)
{
    if (widetag_of(header_word) == INSTANCE_HEADER_WIDETAG &&
        instance_layout(obj) == LAYOUT_OF_LAYOUT)
        return LAYOUT_ALIGN / N_WORD_BYTES;
    else
        return actual_size; // in words
}
1214 // Set the characteristics of each used page at image startup time.
1215 void immobile_space_coreparse(uword_t address
, uword_t len
)
1217 int n_pages
, word_idx
, page
;
1219 n_pages
= (len
+ IMMOBILE_CARD_BYTES
- 1) / IMMOBILE_CARD_BYTES
;
1220 if (address
== IMMOBILE_SPACE_START
) {
1221 for (page
= 0 ; page
< n_pages
; ++page
) {
1222 lispobj
* page_data
= low_page_address(page
);
1223 for (word_idx
= 0 ; word_idx
< WORDS_PER_PAGE
; ++word_idx
) {
1224 lispobj
* obj
= page_data
+ word_idx
;
1225 lispobj header
= *obj
;
1226 if (!fixnump(header
)) {
1227 gc_assert(other_immediate_lowtag_p(*obj
));
1228 int size
= sizetab
[widetag_of(header
)](obj
);
1229 fixedobj_pages
[page
].attr
.parts
.obj_size
= size
;
1230 fixedobj_pages
[page
].attr
.parts
.obj_align
1231 = immobile_obj_spacing(header
, obj
, size
);
1232 fixedobj_pages
[page
].attr
.parts
.flags
= WRITE_PROTECT
;
1233 fixedobj_pages
[page
].gens
|= 1 << __immobile_obj_gen_bits(obj
);
1238 } else if (address
== IMMOBILE_VARYOBJ_SUBSPACE_START
) {
1239 lispobj
* obj
= (lispobj
*)address
;
1240 lispobj
* limit
= (lispobj
*)(address
+ len
);
1242 low_page_index_t last_page
= 0;
1243 for ( ; obj
< limit
; obj
+= n_words
) {
1244 n_words
= sizetab
[widetag_of(*obj
)](obj
);
1245 if (obj
[1] == 0 && (obj
[0] == INSTANCE_HEADER_WIDETAG
||
1248 // Round up to the next immobile page.
1249 lispobj page_end
= CEILING((lispobj
)obj
, IMMOBILE_CARD_BYTES
);
1250 n_words
= (lispobj
*)page_end
- obj
;
1251 obj
[0] = SIMPLE_ARRAY_FIXNUM_WIDETAG
;
1252 obj
[1] = make_fixnum(n_words
- 2);
1254 // There are trailing zeros to fill the core file page.
1255 // This happens when the next object is exactly aligned
1256 // to an immobile page. There is no padding object.
1257 gc_assert(((lispobj
)obj
& (IMMOBILE_CARD_BYTES
-1)) == 0);
1262 if (immobile_filler_p(obj
)) {
1263 // Holes were chained through the debug_info slot at save.
1264 // Just update the head of the chain.
1265 varyobj_holes
= (lispobj
)obj
;
1268 low_page_index_t first_page
= find_immobile_page_index(obj
);
1269 last_page
= find_immobile_page_index(obj
+n_words
-1);
1270 // Only the page with this object header gets a bit in its gen mask.
1271 VARYOBJ_PAGE_GENS(first_page
) |= 1<<__immobile_obj_gen_bits(obj
);
1272 // For each page touched by this object, set the page's
1273 // scan_start_offset, unless it was already set.
1275 for (page
= first_page
; page
<= last_page
; ++page
) {
1276 if (!varyobj_page_scan_start_offset
[page
- FIRST_VARYOBJ_PAGE
]) {
1277 long offset
= (char*)low_page_address(page
+1) - (char*)obj
;
1278 varyobj_page_scan_start_offset
[page
- FIRST_VARYOBJ_PAGE
]
1279 = offset
>> (WORD_SHIFT
+ 1);
1283 // Write-protect the pages occupied by the core file.
1284 // (There can be no inter-generation pointers.)
1286 for (page
= FIRST_VARYOBJ_PAGE
; page
<= last_page
; ++page
)
1287 varyobj_page_touched_bits
[(page
-FIRST_VARYOBJ_PAGE
)/32] &= ~(1<<(page
& 31));
1288 SYMBOL(IMMOBILE_SPACE_FREE_POINTER
)->value
= (lispobj
)limit
;
1289 compute_immobile_space_bound();
1290 write_protect_immobile_space();
1292 lose("unknown immobile subspace");
1296 // Demote pseudo-static to highest normal generation
1297 // so that all objects become eligible for collection.
// Demote pseudo-static to highest normal generation
// so that all objects become eligible for collection.
// NOTE(review): several interior lines were missing from this extraction
// and have been restored from context; verify against upstream.
void prepare_immobile_space_for_final_gc()
{
    int page;
    char* page_base;
    char* page_end = (char*)SYMBOL(IMMOBILE_FIXEDOBJ_FREE_POINTER)->value;

    // The list of holes need not be saved.
    SYMBOL(IMMOBILE_FREELIST)->value = NIL;

    // Fixedobj subspace: demote each pseudo-static object, page by page.
    for (page_base = (char*)IMMOBILE_SPACE_START, page = 0 ;
         page_base < page_end ;
         page_base += IMMOBILE_CARD_BYTES, ++page) {
        unsigned char mask = fixedobj_pages[page].gens;
        if (mask & 1<<PSEUDO_STATIC_GENERATION) {
            int obj_spacing = page_obj_align(page) << WORD_SHIFT;
            lispobj* obj = (lispobj*)page_base;
            lispobj* limit = (lispobj*)(page_base + IMMOBILE_CARD_BYTES - obj_spacing);
            for ( ; obj <= limit ; obj = (lispobj*)((char*)obj + obj_spacing) ) {
                if (!fixnump(*obj) // skip holes
                    && __immobile_obj_gen_bits(obj) == PSEUDO_STATIC_GENERATION)
                    assign_generation(obj, HIGHEST_NORMAL_GENERATION);
            }
            // Update the page's generation mask accordingly.
            fixedobj_pages[page].gens = (mask & ~(1<<PSEUDO_STATIC_GENERATION))
                                        | 1<<HIGHEST_NORMAL_GENERATION;
        }
    }

    // Varyobj subspace: walk object by object and demote.
    lispobj* obj = (lispobj*)IMMOBILE_VARYOBJ_SUBSPACE_START;
    lispobj* limit = (lispobj*)SYMBOL(IMMOBILE_SPACE_FREE_POINTER)->value;
    for ( ; obj < limit ; obj += sizetab[widetag_of(*obj)](obj) ) {
        if (__immobile_obj_gen_bits(obj) == PSEUDO_STATIC_GENERATION)
            assign_generation(obj, HIGHEST_NORMAL_GENERATION);
    }
    // And update the varyobj page generation masks.
    int max_page = find_immobile_page_index(limit-1);
    for ( page = FIRST_VARYOBJ_PAGE ; page <= max_page ; ++page ) {
        int mask = VARYOBJ_PAGE_GENS(page);
        if (mask & (1<<PSEUDO_STATIC_GENERATION)) {
            VARYOBJ_PAGE_GENS(page) = (mask & ~(1<<PSEUDO_STATIC_GENERATION))
                                      | 1<<HIGHEST_NORMAL_GENERATION;
        }
    }
}
1341 // Now once again promote all objects to pseudo-static just prior to save.
1342 // 'coreparse' makes all pages in regular dynamic space pseudo-static.
1343 // But since immobile objects store their generation, it must be done at save,
1344 // or else it would have to be done on image restart
1345 // which would require writing to a lot of pages for no reason.
// Now once again promote all objects to pseudo-static just prior to save.
// 'coreparse' makes all pages in regular dynamic space pseudo-static.
// But since immobile objects store their generation, it must be done at save,
// or else it would have to be done on image restart
// which would require writing to a lot of pages for no reason.
// NOTE(review): braces and an 'else' line were missing from this extraction
// and have been restored from context; verify against upstream.
void prepare_immobile_space_for_save()
{
    // Don't use the page attributes now - defrag doesn't update them.
    lispobj* obj = (lispobj*)IMMOBILE_SPACE_START;
    lispobj* limit = (lispobj*)SYMBOL(IMMOBILE_FIXEDOBJ_FREE_POINTER)->value;
    // Fixedobj subspace: promote everything that has an object header.
    while (obj < limit) {
        if (other_immediate_lowtag_p(*obj))
            assign_generation(obj, PSEUDO_STATIC_GENERATION);
        obj += sizetab[widetag_of(*obj)](obj);
    }

    // Varyobj subspace: rebuild the hole chain and promote live objects.
    obj = (lispobj*)IMMOBILE_VARYOBJ_SUBSPACE_START;
    limit = (lispobj*)SYMBOL(IMMOBILE_SPACE_FREE_POINTER)->value;
    for ( varyobj_holes = 0 ; obj < limit ; obj += sizetab[widetag_of(*obj)](obj) ) {
        if (immobile_filler_p(obj)) {
            // Link this hole into the freelist via its debug_info slot.
            struct code* code = (struct code*)obj;
            code->debug_info = varyobj_holes;
            varyobj_holes = (lispobj)code;
            // 0-fill the unused space.
            int nwords = sizetab[widetag_of(*obj)](obj);
            memset(code->constants, 0,
                   (nwords * N_WORD_BYTES) - offsetof(struct code, constants));
        } else
            assign_generation(obj, PSEUDO_STATIC_GENERATION);
    }
    if ((lispobj)limit & (IMMOBILE_CARD_BYTES-1)) { // Last page is partially used.
        gc_assert(*limit == SIMPLE_ARRAY_FIXNUM_WIDETAG);
        // Write an otherwise illegal object at the free pointer.
        limit[0] = INSTANCE_HEADER_WIDETAG; // 0 payload length
        limit[1] = 0; // no layout
    }
}
// Handle a write-protection fault at 'fault_addr'. Returns nonzero if the
// fault was within immobile space (and has been handled by unprotecting
// the page and recording it as touched), zero otherwise.
// NOTE(review): return statements and braces were missing from this
// extraction and have been restored from context; verify against upstream.
int immobile_space_handle_wp_violation(void* fault_addr)
{
    low_page_index_t page_index = find_immobile_page_index(fault_addr);
    if (page_index < 0) return 0; // not in immobile space

    // Unprotect the whole card containing the faulting address.
    os_protect((os_vm_address_t)((lispobj)fault_addr & ~(IMMOBILE_CARD_BYTES-1)),
               IMMOBILE_CARD_BYTES, OS_VM_PROT_ALL);
    if (page_index >= FIRST_VARYOBJ_PAGE) {
        // The free pointer can move up or down. Attempting to insist that a WP
        // fault not occur above the free pointer (plus some slack) is not
        // threadsafe, so allow it anywhere. More strictness could be imparted
        // by tracking the max value attained by the free pointer.
        __sync_or_and_fetch(&varyobj_page_touched_bits[(page_index-FIRST_VARYOBJ_PAGE)/32],
                            1 << (page_index & 31));
    } else {
        // FIXME: a single bitmap of touched bits would make more sense,
        // and the _CLEARED flag doesn't achieve much if anything.
        if (!(fixedobj_pages[page_index].attr.parts.flags
              & (WRITE_PROTECT|WRITE_PROTECT_CLEARED)))
            return 0;
        SET_WP_FLAG(page_index, WRITE_PROTECT_CLEARED);
    }
    return 1;
}
1406 // Find the object that encloses pointer.
1407 static int page_attributes_valid
= 1; // For verify_space() after defrag
// Find the object that encloses 'pointer', or NULL if it is not within
// immobile space (or points into a filler hole / unused slot).
// When page attributes are valid, uses them for an O(1)-ish lookup;
// otherwise falls back to a linear gc_search_space scan.
// NOTE(review): the return type, declarations, and fall-through returns
// were missing from this extraction and have been restored from context;
// verify against upstream.
lispobj *
search_immobile_space(void *pointer)
{
    lispobj *start;

    if ((lispobj)pointer >= IMMOBILE_SPACE_START
        && (lispobj)pointer < SYMBOL(IMMOBILE_SPACE_FREE_POINTER)->value) {
        low_page_index_t page_index = find_immobile_page_index(pointer);
        if ((lispobj)pointer >= IMMOBILE_VARYOBJ_SUBSPACE_START) {
            // Varyobj subspace: scan forward from the page's scan start.
            if (page_attributes_valid) {
                start = (lispobj*)varyobj_scan_start(page_index);
                if (start > (lispobj*)pointer) return NULL;
            } else {
                start = (lispobj*)IMMOBILE_VARYOBJ_SUBSPACE_START;
            }
            lispobj* found = gc_search_space(start, pointer);
            // A pointer into a filler hole is not a valid object pointer.
            return (found && immobile_filler_p(found)) ? 0 : found;
        } else if ((lispobj)pointer < SYMBOL(IMMOBILE_FIXEDOBJ_FREE_POINTER)->value) {
            // Fixedobj subspace: objects lie on fixed boundaries, so the
            // enclosing object can be computed directly from the spacing.
            char *page_base = (char*)((lispobj)pointer & ~(IMMOBILE_CARD_BYTES-1));
            if (page_attributes_valid) {
                int spacing = page_obj_align(page_index) << WORD_SHIFT;
                int index = ((char*)pointer - page_base) / spacing;
                char *begin = page_base + spacing * index;
                char *end = begin + (page_obj_size(page_index) << WORD_SHIFT);
                // Only a hit if the pointer is within the object proper,
                // not the padding between objects.
                if ((char*)pointer < end) return (lispobj*)begin;
            } else {
                return gc_search_space((lispobj*)page_base, pointer);
            }
        }
    }
    return NULL;
}
1441 // For coalescing holes, we need to scan backwards, which is done by
1442 // looking backwards for a page that contains the start of a
1443 // block of objects one of which must abut 'obj'.
// Return the varyobj-space object immediately preceding 'obj', or 0 if
// 'obj' is the first object in the subspace. Works by walking backwards
// page by page to a page with a known scan start, then forward object by
// object until one abuts 'obj'.
// NOTE(review): loop scaffolding lines were missing from this extraction
// and have been restored from context; verify against upstream.
lispobj* find_preceding_object(lispobj* obj)
{
  int page = find_immobile_page_index(obj);
  while (1) {
      int offset = varyobj_page_scan_start_offset[page - FIRST_VARYOBJ_PAGE];
      if (offset) { // 0 means the page is empty.
          lispobj* start = varyobj_scan_start(page);
          if (start < obj) { // Scan from here forward
              while (1) {
                  lispobj* end = start + sizetab[widetag_of(*start)](start);
                  if (end == obj) return start; // found the abutting object
                  gc_assert(end < obj);
                  start = end;
              }
          }
      }
      if (page == FIRST_VARYOBJ_PAGE) {
          gc_assert(obj == low_page_address(FIRST_VARYOBJ_PAGE));
          return 0; // Predecessor does not exist
      }
      --page;
  }
}
1468 #include "genesis/vector.h"
1469 #include "genesis/instance.h"
1470 lispobj
alloc_layout(lispobj slots
)
1472 struct vector
* v
= (struct vector
*)native_pointer(slots
);
1473 // If INSTANCE_DATA_START is 0, subtract 1 word for the header.
1474 // If 1, subtract 2 words: 1 for the header and 1 for the layout.
1475 if (fixnum_value(v
->length
) != (LAYOUT_SIZE
- INSTANCE_DATA_START
- 1))
1476 lose("bad arguments to alloc_layout");
1477 struct instance
* l
= (struct instance
*)
1478 alloc_immobile_obj(MAKE_ATTR(LAYOUT_ALIGN
/ N_WORD_BYTES
,
1479 CEILING(LAYOUT_SIZE
,2),
1481 #ifdef LISP_FEATURE_COMPACT_INSTANCE_HEADER
1482 (LAYOUT_OF_LAYOUT
<< 32) |
1484 (LAYOUT_SIZE
-1)<<8 | INSTANCE_HEADER_WIDETAG
,
1486 #ifndef LISP_FEATURE_COMPACT_INSTANCE_HEADER
1487 l
->slots
[0] = LAYOUT_OF_LAYOUT
;
1489 memcpy(&l
->slots
[INSTANCE_DATA_START
], v
->data
,
1490 (LAYOUT_SIZE
- INSTANCE_DATA_START
- 1)*N_WORD_BYTES
);
1492 // Possible efficiency win: make the "wasted" bytes after the layout into a
1493 // simple unboxed array so that heap-walking can skip in one step.
1494 // Probably only a performance issue for MAP-ALLOCATED-OBJECTS,
1495 // since scavenging know to skip by the object alignment anyway.
1496 return make_lispobj(l
, INSTANCE_POINTER_LOWTAG
);
1499 #include "genesis/symbol.h"
1500 lispobj
alloc_sym(lispobj name
)
1502 // While there are different "kinds" of symbols in the defragmentation
1503 // logic, we don't distinguish them when allocating,
1504 // on the theory that contiguous allocations are preferable anyway.
1505 struct symbol
* s
= (struct symbol
*)
1506 alloc_immobile_obj(MAKE_ATTR(CEILING(SYMBOL_SIZE
,2), // spacing
1507 CEILING(SYMBOL_SIZE
,2), // size
1509 (SYMBOL_SIZE
-1)<<8 | SYMBOL_HEADER_WIDETAG
,
1511 s
->value
= UNBOUND_MARKER_WIDETAG
;
1516 return make_lispobj(s
, OTHER_POINTER_LOWTAG
);
1519 #include "genesis/fdefn.h"
1520 lispobj
alloc_fdefn(lispobj name
)
1522 struct fdefn
* f
= (struct fdefn
*)
1523 alloc_immobile_obj(MAKE_ATTR(CEILING(FDEFN_SIZE
,2), // spacing
1524 CEILING(FDEFN_SIZE
,2), // size
1526 (FDEFN_SIZE
-1)<<8 | FDEFN_WIDETAG
,
1531 return make_lispobj(f
, OTHER_POINTER_LOWTAG
);
1534 #if defined(LISP_FEATURE_IMMOBILE_CODE) && defined(LISP_FEATURE_COMPACT_INSTANCE_HEADER)
1535 #include "genesis/funcallable-instance.h"
1536 #define GF_SIZE (sizeof(struct funcallable_instance)/sizeof(lispobj)+2) /* = 6 */
1537 lispobj
alloc_generic_function(lispobj slots
)
1539 // GFs have no C header file to represent the layout, which is 6 words:
1540 // header, entry-point, fin-function, slots, raw data (x2)
1541 lispobj
* obj
= (lispobj
*)
1542 alloc_immobile_obj(MAKE_ATTR(CEILING(GF_SIZE
,2), // spacing
1543 CEILING(GF_SIZE
,2), // size
1545 // 5 payload words following the header
1546 ((GF_SIZE
-1)<<8) | FUNCALLABLE_INSTANCE_HEADER_WIDETAG
,
1547 // KLUDGE: same page attributes as symbols,
1548 // so use the same hint.
1550 ((struct funcallable_instance
*)obj
)->info
[0] = slots
;
1551 ((struct funcallable_instance
*)obj
)->trampoline
= (lispobj
)(obj
+ 4);
1552 return make_lispobj(obj
, FUN_POINTER_LOWTAG
);
1556 #ifdef LISP_FEATURE_IMMOBILE_CODE
1557 //// Defragmentation
1562 } fixedobj_tempspace
, varyobj_tempspace
;
1564 // Given an adddress in the target core, return the equivalent
1565 // physical address to read or write during defragmentation
// Given an address in the target core, return the equivalent
// physical address to read or write during defragmentation.
// A negative byte_index relative to the varyobj subspace start means the
// address lies in the fixedobj subspace.
// NOTE(review): closing braces were missing from this extraction and have
// been restored from context; verify against upstream.
static lispobj * tempspace_addr(void* address)
{
    int byte_index = (char*)address - (char*)IMMOBILE_VARYOBJ_SUBSPACE_START;
    gc_assert(immobile_space_p((lispobj)address));
    if (byte_index < 0) { // fixedobj subspace
        // If the fixedobj tempspace was not set up, addresses map to themselves.
        if (fixedobj_tempspace.n_bytes == 0) return address;
        byte_index = (char*)address - (char*)IMMOBILE_SPACE_START;
        gc_assert(byte_index < fixedobj_tempspace.n_bytes);
        return (void*)(fixedobj_tempspace.start + byte_index);
    } else { // varyobj subspace
        gc_assert(byte_index < varyobj_tempspace.n_bytes);
        return (void*)(varyobj_tempspace.start + byte_index);
    }
}
1581 /* Search for an object during defragmentation */
/* Search for an object during defragmentation. Returns the (pre-move)
   address of the object enclosing 'addr'. Objects here have already been
   forwarded, so sizes must be read through the tempspace copy. */
// NOTE(review): the loop head and some interior lines were missing from
// this extraction and have been restored from context; verify against
// upstream.
static lispobj* defrag_search_varyobj_subspace(lispobj addr)
{
    low_page_index_t page = find_immobile_page_index((void*)(long)addr);
    lispobj *where = varyobj_scan_start(page);
    size_t count;
    do {
        if (immobile_filler_p(where)) {
            // Holes are not forwarded; size can be read in place.
            count = sizetab[widetag_of(*where)](where);
        } else {
            gc_assert(forwarding_pointer_p(where));
            lispobj *forwarded_obj = native_pointer(forwarding_pointer_value(where));
            // The header was smashed by the forwarding pointer, so the
            // size must come from the copy in tempspace.
            lispobj *temp_obj = tempspace_addr(forwarded_obj);
            count = sizetab[widetag_of(*temp_obj)](temp_obj);
            if ((lispobj*)(uword_t)addr < where+count) {
                // Only these object types can be a branch target here.
                int widetag = widetag_of(*temp_obj);
                gc_assert(widetag == CODE_HEADER_WIDETAG ||
                          widetag == FDEFN_WIDETAG ||
                          widetag == FUNCALLABLE_INSTANCE_HEADER_WIDETAG);
                return where;
            }
        }
    } while ((where += count) <= (lispobj*)(uword_t)addr);
    lose("Can't find jump target");
}
// Rewrite each of the 'n_words' slots starting at 'where': any Lisp
// pointer to a forwarded (moved) object is replaced by the object's new
// address. Non-pointers and pointers to unmoved objects are left alone.
// NOTE(review): the opening brace and loop-variable declaration were
// missing from this extraction and have been restored from context;
// verify against upstream.
static void adjust_words(lispobj *where, sword_t n_words)
{
    int i;
    for (i=0;i<n_words;++i) {
        lispobj ptr = where[i];
        if (is_lisp_pointer(ptr) && forwarding_pointer_p(native_pointer(ptr)))
            where[i] = forwarding_pointer_value(native_pointer(ptr));
    }
}
// Adjust a simple-fun raw entry address for defragmentation: convert the
// raw address back to a tagged simple-fun pointer, forward it via
// adjust_words, and re-derive the raw address.
// NOTE(review): braces were missing from this extraction and have been
// restored from context; verify against upstream.
static lispobj adjust_fun_entrypoint(lispobj raw_entry)
{
    if (raw_entry > READ_ONLY_SPACE_END) {
        // if not pointing read-only space, then it's neither closure_tramp
        // nor funcallable_instance_tramp.
        lispobj simple_fun = raw_entry - FUN_RAW_ADDR_OFFSET;
        adjust_words(&simple_fun, 1);
        return simple_fun + FUN_RAW_ADDR_OFFSET;
    }
    return raw_entry; // otherwise it's one of those trampolines
}
1629 /// Fixup the fdefn at 'where' based on it moving by 'displacement'.
1630 /// 'fdefn_old' is needed for computing the pre-fixup callee if the
1631 /// architecture uses a call-relative instruction.
/// Fixup the fdefn at 'where' based on it moving by 'displacement'.
/// 'fdefn_old' is needed for computing the pre-fixup callee if the
/// architecture uses a call-relative instruction.
// NOTE(review): the opening brace and #else/#endif lines were missing
// from this extraction and have been restored from context; verify
// against upstream.
static void adjust_fdefn_entrypoint(lispobj* where, int displacement,
                                    struct fdefn* fdefn_old)
{
    struct fdefn* fdefn = (struct fdefn*)where;
    int callee_adjust = 0;
    // Get the tagged object referred to by the fdefn_raw_addr.
    lispobj callee_old = fdefn_raw_referent(fdefn_old);
    // If it's the undefined function trampoline, or the referent
    // did not move, then the callee_adjust stays 0.
    // Otherwise we adjust the rel32 field by the change in callee address.
    if (callee_old && forwarding_pointer_p(native_pointer(callee_old))) {
        lispobj callee_new = forwarding_pointer_value(native_pointer(callee_old));
        callee_adjust = callee_new - callee_old;
    }
#ifdef LISP_FEATURE_X86_64
    // raw_addr holds a JMP whose rel32 operand starts 1 byte in; adjust
    // it by the callee's motion minus this fdefn's own motion.
    *(int*)((char*)&fdefn->raw_addr + 1) += callee_adjust - displacement;
#else
#error "Can't adjust fdefn_raw_addr for this architecture"
#endif
}
1653 // Fix the layout of OBJ, and return the layout's address in tempspace.
// Fix the layout of OBJ, and return the layout's address in tempspace.
// NOTE(review): the opening brace and #else/#endif lines were missing
// from this extraction and have been restored from context; verify
// against upstream.
struct layout* fix_object_layout(lispobj* obj)
{
    // This works on instances, funcallable instances (and/or closures)
    // but the latter only if the layout is in the header word.
#ifdef LISP_FEATURE_COMPACT_INSTANCE_HEADER
    gc_assert(widetag_of(*obj) == INSTANCE_HEADER_WIDETAG
           || widetag_of(*obj) == FUNCALLABLE_INSTANCE_HEADER_WIDETAG
           || widetag_of(*obj) == CLOSURE_HEADER_WIDETAG);
#else
    gc_assert(widetag_of(*obj) == INSTANCE_HEADER_WIDETAG);
#endif
    lispobj layout = instance_layout(obj);
    if (layout == 0) return 0; // object has no layout yet
    if (forwarding_pointer_p(native_pointer(layout))) { // usually
        // The layout moved; store its new address back into the object.
        layout = forwarding_pointer_value(native_pointer(layout));
        set_instance_layout(obj, layout);
    }
    // Read the layout through tempspace, since its old location may have
    // been smashed by a forwarding pointer.
    struct layout* native_layout = (struct layout*)
        tempspace_addr(native_pointer(layout));
    gc_assert(widetag_of(native_layout->header) == INSTANCE_HEADER_WIDETAG);
    gc_assert(instance_layout((lispobj*)native_layout) == LAYOUT_OF_LAYOUT);
    return native_layout;
}
1678 /// It's tricky to try to use the scavtab[] functions for fixing up moved
1679 /// objects, because scavenger functions might invoke transport functions.
1680 /// The best approach is to do an explicit switch over all object types.
1681 #include "genesis/hash-table.h"
1682 static void fixup_space(lispobj
* where
, size_t n_words
)
1684 lispobj
* end
= where
+ n_words
;
1685 lispobj header_word
;
1688 void fixup_immobile_refs(struct code
*);
1689 int static_space_p
= ((lispobj
)where
== STATIC_SPACE_START
);
1691 while (where
< end
) {
1692 gc_assert(!forwarding_pointer_p(where
));
1693 header_word
= *where
;
1694 if (is_cons_half(header_word
)) {
1695 adjust_words(where
, 2); // A cons.
1699 widetag
= widetag_of(header_word
);
1700 size
= sizetab
[widetag
](where
);
1703 if (!unboxed_obj_widetag_p(widetag
))
1704 lose("Unhandled widetag in fixup_space: %p\n", (void*)header_word
);
1706 #ifdef LISP_FEATURE_COMPACT_INSTANCE_HEADER
1707 case FUNCALLABLE_INSTANCE_HEADER_WIDETAG
:
1709 case INSTANCE_HEADER_WIDETAG
:
1710 instance_scan(adjust_words
, where
+1,
1711 instance_length(header_word
) | 1,
1712 fix_object_layout(where
)->bitmap
);
1714 case CODE_HEADER_WIDETAG
:
1715 // Fixup the constant pool.
1716 adjust_words(where
+1, code_header_words(header_word
)-1);
1717 // Fixup all embedded simple-funs
1718 for_each_simple_fun(i
, f
, (struct code
*)where
, 1, {
1719 f
->self
= adjust_fun_entrypoint(f
->self
);
1720 adjust_words(SIMPLE_FUN_SCAV_START(f
), SIMPLE_FUN_SCAV_NWORDS(f
));
1722 if (((struct code
*)where
)->fixups
)
1723 fixup_immobile_refs((struct code
*)where
);
1725 case CLOSURE_HEADER_WIDETAG
:
1726 where
[1] = adjust_fun_entrypoint(where
[1]);
1727 // FALLTHROUGH_INTENDED
1728 #ifndef LISP_FEATURE_COMPACT_INSTANCE_HEADER
1729 case FUNCALLABLE_INSTANCE_HEADER_WIDETAG
:
1731 // skip the trampoline word at where[1]
1732 adjust_words(where
+2, size
-2);
1735 adjust_words(where
+1, 2);
1736 // If fixed-size objects (hence FDEFNs) are movable, then fixing the
1737 // raw address can not be done here, because it is impossible to compute
1738 // the absolute jump target - we don't know what the fdefn's original
1739 // address was to compute a pc-relative address. So we do those while
1740 // permuting the FDEFNs. But because static fdefns do not move,
1741 // we do process their raw address slot here.
1742 #if DEFRAGMENT_FIXEDOBJ_SUBSPACE
1745 adjust_fdefn_entrypoint(where
, 0, (struct fdefn
*)where
);
1748 // Special case because we might need to mark hashtables
1749 // as needing rehash.
1750 case SIMPLE_VECTOR_WIDETAG
:
1751 if ((HeaderValue(header_word
) & 0xFF) == subtype_VectorValidHashing
) {
1752 struct vector
* v
= (struct vector
*)where
;
1753 lispobj
* data
= v
->data
;
1754 gc_assert(v
->length
> 0 &&
1755 lowtag_of(data
[0]) == INSTANCE_POINTER_LOWTAG
&&
1756 !(fixnum_value(v
->length
) & 1)); // length must be even
1757 boolean needs_rehash
= 0;
1759 for (i
= fixnum_value(v
->length
)-1 ; i
>=0 ; --i
) {
1760 lispobj ptr
= data
[i
];
1761 if (is_lisp_pointer(ptr
) && forwarding_pointer_p(native_pointer(ptr
))) {
1762 data
[i
] = forwarding_pointer_value(native_pointer(ptr
));
1767 struct hash_table
*ht
= (struct hash_table
*)native_pointer(v
->data
[0]);
1768 ht
->needs_rehash_p
= T
;
1772 // FALLTHROUGH_INTENDED
1774 // All the other array header widetags.
1775 case SIMPLE_ARRAY_WIDETAG
:
1776 #ifdef COMPLEX_CHARACTER_STRING_WIDETAG
1777 case COMPLEX_CHARACTER_STRING_WIDETAG
:
1779 case COMPLEX_BASE_STRING_WIDETAG
:
1780 case COMPLEX_VECTOR_NIL_WIDETAG
:
1781 case COMPLEX_BIT_VECTOR_WIDETAG
:
1782 case COMPLEX_VECTOR_WIDETAG
:
1783 case COMPLEX_ARRAY_WIDETAG
:
1784 // And the other entirely boxed objects.
1785 case SYMBOL_HEADER_WIDETAG
:
1786 case VALUE_CELL_HEADER_WIDETAG
:
1787 case WEAK_POINTER_WIDETAG
:
1789 case COMPLEX_WIDETAG
:
1790 // Use the sizing functions for generality.
1791 // Symbols can contain strange header bytes,
1792 // and vectors might have a padding word, etc.
1793 adjust_words(where
+1, size
-1);
1801 walk_generation(void (*proc
)(lispobj
*,size_t),
1802 generation_index_t generation
);
1804 int* immobile_space_reloc_index
;
1805 int* immobile_space_relocs
;
1807 static int calc_n_pages(int n_objects
, int words_per_object
)
1809 words_per_object
= CEILING(words_per_object
, 2);
1810 int objects_per_page
= WORDS_PER_PAGE
/ words_per_object
;
1811 return (n_objects
+ objects_per_page
- 1) / objects_per_page
;
1814 // Take and return an untagged pointer, or 0 if the object did not survive GC.
1815 static lispobj
* get_load_address(lispobj
* old
)
1817 if (forwarding_pointer_p(old
))
1818 return native_pointer(forwarding_pointer_value(old
));
1819 gc_assert(immobile_filler_p(old
));
1823 // This does not accept (SIMPLE-ARRAY NIL (*))
1824 // (You'd have a pretty bad time trying making a symbol like that)
1825 static int schar(struct vector
* string
, int index
)
1827 #ifdef LISP_FEATURE_SB_UNICODE
1828 if (widetag_of(string
->header
) == SIMPLE_CHARACTER_STRING_WIDETAG
)
1829 return ((int*)string
->data
)[index
];
1831 return ((char*)string
->data
)[index
];
1834 #include "genesis/package.h"
1835 #define N_SYMBOL_KINDS 4
1837 // Return an integer 0..3 telling which block of symbols to relocate 'sym' into.
1838 // This is the same as the "symbol kind" in the allocator.
1839 // 0 = uninterned, 1 = keyword, 2 = other interned, 3 = special var
1840 static int classify_symbol(lispobj
* obj
)
1842 struct symbol
* symbol
= (struct symbol
*)obj
;
1843 if (symbol
->package
== NIL
) return 0;
1844 struct vector
* package_name
= (struct vector
*)
1845 native_pointer(((struct package
*)native_pointer(symbol
->package
))->_name
);
1846 if (widetag_of(package_name
->header
) == SIMPLE_BASE_STRING_WIDETAG
1847 && !strcmp((char*)package_name
->data
, "KEYWORD"))
1849 struct vector
* symbol_name
= (struct vector
*)native_pointer(symbol
->name
);
1850 if (symbol_name
->length
>= make_fixnum(2) &&
1851 schar(symbol_name
, 0) == '*' &&
1852 schar(symbol_name
, fixnum_value(symbol_name
->length
)-1) == '*')
1857 static char* compute_defrag_start_address()
1859 // For technical reasons, objects on the first few pages created by genesis
1860 // must never move at all. So figure out where the end of that subspace is.
1861 lispobj
* obj
= (lispobj
*)IMMOBILE_SPACE_START
;
1862 gc_assert(widetag_of(*obj
) == INSTANCE_HEADER_WIDETAG
);
1863 while (instance_layout(obj
) != LAYOUT_OF_PACKAGE
) {
1864 obj
= (lispobj
*)((char*)obj
+ IMMOBILE_CARD_BYTES
);
1865 gc_assert(widetag_of(*obj
) == INSTANCE_HEADER_WIDETAG
);
1867 // Now find a page that does NOT have a package.
1869 obj
= (lispobj
*)((char*)obj
+ IMMOBILE_CARD_BYTES
);
1870 } while (widetag_of(*obj
) == INSTANCE_HEADER_WIDETAG
1871 && instance_layout(obj
) == LAYOUT_OF_PACKAGE
);
1875 void defrag_immobile_space(int* components
)
1877 // Find the starting address of fixed-size objects that will undergo defrag.
1878 // Never move the first few pages of LAYOUTs or PACKAGEs created by genesis.
1879 // If codegen becomes smarter, things like layout of FUNCTION and some
1880 // some others can be used as immediate constants in compiled code.
1881 // With initial packages, it's mainly a debugging convenience that they not move.
1882 char* defrag_base
= compute_defrag_start_address();
1883 low_page_index_t page_index
= find_immobile_page_index(defrag_base
);
1887 // Count the number of symbols, fdefns, and layouts that will be relocated
1888 unsigned int obj_type_histo
[64], sym_kind_histo
[4];
1889 bzero(obj_type_histo
, sizeof obj_type_histo
);
1890 bzero(sym_kind_histo
, sizeof sym_kind_histo
);
1892 #if DEFRAGMENT_FIXEDOBJ_SUBSPACE
1893 for ( ; page_index
<= max_used_fixedobj_page
; ++page_index
) {
1894 int obj_spacing
= page_obj_align(page_index
) << WORD_SHIFT
;
1896 lispobj
* obj
= low_page_address(page_index
);
1897 lispobj
* limit
= (lispobj
*)((char*)obj
+ IMMOBILE_CARD_BYTES
);
1898 for ( ; obj
< limit
; obj
= (lispobj
*)((char*)obj
+ obj_spacing
) ) {
1899 lispobj word
= *obj
;
1900 if (!fixnump(word
)) {
1901 if (widetag_of(word
) == SYMBOL_HEADER_WIDETAG
)
1902 ++sym_kind_histo
[classify_symbol(obj
)];
1904 ++obj_type_histo
[widetag_of(word
)/4];
1909 gc_assert(obj_type_histo
[INSTANCE_HEADER_WIDETAG
/4]);
1911 // Calculate space needed for fixedobj pages after defrag.
1912 // page order is: layouts, fdefns, GFs, symbols
1913 int n_layout_pages
= calc_n_pages(obj_type_histo
[INSTANCE_HEADER_WIDETAG
/4],
1914 LAYOUT_ALIGN
/ N_WORD_BYTES
);
1915 int n_fdefn_pages
= calc_n_pages(obj_type_histo
[FDEFN_WIDETAG
/4], FDEFN_SIZE
);
1916 int n_fin_pages
= calc_n_pages(obj_type_histo
[FUNCALLABLE_INSTANCE_HEADER_WIDETAG
/4],
1918 #if !(defined(LISP_FEATURE_IMMOBILE_CODE) && defined(LISP_FEATURE_COMPACT_INSTANCE_HEADER))
1919 gc_assert(n_fin_pages
== 0);
1921 char* layout_alloc_ptr
= defrag_base
;
1922 char* fdefn_alloc_ptr
= layout_alloc_ptr
+ n_layout_pages
* IMMOBILE_CARD_BYTES
;
1923 char* fin_alloc_ptr
= fdefn_alloc_ptr
+ n_fdefn_pages
* IMMOBILE_CARD_BYTES
;
1924 char* symbol_alloc_ptr
[N_SYMBOL_KINDS
+1];
1925 symbol_alloc_ptr
[0] = fin_alloc_ptr
+ n_fin_pages
* IMMOBILE_CARD_BYTES
;
1926 for (i
=0; i
<N_SYMBOL_KINDS
; ++i
)
1927 symbol_alloc_ptr
[i
+1] = symbol_alloc_ptr
[i
]
1928 + calc_n_pages(sym_kind_histo
[i
], SYMBOL_SIZE
) * IMMOBILE_CARD_BYTES
;
1929 char* ending_alloc_ptr
= symbol_alloc_ptr
[N_SYMBOL_KINDS
];
1931 fixedobj_tempspace
.n_bytes
= ending_alloc_ptr
- (char*)IMMOBILE_SPACE_START
;
1932 fixedobj_tempspace
.start
= calloc(fixedobj_tempspace
.n_bytes
, 1);
1933 // Copy the first few pages (the permanent pages) from immobile space
1934 // into the temporary copy, so that tempspace_addr()
1935 // does not have to return the unadjusted addr if below defrag_base.
1936 memcpy(fixedobj_tempspace
.start
, (char*)IMMOBILE_SPACE_START
,
1937 (lispobj
)defrag_base
- IMMOBILE_SPACE_START
);
1940 // Compute where each code component will be moved to.
1941 int n_code_components
= 0;
1942 for (i
=0 ; components
[i
*2] ; ++i
) {
1943 addr
= (lispobj
*)(long)components
[i
*2];
1944 gc_assert(lowtag_of((lispobj
)addr
) == OTHER_POINTER_LOWTAG
);
1945 addr
= native_pointer((lispobj
)addr
);
1946 int widetag
= widetag_of(*addr
);
1947 lispobj new_vaddr
= 0;
1948 // FIXME: generalize
1949 gc_assert(widetag
== CODE_HEADER_WIDETAG
);
1950 if (!immobile_filler_p(addr
)) {
1951 ++n_code_components
;
1952 new_vaddr
= IMMOBILE_VARYOBJ_SUBSPACE_START
+ varyobj_tempspace
.n_bytes
;
1953 varyobj_tempspace
.n_bytes
+= sizetab
[widetag
](addr
) << WORD_SHIFT
;
1955 components
[i
*2+1] = new_vaddr
;
1957 varyobj_tempspace
.start
= calloc(varyobj_tempspace
.n_bytes
, 1);
1959 printf("%d+%d+%d+%d objects... ",
1960 obj_type_histo
[INSTANCE_HEADER_WIDETAG
/4],
1961 obj_type_histo
[FDEFN_WIDETAG
/4],
1962 (sym_kind_histo
[0]+sym_kind_histo
[1]+
1963 sym_kind_histo
[2]+sym_kind_histo
[3]),
1966 // Permute varyobj space into tempspace and deposit forwarding pointers.
1968 for (i
=0 ; components
[i
*2] ; ++i
) {
1969 if ((new_vaddr
= components
[i
*2+1]) != 0) {
1970 addr
= native_pointer(components
[i
*2]);
1971 memcpy(tempspace_addr((void*)new_vaddr
), addr
,
1972 sizetab
[widetag_of(*addr
)](addr
) << WORD_SHIFT
);
1973 int displacement
= new_vaddr
- (lispobj
)addr
;
1974 switch (widetag_of(*addr
)) {
1975 case CODE_HEADER_WIDETAG
:
1976 for_each_simple_fun(index
, fun
, (struct code
*)addr
, 1, {
1977 set_forwarding_pointer((lispobj
*)fun
,
1978 make_lispobj((char*)fun
+ displacement
,
1979 FUN_POINTER_LOWTAG
));
1983 set_forwarding_pointer(addr
,
1984 make_lispobj((void*)new_vaddr
,
1985 OTHER_POINTER_LOWTAG
));
1989 #if DEFRAGMENT_FIXEDOBJ_SUBSPACE
1990 // Permute fixed-sized object pages and deposit forwarding pointers.
1991 for ( page_index
= find_immobile_page_index(defrag_base
) ;
1992 page_index
<= max_used_fixedobj_page
; ++page_index
) {
1993 int obj_spacing
= page_obj_align(page_index
) << WORD_SHIFT
;
1994 if (!obj_spacing
) continue;
1995 lispobj
* obj
= low_page_address(page_index
);
1996 lispobj
* limit
= (lispobj
*)((char*)obj
+ IMMOBILE_CARD_BYTES
);
1997 for ( ; obj
< limit
; obj
= (lispobj
*)((char*)obj
+ obj_spacing
) ) {
1998 lispobj word
= *obj
;
1999 if (fixnump(word
)) continue;
2001 int lowtag
= OTHER_POINTER_LOWTAG
;
2002 int widetag
= widetag_of(word
);
2004 case INSTANCE_HEADER_WIDETAG
:
2005 alloc_ptr
= &layout_alloc_ptr
;
2006 lowtag
= INSTANCE_POINTER_LOWTAG
;
2008 case FUNCALLABLE_INSTANCE_HEADER_WIDETAG
:
2009 alloc_ptr
= &fin_alloc_ptr
;
2010 lowtag
= FUN_POINTER_LOWTAG
;
2013 alloc_ptr
= &fdefn_alloc_ptr
;
2015 case SYMBOL_HEADER_WIDETAG
:
2016 alloc_ptr
= &symbol_alloc_ptr
[classify_symbol(obj
)];
2019 lose("Unexpected widetag");
2021 lispobj
* new = (lispobj
*)*alloc_ptr
;
2022 lispobj end
= (lispobj
)new + obj_spacing
;
2023 #define ALIGN_MASK (IMMOBILE_CARD_BYTES - 1)
2024 if ((end
& ALIGN_MASK
) < ((lispobj
)new & ALIGN_MASK
) // wrapped
2025 && (end
& ALIGN_MASK
) != 0) // ok if exactly on the boundary
2026 new = (lispobj
*)(end
& ~ALIGN_MASK
); // snap to page
2028 memcpy(tempspace_addr(new), obj
, sizetab
[widetag
](obj
) << WORD_SHIFT
);
2029 set_forwarding_pointer(obj
, make_lispobj(new, lowtag
));
2030 *alloc_ptr
= (char*)new + obj_spacing
;
2033 #ifdef LISP_FEATURE_X86_64
2034 // Fixup JMP offset in fdefns, and self pointers in funcallable instances.
2035 // The former can not be done in the same pass as space permutation,
2036 // because we don't know the order in which a generic function and its
2037 // related fdefn will be reached. Were this attempted in a single pass,
2038 // it could miss a GF that will be moved after the fdefn is moved.
2039 // And it can't be done in fixup_space() because that does not know the
2040 // original address of each fdefn, so can't compute the absolute callee.
2041 for ( page_index
= find_immobile_page_index(defrag_base
) ;
2042 page_index
<= max_used_fixedobj_page
; ++page_index
) {
2043 int obj_spacing
= page_obj_align(page_index
) << WORD_SHIFT
;
2044 if (!obj_spacing
) continue;
2045 lispobj
* obj
= low_page_address(page_index
);
2046 lispobj
* limit
= (lispobj
*)((char*)obj
+ IMMOBILE_CARD_BYTES
);
2047 for ( ; obj
< limit
; obj
= (lispobj
*)((char*)obj
+ obj_spacing
) ) {
2048 if (fixnump(*obj
)) continue;
2049 gc_assert(forwarding_pointer_p(obj
));
2050 lispobj
* new = native_pointer(forwarding_pointer_value(obj
));
2051 switch (widetag_of(*tempspace_addr(new))) {
2053 // Fix displacement in JMP or CALL instruction.
2054 adjust_fdefn_entrypoint(tempspace_addr(new),
2055 (char*)new - (char*)obj
,
2056 (struct fdefn
*)obj
);
2058 case FUNCALLABLE_INSTANCE_HEADER_WIDETAG
:
2059 tempspace_addr(new)[1] = (lispobj
)(new + 4);
2064 #endif /* LISP_FEATURE_X86_64 */
2065 #endif /* DEFRAGMENT_FIXEDOBJ_SUBSPACE */
2067 #ifdef LISP_FEATURE_X86_64
2068 // Fix displacements in JMP and CALL instructions in code objects.
2069 // There are 2 arrays in use:
2070 // - the relocs[] array contains the address of any machine instruction
2071 // that needs to be altered on account of space relocation.
2072 // - the reloc_index[] array identifes the code component each reloc belongs to.
2073 // It is an array of pairs:
2074 // comp_ptr1, index1, comp_ptr2, index2 ... comp_ptrN, indexN, 0, index_max
2075 // The index following a component specifies the starting index within the
2076 // relocs[] array of the first reloc belonging to the respective component.
2077 // The ending reloc can be deduced from the next component's first reloc.
2078 for (i
= 0 ; immobile_space_reloc_index
[i
*2] ; ++i
) {
2079 lispobj code
= immobile_space_reloc_index
[i
*2] - OTHER_POINTER_LOWTAG
;
2081 if (code
>= READ_ONLY_SPACE_START
&& code
< READ_ONLY_SPACE_END
)
2082 load_addr
= code
; // This code can not be moved or GCed.
2084 load_addr
= (lispobj
)get_load_address((lispobj
*)code
);
2085 if (!load_addr
) continue; // Skip code that was dropped by GC
2086 int reloc_index
= immobile_space_reloc_index
[i
*2+1];
2087 int end_reloc_index
= immobile_space_reloc_index
[i
*2+3];
2088 for ( ; reloc_index
< end_reloc_index
; ++reloc_index
) {
2089 unsigned char* inst_addr
= (unsigned char*)(long)immobile_space_relocs
[reloc_index
];
2090 gc_assert(*inst_addr
== 0xE8 || *inst_addr
== 0xE9);
2091 unsigned int target_addr
= (int)(long)inst_addr
+ 5 + *(int*)(inst_addr
+1);
2092 int target_adjust
= 0;
2093 // Both this code and the jumped-to code can move.
2094 // For this component, adjust by the displacement by (old - new).
2095 // If the jump target moved, also adjust by its (new - old).
2096 // The target address can point to one of:
2097 // - an FDEFN raw addr slot (fixedobj subspace)
2098 // - funcallable-instance with self-contained trampoline (ditto)
2099 // - a simple-fun that was statically linked (varyobj subspace)
2100 if (immobile_space_p(target_addr
)) {
2101 lispobj
*obj
= target_addr
< IMMOBILE_VARYOBJ_SUBSPACE_START
2102 ? search_immobile_space((void*)(uword_t
)target_addr
)
2103 : defrag_search_varyobj_subspace(target_addr
);
2104 target_adjust
= (int)((char*)native_pointer(forwarding_pointer_value(obj
))
2107 // If the instruction to fix has moved, then adjust for
2108 // its new address, and perform the fixup in tempspace.
2109 // Otherwise perform the fixup where the instruction is now.
2110 char* fixup_loc
= (immobile_space_p((lispobj
)inst_addr
) ?
2111 (char*)tempspace_addr(inst_addr
- code
+ load_addr
) :
2112 (char*)inst_addr
) + 1;
2113 *(int*)fixup_loc
+= target_adjust
+ (code
- load_addr
);
2117 free(immobile_space_relocs
);
2118 free(immobile_space_reloc_index
);
2120 // Fix Lisp pointers in static, immobile, and dynamic spaces
2121 fixup_space((lispobj
*)STATIC_SPACE_START
,
2122 (SYMBOL(STATIC_SPACE_FREE_POINTER
)->value
2123 - STATIC_SPACE_START
) >> WORD_SHIFT
);
2125 // Objects in immobile space are physically at 'tempspace',
2126 // but logically at their natural address. Perform fixups
2127 // at their current physical address.
2128 #if DEFRAGMENT_FIXEDOBJ_SUBSPACE
2129 fixup_space((lispobj
*)fixedobj_tempspace
.start
,
2130 fixedobj_tempspace
.n_bytes
>> WORD_SHIFT
);
2132 fixup_space((lispobj
*)IMMOBILE_SPACE_START
,
2133 IMMOBILE_FIXEDOBJ_SUBSPACE_SIZE
>> WORD_SHIFT
);
2135 fixup_space((lispobj
*)varyobj_tempspace
.start
,
2136 varyobj_tempspace
.n_bytes
>> WORD_SHIFT
);
2139 // We can safely ignore allocation region boundaries.
2140 fixup_space((lispobj
*)DYNAMIC_SPACE_START
,
2143 dynamic_space_free_pointer
2145 SymbolValue(ALLOCATION_POINTER
,0)
2147 - DYNAMIC_SPACE_START
) >> WORD_SHIFT
);
2149 // Copy the spaces back where they belong.
2151 // Fixed-size objects: don't copy below the defrag_base - the first few
2152 // pages are totally static in regard to both lifetime and placement.
2153 // (It would "work" to copy them back - since they were copied into
2154 // the temp space, but it's just wasting time to do so)
2155 lispobj old_free_ptr
;
2157 #if DEFRAGMENT_FIXEDOBJ_SUBSPACE
2158 int n_static_bytes
= ((lispobj
)defrag_base
- IMMOBILE_SPACE_START
);
2159 memcpy((char*)defrag_base
,
2160 fixedobj_tempspace
.start
+ n_static_bytes
,
2161 fixedobj_tempspace
.n_bytes
- n_static_bytes
);
2162 // Zero-fill the unused remainder
2163 old_free_ptr
= SYMBOL(IMMOBILE_FIXEDOBJ_FREE_POINTER
)->value
;
2164 free_ptr
= IMMOBILE_SPACE_START
+ fixedobj_tempspace
.n_bytes
;
2165 bzero((char*)free_ptr
, old_free_ptr
- free_ptr
);
2166 SYMBOL(IMMOBILE_FIXEDOBJ_FREE_POINTER
)->value
= free_ptr
;
2169 // Variable-size object pages.
2170 memcpy((char*)IMMOBILE_VARYOBJ_SUBSPACE_START
,
2171 varyobj_tempspace
.start
, varyobj_tempspace
.n_bytes
);
2172 // Zero-fill the unused remainder
2173 old_free_ptr
= SYMBOL(IMMOBILE_SPACE_FREE_POINTER
)->value
;
2174 free_ptr
= IMMOBILE_VARYOBJ_SUBSPACE_START
+ varyobj_tempspace
.n_bytes
;
2175 bzero((char*)free_ptr
, old_free_ptr
- free_ptr
);
2176 SYMBOL(IMMOBILE_SPACE_FREE_POINTER
)->value
= free_ptr
;
2177 if (free_ptr
& (IMMOBILE_CARD_BYTES
-1)) { // unless page-aligned
2178 int remainder
= IMMOBILE_CARD_BYTES
- (free_ptr
& (IMMOBILE_CARD_BYTES
-1));
2179 ((lispobj
*)free_ptr
)[0] = SIMPLE_ARRAY_FIXNUM_WIDETAG
;
2180 ((lispobj
*)free_ptr
)[1] = make_fixnum((remainder
>> WORD_SHIFT
) - 2);
2185 // It's easy to mess things up, so assert correctness before saving a core.
2186 printf("verifying defrag\n");
2187 page_attributes_valid
= 0;
2189 printf("verify passed\n");
2191 free(fixedobj_tempspace
.start
);
2192 free(varyobj_tempspace
.start
);
2196 void verify_immobile_page_protection(int keep_gen
, int new_gen
)
2198 low_page_index_t page
;
2199 lispobj
* end
= (lispobj
*)SYMBOL(IMMOBILE_SPACE_FREE_POINTER
)->value
;
2200 low_page_index_t end_page
= find_immobile_page_index((char*)end
-1);
2203 for (page
= FIRST_VARYOBJ_PAGE
; page
<= end_page
; ++page
) {
2204 if (!varyobj_page_touched(page
)) {
2205 lispobj
* page_begin
= low_page_address(page
);
2206 lispobj
* page_end
= page_begin
+ WORDS_PER_PAGE
;
2207 // Assert that there are no old->young pointers.
2208 obj
= varyobj_scan_start(page
);
2209 // Never scan past the free pointer.
2210 // FIXME: It is is supposed to work to scan past the free pointer
2211 // on the last page, but the allocator needs to plop an array header there,
2212 // and sometimes it doesn't.
2213 lispobj
* varyobj_free_ptr
= (lispobj
*)(SYMBOL(IMMOBILE_SPACE_FREE_POINTER
)->value
);
2214 if (page_end
> varyobj_free_ptr
) page_end
= varyobj_free_ptr
;
2215 for ( ; obj
< page_end
; obj
+= sizetab
[widetag_of(*obj
)](obj
) ) {
2216 if (!immobile_filler_p(obj
)
2217 && varyobj_points_to_younger_p(obj
, __immobile_obj_gen_bits(obj
),
2219 (char*)page_begin
, (char*)page_end
))
2220 lose("page WP bit on page %d is wrong\n", page
);
2226 // Fixup immediate values that encode Lisp object addresses
2227 // in immobile space.
2228 #include "forwarding-ptr.h"
2229 #ifdef LISP_FEATURE_X86_64
2230 void fixup_immobile_refs(struct code
* code
)
2232 struct varint_unpacker fixups
;
2233 varint_unpacker_init(&fixups
, code
->fixups
);
2234 char* instructions
= (char*)((lispobj
*)code
+ code_header_words(code
->header
));
2235 int prev_loc
= 0, loc
;
2236 while (varint_unpack(&fixups
, &loc
) && loc
!= 0) {
2237 // For extra compactness, each loc is relative to the prior,
2238 // so that the magnitudes are smaller.
2241 int* fixup_where
= (int*)(instructions
+ loc
);
2242 lispobj ptr
= (lispobj
)(*fixup_where
);
2243 if (is_lisp_pointer(ptr
)) {
2244 if (forwarding_pointer_p(native_pointer(ptr
)))
2245 *fixup_where
= (int)
2246 forwarding_pointer_value(native_pointer(ptr
));
2248 gc_assert(IMMOBILE_SPACE_START
<= ptr
&&
2249 ptr
< (IMMOBILE_SPACE_START
+IMMOBILE_FIXEDOBJ_SUBSPACE_SIZE
));
2250 // It's an absolute interior pointer. search_immobile_space() works
2251 // at this point, because the page attributes on the pointee's page are valid
2252 lispobj
* obj
= search_immobile_space((void*)ptr
);
2253 if (forwarding_pointer_p(obj
)) {
2254 lispobj fpval
= forwarding_pointer_value(obj
);
2255 *fixup_where
= (int)(long)native_pointer(fpval
) + (ptr
- (lispobj
)obj
);
2262 #ifdef VERIFY_PAGE_GENS
2263 void check_fixedobj_page(int page
)
2265 // Every page should have a 'gens' mask which exactly reflects
2266 // the aggregate over all objects on that page. Verify that invariant,
2267 // checking all pages, not just the ones below the free pointer.
2268 int genmask
, obj_size
, obj_spacing
, i
, all_ok
= 1;
2269 lispobj
*obj
, *limit
, header
;
2270 int sees_younger
= 0;
2272 obj_size
= page_obj_size(page
);
2273 obj_spacing
= page_obj_align(page
);
2274 obj
= low_page_address(page
);
2275 limit
= obj
+ WORDS_PER_PAGE
- obj_spacing
;
2277 if (obj_size
== 0) {
2278 for (i
=0; i
<WORDS_PER_PAGE
; ++i
)
2279 gc_assert(obj
[i
]==0);
2280 gc_assert(fixedobj_pages
[page
].gens
==0);
2283 for ( ; obj
<= limit
; obj
+= obj_spacing
) {
2285 if (!fixnump(header
)) {
2286 int gen
= __immobile_obj_gen_bits(obj
);
2287 gc_assert(0 <= gen
&& gen
<= PSEUDO_STATIC_GENERATION
);
2289 if (fixedobj_points_to_younger_p(obj
, obj_size
, gen
, 0xff, 0xff))
2293 // It's not wrong if the gen0 bit is set spuriously, but it should only
2294 // happen at most once, on the first GC after image startup.
2295 // At all other times, the invariant should hold that if the freelist
2296 // indicated that space was available, and the new pointer differs,
2297 // then some gen0 object exists on the page.
2298 // The converse is true because of pseudo-atomicity of the allocator:
2299 // if some thread claimed a hole, then it also updated the freelist.
2300 // If it died before doing the latter, then the object allegedly created
2301 // was never really live, so won't contain any pointers.
2302 if (fixedobj_pages
[page
].gens
!= genmask
2303 && fixedobj_pages
[page
].gens
!= (genmask
|1)) {
2304 fprintf(stderr
, "Page #x%x @ %p: stored mask=%x actual=%x\n",
2305 page
, low_page_address(page
),
2306 fixedobj_pages
[page
].gens
, genmask
);
2309 if (fixedobj_page_wp(page
) && sees_younger
) {
2310 fprintf(stderr
, "Page #x%x @ %p: WP is wrong\n",
2311 page
, low_page_address(page
));
// State shared with the bsearch comparators below: the number of object
// headers found in the varyobj subspace, a sorted array of their addresses
// (stored as 32-bit values), and a pointer to the array's last element.
int n_immobile_objects;
int *immobile_objects, *immobile_objects_limit;
// bsearch comparator: exact equality of two keys.
// NOTE(review): keys are object addresses truncated to 'int' — assumes they
// fit in 32 bits and that the subtraction cannot overflow for valid inputs.
int comparator_eq(const void* a, const void* b) {
  return *(int*)a - *(int*)b;
}
2324 // Find the largest item less than or equal.
2325 // (useful for finding the object that contains a given pointer)
2326 int comparator_le(const void* a
, const void* b
) {
2327 int diff
= *(int*)a
- *(int*)b
;
2328 if (diff
<= 0) return diff
;
2329 // If looking to the right would see an item strictly greater
2330 // than the sought key, or there is nothing to the right,
2331 // then deem this an exact match.
2332 if (b
== (void*)immobile_objects_limit
|| ((int*)b
)[1] > *(int*)a
) return 0;
2336 // Find the smallest item greater than or equal.
2337 // useful for finding the lowest item at or after a page base address.
2338 int comparator_ge(const void* a
, const void* b
) {
2339 int diff
= *(int*)a
- *(int*)b
;
2340 if (diff
>= 0) return diff
;
2341 // If looking to the left would see an item strictly less
2342 // than the sought key, or there is nothing to the left
2343 // then deem this an exact match.
2344 if (b
== (void*)immobile_objects
|| ((int*)b
)[-1] < *(int*)a
) return 0;
2348 void check_varyobj_pages()
2350 // 1. Check that a linear scan sees only valid object headers,
2351 // and that it terminates exactly at IMMOBILE_CODE_FREE_POINTER.
2352 lispobj
* obj
= (lispobj
*)IMMOBILE_VARYOBJ_SUBSPACE_START
;
2353 lispobj
* end
= (lispobj
*)SYMBOL(IMMOBILE_SPACE_FREE_POINTER
)->value
;
2354 low_page_index_t end_page
= find_immobile_page_index((char*)end
-1);
2356 n_immobile_objects
= 0;
2358 lispobj word
= *obj
;
2359 gc_assert(other_immediate_lowtag_p(word
));
2360 int n_words
= sizetab
[widetag_of(word
)](obj
);
2362 ++n_immobile_objects
;
2364 gc_assert(obj
== end
);
2366 // 2. Check that all scan_start_offsets are plausible.
2367 // Begin by collecting all object header locations into an array;
2368 immobile_objects
= calloc(n_immobile_objects
, sizeof (lispobj
));
2369 immobile_objects_limit
= immobile_objects
+ n_immobile_objects
- 1;
2370 obj
= (lispobj
*)IMMOBILE_VARYOBJ_SUBSPACE_START
;
2373 immobile_objects
[i
++] = (lispobj
)obj
;
2374 lispobj word
= *obj
;
2375 int n_words
= sizetab
[widetag_of(word
)](obj
);
2378 // Check that each page's scan start is a known immobile object
2379 // and that it is the right object.
2380 low_page_index_t page
;
2381 for (page
= FIRST_VARYOBJ_PAGE
; page
<= end_page
; ++page
) {
2382 lispobj page_addr
= (lispobj
)low_page_address(page
);
2383 int* found_below
= bsearch(&page_addr
, immobile_objects
, n_immobile_objects
,
2384 sizeof (int), comparator_le
);
2385 int* found_above
= bsearch(&page_addr
, immobile_objects
, n_immobile_objects
,
2386 sizeof (int), comparator_ge
);
2387 int stored_scan_start
= (int)(long)varyobj_scan_start(page
);
2388 lispobj
* scan_start_obj
= (lispobj
*)(long)*found_below
;
2389 if (scan_start_obj
!= (lispobj
*)(long)stored_scan_start
) {
2390 //printf("page %d: found-below=%p stored=%p\n", page, scan_start_obj, stored_scan_start);
2391 while (immobile_filler_p(scan_start_obj
)) {
2392 int nwords
= sizetab
[widetag_of(*scan_start_obj
)](scan_start_obj
);
2393 // printf("skipping %d words to %p\n", nwords, scan_start_obj + nwords);
2394 scan_start_obj
+= nwords
;
2395 // the stored scan start does not guarantee that it points
2396 // to a non-hole; we only assert that it *probably* does not.
2397 // As such, when computing the "correct" value, we allow
2398 // any value in between the legal bounding values for it.
2399 if ((int)(long)scan_start_obj
== stored_scan_start
)
2401 // If you hit the free pointer, or run off the page,
2402 // then the page is completely empty.
2403 if (scan_start_obj
== (lispobj
*)SYMBOL(IMMOBILE_SPACE_FREE_POINTER
)->value
2404 || scan_start_obj
>= (lispobj
*)low_page_address(page
+1)) {
2405 scan_start_obj
= low_page_address(page
+1);
2410 if (scan_start_obj
!= (lispobj
*)(long)stored_scan_start
)
2411 lose("page %d: stored_scan_start=%p does not match found %p\n",
2412 page
, stored_scan_start
, *found_below
);
2413 if (found_below
!= found_above
) {
2414 // the object below must touch this page.
2415 // if it didn't, there should be a higher object below.
2416 lispobj
* below
= (lispobj
*)(long)*found_below
;
2417 int n_words
= sizetab
[widetag_of(*below
)](below
);
2418 lispobj
* end
= below
+ n_words
;
2419 gc_assert(end
> (lispobj
*)page_addr
);
2422 free(immobile_objects
);
2424 // 3. The generation mask for each page is exactly the union
2425 // of generation numbers of object headers on the page.
2426 for (page
= FIRST_VARYOBJ_PAGE
; page
<= end_page
; ++page
) {
2427 if (!varyobj_page_scan_start_offset
[page
- FIRST_VARYOBJ_PAGE
])
2428 continue; // page is all holes or never used
2429 obj
= varyobj_scan_start(page
);
2430 lispobj word
= *obj
;
2431 int n_words
= sizetab
[widetag_of(word
)](obj
);
2432 // Skip the first object if it doesn't start on this page.
2433 if (obj
< (lispobj
*)low_page_address(page
)) obj
+= n_words
;
2434 lispobj
* limit
= (lispobj
*)low_page_address(page
) + WORDS_PER_PAGE
;
2435 lispobj
* freeptr
= (lispobj
*)SYMBOL(IMMOBILE_SPACE_FREE_POINTER
)->value
;
2436 if (limit
> freeptr
) limit
= freeptr
;
2438 for ( ; obj
< limit
; obj
+= sizetab
[widetag_of(*obj
)](obj
) ) {
2439 int gen
= __immobile_obj_gen_bits(obj
);
2440 if (immobile_filler_p(obj
)) {
2441 gc_assert(gen
== 0);
2443 gc_assert(0 <= gen
&& gen
<= PSEUDO_STATIC_GENERATION
);
2447 gc_assert(mask
== VARYOBJ_PAGE_GENS(page
));