2 * Garbage Collection common functions for scavenging, moving and sizing
3 * objects. These are for use with both GC (stop & copy GC) and GENCGC
7 * This software is part of the SBCL system. See the README file for
10 * This software is derived from the CMU CL system, which was
11 * written at Carnegie Mellon University and released into the
12 * public domain. The software is in the public domain and is
13 * provided with absolutely no warranty. See the COPYING and CREDITS
14 * files for more information.
18 * For a review of garbage collection techniques (e.g. generational
19 * GC) and terminology (e.g. "scavenging") see Paul R. Wilson,
20 * "Uniprocessor Garbage Collection Techniques". As of 20000618, this
21 * had been accepted for _ACM Computing Surveys_ and was available
22 * as a PostScript preprint through
23 * <http://www.cs.utexas.edu/users/oops/papers.html>
25 * <ftp://ftp.cs.utexas.edu/pub/garbage/bigsurv.ps>.
36 #include "interrupt.h"
41 #include "hopscotch.h"
42 #include "genesis/primitive-objects.h"
43 #include "genesis/static-symbols.h"
44 #include "genesis/layout.h"
45 #include "genesis/hash-table.h"
46 #define WANT_SCAV_TRANS_SIZE_TABLES
47 #include "gc-internal.h"
48 #include "forwarding-ptr.h"
51 #ifdef LISP_FEATURE_SPARC
52 #define LONG_FLOAT_SIZE 4
53 #elif defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
54 #define LONG_FLOAT_SIZE 3
/* NOTE(review): this file is garbled by extraction -- the original line
 * numbers are fused into the text and statements are split across physical
 * lines.  Code is kept byte-identical throughout; only comments are added. */
/* Space sizing knobs; defaults may be overridden at runtime startup. */
57 os_vm_size_t dynamic_space_size
= DEFAULT_DYNAMIC_SPACE_SIZE
;
58 os_vm_size_t thread_control_stack_size
= DEFAULT_CONTROL_STACK_SIZE
;
/* Per-widetag dispatch tables (indexed by widetag, 256 entries):
 * scavtab = scavenge, transother = transport, sizetab = object size. */
60 sword_t (*scavtab
[256])(lispobj
*where
, lispobj object
);
61 lispobj (*transother
[256])(lispobj object
);
62 sword_t (*sizetab
[256])(lispobj
*where
);
/* Head of the chain of weak pointers seen during the current collection,
 * processed afterwards by scan_weak_pointers(). */
63 struct weak_pointer
*weak_pointers
;
/* Default GC trigger: this much consing between collections. */
65 os_vm_size_t bytes_consed_between_gcs
= 12*1024*1024;
67 /// These sizing macros return the number of *payload* words,
68 /// exclusive of the object header word. Payload length is always
69 /// an odd number so that total word count is an even number.
70 #define BOXED_NWORDS(obj) (HeaderValue(obj) | 1)
71 // Payload count expressed in 15 bits
72 #define SHORT_BOXED_NWORDS(obj) ((HeaderValue(obj) & SHORT_HEADER_MAX_WORDS) | 1)
73 // Payload count expressed in 8 bits
74 #define TINY_BOXED_NWORDS(obj) ((HeaderValue(obj) & 0xFF) | 1)
80 /* gc_general_copy_object is inline from gc-internal.h */
82 /* to copy a boxed object */
84 copy_object(lispobj object
, sword_t nwords
)
86 return gc_general_copy_object(object
, nwords
, BOXED_PAGE_FLAG
);
/* Forward declaration: scav_lose() is the "corrupt widetag" handler. */
89 static sword_t
scav_lose(lispobj
*where
, lispobj object
); /* forward decl */
/* Scavenge the single (already known to be is_lisp_pointer) word at
 * *object_ptr: if the referent is in oldspace, either snap the slot to
 * the already-forwarded copy or transport the referent via scavtab[].
 * NOTE(review): several interior lines of this function are missing from
 * the extracted text (e.g. the 'page' declaration and some braces);
 * code left byte-identical. */
91 static inline void scav1(lispobj
* object_ptr
, lispobj object
)
94 // * With 32-bit words, is_lisp_pointer(object) returns true if object_ptr
95 // points to a forwarding pointer, so we need a sanity check inside the
96 // branch for is_lisp_pointer(). For maximum efficiency, check that only
97 // after from_space_p() returns false, so that valid pointers into
98 // from_space incur no extra test. This could be improved further by
99 // skipping the FP check if 'object' points within dynamic space, i.e.,
100 // when find_page_index() returns >= 0. That would entail injecting
101 // from_space_p() explicitly into the loop, so as to separate the
102 // "was a page found at all" condition from the page generation test.
104 // * With 64-bit words, is_lisp_pointer(object) is false when object_ptr
105 // points to a forwarding pointer, and the fixnump() test also returns
106 // false, so we'll indirect through scavtab[]. This will safely invoke
107 // scav_lose(), detecting corruption without any extra cost.
108 // The major difference between that and the explicit test is that you
109 // won't see 'start' and 'n_words', but if you need those, chances are
110 // you'll want to run under an external debugger in the first place.
111 // [And btw it sure would be nice to assert statically
112 // that is_lisp_pointer(0x01) is indeed false]
114 #define FIX_POINTER() { \
115 lispobj *ptr = native_pointer(object); \
116 if (forwarding_pointer_p(ptr)) \
117 *object_ptr = LOW_WORD(forwarding_pointer_value(ptr)); \
118 else /* Scavenge that pointer. */ \
119 (void)scavtab[widetag_of(object)](object_ptr, object); \
121 #ifdef LISP_FEATURE_IMMOBILE_SPACE
123 // It would be fine, though suboptimal, to use from_space_p() here.
124 // If it returns false, we don't want to call immobile_space_p()
125 // unless the pointer is *not* into dynamic space.
126 if ((page
= find_page_index((void*)object
)) >= 0) {
127 if (page_table
[page
].gen
== from_space
&& !pinned_p(object
, page
))
129 } else if (immobile_space_p(object
)) {
130 lispobj
*ptr
= native_pointer(object
);
/* Immobile objects are promoted in place rather than copied. */
131 if (immobile_obj_gen_bits(ptr
) == from_space
)
132 promote_immobile_obj(ptr
, 1);
135 if (from_space_p(object
)) {
138 #if (N_WORD_BITS == 32) && defined(LISP_FEATURE_GENCGC)
139 if (forwarding_pointer_p(object_ptr
))
140 lose("unexpected forwarding pointer in scavenge @ %p\n",
143 /* It points somewhere other than oldspace. Leave it
149 // Scavenge a block of memory from 'start' to 'end'
150 // that may contain object headers.
/* Walks the heap range object by object: a header dispatches through
 * scavtab[] (which returns the object's word count); anything else is
 * treated as a cons and both halves are scavenged individually.
 * NOTE(review): 'object_ptr' declaration and some braces are missing
 * from the extracted text; code left byte-identical. */
151 void heap_scavenge(lispobj
*start
, lispobj
*end
)
155 for (object_ptr
= start
; object_ptr
< end
;) {
156 lispobj object
= *object_ptr
;
157 if (other_immediate_lowtag_p(object
))
158 /* It's some sort of header object or another. */
159 object_ptr
+= (scavtab
[widetag_of(object
)])(object_ptr
, object
);
160 else { // it's a cons
161 if (is_lisp_pointer(object
))
162 scav1(object_ptr
, object
);
163 object
= *++object_ptr
;
164 if (is_lisp_pointer(object
))
165 scav1(object_ptr
, object
);
169 // This assertion is usually the one that fails when something
170 // is subtly wrong with the heap, so definitely always do it.
171 gc_assert_verbose(object_ptr
== end
, "Final object pointer %p, start %p, end %p\n",
172 object_ptr
, start
, end
);
175 // Scavenge a block of memory from 'start' extending for 'n_words'
176 // that must not contain any object headers.
/* Each word is examined independently; only tagged pointers are passed
 * to scav1().  NOTE(review): the return statement (presumably returning
 * n_words, given callers like DEF_SCAV_BOXED compute '1 + scavenge(...)')
 * is missing from the extracted text -- confirm against upstream. */
177 sword_t
scavenge(lispobj
*start
, sword_t n_words
)
179 lispobj
*end
= start
+ n_words
;
181 for (object_ptr
= start
; object_ptr
< end
; object_ptr
++) {
182 lispobj object
= *object_ptr
;
183 if (is_lisp_pointer(object
)) scav1(object_ptr
, object
);
/* Scavenge a thread's binding stack.  With SB-THREAD, entries alternate
 * (value, TLS-index); only every other word can be a pointer, so the
 * loop strides by 2.  Without threads it degenerates to plain scavenge(). */
188 void scav_binding_stack(lispobj
* where
, lispobj
* end
)
190 #ifdef LISP_FEATURE_SB_THREAD
191 // The binding stack stores TLS indices where symbols would be,
192 // and there's no reason to scavenge those words since they're fixnums.
193 // This means a symbol can not be enlivened if it exists *solely* on
194 // the binding stack - which is, practically speaking, impossible.
196 for (object_ptr
= where
; object_ptr
< end
; object_ptr
+= 2) {
197 lispobj object
= *object_ptr
;
198 if (is_lisp_pointer(object
)) scav1(object_ptr
, object
);
201 scavenge(where
, end
- where
);
205 static lispobj
trans_fun_header(lispobj object
); /* forward decls */
206 static lispobj
trans_short_boxed(lispobj object
);
/* Scavenge a FUN_POINTER_LOWTAG slot: transport the referent (a
 * simple-fun header goes through trans_fun_header, which moves the whole
 * enclosing code object; anything else -- closure or funcallable
 * instance -- through trans_short_boxed) and leave a forwarding pointer. */
209 scav_fun_pointer(lispobj
*where
, lispobj object
)
211 gc_dcheck(lowtag_of(object
) == FUN_POINTER_LOWTAG
);
213 /* Object is a pointer into from_space - not a FP. */
214 lispobj
*first_pointer
= native_pointer(object
);
216 /* must transport object -- object may point to either a function
217 * header, a funcallable instance header, or a closure header. */
218 lispobj copy
= widetag_of(*first_pointer
) == SIMPLE_FUN_WIDETAG
219 ? trans_fun_header(object
) : trans_short_boxed(object
);
221 if (copy
!= object
) {
222 /* Set forwarding pointer */
223 set_forwarding_pointer(first_pointer
,copy
);
226 CHECK_COPY_POSTCONDITIONS(copy
, FUN_POINTER_LOWTAG
);
/* Transport a code object (header + boxed constants + instructions) to a
 * code page.  Also forwards every embedded simple-fun header, fixes
 * their self-pointers, flushes the icache over the new instructions,
 * and applies gencgc code fixups.
 * NOTE(review): the return type line, several #endif/brace lines, and
 * parts of the self-pointer fixup are missing from the extracted text;
 * code left byte-identical. */
235 trans_code(struct code
*code
)
237 /* if object has already been transported, just return pointer */
238 if (forwarding_pointer_p((lispobj
*)code
)) {
240 printf("Was already transported\n");
242 return (struct code
*)native_pointer(forwarding_pointer_value((lispobj
*)code
));
245 gc_dcheck(widetag_of(code
->header
) == CODE_HEADER_WIDETAG
);
247 /* prepare to transport the code vector */
248 lispobj l_code
= (lispobj
) LOW_WORD(code
) | OTHER_POINTER_LOWTAG
;
249 sword_t nheader_words
= code_header_words(code
->header
);
250 sword_t ncode_words
= code_instruction_words(code
->code_size
);
251 sword_t nwords
= nheader_words
+ ncode_words
;
252 lispobj l_new_code
= gc_general_copy_object(l_code
, nwords
, CODE_PAGE_FLAG
);
253 struct code
*new_code
= (struct code
*) native_pointer(l_new_code
);
255 #if defined(DEBUG_CODE_GC)
256 printf("Old code object at 0x%08x, new code object at 0x%08x.\n",
257 (uword_t
) code
, (uword_t
) new_code
);
258 printf("Code object is %d words long.\n", nwords
);
261 #ifdef LISP_FEATURE_GENCGC
/* Large code may be promoted in place; then there is nothing to fix. */
262 if (new_code
== code
)
266 set_forwarding_pointer((lispobj
*)code
, l_new_code
);
268 /* set forwarding pointers for all the function headers in the */
269 /* code object. also fix all self pointers */
270 /* Do this by scanning the new code, since the old header is unusable */
272 uword_t displacement
= l_new_code
- l_code
;
274 for_each_simple_fun(i
, nfheaderp
, new_code
, 1, {
275 /* Calculate the old raw function pointer */
276 struct simple_fun
* fheaderp
=
277 (struct simple_fun
*)LOW_WORD((char*)nfheaderp
- displacement
);
278 /* Calculate the new lispobj */
279 lispobj nfheaderl
= make_lispobj(nfheaderp
, FUN_POINTER_LOWTAG
);
282 printf("fheaderp->header (at %x) <- %x\n",
283 &(fheaderp
->header
) , nfheaderl
);
285 set_forwarding_pointer((lispobj
*)fheaderp
, nfheaderl
);
287 /* fix self pointer. */
289 #if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
/* On x86oids the self slot holds a raw instruction address, hence the
 * FUN_RAW_ADDR_OFFSET adjustment. */
290 FUN_RAW_ADDR_OFFSET
+
294 #ifdef LISP_FEATURE_GENCGC
295 /* Cheneygc doesn't need this os_flush_icache, it flushes the whole
296 spaces once when all copying is done. */
297 os_flush_icache((os_vm_address_t
) (((sword_t
*)new_code
) + nheader_words
),
298 ncode_words
* sizeof(sword_t
));
302 gencgc_apply_code_fixups(code
, new_code
);
/* Scavenge a code object in place: its boxed constants plus the boxed
 * section of each embedded simple-fun.  Returns the total word count so
 * heap_scavenge() can step past the whole object.
 * NOTE(review): return-type line missing from extracted text. */
308 scav_code_header(lispobj
*where
, lispobj header
)
310 struct code
*code
= (struct code
*) where
;
311 sword_t n_header_words
= code_header_words(header
);
313 /* Scavenge the boxed section of the code data block. */
314 scavenge(where
+ 1, n_header_words
- 1);
316 /* Scavenge the boxed section of each function object in the
317 * code data block. */
318 for_each_simple_fun(i
, function_ptr
, code
, 1, {
319 scavenge(SIMPLE_FUN_SCAV_START(function_ptr
),
320 SIMPLE_FUN_SCAV_NWORDS(function_ptr
));
323 return n_header_words
+ code_instruction_words(code
->code_size
);
327 trans_code_header(lispobj object
)
329 struct code
*ncode
= trans_code((struct code
*) native_pointer(object
));
330 return (lispobj
) LOW_WORD(ncode
) | OTHER_POINTER_LOWTAG
;
/* Total size in words of a code object: boxed header words plus
 * instruction words.  NOTE(review): return-type line missing from the
 * extracted text. */
334 size_code_header(lispobj
*where
)
336 return code_header_words(((struct code
*)where
)->header
)
337 + code_instruction_words(((struct code
*)where
)->code_size
);
340 #ifdef RETURN_PC_WIDETAG
/* A return-PC header should never be reached by the scavenger directly;
 * hitting one indicates heap corruption, so bail loudly. */
342 scav_return_pc_header(lispobj
*where
, lispobj object
)
344 lose("attempted to scavenge a return PC header where=%p object=%#lx\n",
345 where
, (uword_t
) object
);
346 return 0; /* bogus return value to satisfy static type checking */
/* Transport a return-PC by transporting its enclosing code object and
 * re-deriving the interior pointer from the recorded header offset. */
350 trans_return_pc_header(lispobj object
)
352 struct simple_fun
*return_pc
= (struct simple_fun
*) native_pointer(object
);
353 uword_t offset
= HeaderValue(return_pc
->header
) * N_WORD_BYTES
;
355 /* Transport the whole code object */
356 struct code
*code
= (struct code
*) ((uword_t
) return_pc
- offset
);
357 struct code
*ncode
= trans_code(code
);
359 return ((lispobj
) LOW_WORD(ncode
) + offset
) | OTHER_POINTER_LOWTAG
;
361 #endif /* RETURN_PC_WIDETAG */
363 /* On the 386, closures hold a pointer to the raw address instead of the
364 * function object, so we can use CALL [$FDEFN+const] to invoke
365 * the function without loading it into a register. Given that code
366 * objects don't move, we don't need to update anything, but we do
367 * have to figure out that the function is still live. */
369 #if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
/* x86oid closure scavenger: convert the raw entry address back to a
 * tagged function pointer, scavenge/update it, then scavenge the rest
 * of the payload.  NOTE(review): the scavenge(&fun,...) call and some
 * braces are missing from the extracted text. */
371 scav_closure(lispobj
*where
, lispobj header
)
373 struct closure
*closure
= (struct closure
*)where
;
374 int payload_words
= SHORT_BOXED_NWORDS(header
);
375 lispobj fun
= closure
->fun
- FUN_RAW_ADDR_OFFSET
;
377 #ifdef LISP_FEATURE_GENCGC
378 /* The function may have moved so update the raw address. But
379 * don't write unnecessarily. */
380 if (closure
->fun
!= fun
+ FUN_RAW_ADDR_OFFSET
)
381 closure
->fun
= fun
+ FUN_RAW_ADDR_OFFSET
;
383 // Payload includes 'fun' which was just looked at, so subtract it.
384 scavenge(closure
->info
, payload_words
- 1);
385 return 1 + payload_words
;
389 #if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
/* Non-x86oid: a simple-fun header is interior to a code object and must
 * never be scavenged on its own. */
391 scav_fun_header(lispobj
*where
, lispobj object
)
393 lose("attempted to scavenge a function header where=%p object=%#lx\n",
394 where
, (uword_t
) object
);
395 return 0; /* bogus return value to satisfy static type checking */
397 #endif /* LISP_FEATURE_X86 */
/* Transport a simple-fun by transporting its enclosing code object and
 * re-deriving the interior pointer from the header's word offset.
 * NOTE(review): the 'uword_t offset =' declaration line is missing from
 * the extracted text; code left byte-identical. */
400 trans_fun_header(lispobj object
)
402 struct simple_fun
*fheader
= (struct simple_fun
*) native_pointer(object
);
404 (HeaderValue(fheader
->header
) & FUN_HEADER_NWORDS_MASK
) * N_WORD_BYTES
;
406 /* Transport the whole code object */
407 struct code
*code
= (struct code
*) ((uword_t
) fheader
- offset
);
408 struct code
*ncode
= trans_code(code
);
410 return ((lispobj
) LOW_WORD(ncode
) + offset
) | FUN_POINTER_LOWTAG
;
/* Transport an instance: payload length is rounded up to odd so the
 * total (with header) is an even number of words. */
419 trans_instance(lispobj object
)
421 gc_dcheck(lowtag_of(object
) == INSTANCE_POINTER_LOWTAG
);
422 lispobj header
= *(lispobj
*)(object
- INSTANCE_POINTER_LOWTAG
);
423 return copy_object(object
, 1 + (instance_length(header
)|1));
/* Scavenge an INSTANCE_POINTER_LOWTAG slot: transport and leave a FP. */
427 scav_instance_pointer(lispobj
*where
, lispobj object
)
429 /* Object is a pointer into from space - not a FP. */
430 lispobj copy
= trans_instance(object
);
432 gc_dcheck(copy
!= object
);
434 set_forwarding_pointer(native_pointer(object
), copy
);
445 static lispobj
trans_list(lispobj object
);
/* Scavenge a LIST_POINTER_LOWTAG slot; trans_list() itself installs the
 * forwarding pointer(s), so none is set here. */
448 scav_list_pointer(lispobj
*where
, lispobj object
)
450 gc_dcheck(lowtag_of(object
) == LIST_POINTER_LOWTAG
);
452 lispobj copy
= trans_list(object
);
453 gc_dcheck(copy
!= object
);
455 CHECK_COPY_POSTCONDITIONS(copy
, LIST_POINTER_LOWTAG
);
/* Transport a cons, then greedily follow and copy the cdr chain so the
 * list ends up linear in tospace (better locality).  Each copied cell
 * gets a forwarding pointer; an already-forwarded cdr is snapped.
 * NOTE(review): loop-exit lines and some braces are missing from the
 * extracted text; code left byte-identical. */
463 trans_list(lispobj object
)
466 struct cons
*copy
= (struct cons
*)
467 gc_general_alloc(sizeof(struct cons
), BOXED_PAGE_FLAG
, ALLOC_QUICK
);
468 lispobj new_list_pointer
= make_lispobj(copy
, LIST_POINTER_LOWTAG
);
469 copy
->car
= CONS(object
)->car
;
470 /* Grab the cdr: set_forwarding_pointer will clobber it in GENCGC */
471 lispobj cdr
= CONS(object
)->cdr
;
472 set_forwarding_pointer((lispobj
*)CONS(object
), new_list_pointer
);
474 /* Try to linearize the list in the cdr direction to help reduce
476 while (lowtag_of(cdr
) == LIST_POINTER_LOWTAG
&& from_space_p(cdr
)) {
477 lispobj
* native_cdr
= (lispobj
*)CONS(cdr
);
478 if (forwarding_pointer_p(native_cdr
)) { // Might as well fix now.
479 cdr
= forwarding_pointer_value(native_cdr
);
483 struct cons
*cdr_copy
= (struct cons
*)
484 gc_general_alloc(sizeof(struct cons
), BOXED_PAGE_FLAG
, ALLOC_QUICK
);
485 cdr_copy
->car
= ((struct cons
*)native_cdr
)->car
;
486 /* Grab the cdr before it is clobbered. */
487 lispobj next
= ((struct cons
*)native_cdr
)->cdr
;
488 /* Set cdr of the predecessor, and store an FP. */
489 set_forwarding_pointer(native_cdr
,
490 copy
->cdr
= make_lispobj(cdr_copy
,
491 LIST_POINTER_LOWTAG
));
496 return new_list_pointer
;
501 * scavenging and transporting other pointers
/* Scavenge an OTHER_POINTER_LOWTAG slot: dispatch to the per-widetag
 * transporter and install a forwarding pointer if the object moved. */
505 scav_other_pointer(lispobj
*where
, lispobj object
)
507 gc_dcheck(lowtag_of(object
) == OTHER_POINTER_LOWTAG
);
509 /* Object is a pointer into from space - not FP. */
510 lispobj
*first_pointer
= (lispobj
*)(object
- OTHER_POINTER_LOWTAG
);
511 lispobj copy
= transother
[widetag_of(*first_pointer
)](object
);
513 // If the object was large, then instead of transporting it,
514 // gencgc might simply promote the pages and return the same pointer.
515 // That decision is made in general_copy_large_object().
516 if (copy
!= object
) {
517 set_forwarding_pointer(first_pointer
, copy
);
518 #ifdef LISP_FEATURE_GENCGC
522 #ifndef LISP_FEATURE_GENCGC
525 CHECK_COPY_POSTCONDITIONS(copy
, OTHER_POINTER_LOWTAG
);
530 * immediate, boxed, and unboxed objects
533 /* The immediate object scavenger basically wants to be "scav_cons",
534 * and so returns 2. To see why it's right, observe that scavenge() will
535 * not invoke a scavtab entry on any object except for one satisfying
536 * is_lisp_pointer(). So if a scavtab[] function got here,
537 * then it must be via heap_scavenge(). But heap_scavenge() should only
538 * dispatch via scavtab[] if it thought it saw an object header.
539 * So why do we act like it saw a cons? Because conses can contain an
540 * immediate object that satisfies both other_immediate_lowtag_p()
541 * and is_lisp_immediate(), namely, the objects specifically mentioned at
542 * is_cons_half(). So heap_scavenge() is nearly testing is_cons_half()
543 * but even more efficiently, by ignoring the unusual immediate widetags
544 * until we get to scav_immediate.
546 * And just to hammer the point home: we won't blow past the end of a specific
547 * range of words when scavenging a binding or control stack or anything else,
548 * because scavenge() skips immediate objects all by itself,
549 * or rather it skips anything not satisfying is_lisp_pointer().
551 * As to the unbound marker, see rev. 09c78105eabc6bf2b339f421d4ed1df4678003db
552 * which says that we might see it in conses for reasons somewhat unknown.
/* Acts as "scav_cons": per the long comment above, this is only reached
 * when heap_scavenge() mistook a cons half for a header.  It scavenges
 * both halves of the cons (second-half lines missing from extraction)
 * and should return 2. */
555 scav_immediate(lispobj
*where
, lispobj object
)
558 if (is_lisp_pointer(object
)) scav1(where
, object
);
/* Immediates are never transported; reaching here is a GC bug. */
563 trans_immediate(lispobj object
)
565 lose("trying to transport an immediate\n");
566 return NIL
; /* bogus return value to satisfy static type checking */
/* Sizer counterpart of scav_immediate; body missing from extraction
 * (presumably 'return 2;' to match the cons treatment -- confirm). */
570 size_immediate(lispobj
*where
)
575 static inline boolean
bignum_logbitp_inline(int index
, struct bignum
* bignum
)
577 int len
= HeaderValue(bignum
->header
);
578 int word_index
= index
/ N_WORD_BITS
;
579 int bit_index
= index
% N_WORD_BITS
;
580 return word_index
< len
? (bignum
->digits
[word_index
] >> bit_index
) & 1 : 0;
/* Like bignum_logbitp_inline, but tolerates the bignum having already
 * been transported during this GC: follow the forwarding pointer first. */
582 boolean
positive_bignum_logbitp(int index
, struct bignum
* bignum
)
584 /* If the bignum in the layout has another pointer to it (besides the layout)
585 acting as a root, and which is scavenged first, then transporting the
586 bignum causes the layout to see a FP, as would copying an instance whose
587 layout that is. This is a nearly impossible scenario to create organically
588 in Lisp, because mostly nothing ever looks again at that exact (EQ) bignum
589 except for a few things that would cause it to be pinned anyway,
590 such as it being kept in a local variable during structure manipulation.
591 See 'interleaved-raw.impure.lisp' for a way to trigger this */
592 if (forwarding_pointer_p((lispobj
*)bignum
)) {
593 lispobj forwarded
= forwarding_pointer_value((lispobj
*)bignum
);
595 fprintf(stderr
, "GC bignum_logbitp(): fwd from %p to %p\n",
596 (void*)bignum
, (void*)forwarded
);
598 bignum
= (struct bignum
*)native_pointer(forwarded
);
600 return bignum_logbitp_inline(index
, bignum
);
603 // Helper function for stepping through the tagged slots of an instance in
604 // scav_instance and verify_space.
/* Applies 'proc' to each tagged slot selected by the layout bitmap:
 * fixnum -1 means all slots; any other fixnum is a shifted bit vector;
 * a bignum bitmap (possibly already forwarded) is probed bit by bit.
 * NOTE(review): return type and 'index' declaration are missing from
 * the extracted text. */
606 instance_scan(void (*proc
)(lispobj
*, sword_t
),
607 lispobj
*instance_slots
,
608 sword_t nslots
, /* number of payload words */
609 lispobj layout_bitmap
)
613 if (fixnump(layout_bitmap
)) {
614 if (layout_bitmap
== make_fixnum(-1))
615 proc(instance_slots
, nslots
);
617 sword_t bitmap
= (sword_t
)layout_bitmap
>> N_FIXNUM_TAG_BITS
; // signed integer!
618 for (index
= 0; index
< nslots
; index
++, bitmap
>>= 1)
620 proc(instance_slots
+ index
, 1);
622 } else { /* huge bitmap */
623 struct bignum
* bitmap
;
624 bitmap
= (struct bignum
*)native_pointer(layout_bitmap
);
625 if (forwarding_pointer_p((lispobj
*)bitmap
))
626 bitmap
= (struct bignum
*)
627 native_pointer(forwarding_pointer_value((lispobj
*)bitmap
));
628 for (index
= 0; index
< nslots
; index
++)
629 if (bignum_logbitp_inline(index
, bitmap
))
630 proc(instance_slots
+ index
, 1);
/* Scavenge an instance in place, honoring the layout's slot bitmap so
 * raw (untagged) slots are skipped.  The layout itself is kept live
 * first (promoted in immobile space, or read through its FP).
 * NOTE(review): return type, several lines of the inline-bitmap fast
 * loop, and the return statement are missing from the extracted text. */
635 scav_instance(lispobj
*where
, lispobj header
)
637 lispobj
* layout
= (lispobj
*)instance_layout(where
);
638 lispobj lbitmap
= make_fixnum(-1);
641 layout
= native_pointer((lispobj
)layout
);
642 #ifdef LISP_FEATURE_COMPACT_INSTANCE_HEADER
643 if (__immobile_obj_gen_bits(layout
) == from_space
)
644 promote_immobile_obj(layout
, 1);
646 if (forwarding_pointer_p(layout
))
647 layout
= native_pointer(forwarding_pointer_value(layout
));
649 lbitmap
= ((struct layout
*)layout
)->bitmap
;
651 sword_t nslots
= instance_length(header
) | 1;
652 if (lbitmap
== make_fixnum(-1))
653 scavenge(where
+1, nslots
);
654 else if (!fixnump(lbitmap
))
655 instance_scan((void(*)(lispobj
*,sword_t
))scavenge
,
656 where
+1, nslots
, lbitmap
);
658 sword_t bitmap
= (sword_t
)lbitmap
>> N_FIXNUM_TAG_BITS
; // signed integer!
661 for ( ; n
-- ; bitmap
>>= 1) {
663 if ((bitmap
& 1) && is_lisp_pointer(obj
= *where
))
670 #ifdef LISP_FEATURE_COMPACT_INSTANCE_HEADER
/* Funcallable instances share scav_instance when the layout lives in
 * the header word. */
672 scav_funinstance(lispobj
*where
, lispobj header
)
674 // This works because the layout is in the header word of all instances,
675 // ordinary and funcallable, when compact headers are enabled.
676 // The trampoline slot in the funcallable-instance is raw, but can be
677 // scavenged, because it points to readonly space, never oldspace.
678 // (And for certain backends it looks like a fixnum, not a pointer)
679 return scav_instance(where
, header
);
683 //// Boxed object scav/trans/size functions
/* Generates the scav_/trans_/size_ triple for a purely boxed object
 * whose payload size is computed by 'sizer' from the header word. */
685 #define DEF_SCAV_BOXED(suffix, sizer) \
686 static sword_t __attribute__((unused)) \
687 scav_##suffix(lispobj *where, lispobj header) { \
688 return 1 + scavenge(where+1, sizer(header)); \
690 static lispobj trans_##suffix(lispobj object) { \
691 return copy_object(object, 1 + sizer(*native_pointer(object))); \
693 static sword_t size_##suffix(lispobj *where) { return 1 + sizer(*where); }
695 DEF_SCAV_BOXED(boxed
, BOXED_NWORDS
)
696 DEF_SCAV_BOXED(short_boxed
, SHORT_BOXED_NWORDS
)
697 DEF_SCAV_BOXED(tiny_boxed
, TINY_BOXED_NWORDS
)
699 /* Note: on the sparc we don't have to do anything special for fdefns, */
700 /* 'cause the raw-addr has a function lowtag. */
701 #if !defined(LISP_FEATURE_SPARC) && !defined(LISP_FEATURE_ARM)
/* Scavenge an fdefn: the 'name' and 'fun' slots are ordinary boxed
 * words, but 'raw_addr' is a raw code address that must be converted to
 * a tagged function pointer, enlivened, and written back if it moved.
 * NOTE(review): return value and some brace/#endif lines are missing
 * from the extracted text. */
703 scav_fdefn(lispobj
*where
, lispobj object
)
705 struct fdefn
*fdefn
= (struct fdefn
*)where
;
707 /* FSHOW((stderr, "scav_fdefn, function = %p, raw_addr = %p\n",
708 fdefn->fun, fdefn->raw_addr)); */
710 scavenge(where
+ 1, 2); // 'name' and 'fun'
711 #ifndef LISP_FEATURE_IMMOBILE_CODE
/* raw_addr at or below READ_ONLY_SPACE_END is the undefined-function
 * trampoline, which never moves. */
712 lispobj raw_fun
= (lispobj
)fdefn
->raw_addr
;
713 if (raw_fun
> READ_ONLY_SPACE_END
) {
714 lispobj simple_fun
= raw_fun
- FUN_RAW_ADDR_OFFSET
;
715 scavenge(&simple_fun
, 1);
716 /* Don't write unnecessarily. */
717 if (simple_fun
!= raw_fun
- FUN_RAW_ADDR_OFFSET
)
718 fdefn
->raw_addr
= (char *)simple_fun
+ FUN_RAW_ADDR_OFFSET
;
720 #elif defined(LISP_FEATURE_X86_64)
721 lispobj obj
= fdefn_raw_referent(fdefn
);
724 scavenge(&new, 1); // enliven
725 gc_dcheck(new == obj
); // must not move
728 # error "Need to implement scav_fdefn"
/* Unboxed objects contain no pointers: just report the (even-rounded)
 * word count so the scavenger skips over them. */
735 scav_unboxed(lispobj
*where
, lispobj object
)
737 sword_t length
= HeaderValue(object
) + 1;
738 return CEILING(length
, 2);
/* Transport an unboxed object to an unboxed page. */
742 trans_unboxed(lispobj object
)
744 gc_dcheck(lowtag_of(object
) == OTHER_POINTER_LOWTAG
);
745 sword_t length
= HeaderValue(*native_pointer(object
)) + 1;
746 return copy_unboxed_object(object
, CEILING(length
, 2));
/* Ratios/complexes are copied as unboxed when both components are
 * fixnums (no pointers to trace), else as boxed 4-word objects.
 * NOTE(review): the 'a'/'b' component loads are missing from the
 * extracted text. */
750 trans_ratio_or_complex(lispobj object
)
752 gc_dcheck(lowtag_of(object
) == OTHER_POINTER_LOWTAG
);
753 lispobj
* x
= native_pointer(object
);
757 /* A zero ratio or complex means it was just allocated by fixed-alloc and
758 a bignum can still be written there. Not a problem with a conservative GC
759 since it will be pinned down. */
760 if (fixnump(a
) && fixnump(b
)
761 #ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
766 return copy_unboxed_object(object
, 4);
768 return copy_object(object
, 4);
771 /* vector-like objects */
/* Transport a simple-vector; the +2 covers the header and length words,
 * rounded to an even total.  Large vectors may be promoted in place by
 * copy_large_object. */
773 trans_vector(lispobj object
)
775 gc_dcheck(lowtag_of(object
) == OTHER_POINTER_LOWTAG
);
777 sword_t length
= fixnum_value(VECTOR(object
)->length
);
778 return copy_large_object(object
, CEILING(length
+ 2, 2));
/* Word size of a simple-vector, matching trans_vector's computation. */
782 size_vector(lispobj
*where
)
784 sword_t length
= fixnum_value(((struct vector
*)where
)->length
);
785 return CEILING(length
+ 2, 2);
788 static inline uword_t
789 NWORDS(uword_t x
, uword_t n_bits
)
791 /* A good compiler should be able to constant-fold this whole thing,
792 even with the conditional. */
793 if(n_bits
<= N_WORD_BITS
) {
794 uword_t elements_per_word
= N_WORD_BITS
/n_bits
;
796 return CEILING(x
, elements_per_word
)/elements_per_word
;
799 /* FIXME: should have some sort of assertion that N_WORD_BITS
800 evenly divides n_bits */
801 return x
* (n_bits
/N_WORD_BITS
);
/* Generators for the scav/trans/size triples of specialized (unboxed)
 * vectors: 'nwords' is an expression in 'length' giving the data words. */
805 #define DEF_SCAV_TRANS_SIZE_UB(nbits) \
806 DEF_SPECIALIZED_VECTOR(vector_unsigned_byte_##nbits, NWORDS(length, nbits))
807 #define DEF_SPECIALIZED_VECTOR(name, nwords) \
808 static sword_t __attribute__((unused)) scav_##name(lispobj *where, lispobj header) { \
809 sword_t length = fixnum_value(((struct vector*)where)->length); \
810 return CEILING(nwords + 2, 2); \
812 static lispobj __attribute__((unused)) trans_##name(lispobj object) { \
813 gc_dcheck(lowtag_of(object) == OTHER_POINTER_LOWTAG); \
814 sword_t length = fixnum_value(VECTOR(object)->length); \
815 return copy_large_unboxed_object(object, CEILING(nwords + 2, 2)); \
817 static sword_t __attribute__((unused)) size_##name(lispobj *where) { \
818 sword_t length = fixnum_value(((struct vector*)where)->length); \
819 return CEILING(nwords + 2, 2); \
822 DEF_SPECIALIZED_VECTOR(vector_nil
, 0*length
)
823 DEF_SPECIALIZED_VECTOR(vector_bit
, NWORDS(length
,1))
824 /* NOTE: strings contain one more element of data (a terminating '\0'
825 * to help interface with C functions) than indicated by the length slot.
826 * This is true even for UCS4 strings, despite that C APIs are unlikely
827 * to have a convention that expects 4 zero bytes. */
828 DEF_SPECIALIZED_VECTOR(base_string
, NWORDS((length
+1), 8))
829 DEF_SPECIALIZED_VECTOR(character_string
, NWORDS((length
+1), 32))
830 DEF_SCAV_TRANS_SIZE_UB(2)
831 DEF_SCAV_TRANS_SIZE_UB(4)
832 DEF_SCAV_TRANS_SIZE_UB(8)
833 DEF_SCAV_TRANS_SIZE_UB(16)
834 DEF_SCAV_TRANS_SIZE_UB(32)
835 DEF_SCAV_TRANS_SIZE_UB(64)
836 DEF_SCAV_TRANS_SIZE_UB(128)
837 #ifdef LONG_FLOAT_SIZE
838 DEF_SPECIALIZED_VECTOR(vector_long_float
, length
* LONG_FLOAT_SIZE
)
839 DEF_SPECIALIZED_VECTOR(vector_complex_long_float
, length
* (2 * LONG_FLOAT_SIZE
))
/* Transport a weak pointer and (on cheneygc) push the copy onto the
 * global 'weak_pointers' chain so its value can be fixed or broken in a
 * post-GC pass; gencgc does that registration elsewhere.
 * NOTE(review): the 'copy' declaration and the return are missing from
 * the extracted text. */
843 trans_weak_pointer(lispobj object
)
846 gc_dcheck(lowtag_of(object
) == OTHER_POINTER_LOWTAG
);
848 #if defined(DEBUG_WEAK)
849 printf("Transporting weak pointer from 0x%08x\n", object
);
852 /* Need to remember where all the weak pointers are that have */
853 /* been transported so they can be fixed up in a post-GC pass. */
855 copy
= copy_object(object
, WEAK_POINTER_NWORDS
);
856 #ifndef LISP_FEATURE_GENCGC
857 struct weak_pointer
*wp
= (struct weak_pointer
*) native_pointer(copy
);
859 gc_dcheck(widetag_of(wp
->header
)==WEAK_POINTER_WIDETAG
);
860 /* Push the weak pointer onto the list of weak pointers. */
861 if (weak_pointer_breakable_p(wp
)) {
862 wp
->next
= (struct weak_pointer
*)LOW_WORD(weak_pointers
);
/* Post-GC pass over the collected weak pointers: if the referent was
 * transported, update the value to the new location; if it died in
 * oldspace (or in from-generation immobile space), break the pointer by
 * storing the unbound marker.
 * NOTE(review): the 'next_wp = wp->next' advance lines are missing from
 * the extracted text. */
869 void scan_weak_pointers(void)
871 struct weak_pointer
*wp
, *next_wp
;
872 for (wp
= weak_pointers
, next_wp
= NULL
; wp
!= NULL
; wp
= next_wp
) {
873 gc_assert(widetag_of(wp
->header
)==WEAK_POINTER_WIDETAG
);
877 if (next_wp
== wp
) /* gencgc uses a ref to self for end of list */
880 lispobj pointee
= wp
->value
;
881 gc_assert(is_lisp_pointer(pointee
));
882 lispobj
*objaddr
= native_pointer(pointee
);
884 /* Now, we need to check whether the object has been forwarded. If
885 * it has been, the weak pointer is still good and needs to be
886 * updated. Otherwise, the weak pointer needs to be broken. */
888 if (from_space_p(pointee
)) {
889 wp
->value
= forwarding_pointer_p(objaddr
) ?
890 LOW_WORD(forwarding_pointer_value(objaddr
)) : UNBOUND_MARKER_WIDETAG
;
892 #ifdef LISP_FEATURE_IMMOBILE_SPACE
893 else if (immobile_space_p(pointee
)) {
894 if (immobile_obj_gen_bits(objaddr
) == from_space
)
895 wp
->value
= UNBOUND_MARKER_WIDETAG
;
899 lose("unbreakable pointer %p", wp
);
/* EQ_HASH_MASK drops the tag/sign bits so the hash fits a fixnum. */
906 #if N_WORD_BITS == 32
907 #define EQ_HASH_MASK 0x1fffffff
908 #elif N_WORD_BITS == 64
909 #define EQ_HASH_MASK 0x1fffffffffffffff
912 /* Compute the EQ-hash of KEY. This must match POINTER-HASH in
913 * target-hash-table.lisp. */
914 #define EQ_HASH(key) ((key) & EQ_HASH_MASK)
916 /* List of weak hash tables chained through their NEXT-WEAK-HASH-TABLE
917 * slot. Set to NULL at the end of a collection.
919 * This is not optimal because, when a table is tenured, it won't be
920 * processed automatically; only the youngest generation is GC'd by
921 * default. On the other hand, all applications will need an
922 * occasional full GC anyway, so it's not that bad either. */
923 struct hash_table
*weak_hash_tables
= NULL
;
925 /* Return true if OBJ has already survived the current GC. */
/* cheneygc: alive iff not in from_space or already forwarded.
 * gencgc: test the dynamic-space page generation first (cheapest and
 * most likely), then forwarding, then pinning; fall through to the
 * immobile-space generation bits.  NOTE(review): the '#else', the
 * 'page_index < 0' guard and the trailing return are missing from the
 * extracted text. */
926 static inline int pointer_survived_gc_yet(lispobj obj
)
928 #ifdef LISP_FEATURE_CHENEYGC
929 // This is the most straightforward definition.
930 return (!from_space_p(obj
) || forwarding_pointer_p(native_pointer(obj
)));
932 /* Check for a pointer to dynamic space before considering immobile space.
933 Based on the relative size of the spaces, this should be a win because
934 if the object is in the dynamic space and not the 'from' generation
935 we don't want to test immobile_space_p() at all.
936 Additionally, pinned_p() is both more expensive and less likely than
937 forwarding_pointer_p(), so we want to reverse those conditions, which
938 would not be possible with pinned_p() buried inside from_space_p(). */
939 page_index_t page_index
= find_page_index((void*)obj
);
941 return page_table
[page_index
].gen
!= from_space
||
942 forwarding_pointer_p(native_pointer(obj
)) ||
943 pinned_p(obj
, page_index
);
944 #ifdef LISP_FEATURE_IMMOBILE_SPACE
945 if (immobile_space_p(obj
))
946 return immobile_obj_gen_bits(native_pointer(obj
)) != from_space
;
952 static int survived_gc_yet_KEY(lispobj key
, lispobj value
) {
953 return !is_lisp_pointer(key
) || pointer_survived_gc_yet(key
);
955 static int survived_gc_yet_VALUE(lispobj key
, lispobj value
) {
956 return !is_lisp_pointer(value
) || pointer_survived_gc_yet(value
);
958 static int survived_gc_yet_AND(lispobj key
, lispobj value
) {
959 int key_nonpointer
= !is_lisp_pointer(key
);
960 int val_nonpointer
= !is_lisp_pointer(value
);
961 if (key_nonpointer
&& val_nonpointer
) return 1;
962 return (key_nonpointer
|| pointer_survived_gc_yet(key
))
963 && (val_nonpointer
|| pointer_survived_gc_yet(value
));
965 static int survived_gc_yet_OR(lispobj key
, lispobj value
) {
966 int key_nonpointer
= !is_lisp_pointer(key
);
967 int val_nonpointer
= !is_lisp_pointer(value
);
968 if (key_nonpointer
|| val_nonpointer
) return 1;
969 // Both MUST be pointers
970 return pointer_survived_gc_yet(key
) || pointer_survived_gc_yet(value
);
/* Dispatch table mapping a table's weakness kind (fixnum index) to its
 * entry-liveness predicate.  NOTE(review): only one initializer entry
 * survived extraction; code left byte-identical. */
973 static int (*weak_hash_entry_alivep_fun
[5])(lispobj
,lispobj
) = {
976 survived_gc_yet_VALUE
,
981 /* Return the beginning of data in ARRAY (skipping the header and the
982 * length) or NULL if it isn't an array of the specified widetag after
/* On success also stores the element count through 'length'.
 * NOTE(review): the else/NULL branch is missing from the extracted
 * text; code left byte-identical. */
984 static inline lispobj
*
985 get_array_data (lispobj array
, int widetag
, uword_t
*length
)
987 if (is_lisp_pointer(array
) && widetag_of(*native_pointer(array
)) == widetag
) {
989 *length
= fixnum_value(native_pointer(array
)[1]);
990 return native_pointer(array
) + 2;
996 /* Only need to worry about scavenging the _real_ entries in the
997 * table. Phantom entries such as the hash table itself at index 0 and
998 * the empty marker at index 1 were scavenged by scav_vector that
999 * either called this function directly or arranged for it to be
1000 * called later by pushing the hash table onto weak_hash_tables. */
1002 scav_hash_table_entries (struct hash_table
*hash_table
)
1006 lispobj
*index_vector
;
1008 lispobj
*next_vector
;
1009 uword_t next_vector_length
;
1010 lispobj
*hash_vector
;
1011 uword_t hash_vector_length
;
1012 lispobj empty_symbol
;
1015 kv_vector
= get_array_data(hash_table
->table
,
1016 SIMPLE_VECTOR_WIDETAG
, &kv_length
);
1017 if (kv_vector
== NULL
)
1018 lose("invalid kv_vector %x\n", hash_table
->table
);
1020 index_vector
= get_array_data(hash_table
->index_vector
,
1021 SIMPLE_ARRAY_WORD_WIDETAG
, &length
);
1022 if (index_vector
== NULL
)
1023 lose("invalid index_vector %x\n", hash_table
->index_vector
);
1025 next_vector
= get_array_data(hash_table
->next_vector
,
1026 SIMPLE_ARRAY_WORD_WIDETAG
,
1027 &next_vector_length
);
1028 if (next_vector
== NULL
)
1029 lose("invalid next_vector %x\n", hash_table
->next_vector
);
1031 hash_vector
= get_array_data(hash_table
->hash_vector
,
1032 SIMPLE_ARRAY_WORD_WIDETAG
,
1033 &hash_vector_length
);
1034 if (hash_vector
!= NULL
)
1035 gc_assert(hash_vector_length
== next_vector_length
);
1037 /* These lengths could be different as the index_vector can be a
1038 * different length from the others, a larger index_vector could
1039 * help reduce collisions. */
1040 gc_assert(next_vector_length
*2 == kv_length
);
1042 if ((empty_symbol
= kv_vector
[1]) != UNBOUND_MARKER_WIDETAG
)
1043 lose("unexpected empty-hash-table-slot marker: %p\n", empty_symbol
);
1045 /* Work through the KV vector. */
1046 int (*alivep_test
)(lispobj
,lispobj
)
1047 = weak_hash_entry_alivep_fun
[fixnum_value(hash_table
->_weakness
)];
1048 #define SCAV_ENTRIES(aliveness_predicate) \
1049 for (i = 1; i < next_vector_length; i++) { \
1050 lispobj old_key = kv_vector[2*i]; \
1051 lispobj __attribute__((unused)) value = kv_vector[2*i+1]; \
1052 if (aliveness_predicate) { \
1053 /* Scavenge the key and value. */ \
1054 scavenge(&kv_vector[2*i], 2); \
1055 /* If an EQ-based key has moved, mark the hash-table for rehash */ \
1056 if (!hash_vector || hash_vector[i] == MAGIC_HASH_VECTOR_VALUE) { \
1057 lispobj new_key = kv_vector[2*i]; \
1058 if (old_key != new_key && new_key != empty_symbol) \
1059 hash_table->needs_rehash_p = T; \
1062 SCAV_ENTRIES(alivep_test(old_key
, value
))
1068 scav_vector (lispobj
*where
, lispobj object
)
1070 sword_t kv_length
= fixnum_value(where
[1]);
1071 struct hash_table
*hash_table
;
1073 /* SB-VM:VECTOR-VALID-HASHING-SUBTYPE is set for EQ-based and weak
1074 * hash tables in the Lisp HASH-TABLE code to indicate need for
1075 * special GC support. */
1076 if ((HeaderValue(object
) & 0xFF) == subtype_VectorNormal
) {
1078 scavenge(where
+ 2, kv_length
);
1079 return CEILING(kv_length
+ 2, 2);
1082 /* Scavenge element 0, which may be a hash-table structure. */
1083 scavenge(where
+2, 1);
1084 if (!is_lisp_pointer(where
[2])) {
1085 /* This'll happen when REHASH clears the header of old-kv-vector
1086 * and fills it with zero, but some other thread simulatenously
1087 * sets the header in %%PUTHASH.
1090 "Warning: no pointer at %p in hash table: this indicates "
1091 "non-fatal corruption caused by concurrent access to a "
1092 "hash-table from multiple threads. Any accesses to "
1093 "hash-tables shared between threads should be protected "
1094 "by locks.\n", (void*)&where
[2]);
1097 hash_table
= (struct hash_table
*)native_pointer(where
[2]);
1098 /*FSHOW((stderr,"/hash_table = %x\n", hash_table));*/
1099 if (widetag_of(hash_table
->header
) != INSTANCE_WIDETAG
) {
1100 lose("hash table not instance (%x at %x)\n",
1105 /* Verify that vector element 1 is as expected.
1106 Don't bother scavenging it, since we lose() if it's not an immediate. */
1107 if (where
[3] != UNBOUND_MARKER_WIDETAG
)
1108 lose("unexpected empty-hash-table-slot marker: %p\n", where
[3]);
1110 /* Scavenge hash table, which will fix the positions of the other
1111 * needed objects. */
1112 scav_instance((lispobj
*)hash_table
, hash_table
->header
);
1114 /* Cross-check the kv_vector. */
1115 if (where
!= native_pointer(hash_table
->table
)) {
1116 lose("hash_table table!=this table %x\n", hash_table
->table
);
1119 if (!hash_table
->_weakness
) {
1120 scav_hash_table_entries(hash_table
);
1122 /* Delay scavenging of this table by pushing it onto
1123 * weak_hash_tables (if it's not there already) for the weak
1125 if (hash_table
->next_weak_hash_table
== NIL
) {
1126 hash_table
->next_weak_hash_table
= (lispobj
)weak_hash_tables
;
1127 weak_hash_tables
= hash_table
;
1131 return (CEILING(kv_length
+ 2, 2));
1135 scav_weak_hash_tables (void)
1137 struct hash_table
*table
;
1139 /* Scavenge entries whose triggers are known to survive. */
1140 for (table
= weak_hash_tables
; table
!= NULL
;
1141 table
= (struct hash_table
*)table
->next_weak_hash_table
) {
1142 scav_hash_table_entries(table
);
1146 /* Walk through the chain whose first element is *FIRST and remove
1147 * dead weak entries. */
1149 scan_weak_hash_table_chain (struct hash_table
*hash_table
, lispobj
*prev
,
1150 lispobj
*kv_vector
, lispobj
*index_vector
,
1151 lispobj
*next_vector
, lispobj
*hash_vector
,
1152 lispobj empty_symbol
, int (*alivep_test
)(lispobj
,lispobj
))
1154 unsigned index
= *prev
;
1156 unsigned next
= next_vector
[index
];
1157 lispobj key
= kv_vector
[2 * index
];
1158 lispobj value
= kv_vector
[2 * index
+ 1];
1159 gc_assert(key
!= empty_symbol
);
1160 gc_assert(value
!= empty_symbol
);
1161 if (!alivep_test(key
, value
)) {
1162 unsigned count
= fixnum_value(hash_table
->number_entries
);
1163 gc_assert(count
> 0);
1165 hash_table
->number_entries
= make_fixnum(count
- 1);
1166 next_vector
[index
] = fixnum_value(hash_table
->next_free_kv
);
1167 hash_table
->next_free_kv
= make_fixnum(index
);
1168 kv_vector
[2 * index
] = empty_symbol
;
1169 kv_vector
[2 * index
+ 1] = empty_symbol
;
1171 hash_vector
[index
] = MAGIC_HASH_VECTOR_VALUE
;
1173 prev
= &next_vector
[index
];
1180 scan_weak_hash_table (struct hash_table
*hash_table
)
1183 lispobj
*index_vector
;
1184 uword_t length
= 0; /* prevent warning */
1185 lispobj
*next_vector
;
1186 uword_t next_vector_length
= 0; /* prevent warning */
1187 lispobj
*hash_vector
;
1188 lispobj empty_symbol
;
1189 int (*alivep_test
)(lispobj
,lispobj
) =
1190 weak_hash_entry_alivep_fun
[fixnum_value(hash_table
->_weakness
)];
1193 kv_vector
= get_array_data(hash_table
->table
,
1194 SIMPLE_VECTOR_WIDETAG
, NULL
);
1195 index_vector
= get_array_data(hash_table
->index_vector
,
1196 SIMPLE_ARRAY_WORD_WIDETAG
, &length
);
1197 next_vector
= get_array_data(hash_table
->next_vector
,
1198 SIMPLE_ARRAY_WORD_WIDETAG
,
1199 &next_vector_length
);
1200 hash_vector
= get_array_data(hash_table
->hash_vector
,
1201 SIMPLE_ARRAY_WORD_WIDETAG
, NULL
);
1202 empty_symbol
= kv_vector
[1];
1204 for (i
= 0; i
< length
; i
++) {
1205 scan_weak_hash_table_chain(hash_table
, &index_vector
[i
],
1206 kv_vector
, index_vector
, next_vector
,
1207 hash_vector
, empty_symbol
, alivep_test
);
1211 /* Remove dead entries from weak hash tables. */
1213 scan_weak_hash_tables (void)
1215 struct hash_table
*table
, *next
;
1217 for (table
= weak_hash_tables
; table
!= NULL
; table
= next
) {
1218 next
= (struct hash_table
*)table
->next_weak_hash_table
;
1219 table
->next_weak_hash_table
= NIL
;
1220 scan_weak_hash_table(table
);
1223 weak_hash_tables
= NULL
;
1232 scav_lose(lispobj
*where
, lispobj object
)
1234 lose("no scavenge function for object %p (widetag 0x%x)\n",
1236 widetag_of(*where
));
1238 return 0; /* bogus return value to satisfy static type checking */
1242 trans_lose(lispobj object
)
1244 lose("no transport function for object %p (widetag 0x%x)\n",
1246 widetag_of(*native_pointer(object
)));
1247 return NIL
; /* bogus return value to satisfy static type checking */
1251 size_lose(lispobj
*where
)
1253 lose("no size function for object at %p (widetag 0x%x)\n",
1255 widetag_of(*where
));
1256 return 1; /* bogus return value to satisfy static type checking */
1264 #include "genesis/gc-tables.h"
1267 static lispobj
*search_spaces(void *pointer
)
1270 if (((start
= search_dynamic_space(pointer
)) != NULL
) ||
1271 #ifdef LISP_FEATURE_IMMOBILE_SPACE
1272 ((start
= search_immobile_space(pointer
)) != NULL
) ||
1274 ((start
= search_static_space(pointer
)) != NULL
) ||
1275 ((start
= search_read_only_space(pointer
)) != NULL
))
1280 /* Find the code object for the given pc, or return NULL on
1283 component_ptr_from_pc(lispobj
*pc
)
1285 lispobj
*object
= search_spaces(pc
);
1287 if (object
!= NULL
&& widetag_of(*object
) == CODE_HEADER_WIDETAG
)
1293 /* Scan an area looking for an object which encloses the given pointer.
1294 * Return the object start on success, or NULL on failure. */
1296 gc_search_space3(void *pointer
, lispobj
*start
, void *limit
)
1298 if (pointer
< (void*)start
|| pointer
>= limit
) return NULL
;
1302 /* CAUTION: this code is _significantly_ slower than the production version
1303 due to the extra checks for forwarding. Only use it if debugging */
1304 for ( ; (void*)start
< limit
; start
+= count
) {
1305 lispobj
*forwarded_start
;
1306 if (forwarding_pointer_p(start
))
1307 forwarded_start
= native_pointer(forwarding_pointer_value(start
));
1309 forwarded_start
= start
;
1310 lispobj thing
= *forwarded_start
;
1311 count
= OBJECT_SIZE(thing
, forwarded_start
);
1312 /* Check whether the pointer is within this object. */
1313 if (pointer
< (void*)(start
+count
)) return start
;
1316 for ( ; (void*)start
< limit
; start
+= count
) {
1317 lispobj thing
= *start
;
1318 count
= OBJECT_SIZE(thing
, start
);
1319 /* Check whether the pointer is within this object. */
1320 if (pointer
< (void*)(start
+count
)) return start
;
1326 /* Helper for valid_lisp_pointer_p (below) and
1327 * conservative_root_p (gencgc).
1329 * pointer is the pointer to check validity of,
1330 * and start_addr is the address of the enclosing object.
1332 * This is actually quite simple to check: because the heap state is assumed
1333 * consistent, and 'start_addr' is known good, having come from
1334 * gc_search_space(), only the 'pointer' argument is dubious.
1335 * So make 'start_addr' into a tagged pointer and see if that matches 'pointer'.
1336 * If it does, then 'pointer' is valid.
1339 properly_tagged_p_internal(lispobj pointer
, lispobj
*start_addr
)
1341 // If a headerless object, confirm that 'pointer' is a list pointer.
1342 // Given the precondition that the heap is in a valid state,
1343 // it may be assumed that one check of is_cons_half() suffices;
1344 // we don't need to check the other half.
1345 lispobj header
= *start_addr
;
1346 if (is_cons_half(header
))
1347 return make_lispobj(start_addr
, LIST_POINTER_LOWTAG
) == pointer
;
1349 // Because this heap object was not deemed to be a cons,
1350 // it must be an object header. Don't need a check except when paranoid.
1351 gc_dcheck(other_immediate_lowtag_p(header
));
1353 // The space of potential widetags has 64 elements, not 256,
1354 // because of the constant low 2 bits.
1355 int widetag
= widetag_of(header
);
1356 int lowtag
= lowtag_for_widetag
[widetag
>>2];
1357 if (lowtag
&& make_lispobj(start_addr
, lowtag
) == pointer
)
1358 return 1; // instant win
1360 if (widetag
== CODE_HEADER_WIDETAG
) {
1361 // Check for RETURN_PC_HEADER first since it's quicker.
1362 // Then consider the embedded simple-funs.
1363 #if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
1364 /* The all-architecture test below is good as far as it goes,
1365 * but an LRA object is similar to a FUN-POINTER: It is
1366 * embedded within a CODE-OBJECT pointed to by start_addr, and
1367 * cannot be found by simply walking the heap, therefore we
1368 * need to check for it. -- AB, 2010-Jun-04 */
1369 if (lowtag_of(pointer
) == OTHER_POINTER_LOWTAG
) {
1370 lispobj
*potential_lra
= native_pointer(pointer
);
1371 if ((widetag_of(potential_lra
[0]) == RETURN_PC_WIDETAG
) &&
1372 ((potential_lra
- HeaderValue(potential_lra
[0])) == start_addr
)) {
1373 return 1; /* It's as good as we can verify. */
1377 if (lowtag_of(pointer
) == FUN_POINTER_LOWTAG
) {
1378 struct simple_fun
*pfun
=
1379 (struct simple_fun
*)(pointer
-FUN_POINTER_LOWTAG
);
1380 for_each_simple_fun(i
, function
, (struct code
*)start_addr
, 0, {
1381 if (pfun
== function
) return 1;
1385 return 0; // no good
1388 /* META: Note the ambiguous word "validate" in the comment below.
1389 * This means "Decide whether <x> is valid".
1390 * But when you see os_validate() elsewhere, that doesn't mean to ask
1391 * whether something is valid, it says to *make* it valid.
1392 * I think it would be nice if we could avoid using the word in the
1393 * sense in which os_validate() uses it, which would entail renaming
1394 * a bunch of stuff, which is harder than just explaining why
1395 * the comments can be deceptive */
1397 /* Used by the debugger to validate possibly bogus pointers before
1398 * calling MAKE-LISP-OBJ on them.
1400 * FIXME: We would like to make this perfect, because if the debugger
1401 * constructs a reference to a bugs lisp object, and it ends up in a
1402 * location scavenged by the GC all hell breaks loose.
1404 * Whereas conservative_root_p has to be conservative
1405 * and return true for all valid pointers, this could actually be eager
1406 * and lie about a few pointers without bad results... but that should
1407 * be reflected in the name.
1410 valid_lisp_pointer_p(lispobj pointer
)
1412 lispobj
*start
= search_spaces((void*)pointer
);
1414 return properly_tagged_descriptor_p((void*)pointer
, start
);
1419 maybe_gc(os_context_t
*context
)
1421 lispobj gc_happened
;
1422 struct thread
*thread
= arch_os_get_current_thread();
1423 boolean were_in_lisp
= !foreign_function_call_active_p(thread
);
1426 fake_foreign_function_call(context
);
1429 /* SUB-GC may return without GCing if *GC-INHIBIT* is set, in
1430 * which case we will be running with no gc trigger barrier
1431 * thing for a while. But it shouldn't be long until the end
1434 * FIXME: It would be good to protect the end of dynamic space for
1435 * CheneyGC and signal a storage condition from there.
1438 /* Restore the signal mask from the interrupted context before
1439 * calling into Lisp if interrupts are enabled. Why not always?
1441 * Suppose there is a WITHOUT-INTERRUPTS block far, far out. If an
1442 * interrupt hits while in SUB-GC, it is deferred and the
1443 * os_context_sigmask of that interrupt is set to block further
1444 * deferrable interrupts (until the first one is
1445 * handled). Unfortunately, that context refers to this place and
1446 * when we return from here the signals will not be blocked.
1448 * A kludgy alternative is to propagate the sigmask change to the
1451 #if !(defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_SAFEPOINT))
1452 check_gc_signals_unblocked_or_lose(os_context_sigmask_addr(context
));
1453 unblock_gc_signals(0, 0);
1455 FSHOW((stderr
, "/maybe_gc: calling SUB_GC\n"));
1456 /* FIXME: Nothing must go wrong during GC else we end up running
1457 * the debugger, error handlers, and user code in general in a
1458 * potentially unsafe place. Running out of the control stack or
1459 * the heap in SUB-GC are ways to lose. Of course, deferrables
1460 * cannot be unblocked because there may be a pending handler, or
1461 * we may even be in a WITHOUT-INTERRUPTS. */
1462 gc_happened
= funcall0(StaticSymbolFunction(SUB_GC
));
1463 FSHOW((stderr
, "/maybe_gc: gc_happened=%s\n",
1464 (gc_happened
== NIL
)
1466 : ((gc_happened
== T
)
1469 /* gc_happened can take three values: T, NIL, 0.
1471 * T means that the thread managed to trigger a GC, and post-gc
1474 * NIL means that the thread is within without-gcing, and no GC
1477 * Finally, 0 means that *a* GC has occurred, but it wasn't
1478 * triggered by this thread; success, but post-gc doesn't have
1481 if ((gc_happened
== T
) &&
1482 /* See if interrupts are enabled or it's possible to enable
1483 * them. POST-GC has a similar check, but we don't want to
1484 * unlock deferrables in that case and get a pending interrupt
1486 ((SymbolValue(INTERRUPTS_ENABLED
,thread
) != NIL
) ||
1487 (SymbolValue(ALLOW_WITH_INTERRUPTS
,thread
) != NIL
))) {
1488 #ifndef LISP_FEATURE_WIN32
1489 sigset_t
*context_sigmask
= os_context_sigmask_addr(context
);
1490 if (!deferrables_blocked_p(context_sigmask
)) {
1491 thread_sigmask(SIG_SETMASK
, context_sigmask
, 0);
1492 #ifndef LISP_FEATURE_SB_SAFEPOINT
1493 check_gc_signals_unblocked_or_lose(0);
1496 FSHOW((stderr
, "/maybe_gc: calling POST_GC\n"));
1497 funcall0(StaticSymbolFunction(POST_GC
));
1498 #ifndef LISP_FEATURE_WIN32
1500 FSHOW((stderr
, "/maybe_gc: punting on POST_GC due to blockage\n"));
1506 undo_fake_foreign_function_call(context
);
1508 /* Otherwise done by undo_fake_foreign_function_call. And
1509 something later wants them to be blocked. What a nice
1511 block_blockable_signals(0);
1514 FSHOW((stderr
, "/maybe_gc: returning\n"));
1515 return (gc_happened
!= NIL
);
1518 #define BYTES_ZERO_BEFORE_END (1<<12)
1520 /* There used to be a similar function called SCRUB-CONTROL-STACK in
1521 * Lisp and another called zero_stack() in cheneygc.c, but since it's
1522 * shorter to express in, and more often called from C, I keep only
1523 * the C one after fixing it. -- MG 2009-03-25 */
1525 /* Zero the unused portion of the control stack so that old objects
1526 * are not kept alive because of uninitialized stack variables.
1528 * "To summarize the problem, since not all allocated stack frame
1529 * slots are guaranteed to be written by the time you call an another
1530 * function or GC, there may be garbage pointers retained in your dead
1531 * stack locations. The stack scrubbing only affects the part of the
1532 * stack from the SP to the end of the allocated stack." - ram, on
1533 * cmucl-imp, Tue, 25 Sep 2001
1535 * So, as an (admittedly lame) workaround, from time to time we call
1536 * scrub-control-stack to zero out all the unused portion. This is
1537 * supposed to happen when the stack is mostly empty, so that we have
1538 * a chance of clearing more of it: callers are currently (2002.07.18)
1539 * REPL, SUB-GC and sig_stop_for_gc_handler. */
1541 /* Take care not to tread on the guard page and the hard guard page as
1542 * it would be unkind to sig_stop_for_gc_handler. Touching the return
1543 * guard page is not dangerous. For this to work the guard page must
1544 * be zeroed when protected. */
1546 /* FIXME: I think there is no guarantee that once
1547 * BYTES_ZERO_BEFORE_END bytes are zero the rest are also zero. This
1548 * may be what the "lame" adjective in the above comment is for. In
1549 * this case, exact gc may lose badly. */
/* Scrub the current thread's control stack (see comments above). */
void
scrub_control_stack()
{
    scrub_thread_control_stack(arch_os_get_current_thread());
}
1557 scrub_thread_control_stack(struct thread
*th
)
1559 os_vm_address_t guard_page_address
= CONTROL_STACK_GUARD_PAGE(th
);
1560 os_vm_address_t hard_guard_page_address
= CONTROL_STACK_HARD_GUARD_PAGE(th
);
1561 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
1562 /* On these targets scrubbing from C is a bad idea, so we punt to
1563 * a routine in $ARCH-assem.S. */
1564 extern void arch_scrub_control_stack(struct thread
*, os_vm_address_t
, os_vm_address_t
);
1565 arch_scrub_control_stack(th
, guard_page_address
, hard_guard_page_address
);
1567 lispobj
*sp
= access_control_stack_pointer(th
);
1569 if ((((os_vm_address_t
)sp
< (hard_guard_page_address
+ os_vm_page_size
)) &&
1570 ((os_vm_address_t
)sp
>= hard_guard_page_address
)) ||
1571 (((os_vm_address_t
)sp
< (guard_page_address
+ os_vm_page_size
)) &&
1572 ((os_vm_address_t
)sp
>= guard_page_address
) &&
1573 (th
->control_stack_guard_page_protected
!= NIL
)))
1575 #ifdef LISP_FEATURE_STACK_GROWS_DOWNWARD_NOT_UPWARD
1578 } while (((uword_t
)sp
--) & (BYTES_ZERO_BEFORE_END
- 1));
1579 if ((os_vm_address_t
)sp
< (hard_guard_page_address
+ os_vm_page_size
))
1584 } while (((uword_t
)sp
--) & (BYTES_ZERO_BEFORE_END
- 1));
1588 } while (((uword_t
)++sp
) & (BYTES_ZERO_BEFORE_END
- 1));
1589 if ((os_vm_address_t
)sp
>= hard_guard_page_address
)
1594 } while (((uword_t
)++sp
) & (BYTES_ZERO_BEFORE_END
- 1));
1596 #endif /* LISP_FEATURE_C_STACK_IS_CONTROL_STACK */
1599 #if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
1602 scavenge_control_stack(struct thread
*th
)
1604 lispobj
*object_ptr
;
1606 /* In order to properly support dynamic-extent allocation of
1607 * non-CONS objects, the control stack requires special handling.
1608 * Rather than calling scavenge() directly, grovel over it fixing
1609 * broken hearts, scavenging pointers to oldspace, and pitching a
1610 * fit when encountering unboxed data. This prevents stray object
1611 * headers from causing the scavenger to blow past the end of the
1612 * stack (an error case checked in scavenge()). We don't worry
1613 * about treating unboxed words as boxed or vice versa, because
1614 * the compiler isn't allowed to store unboxed objects on the
1615 * control stack. -- AB, 2011-Dec-02 */
1617 for (object_ptr
= th
->control_stack_start
;
1618 object_ptr
< access_control_stack_pointer(th
);
1621 lispobj object
= *object_ptr
;
1622 #ifdef LISP_FEATURE_GENCGC
1623 if (forwarding_pointer_p(object_ptr
))
1624 lose("unexpected forwarding pointer in scavenge_control_stack: %p, start=%p, end=%p\n",
1625 object_ptr
, th
->control_stack_start
, access_control_stack_pointer(th
));
1627 if (is_lisp_pointer(object
) && from_space_p(object
)) {
1628 /* It currently points to old space. Check for a
1629 * forwarding pointer. */
1630 lispobj
*ptr
= native_pointer(object
);
1631 if (forwarding_pointer_p(ptr
)) {
1632 /* Yes, there's a forwarding pointer. */
1633 *object_ptr
= LOW_WORD(forwarding_pointer_value(ptr
));
1635 /* Scavenge that pointer. */
1636 long n_words_scavenged
=
1637 (scavtab
[widetag_of(object
)])(object_ptr
, object
);
1638 gc_assert(n_words_scavenged
== 1);
1640 } else if (scavtab
[widetag_of(object
)] == scav_lose
) {
1641 lose("unboxed object in scavenge_control_stack: %p->%x, start=%p, end=%p\n",
1642 object_ptr
, object
, th
->control_stack_start
, access_control_stack_pointer(th
));
1647 /* Scavenging Interrupt Contexts */
1649 static int boxed_registers
[] = BOXED_REGISTERS
;
1651 /* The GC has a notion of an "interior pointer" register, an unboxed
1652 * register that typically contains a pointer to inside an object
1653 * referenced by another pointer. The most obvious of these is the
1654 * program counter, although many compiler backends define a "Lisp
1655 * Interior Pointer" register known to the runtime as reg_LIP, and
1656 * various CPU architectures have other registers that also partake of
1657 * the interior-pointer nature. As the code for pairing an interior
1658 * pointer value up with its "base" register, and fixing it up after
1659 * scavenging is complete is horribly repetitive, a few macros paper
1660 * over the monotony. --AB, 2010-Jul-14 */
1662 /* These macros are only ever used over a lexical environment which
1663 * defines a pointer to an os_context_t called context, thus we don't
1664 * bother to pass that context in as a parameter. */
1666 /* Define how to access a given interior pointer. */
1667 #define ACCESS_INTERIOR_POINTER_pc \
1668 *os_context_pc_addr(context)
1669 #define ACCESS_INTERIOR_POINTER_lip \
1670 *os_context_register_addr(context, reg_LIP)
1671 #define ACCESS_INTERIOR_POINTER_lr \
1672 *os_context_lr_addr(context)
1673 #define ACCESS_INTERIOR_POINTER_npc \
1674 *os_context_npc_addr(context)
1675 #define ACCESS_INTERIOR_POINTER_ctr \
1676 *os_context_ctr_addr(context)
1678 #define INTERIOR_POINTER_VARS(name) \
1679 uword_t name##_offset; \
1680 int name##_register_pair
1682 #define PAIR_INTERIOR_POINTER(name) \
1683 pair_interior_pointer(context, \
1684 ACCESS_INTERIOR_POINTER_##name, \
1686 &name##_register_pair)
1688 /* One complexity here is that if a paired register is not found for
1689 * an interior pointer, then that pointer does not get updated.
1690 * Originally, there was some commentary about using an index of -1
1691 * when calling os_context_register_addr() on SPARC referring to the
1692 * program counter, but the real reason is to allow an interior
1693 * pointer register to point to the runtime, read-only space, or
1694 * static space without problems. */
1695 #define FIXUP_INTERIOR_POINTER(name) \
1697 if (name##_register_pair >= 0) { \
1698 ACCESS_INTERIOR_POINTER_##name = \
1699 (*os_context_register_addr(context, \
1700 name##_register_pair) \
1708 pair_interior_pointer(os_context_t
*context
, uword_t pointer
,
1709 uword_t
*saved_offset
, int *register_pair
)
1714 * I (RLT) think this is trying to find the boxed register that is
1715 * closest to the LIP address, without going past it. Usually, it's
1716 * reg_CODE or reg_LRA. But sometimes, nothing can be found.
1718 /* 0x7FFFFFFF on 32-bit platforms;
1719 0x7FFFFFFFFFFFFFFF on 64-bit platforms */
1720 *saved_offset
= (((uword_t
)1) << (N_WORD_BITS
- 1)) - 1;
1721 *register_pair
= -1;
1722 for (i
= 0; i
< (sizeof(boxed_registers
) / sizeof(int)); i
++) {
1727 index
= boxed_registers
[i
];
1728 reg
= *os_context_register_addr(context
, index
);
1730 /* An interior pointer is never relative to a non-pointer
1731 * register (an oversight in the original implementation).
1732 * The simplest argument for why this is true is to consider
1733 * the fixnum that happens by coincide to be the word-index in
1734 * memory of the header for some object plus two. This is
1735 * happenstance would cause the register containing the fixnum
1736 * to be selected as the register_pair if the interior pointer
1737 * is to anywhere after the first two words of the object.
1738 * The fixnum won't be changed during GC, but the object might
1739 * move, thus destroying the interior pointer. --AB,
1742 if (is_lisp_pointer(reg
) &&
1743 ((reg
& ~LOWTAG_MASK
) <= pointer
)) {
1744 offset
= pointer
- (reg
& ~LOWTAG_MASK
);
1745 if (offset
< *saved_offset
) {
1746 *saved_offset
= offset
;
1747 *register_pair
= index
;
1754 scavenge_interrupt_context(os_context_t
* context
)
1758 /* FIXME: The various #ifdef noise here is precisely that: noise.
1759 * Is it possible to fold it into the macrology so that we have
1760 * one set of #ifdefs and then INTERIOR_POINTER_VARS /et alia/
1761 * compile out for the registers that don't exist on a given
1764 INTERIOR_POINTER_VARS(pc
);
1766 INTERIOR_POINTER_VARS(lip
);
1768 #ifdef ARCH_HAS_LINK_REGISTER
1769 INTERIOR_POINTER_VARS(lr
);
1771 #ifdef ARCH_HAS_NPC_REGISTER
1772 INTERIOR_POINTER_VARS(npc
);
1774 #ifdef LISP_FEATURE_PPC
1775 INTERIOR_POINTER_VARS(ctr
);
1778 PAIR_INTERIOR_POINTER(pc
);
1780 PAIR_INTERIOR_POINTER(lip
);
1782 #ifdef ARCH_HAS_LINK_REGISTER
1783 PAIR_INTERIOR_POINTER(lr
);
1785 #ifdef ARCH_HAS_NPC_REGISTER
1786 PAIR_INTERIOR_POINTER(npc
);
1788 #ifdef LISP_FEATURE_PPC
1789 PAIR_INTERIOR_POINTER(ctr
);
1792 /* Scavenge all boxed registers in the context. */
1793 for (i
= 0; i
< (sizeof(boxed_registers
) / sizeof(int)); i
++) {
1794 os_context_register_t
*boxed_reg
;
1797 /* We can't "just" cast os_context_register_addr() to a
1798 * pointer to lispobj and pass it to scavenge, because some
1799 * systems can have a wider register width than we use for
1800 * lisp objects, and on big-endian systems casting a pointer
1801 * to a narrower target type doesn't work properly.
1802 * Therefore, we copy the value out to a temporary lispobj
1803 * variable, scavenge there, and copy the value back in.
1805 * FIXME: lispobj is unsigned, os_context_register_t may be
1806 * signed or unsigned, are we truncating or sign-extending
1807 * values here that shouldn't be modified? Possibly affects
1808 * any architecture that has 32-bit and 64-bit variants where
1809 * we run in 32-bit mode on 64-bit hardware when the OS is set
1810 * up for 64-bit from the start. Or an environment with
1811 * 32-bit addresses and 64-bit registers. */
1813 boxed_reg
= os_context_register_addr(context
, boxed_registers
[i
]);
1815 scavenge(&datum
, 1);
1819 /* Now that the scavenging is done, repair the various interior
1821 FIXUP_INTERIOR_POINTER(pc
);
1823 FIXUP_INTERIOR_POINTER(lip
);
1825 #ifdef ARCH_HAS_LINK_REGISTER
1826 FIXUP_INTERIOR_POINTER(lr
);
1828 #ifdef ARCH_HAS_NPC_REGISTER
1829 FIXUP_INTERIOR_POINTER(npc
);
1831 #ifdef LISP_FEATURE_PPC
1832 FIXUP_INTERIOR_POINTER(ctr
);
1837 scavenge_interrupt_contexts(struct thread
*th
)
1840 os_context_t
*context
;
1842 index
= fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX
,th
));
1844 #if defined(DEBUG_PRINT_CONTEXT_INDEX)
1845 printf("Number of active contexts: %d\n", index
);
1848 for (i
= 0; i
< index
; i
++) {
1849 context
= th
->interrupt_contexts
[i
];
1850 scavenge_interrupt_context(context
);
1853 #endif /* x86oid targets */
1855 void varint_unpacker_init(struct varint_unpacker
* unpacker
, lispobj integer
)
1857 if (fixnump(integer
)) {
1858 unpacker
->word
= fixnum_value(integer
);
1859 unpacker
->limit
= N_WORD_BYTES
;
1860 unpacker
->data
= (char*)&unpacker
->word
;
1862 struct bignum
* bignum
= (struct bignum
*)(integer
- OTHER_POINTER_LOWTAG
);
1864 unpacker
->limit
= HeaderValue(bignum
->header
) * N_WORD_BYTES
;
1865 unpacker
->data
= (char*)bignum
->digits
;
1867 unpacker
->index
= 0;
1870 // Fetch the next varint from 'unpacker' into 'result'.
1871 // Because there is no length prefix on the number of varints encoded,
1872 // spurious trailing zeros might be observed. The data consumer can
1873 // circumvent that by storing a count as the first value in the series.
1874 // Return 1 for success, 0 for EOF.
1875 int varint_unpack(struct varint_unpacker
* unpacker
, int* result
)
1877 if (unpacker
->index
>= unpacker
->limit
) return 0;
1878 int accumulator
= 0;
1881 #ifdef LISP_FEATURE_LITTLE_ENDIAN
1882 int byte
= unpacker
->data
[unpacker
->index
];
1884 // bignums are little-endian in word order,
1885 // but machine-native within each word.
1886 // We could pack bytes MSB-to-LSB in the bigdigits,
1887 // but that seems less intuitive on the Lisp side.
1888 int word_index
= unpacker
->index
/ N_WORD_BYTES
;
1889 int byte_index
= unpacker
->index
% N_WORD_BYTES
;
1890 int byte
= (((unsigned int*)unpacker
->data
)[word_index
]
1891 >> (byte_index
* 8)) & 0xFF;
1894 accumulator
|= (byte
& 0x7F) << shift
;
1895 if (!(byte
& 0x80)) break;
1896 gc_assert(unpacker
->index
< unpacker
->limit
);
1899 *result
= accumulator
;
1903 /* Our own implementation of heapsort, because some C libraries have a qsort()
1904 * that calls malloc() apparently, which we MUST NOT do. */
1906 typedef uword_t
* heap
;
1908 #define swap(a,i,j) { uword_t temp=a[i];a[i]=a[j];a[j]=temp; }
1909 static void sift_down(heap array
, int start
, int end
)
1912 while (root
* 2 + 1 <= end
) {
1913 int child
= root
* 2 + 1;
1914 if (child
+ 1 <= end
&& array
[child
] < array
[child
+1])
1916 if (array
[root
] < array
[child
]) {
1917 swap(array
, root
, child
);
1925 static void heapify(heap array
, int length
)
1927 int start
= (length
- 2) / 2;
1928 while (start
>= 0) {
1929 sift_down(array
, start
, length
-1);
1934 void gc_heapsort_uwords(heap array
, int length
)
1936 heapify(array
, length
);
1937 int end
= length
- 1;
1939 swap(array
, end
, 0);
1941 sift_down(array
, 0, end
);
1945 //// Coalescing of constant vectors for SAVE-LISP-AND-DIE
1947 static boolean
coalescible_number_p(lispobj
* where
)
1949 int widetag
= widetag_of(*where
);
1950 return widetag
== BIGNUM_WIDETAG
1951 // Ratios and complex integers containing pointers to bignums don't work.
1952 || ((widetag
== RATIO_WIDETAG
|| widetag
== COMPLEX_WIDETAG
)
1953 && fixnump(where
[1]) && fixnump(where
[2]))
1954 #ifndef LISP_FEATURE_64_BIT
1955 || widetag
== SINGLE_FLOAT_WIDETAG
1957 || widetag
== DOUBLE_FLOAT_WIDETAG
1958 || widetag
== COMPLEX_SINGLE_FLOAT_WIDETAG
1959 || widetag
== COMPLEX_DOUBLE_FLOAT_WIDETAG
;
1962 /// Return true of fixnums, bignums, strings, symbols.
1963 /// Strings are considered eql-comparable,
1964 /// because they're coalesced before comparing.
1965 static boolean
eql_comparable_p(lispobj obj
)
1967 if (fixnump(obj
) || obj
== NIL
) return 1;
1968 if (lowtag_of(obj
) != OTHER_POINTER_LOWTAG
) return 0;
1969 int widetag
= widetag_of(*native_pointer(obj
));
1970 return widetag
== BIGNUM_WIDETAG
1971 || widetag
== SYMBOL_WIDETAG
1972 #ifdef SIMPLE_CHARACTER_STRING_WIDETAG
1973 || widetag
== SIMPLE_CHARACTER_STRING_WIDETAG
1975 || widetag
== SIMPLE_BASE_STRING_WIDETAG
;
1978 static boolean
vector_isevery(boolean (*pred
)(lispobj
), struct vector
* v
)
1981 for (i
= fixnum_value(v
->length
)-1; i
>= 0; --i
)
1982 if (!pred(v
->data
[i
])) return 0;
1986 static void coalesce_obj(lispobj
* where
, struct hopscotch_table
* ht
)
1988 lispobj ptr
= *where
;
1989 if (lowtag_of(ptr
) != OTHER_POINTER_LOWTAG
)
1992 extern char gc_coalesce_string_literals
;
1993 // gc_coalesce_string_literals represents the "aggressiveness" level.
1994 // If 1, then we share vectors tagged as +VECTOR-SHAREABLE+,
1995 // but if >1, those and also +VECTOR-SHAREABLE-NONSTD+.
1996 int mask
= gc_coalesce_string_literals
> 1
1997 ? (VECTOR_SHAREABLE
|VECTOR_SHAREABLE_NONSTD
)<<N_WIDETAG_BITS
1998 : (VECTOR_SHAREABLE
)<<N_WIDETAG_BITS
;
2000 lispobj
* obj
= native_pointer(ptr
);
2001 lispobj header
= *obj
;
2002 int widetag
= widetag_of(header
);
2004 if ((((header
& mask
) != 0) // optimistically assume it's a vector
2005 && ((widetag
== SIMPLE_VECTOR_WIDETAG
2006 && vector_isevery(eql_comparable_p
, (struct vector
*)obj
))
2007 || specialized_vector_widetag_p(widetag
)))
2008 || coalescible_number_p(obj
)) {
2009 if (widetag
== SIMPLE_VECTOR_WIDETAG
) {
2010 sword_t n_elts
= fixnum_value(obj
[1]), i
;
2011 for (i
= 2 ; i
< n_elts
+2 ; ++i
)
2012 coalesce_obj(obj
+ i
, ht
);
2014 int index
= hopscotch_get(ht
, (uword_t
)obj
, 0);
2015 if (!index
) // Not found
2016 hopscotch_insert(ht
, (uword_t
)obj
, 1);
2018 *where
= make_lispobj((void*)ht
->keys
[index
-1], OTHER_POINTER_LOWTAG
);
2022 static uword_t
coalesce_range(lispobj
* where
, lispobj
* limit
, uword_t arg
)
2024 struct hopscotch_table
* ht
= (struct hopscotch_table
*)arg
;
2025 lispobj layout
, bitmap
, *next
;
2026 sword_t nwords
, i
, j
;
2028 for ( ; where
< limit
; where
= next
) {
2029 lispobj header
= *where
;
2030 if (is_cons_half(header
)) {
2031 coalesce_obj(where
+0, ht
);
2032 coalesce_obj(where
+1, ht
);
2035 int widetag
= widetag_of(header
);
2036 nwords
= sizetab
[widetag
](where
);
2037 next
= where
+ nwords
;
2039 case INSTANCE_WIDETAG
: // mixed boxed/unboxed objects
2040 #ifdef LISP_FEATURE_COMPACT_INSTANCE_HEADER
2041 case FUNCALLABLE_INSTANCE_WIDETAG
:
2043 layout
= instance_layout(where
);
2044 bitmap
= ((struct layout
*)native_pointer(layout
))->bitmap
;
2045 for(i
=1; i
<nwords
; ++i
)
2046 if (layout_bitmap_logbitp(i
-1, bitmap
))
2047 coalesce_obj(where
+i
, ht
);
2049 case CODE_HEADER_WIDETAG
:
2050 for_each_simple_fun(i
, fun
, (struct code
*)where
, 0, {
2051 lispobj
* fun_slots
= SIMPLE_FUN_SCAV_START(fun
);
2052 for (j
=0; j
<SIMPLE_FUN_SCAV_NWORDS(fun
); ++j
)
2053 coalesce_obj(fun_slots
+j
, ht
);
2055 nwords
= code_header_words(header
);
2058 if (unboxed_obj_widetag_p(widetag
))
2059 continue; // Ignore this object.
2061 for(i
=1; i
<nwords
; ++i
)
2062 coalesce_obj(where
+i
, ht
);
2068 void coalesce_similar_objects()
2070 struct hopscotch_table ht
;
2071 hopscotch_create(&ht
, HOPSCOTCH_VECTOR_HASH
, 0, 1<<17, 0);
2072 #ifndef LISP_FEATURE_WIN32
2073 // Apparently this triggers the "Unable to recommit" lossage message
2074 // in handle_access_violation() in src/runtime/win32-os.c
2075 coalesce_range((lispobj
*)STATIC_SPACE_START
,
2076 (lispobj
*)STATIC_SPACE_END
,
2079 #ifdef LISP_FEATURE_IMMOBILE_SPACE
2080 coalesce_range((lispobj
*)IMMOBILE_SPACE_START
,
2081 (lispobj
*)SYMBOL(IMMOBILE_FIXEDOBJ_FREE_POINTER
)->value
,
2083 coalesce_range((lispobj
*)IMMOBILE_VARYOBJ_SUBSPACE_START
,
2084 (lispobj
*)SYMBOL(IMMOBILE_SPACE_FREE_POINTER
)->value
,
2087 #ifdef LISP_FEATURE_GENCGC
2088 walk_generation(coalesce_range
, -1, (uword_t
)&ht
);
2092 hopscotch_destroy(&ht
);