2 * Garbage Collection common functions for scavenging, moving and sizing
3 * objects. These are for use with both GC (stop & copy GC) and GENCGC
7 * This software is part of the SBCL system. See the README file for
10 * This software is derived from the CMU CL system, which was
11 * written at Carnegie Mellon University and released into the
12 * public domain. The software is in the public domain and is
13 * provided with absolutely no warranty. See the COPYING and CREDITS
14 * files for more information.
18 * For a review of garbage collection techniques (e.g. generational
19 * GC) and terminology (e.g. "scavenging") see Paul R. Wilson,
20 * "Uniprocessor Garbage Collection Techniques". As of 20000618, this
21 * had been accepted for _ACM Computing Surveys_ and was available
22 * as a PostScript preprint through
23 * <http://www.cs.utexas.edu/users/oops/papers.html>
25 * <ftp://ftp.cs.utexas.edu/pub/garbage/bigsurv.ps>.
28 #define _GNU_SOURCE /* for ffsl(3) from string.h */
38 #include "interrupt.h"
43 #include "genesis/primitive-objects.h"
44 #include "genesis/static-symbols.h"
45 #include "genesis/layout.h"
46 #include "genesis/hash-table.h"
47 #include "gc-internal.h"
48 #include "forwarding-ptr.h"
51 #ifdef LISP_FEATURE_SPARC
52 #define LONG_FLOAT_SIZE 4
53 #elif defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
54 #define LONG_FLOAT_SIZE 3
/* Total size of the Lisp dynamic space, in bytes. */
57 os_vm_size_t dynamic_space_size
= DEFAULT_DYNAMIC_SPACE_SIZE
;
/* Size of each thread's control stack, in bytes. */
58 os_vm_size_t thread_control_stack_size
= DEFAULT_CONTROL_STACK_SIZE
;
/* 256-entry dispatch tables indexed by widetag: per-object-type
 * scavenge, transport, and size functions. */
60 sword_t (*scavtab
[256])(lispobj
*where
, lispobj object
);
61 lispobj (*transother
[256])(lispobj object
);
62 sword_t (*sizetab
[256])(lispobj
*where
);
/* Head of the list of weak pointers found during this GC
 * (consumed by scan_weak_pointers). */
63 struct weak_pointer
*weak_pointers
;
/* Bytes of consing between automatic GCs; 12 MiB by default. */
65 os_vm_size_t bytes_consed_between_gcs
= 12*1024*1024;
71 /* gc_general_copy_object is inline from gc-internal.h */
73 /* to copy a boxed object */
75 copy_object(lispobj object
, sword_t nwords
)
77 return gc_general_copy_object(object
, nwords
, BOXED_PAGE_FLAG
);
81 copy_code_object(lispobj object
, sword_t nwords
)
83 return gc_general_copy_object(object
, nwords
, CODE_PAGE_FLAG
);
86 static sword_t
scav_lose(lispobj
*where
, lispobj object
); /* forward decl */
88 #ifdef LISP_FEATURE_IMMOBILE_SPACE
89 static const int n_dwords_in_card
= GENCGC_CARD_BYTES
/ N_WORD_BYTES
/ 2;
90 extern uword_t
*page_table_pinned_dwords
;
92 static inline boolean
__attribute__((unused
))
93 pinned_p(lispobj obj
, page_index_t page
)
95 if (!page_table
[page
].has_pin_map
) return 0;
96 int dword_num
= (obj
& (GENCGC_CARD_BYTES
-1)) >> (1+WORD_SHIFT
);
97 uword_t
*bits
= &page_table_pinned_dwords
[page
* (n_dwords_in_card
/N_WORD_BITS
)];
98 return (bits
[dword_num
/ N_WORD_BITS
] >> (dword_num
% N_WORD_BITS
)) & 1;
103 scavenge(lispobj
*start
, sword_t n_words
)
105 lispobj
*end
= start
+ n_words
;
109 // * With 32-bit words, is_lisp_pointer(object) returns true if object_ptr
110 // points to a forwarding pointer, so we need a sanity check inside the
111 // branch for is_lisp_pointer(). For maximum efficiency, check that only
112 // after from_space_p() returns false, so that valid pointers into
113 // from_space incur no extra test. This could be improved further by
114 // skipping the FP check if 'object' points within dynamic space, i.e.,
115 // when find_page_index() returns >= 0. That would entail injecting
116 // from_space_p() explicitly into the loop, so as to separate the
117 // "was a page found at all" condition from the page generation test.
119 // * With 64-bit words, is_lisp_pointer(object) is false when object_ptr
120 // points to a forwarding pointer, and the fixnump() test also returns
121 // false, so we'll indirect through scavtab[]. This will safely invoke
122 // scav_lose(), detecting corruption without any extra cost.
123 // The major difference between that and the explicit test is that you
124 // won't see 'start' and 'n_words', but if you need those, chances are
125 // you'll want to run under an external debugger in the first place.
126 // [And btw it sure would be nice to assert statically
127 // that is_lisp_pointer(0x01) is indeed false]
129 #define FIX_POINTER() { \
130 lispobj *ptr = native_pointer(object); \
131 if (forwarding_pointer_p(ptr)) \
132 *object_ptr = LOW_WORD(forwarding_pointer_value(ptr)); \
133 else /* Scavenge that pointer. */ \
134 (void)scavtab[widetag_of(object)](object_ptr, object); \
137 for (object_ptr
= start
; object_ptr
< end
;) {
138 lispobj object
= *object_ptr
;
139 if (is_lisp_pointer(object
)) {
140 #ifdef LISP_FEATURE_IMMOBILE_SPACE
142 // It would be fine, though suboptimal, to use from_space_p() here.
143 // If it returns false, we don't want to call immobile_space_p()
144 // unless the pointer is *not* into dynamic space.
145 if ((page
= find_page_index((void*)object
)) >= 0) {
146 if (page_table
[page
].gen
== from_space
&& !pinned_p(object
, page
))
148 } else if (immobile_space_p(object
)) {
149 lispobj
*ptr
= native_pointer(object
);
150 if (immobile_obj_gen_bits(ptr
) == from_space
)
151 promote_immobile_obj(ptr
, 1);
154 if (from_space_p(object
)) {
157 #if (N_WORD_BITS == 32) && defined(LISP_FEATURE_GENCGC)
158 if (forwarding_pointer_p(object_ptr
))
159 lose("unexpected forwarding pointer in scavenge: %p, start=%p, n=%ld\n",
160 object_ptr
, start
, n_words
);
162 /* It points somewhere other than oldspace. Leave it
168 else if (fixnump(object
)) {
169 /* It's a fixnum: really easy.. */
172 /* It's some sort of header object or another. */
173 object_ptr
+= (scavtab
[widetag_of(object
)])(object_ptr
, object
);
176 gc_assert_verbose(object_ptr
== end
, "Final object pointer %p, start %p, end %p\n",
177 object_ptr
, start
, end
);
180 static lispobj
trans_fun_header(lispobj object
); /* forward decls */
181 static lispobj
trans_short_boxed(lispobj object
);
184 scav_fun_pointer(lispobj
*where
, lispobj object
)
186 lispobj
*first_pointer
;
189 gc_assert(lowtag_of(object
) == FUN_POINTER_LOWTAG
);
191 /* Object is a pointer into from_space - not a FP. */
192 first_pointer
= native_pointer(object
);
194 /* must transport object -- object may point to either a function
195 * header, a closure function header, or to a closure header. */
197 switch (widetag_of(*first_pointer
)) {
198 case SIMPLE_FUN_HEADER_WIDETAG
:
199 copy
= trans_fun_header(object
);
202 copy
= trans_short_boxed(object
);
206 if (copy
!= object
) {
207 /* Set forwarding pointer */
208 set_forwarding_pointer(first_pointer
,copy
);
211 CHECK_COPY_POSTCONDITIONS(copy
, FUN_POINTER_LOWTAG
);
220 trans_code(struct code
*code
)
222 /* if object has already been transported, just return pointer */
223 if (forwarding_pointer_p((lispobj
*)code
)) {
225 printf("Was already transported\n");
227 return (struct code
*)native_pointer(forwarding_pointer_value((lispobj
*)code
));
230 gc_assert(widetag_of(code
->header
) == CODE_HEADER_WIDETAG
);
232 /* prepare to transport the code vector */
233 lispobj l_code
= (lispobj
) LOW_WORD(code
) | OTHER_POINTER_LOWTAG
;
234 sword_t nheader_words
= code_header_words(code
->header
);
235 sword_t ncode_words
= code_instruction_words(code
->code_size
);
236 sword_t nwords
= nheader_words
+ ncode_words
;
237 lispobj l_new_code
= copy_code_object(l_code
, nwords
);
238 struct code
*new_code
= (struct code
*) native_pointer(l_new_code
);
240 #if defined(DEBUG_CODE_GC)
241 printf("Old code object at 0x%08x, new code object at 0x%08x.\n",
242 (uword_t
) code
, (uword_t
) new_code
);
243 printf("Code object is %d words long.\n", nwords
);
246 #ifdef LISP_FEATURE_GENCGC
247 if (new_code
== code
)
251 set_forwarding_pointer((lispobj
*)code
, l_new_code
);
253 /* set forwarding pointers for all the function headers in the */
254 /* code object. also fix all self pointers */
255 /* Do this by scanning the new code, since the old header is unusable */
257 uword_t displacement
= l_new_code
- l_code
;
259 for_each_simple_fun(i
, nfheaderp
, new_code
, 1, {
260 /* Calculate the old raw function pointer */
261 struct simple_fun
* fheaderp
=
262 (struct simple_fun
*)LOW_WORD((char*)nfheaderp
- displacement
);
263 /* Calculate the new lispobj */
264 lispobj nfheaderl
= make_lispobj(nfheaderp
, FUN_POINTER_LOWTAG
);
267 printf("fheaderp->header (at %x) <- %x\n",
268 &(fheaderp
->header
) , nfheaderl
);
270 set_forwarding_pointer((lispobj
*)fheaderp
, nfheaderl
);
272 /* fix self pointer. */
274 #if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
275 FUN_RAW_ADDR_OFFSET
+
279 #ifdef LISP_FEATURE_GENCGC
280 /* Cheneygc doesn't need this os_flush_icache, it flushes the whole
281 spaces once when all copying is done. */
282 os_flush_icache((os_vm_address_t
) (((sword_t
*)new_code
) + nheader_words
),
283 ncode_words
* sizeof(sword_t
));
287 #ifdef LISP_FEATURE_X86
288 gencgc_apply_code_fixups(code
, new_code
);
295 scav_code_header(lispobj
*where
, lispobj header
)
297 struct code
*code
= (struct code
*) where
;
298 sword_t n_header_words
= code_header_words(header
);
300 /* Scavenge the boxed section of the code data block. */
301 scavenge(where
+ 1, n_header_words
- 1);
303 /* Scavenge the boxed section of each function object in the
304 * code data block. */
305 for_each_simple_fun(i
, function_ptr
, code
, 1, {
306 scavenge(SIMPLE_FUN_SCAV_START(function_ptr
),
307 SIMPLE_FUN_SCAV_NWORDS(function_ptr
));
310 return n_header_words
+ code_instruction_words(code
->code_size
);
314 trans_code_header(lispobj object
)
318 ncode
= trans_code((struct code
*) native_pointer(object
));
319 return (lispobj
) LOW_WORD(ncode
) | OTHER_POINTER_LOWTAG
;
323 size_code_header(lispobj
*where
)
325 return code_header_words(((struct code
*)where
)->header
)
326 + code_instruction_words(((struct code
*)where
)->code_size
);
329 #ifdef RETURN_PC_HEADER_WIDETAG
331 scav_return_pc_header(lispobj
*where
, lispobj object
)
333 lose("attempted to scavenge a return PC header where=%p object=%#lx\n",
334 where
, (uword_t
) object
);
335 return 0; /* bogus return value to satisfy static type checking */
339 trans_return_pc_header(lispobj object
)
341 struct simple_fun
*return_pc
;
343 struct code
*code
, *ncode
;
345 return_pc
= (struct simple_fun
*) native_pointer(object
);
346 offset
= HeaderValue(return_pc
->header
) * N_WORD_BYTES
;
348 /* Transport the whole code object */
349 code
= (struct code
*) ((uword_t
) return_pc
- offset
);
350 ncode
= trans_code(code
);
352 return ((lispobj
) LOW_WORD(ncode
) + offset
) | OTHER_POINTER_LOWTAG
;
354 #endif /* RETURN_PC_HEADER_WIDETAG */
356 /* On the 386, closures hold a pointer to the raw address instead of the
357 * function object, so we can use CALL [$FDEFN+const] to invoke
358 * the function without loading it into a register. Given that code
359 * objects don't move, we don't need to update anything, but we do
360 * have to figure out that the function is still live. */
362 #if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
364 scav_closure_header(lispobj
*where
, lispobj object
)
366 struct closure
*closure
;
369 closure
= (struct closure
*)where
;
370 fun
= closure
->fun
- FUN_RAW_ADDR_OFFSET
;
372 #ifdef LISP_FEATURE_GENCGC
373 /* The function may have moved so update the raw address. But
374 * don't write unnecessarily. */
375 if (closure
->fun
!= fun
+ FUN_RAW_ADDR_OFFSET
)
376 closure
->fun
= fun
+ FUN_RAW_ADDR_OFFSET
;
382 #if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
384 scav_fun_header(lispobj
*where
, lispobj object
)
386 lose("attempted to scavenge a function header where=%p object=%#lx\n",
387 where
, (uword_t
) object
);
388 return 0; /* bogus return value to satisfy static type checking */
390 #endif /* LISP_FEATURE_X86 */
393 trans_fun_header(lispobj object
)
395 struct simple_fun
*fheader
;
397 struct code
*code
, *ncode
;
399 fheader
= (struct simple_fun
*) native_pointer(object
);
400 offset
= HeaderValue(fheader
->header
) * N_WORD_BYTES
;
402 /* Transport the whole code object */
403 code
= (struct code
*) ((uword_t
) fheader
- offset
);
404 ncode
= trans_code(code
);
406 return ((lispobj
) LOW_WORD(ncode
) + offset
) | FUN_POINTER_LOWTAG
;
415 trans_instance(lispobj object
)
417 gc_assert(lowtag_of(object
) == INSTANCE_POINTER_LOWTAG
);
418 lispobj header
= *(lispobj
*)(object
- INSTANCE_POINTER_LOWTAG
);
419 return copy_object(object
, 1 + (instance_length(header
)|1));
423 size_instance(lispobj
*where
)
425 return 1 + (instance_length(*where
)|1);
429 scav_instance_pointer(lispobj
*where
, lispobj object
)
431 lispobj copy
, *first_pointer
;
433 /* Object is a pointer into from space - not a FP. */
434 copy
= trans_instance(object
);
436 #ifdef LISP_FEATURE_GENCGC
437 gc_assert(copy
!= object
);
440 first_pointer
= native_pointer(object
);
441 set_forwarding_pointer(first_pointer
,copy
);
452 static lispobj
trans_list(lispobj object
);
455 scav_list_pointer(lispobj
*where
, lispobj object
)
458 gc_assert(lowtag_of(object
) == LIST_POINTER_LOWTAG
);
460 copy
= trans_list(object
);
461 gc_assert(copy
!= object
);
463 CHECK_COPY_POSTCONDITIONS(copy
, LIST_POINTER_LOWTAG
);
471 trans_list(lispobj object
)
474 struct cons
*copy
= (struct cons
*)
475 gc_general_alloc(sizeof(struct cons
), BOXED_PAGE_FLAG
, ALLOC_QUICK
);
476 lispobj new_list_pointer
= make_lispobj(copy
, LIST_POINTER_LOWTAG
);
477 copy
->car
= CONS(object
)->car
;
478 /* Grab the cdr: set_forwarding_pointer will clobber it in GENCGC */
479 lispobj cdr
= CONS(object
)->cdr
;
480 set_forwarding_pointer((lispobj
*)CONS(object
), new_list_pointer
);
482 /* Try to linearize the list in the cdr direction to help reduce
484 while (lowtag_of(cdr
) == LIST_POINTER_LOWTAG
&& from_space_p(cdr
)) {
485 lispobj
* native_cdr
= (lispobj
*)CONS(cdr
);
486 if (forwarding_pointer_p(native_cdr
)) { // Might as well fix now.
487 cdr
= forwarding_pointer_value(native_cdr
);
491 struct cons
*cdr_copy
= (struct cons
*)
492 gc_general_alloc(sizeof(struct cons
), BOXED_PAGE_FLAG
, ALLOC_QUICK
);
493 cdr_copy
->car
= ((struct cons
*)native_cdr
)->car
;
494 /* Grab the cdr before it is clobbered. */
495 lispobj next
= ((struct cons
*)native_cdr
)->cdr
;
496 /* Set cdr of the predecessor, and store an FP. */
497 set_forwarding_pointer(native_cdr
,
498 copy
->cdr
= make_lispobj(cdr_copy
,
499 LIST_POINTER_LOWTAG
));
504 return new_list_pointer
;
509 * scavenging and transporting other pointers
513 scav_other_pointer(lispobj
*where
, lispobj object
)
515 lispobj copy
, *first_pointer
;
517 gc_assert(lowtag_of(object
) == OTHER_POINTER_LOWTAG
);
519 /* Object is a pointer into from space - not FP. */
520 first_pointer
= (lispobj
*)(object
- OTHER_POINTER_LOWTAG
);
521 copy
= (transother
[widetag_of(*first_pointer
)])(object
);
523 // If the object was large, then instead of transporting it,
524 // gencgc might simply promote the pages and return the same pointer.
525 // That decision is made in general_copy_large_object().
526 if (copy
!= object
) {
527 set_forwarding_pointer(first_pointer
, copy
);
528 #ifdef LISP_FEATURE_GENCGC
532 #ifndef LISP_FEATURE_GENCGC
535 CHECK_COPY_POSTCONDITIONS(copy
, OTHER_POINTER_LOWTAG
);
540 * immediate, boxed, and unboxed objects
544 size_pointer(lispobj
*where
)
550 scav_immediate(lispobj
*where
, lispobj object
)
556 trans_immediate(lispobj object
)
558 lose("trying to transport an immediate\n");
559 return NIL
; /* bogus return value to satisfy static type checking */
563 size_immediate(lispobj
*where
)
570 scav_boxed(lispobj
*where
, lispobj object
)
575 boolean
positive_bignum_logbitp(int index
, struct bignum
* bignum
)
577 /* If the bignum in the layout has another pointer to it (besides the layout)
578 acting as a root, and which is scavenged first, then transporting the
579 bignum causes the layout to see a FP, as would copying an instance whose
580 layout that is. This is a nearly impossible scenario to create organically
581 in Lisp, because mostly nothing ever looks again at that exact (EQ) bignum
582 except for a few things that would cause it to be pinned anyway,
583 such as it being kept in a local variable during structure manipulation.
584 See 'interleaved-raw.impure.lisp' for a way to trigger this */
585 if (forwarding_pointer_p((lispobj
*)bignum
)) {
586 lispobj forwarded
= forwarding_pointer_value((lispobj
*)bignum
);
588 fprintf(stderr
, "GC bignum_logbitp(): fwd from %p to %p\n",
589 (void*)bignum
, (void*)forwarded
);
591 bignum
= (struct bignum
*)native_pointer(forwarded
);
594 int len
= HeaderValue(bignum
->header
);
595 int word_index
= index
/ N_WORD_BITS
;
596 int bit_index
= index
% N_WORD_BITS
;
597 if (word_index
>= len
) {
598 // just return 0 since the marking logic does not allow negative bignums
601 return (bignum
->digits
[word_index
] >> bit_index
) & 1;
605 struct instance_scanner
{
607 void (*proc
)(lispobj
*, sword_t
);
610 // Helper function for helper function below, since lambda isn't a thing
611 static void instance_scan_range(void* arg
, int offset
, int nwords
)
613 struct instance_scanner
*scanner
= (struct instance_scanner
*)arg
;
614 scanner
->proc(scanner
->base
+ offset
, nwords
);
617 // Helper function for stepping through the tagged slots of an instance in
618 // scav_instance and verify_space.
620 instance_scan(void (*proc
)(lispobj
*, sword_t
),
621 lispobj
*instance_slots
,
623 lispobj layout_bitmap
)
627 /* This code might be made more efficient by run-length-encoding the ranges
628 of words to scan, but probably not by much */
630 if (fixnump(layout_bitmap
)) {
631 sword_t bitmap
= (sword_t
)layout_bitmap
>> N_FIXNUM_TAG_BITS
; // signed integer!
632 for (index
= 0; index
< n_words
; index
++, bitmap
>>= 1)
634 proc(instance_slots
+ index
, 1);
635 } else { /* huge bitmap */
636 struct bignum
* bitmap
;
637 bitmap
= (struct bignum
*)native_pointer(layout_bitmap
);
638 if (forwarding_pointer_p((lispobj
*)bitmap
))
639 bitmap
= (struct bignum
*)
640 native_pointer(forwarding_pointer_value((lispobj
*)bitmap
));
641 struct instance_scanner scanner
;
642 scanner
.base
= instance_slots
;
644 bitmap_scan((uword_t
*)bitmap
->digits
, HeaderValue(bitmap
->header
), 0,
645 instance_scan_range
, &scanner
);
649 void bitmap_scan(uword_t
* bitmap
, int n_bitmap_words
, int flags
,
650 void (*proc
)(void*, int, int), void* arg
)
652 uword_t sense
= (flags
& BIT_SCAN_INVERT
) ? ~0L : 0;
653 int start_word_index
= 0;
655 in_use_marker_t word
;
657 flags
= flags
& BIT_SCAN_CLEAR
;
659 // Rather than bzero'ing we can just clear each nonzero word as it's read,
661 #define BITMAP_REF(j) word = bitmap[j]; if(word && flags) bitmap[j] = 0; word ^= sense
664 int skip_bits
, start_bit
, start_position
, run_length
;
666 if (++start_word_index
>= n_bitmap_words
) break;
667 BITMAP_REF(start_word_index
);
671 // On each loop iteration, the lowest 1 bit is a "relative"
672 // bit index, since the word was already shifted. This is 'skip_bits'.
673 // Adding back in the total shift amount gives 'start_bit',
674 // the true absolute index within the current word.
675 // 'start_position' is absolute within the entire bitmap.
676 skip_bits
= ffsl(word
) - 1;
677 start_bit
= skip_bits
+ shift
;
678 start_position
= N_WORD_BITS
* start_word_index
+ start_bit
;
679 // Compute the number of consecutive 1s in the current word.
681 run_length
= ~word
? ffsl(~word
) - 1 : N_WORD_BITS
;
682 if (start_bit
+ run_length
< N_WORD_BITS
) { // Do not extend to additional words.
684 shift
+= skip_bits
+ run_length
;
686 int end_word_index
= ++start_word_index
;
688 if (end_word_index
>= n_bitmap_words
) {
690 run_length
+= (end_word_index
- start_word_index
) * N_WORD_BITS
;
693 BITMAP_REF(end_word_index
);
697 // end_word_index is the exclusive bound on contiguous
698 // words to include in the range. See if the low bits
699 // from the next word can extend the range.
700 shift
= ffsl(~word
) - 1;
702 run_length
+= (end_word_index
- start_word_index
) * N_WORD_BITS
707 start_word_index
= end_word_index
;
709 proc(arg
, start_position
, run_length
);
715 scav_instance(lispobj
*where
, lispobj header
)
717 lispobj
* layout
= (lispobj
*)instance_layout(where
);
720 layout
= native_pointer((lispobj
)layout
);
721 #ifdef LISP_FEATURE_COMPACT_INSTANCE_HEADER
722 if (__immobile_obj_gen_bits(layout
) == from_space
)
723 promote_immobile_obj(layout
, 1);
725 if (forwarding_pointer_p(layout
))
726 layout
= native_pointer(forwarding_pointer_value(layout
));
729 sword_t nslots
= instance_length(header
) | 1;
730 lispobj bitmap
= ((struct layout
*)layout
)->bitmap
;
731 if (bitmap
== make_fixnum(-1))
732 scavenge(where
+1, nslots
);
734 instance_scan(scavenge
, where
+1, nslots
, bitmap
);
739 #ifdef LISP_FEATURE_COMPACT_INSTANCE_HEADER
741 scav_funinstance(lispobj
*where
, lispobj header
)
743 // This works because the layout is in the header word of all instances,
744 // ordinary and funcallable, when compact headers are enabled.
745 // The trampoline slot in the funcallable-instance is raw, but can be
746 // scavenged, because it points to readonly space, never oldspace.
747 // (And for certain backends it looks like a fixnum, not a pointer)
748 return scav_instance(where
, header
);
752 static lispobj
trans_boxed(lispobj object
)
754 gc_assert(is_lisp_pointer(object
));
755 sword_t length
= HeaderValue(*native_pointer(object
)) + 1;
756 return copy_object(object
, CEILING(length
, 2));
759 static sword_t
size_boxed(lispobj
*where
)
761 sword_t length
= HeaderValue(*where
) + 1;
762 return CEILING(length
, 2);
765 static lispobj
trans_short_boxed(lispobj object
) // Payload count expressed in 15 bits
767 sword_t length
= (HeaderValue(*native_pointer(object
)) & SHORT_HEADER_MAX_WORDS
) + 1;
768 return copy_object(object
, CEILING(length
, 2));
771 static sword_t
size_short_boxed(lispobj
*where
)
773 sword_t length
= (HeaderValue(*where
) & SHORT_HEADER_MAX_WORDS
) + 1;
774 return CEILING(length
, 2);
777 static lispobj
trans_tiny_boxed(lispobj object
) // Payload count expressed in 8 bits
779 sword_t length
= (HeaderValue(*native_pointer(object
)) & 0xFF) + 1;
780 return copy_object(object
, CEILING(length
, 2));
783 static sword_t
size_tiny_boxed(lispobj
*where
)
785 sword_t length
= (HeaderValue(*where
) & 0xFF) + 1;
786 return CEILING(length
, 2);
789 /* Note: on the sparc we don't have to do anything special for fdefns, */
790 /* 'cause the raw-addr has a function lowtag. */
791 #if !defined(LISP_FEATURE_SPARC) && !defined(LISP_FEATURE_ARM)
793 scav_fdefn(lispobj
*where
, lispobj object
)
797 fdefn
= (struct fdefn
*)where
;
799 /* FSHOW((stderr, "scav_fdefn, function = %p, raw_addr = %p\n",
800 fdefn->fun, fdefn->raw_addr)); */
802 scavenge(where
+ 1, 2); // 'name' and 'fun'
803 #ifndef LISP_FEATURE_IMMOBILE_CODE
804 lispobj raw_fun
= (lispobj
)fdefn
->raw_addr
;
805 if (raw_fun
> READ_ONLY_SPACE_END
) {
806 lispobj simple_fun
= raw_fun
- FUN_RAW_ADDR_OFFSET
;
807 scavenge(&simple_fun
, 1);
808 /* Don't write unnecessarily. */
809 if (simple_fun
!= raw_fun
- FUN_RAW_ADDR_OFFSET
)
810 fdefn
->raw_addr
= (char *)simple_fun
+ FUN_RAW_ADDR_OFFSET
;
812 #elif defined(LISP_FEATURE_X86_64)
813 lispobj obj
= fdefn_raw_referent(fdefn
);
816 scavenge(&new, 1); // enliven
817 gc_assert(new == obj
); // must not move
820 # error "Need to implement scav_fdefn"
827 scav_unboxed(lispobj
*where
, lispobj object
)
829 sword_t length
= HeaderValue(object
) + 1;
830 return CEILING(length
, 2);
834 trans_unboxed(lispobj object
)
836 gc_assert(lowtag_of(object
) == OTHER_POINTER_LOWTAG
);
837 sword_t length
= HeaderValue(*native_pointer(object
)) + 1;
838 return copy_unboxed_object(object
, CEILING(length
, 2));
842 size_unboxed(lispobj
*where
)
844 sword_t length
= HeaderValue(*where
) + 1;
845 return CEILING(length
, 2);
849 /* vector-like objects */
851 trans_vector(lispobj object
)
853 gc_assert(lowtag_of(object
) == OTHER_POINTER_LOWTAG
);
856 fixnum_value(((struct vector
*)native_pointer(object
))->length
);
857 return copy_large_object(object
, CEILING(length
+ 2, 2));
861 size_vector(lispobj
*where
)
863 sword_t length
= fixnum_value(((struct vector
*)where
)->length
);
864 return CEILING(length
+ 2, 2);
867 #define DEF_SCAV_TRANS_SIZE_UB(nbits) \
868 DEF_SPECIALIZED_VECTOR(vector_unsigned_byte_##nbits, NWORDS(length, nbits))
869 #define DEF_SPECIALIZED_VECTOR(name, nwords) \
870 static sword_t __attribute__((unused)) scav_##name(lispobj *where, lispobj header) { \
871 sword_t length = fixnum_value(((struct vector*)where)->length); \
872 return CEILING(nwords + 2, 2); \
874 static lispobj __attribute__((unused)) trans_##name(lispobj object) { \
875 gc_assert(lowtag_of(object)==OTHER_POINTER_LOWTAG); \
876 sword_t length = fixnum_value(((struct vector*)(object-OTHER_POINTER_LOWTAG))->length); \
877 return copy_large_unboxed_object(object, CEILING(nwords + 2, 2)); \
879 static sword_t __attribute__((unused)) size_##name(lispobj *where) { \
880 sword_t length = fixnum_value(((struct vector*)where)->length); \
881 return CEILING(nwords + 2, 2); \
884 DEF_SPECIALIZED_VECTOR(vector_nil
, 0*length
)
885 DEF_SPECIALIZED_VECTOR(vector_bit
, NWORDS(length
,1))
886 /* NOTE: strings contain one more element of data (a terminating '\0'
887 * to help interface with C functions) than indicated by the length slot.
888 * This is true even for UCS4 strings, despite that C APIs are unlikely
889 * to have a convention that expects 4 zero bytes. */
890 DEF_SPECIALIZED_VECTOR(base_string
, NWORDS((length
+1), 8))
891 DEF_SPECIALIZED_VECTOR(character_string
, NWORDS((length
+1), 32))
892 DEF_SCAV_TRANS_SIZE_UB(2)
893 DEF_SCAV_TRANS_SIZE_UB(4)
894 DEF_SCAV_TRANS_SIZE_UB(8)
895 DEF_SCAV_TRANS_SIZE_UB(16)
896 DEF_SCAV_TRANS_SIZE_UB(32)
897 DEF_SCAV_TRANS_SIZE_UB(64)
898 DEF_SCAV_TRANS_SIZE_UB(128)
899 #ifdef LONG_FLOAT_SIZE
900 DEF_SPECIALIZED_VECTOR(vector_long_float
, length
* LONG_FLOAT_SIZE
)
901 DEF_SPECIALIZED_VECTOR(vector_complex_long_float
, length
* (2 * LONG_FLOAT_SIZE
))
904 #define WEAK_POINTER_NWORDS \
905 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
908 trans_weak_pointer(lispobj object
)
911 #ifndef LISP_FEATURE_GENCGC
912 struct weak_pointer
*wp
;
914 gc_assert(lowtag_of(object
) == OTHER_POINTER_LOWTAG
);
916 #if defined(DEBUG_WEAK)
917 printf("Transporting weak pointer from 0x%08x\n", object
);
920 /* Need to remember where all the weak pointers are that have */
921 /* been transported so they can be fixed up in a post-GC pass. */
923 copy
= copy_object(object
, WEAK_POINTER_NWORDS
);
924 #ifndef LISP_FEATURE_GENCGC
925 wp
= (struct weak_pointer
*) native_pointer(copy
);
927 gc_assert(widetag_of(wp
->header
)==WEAK_POINTER_WIDETAG
);
928 /* Push the weak pointer onto the list of weak pointers. */
929 wp
->next
= (struct weak_pointer
*)LOW_WORD(weak_pointers
);
936 size_weak_pointer(lispobj
*where
)
938 return WEAK_POINTER_NWORDS
;
942 void scan_weak_pointers(void)
944 struct weak_pointer
*wp
, *next_wp
;
945 for (wp
= weak_pointers
, next_wp
= NULL
; wp
!= NULL
; wp
= next_wp
) {
946 lispobj value
= wp
->value
;
947 lispobj
*first_pointer
;
948 gc_assert(widetag_of(wp
->header
)==WEAK_POINTER_WIDETAG
);
952 if (next_wp
== wp
) /* gencgc uses a ref to self for end of list */
955 if (!is_lisp_pointer(value
))
958 /* Now, we need to check whether the object has been forwarded. If
959 * it has been, the weak pointer is still good and needs to be
960 * updated. Otherwise, the weak pointer needs to be nil'ed
963 if (from_space_p(value
)) {
964 first_pointer
= native_pointer(value
);
966 if (forwarding_pointer_p(first_pointer
)) {
967 wp
->value
= LOW_WORD(forwarding_pointer_value(first_pointer
));
974 #ifdef LISP_FEATURE_IMMOBILE_SPACE
975 else if (immobile_space_p(value
) &&
976 immobile_obj_gen_bits(native_pointer(value
)) == from_space
) {
987 #if N_WORD_BITS == 32
988 #define EQ_HASH_MASK 0x1fffffff
989 #elif N_WORD_BITS == 64
990 #define EQ_HASH_MASK 0x1fffffffffffffff
993 /* Compute the EQ-hash of KEY. This must match POINTER-HASH in
994 * target-hash-table.lisp. */
995 #define EQ_HASH(key) ((key) & EQ_HASH_MASK)
997 /* List of weak hash tables chained through their NEXT-WEAK-HASH-TABLE
998 * slot. Set to NULL at the end of a collection.
1000 * This is not optimal because, when a table is tenured, it won't be
1001 * processed automatically; only the youngest generation is GC'd by
1002 * default. On the other hand, all applications will need an
1003 * occasional full GC anyway, so it's not that bad either. */
1004 struct hash_table
*weak_hash_tables
= NULL
;
1006 /* Return true if OBJ has already survived the current GC. */
1008 survived_gc_yet (lispobj obj
)
1010 #ifdef LISP_FEATURE_IMMOBILE_SPACE
1011 /* If an immobile object's generation# is that of 'from_space', but has been
1012 visited (i.e. is live), then it is conceptually not in 'from_space'.
1013 This can happen when and only when _not_ raising the generation number.
1014 Since the gen_bits() accessor returns the visited bit, the byte value
1015 is numerically unequal to 'from_space', which is what we want */
1016 return !is_lisp_pointer(obj
)
1017 || (immobile_space_p(obj
)
1018 ? immobile_obj_gen_bits(native_pointer(obj
)) != from_space
1019 : (!from_space_p(obj
) || forwarding_pointer_p(native_pointer(obj
))));
1021 return (!is_lisp_pointer(obj
) || !from_space_p(obj
) ||
1022 forwarding_pointer_p(native_pointer(obj
)));
1026 static int survived_gc_yet_KEY(lispobj key
, lispobj value
) {
1027 return survived_gc_yet(key
);
1029 static int survived_gc_yet_VALUE(lispobj key
, lispobj value
) {
1030 return survived_gc_yet(value
);
1032 static int survived_gc_yet_AND(lispobj key
, lispobj value
) {
1033 return survived_gc_yet(key
) && survived_gc_yet(value
);
1035 static int survived_gc_yet_OR(lispobj key
, lispobj value
) {
1036 return survived_gc_yet(key
) || survived_gc_yet(value
);
1039 static int (*weak_hash_entry_alivep_fun(lispobj weakness
))(lispobj
,lispobj
)
1042 case KEY
: return survived_gc_yet_KEY
;
1043 case VALUE
: return survived_gc_yet_VALUE
;
1044 case KEY_OR_VALUE
: return survived_gc_yet_OR
;
1045 case KEY_AND_VALUE
: return survived_gc_yet_AND
;
1046 case NIL
: return NULL
;
1047 default: lose("Bad hash table weakness");
1051 /* Return the beginning of data in ARRAY (skipping the header and the
1052 * length) or NULL if it isn't an array of the specified widetag after
1054 static inline lispobj
*
1055 get_array_data (lispobj array
, int widetag
, uword_t
*length
)
1057 if (is_lisp_pointer(array
) && widetag_of(*native_pointer(array
)) == widetag
) {
1059 *length
= fixnum_value(native_pointer(array
)[1]);
1060 return native_pointer(array
) + 2;
1066 /* Only need to worry about scavenging the _real_ entries in the
1067 * table. Phantom entries such as the hash table itself at index 0 and
1068 * the empty marker at index 1 were scavenged by scav_vector that
1069 * either called this function directly or arranged for it to be
1070 * called later by pushing the hash table onto weak_hash_tables. */
/* Scavenge the live key/value pairs of HASH_TABLE.  Fetches the
 * table's backing vectors (kv, index, next, and optional hash vector),
 * sanity-checks their lengths, then walks the kv-vector scavenging
 * each entry whose aliveness predicate (determined by the table's
 * weakness) holds.  EQ-based keys that move during scavenging mark
 * the table for rehash. */
1072 scav_hash_table_entries (struct hash_table
*hash_table
)
1076 lispobj
*index_vector
;
1078 lispobj
*next_vector
;
1079 uword_t next_vector_length
;
1080 lispobj
*hash_vector
;
1081 uword_t hash_vector_length
;
1082 lispobj empty_symbol
;
/* Weakness is NIL for ordinary tables, or a keyword selecting which
 * of key/value must be live for the entry to survive. */
1083 lispobj weakness
= hash_table
->weakness
;
/* The kv_vector holds alternating keys and values; it must exist. */
1086 kv_vector
= get_array_data(hash_table
->table
,
1087 SIMPLE_VECTOR_WIDETAG
, &kv_length
);
1088 if (kv_vector
== NULL
)
1089 lose("invalid kv_vector %x\n", hash_table
->table
);
/* index_vector maps hash buckets to chain heads. */
1091 index_vector
= get_array_data(hash_table
->index_vector
,
1092 SIMPLE_ARRAY_WORD_WIDETAG
, &length
);
1093 if (index_vector
== NULL
)
1094 lose("invalid index_vector %x\n", hash_table
->index_vector
);
/* next_vector links entries within a bucket chain. */
1096 next_vector
= get_array_data(hash_table
->next_vector
,
1097 SIMPLE_ARRAY_WORD_WIDETAG
,
1098 &next_vector_length
);
1099 if (next_vector
== NULL
)
1100 lose("invalid next_vector %x\n", hash_table
->next_vector
);
/* hash_vector caches each key's hash; absent (NULL) for pure-EQ
 * tables, where address-based hashing is implied. */
1102 hash_vector
= get_array_data(hash_table
->hash_vector
,
1103 SIMPLE_ARRAY_WORD_WIDETAG
,
1104 &hash_vector_length
);
1105 if (hash_vector
!= NULL
)
1106 gc_assert(hash_vector_length
== next_vector_length
);
1108 /* These lengths could be different as the index_vector can be a
1109 * different length from the others, a larger index_vector could
1110 * help reduce collisions. */
1111 gc_assert(next_vector_length
*2 == kv_length
);
/* Element 1 of the kv_vector is the reserved empty-slot marker. */
1113 empty_symbol
= kv_vector
[1];
1114 /* fprintf(stderr,"* empty_symbol = %x\n", empty_symbol);*/
1115 if (widetag_of(*native_pointer(empty_symbol
)) != SYMBOL_HEADER_WIDETAG
) {
1116 lose("not a symbol where empty-hash-table-slot symbol expected: %x\n",
1117 *native_pointer(empty_symbol
));
1120 /* Work through the KV vector. */
1121 int (*alivep_test
)(lispobj
,lispobj
) = weak_hash_entry_alivep_fun(weakness
);
1122 #define SCAV_ENTRIES(aliveness_predicate) \
1123 for (i = 1; i < next_vector_length; i++) { \
1124 lispobj old_key = kv_vector[2*i]; \
1125 lispobj __attribute__((unused)) value = kv_vector[2*i+1]; \
1126 if (aliveness_predicate) { \
1127 /* Scavenge the key and value. */ \
1128 scavenge(&kv_vector[2*i], 2); \
1129 /* If an EQ-based key has moved, mark the hash-table for rehash */ \
1130 if (!hash_vector || hash_vector[i] == MAGIC_HASH_VECTOR_VALUE) { \
1131 lispobj new_key = kv_vector[2*i]; \
1132 if (old_key != new_key && new_key != empty_symbol) \
1133 hash_table->needs_rehash_p = T; \
/* For weak tables only entries passing alivep_test are scavenged now;
 * the rest are handled later by the weak-table scan. */
1136 SCAV_ENTRIES(alivep_test(old_key
, value
))
/* Scavenge a simple-vector at WHERE.  Vectors flagged with the
 * valid-hashing subtype are the kv-vectors of EQ-based or weak hash
 * tables and need the special handling below; the hash table object
 * itself lives at element 0 and the empty-slot marker at element 1.
 * Returns the number of words consumed. */
1142 scav_vector (lispobj
*where
, lispobj object
)
1145 struct hash_table
*hash_table
;
1147 /* SB-VM:VECTOR-VALID-HASHING-SUBTYPE is set for EQ-based and weak
1148 * hash tables in the Lisp HASH-TABLE code to indicate need for
1149 * special GC support. */
1150 if ((HeaderValue(object
) & 0xFF) == subtype_VectorNormal
)
/* kv_length is the raw element count of the vector (2 per entry). */
1153 kv_length
= fixnum_value(where
[1]);
1154 /*FSHOW((stderr,"/kv_length = %d\n", kv_length));*/
1156 /* Scavenge element 0, which may be a hash-table structure. */
1157 scavenge(where
+2, 1);
1158 if (!is_lisp_pointer(where
[2])) {
1159 /* This'll happen when REHASH clears the header of old-kv-vector
1160 * and fills it with zero, but some other thread simultaneously
1161 * sets the header in %%PUTHASH.
1164 "Warning: no pointer at %p in hash table: this indicates "
1165 "non-fatal corruption caused by concurrent access to a "
1166 "hash-table from multiple threads. Any accesses to "
1167 "hash-tables shared between threads should be protected "
1168 "by locks.\n", (void*)&where
[2]);
1169 // We've scavenged three words.
/* Element 0 holds the hash-table instance governing this kv-vector. */
1172 hash_table
= (struct hash_table
*)native_pointer(where
[2]);
1173 /*FSHOW((stderr,"/hash_table = %x\n", hash_table));*/
1174 if (widetag_of(hash_table
->header
) != INSTANCE_HEADER_WIDETAG
) {
1175 lose("hash table not instance (%x at %x)\n",
1180 /* Scavenge element 1, which should be some internal symbol that
1181 * the hash table code reserves for marking empty slots. */
1182 scavenge(where
+3, 1);
1183 if (!is_lisp_pointer(where
[3])) {
1184 lose("not empty-hash-table-slot symbol pointer: %x\n", where
[3]);
1187 /* Scavenge hash table, which will fix the positions of the other
1188 * needed objects. */
1189 scavenge((lispobj
*)hash_table
,
1190 CEILING(sizeof(struct hash_table
) / sizeof(lispobj
), 2));
1192 /* Cross-check the kv_vector. */
1193 if (where
!= native_pointer(hash_table
->table
)) {
1194 lose("hash_table table!=this table %x\n", hash_table
->table
);
/* Non-weak tables can have their entries scavenged immediately. */
1197 if (hash_table
->weakness
== NIL
) {
1198 scav_hash_table_entries(hash_table
);
1200 /* Delay scavenging of this table by pushing it onto
1201 * weak_hash_tables (if it's not there already) for the weak
/* next_weak_hash_table == NIL means "not yet enqueued". */
1203 if (hash_table
->next_weak_hash_table
== NIL
) {
1204 hash_table
->next_weak_hash_table
= (lispobj
)weak_hash_tables
;
1205 weak_hash_tables
= hash_table
;
/* Words consumed: header + length + kv_length, rounded to even. */
1209 return (CEILING(kv_length
+ 2, 2));
/* Re-scavenge entries of all queued weak hash tables.  Called
 * iteratively during GC: entries whose triggers (key and/or value,
 * per table weakness) have been discovered live since the last pass
 * get scavenged on this pass. */
1213 scav_weak_hash_tables (void)
1215 struct hash_table
*table
;
1217 /* Scavenge entries whose triggers are known to survive. */
/* Walk the singly linked list threaded through next_weak_hash_table,
 * built up by scav_vector. */
1218 for (table
= weak_hash_tables
; table
!= NULL
;
1219 table
= (struct hash_table
*)table
->next_weak_hash_table
) {
1220 scav_hash_table_entries(table
);
1224 /* Walk through the chain whose first element is *FIRST and remove
1225 * dead weak entries. */
/* Examine one entry of a weak table's bucket chain, given PREV (the
 * slot holding the chain link to this entry).  A dead entry is
 * unlinked from the chain, pushed onto the table's free-kv list, and
 * its key/value slots are reset to the empty-slot marker; a live
 * entry just advances PREV.  Only a portion of the chain-walking loop
 * is visible in this excerpt. */
1227 scan_weak_hash_table_chain (struct hash_table
*hash_table
, lispobj
*prev
,
1228 lispobj
*kv_vector
, lispobj
*index_vector
,
1229 lispobj
*next_vector
, lispobj
*hash_vector
,
1230 lispobj empty_symbol
, int (*alivep_test
)(lispobj
,lispobj
))
/* *PREV is the kv index of the entry currently under examination. */
1232 unsigned index
= *prev
;
1234 unsigned next
= next_vector
[index
];
1235 lispobj key
= kv_vector
[2 * index
];
1236 lispobj value
= kv_vector
[2 * index
+ 1];
/* Chained entries are occupied by construction. */
1237 gc_assert(key
!= empty_symbol
);
1238 gc_assert(value
!= empty_symbol
);
1239 if (!alivep_test(key
, value
)) {
/* Entry is dead: drop it and recycle its kv slot. */
1240 unsigned count
= fixnum_value(hash_table
->number_entries
);
1241 gc_assert(count
> 0);
1243 hash_table
->number_entries
= make_fixnum(count
- 1);
/* Push this kv index onto the table's free list. */
1244 next_vector
[index
] = fixnum_value(hash_table
->next_free_kv
);
1245 hash_table
->next_free_kv
= make_fixnum(index
);
/* Clear both slots so the pair no longer retains anything. */
1246 kv_vector
[2 * index
] = empty_symbol
;
1247 kv_vector
[2 * index
+ 1] = empty_symbol
;
/* Invalidate the cached hash (treated as address-based hereafter). */
1249 hash_vector
[index
] = MAGIC_HASH_VECTOR_VALUE
;
/* Live entry: step PREV forward along the chain. */
1251 prev
= &next_vector
[index
];
/* Remove dead entries from one weak hash table: fetch its backing
 * vectors, then scan every bucket chain with
 * scan_weak_hash_table_chain.  Runs after all scavenging passes, so
 * liveness of keys/values is final. */
1258 scan_weak_hash_table (struct hash_table
*hash_table
)
1261 lispobj
*index_vector
;
1262 uword_t length
= 0; /* prevent warning */
1263 lispobj
*next_vector
;
1264 uword_t next_vector_length
= 0; /* prevent warning */
1265 lispobj
*hash_vector
;
1266 lispobj empty_symbol
;
1267 lispobj weakness
= hash_table
->weakness
;
/* Lengths already validated by scav_hash_table_entries; pass NULL
 * where the count is not needed here. */
1270 kv_vector
= get_array_data(hash_table
->table
,
1271 SIMPLE_VECTOR_WIDETAG
, NULL
);
1272 index_vector
= get_array_data(hash_table
->index_vector
,
1273 SIMPLE_ARRAY_WORD_WIDETAG
, &length
);
1274 next_vector
= get_array_data(hash_table
->next_vector
,
1275 SIMPLE_ARRAY_WORD_WIDETAG
,
1276 &next_vector_length
);
1277 hash_vector
= get_array_data(hash_table
->hash_vector
,
1278 SIMPLE_ARRAY_WORD_WIDETAG
, NULL
);
1279 empty_symbol
= kv_vector
[1];
/* One chain per index_vector bucket; index_vector[i] is its head. */
1281 for (i
= 0; i
< length
; i
++) {
1282 scan_weak_hash_table_chain(hash_table
, &index_vector
[i
],
1283 kv_vector
, index_vector
, next_vector
,
1284 hash_vector
, empty_symbol
,
1285 weak_hash_entry_alivep_fun(weakness
));
1289 /* Remove dead entries from weak hash tables. */
/* Final weak-table pass: pop every table off the weak_hash_tables
 * list, reset its queue link to NIL (so it can be re-queued by a
 * future GC), and purge its dead entries. */
1291 scan_weak_hash_tables (void)
1293 struct hash_table
*table
, *next
;
1295 for (table
= weak_hash_tables
; table
!= NULL
; table
= next
) {
/* Grab the link before clearing it. */
1296 next
= (struct hash_table
*)table
->next_weak_hash_table
;
1297 table
->next_weak_hash_table
= NIL
;
1298 scan_weak_hash_table(table
);
/* The queue is fully consumed. */
1301 weak_hash_tables
= NULL
;
/* Default scavtab entry: reaching it means the scavenger hit a
 * widetag with no scavenge method — a fatal heap inconsistency. */
1310 scav_lose(lispobj
*where
, lispobj object
)
1312 lose("no scavenge function for object %p (widetag 0x%x)\n",
1314 widetag_of(*where
));
1316 return 0; /* bogus return value to satisfy static type checking */
/* Default transother entry: fatal — no transport (copy) method is
 * defined for this object's widetag. */
1320 trans_lose(lispobj object
)
1322 lose("no transport function for object %p (widetag 0x%x)\n",
1324 widetag_of(*native_pointer(object
)));
1325 return NIL
; /* bogus return value to satisfy static type checking */
/* Default sizetab entry: fatal — no size method is defined for this
 * object's widetag. */
1329 size_lose(lispobj
*where
)
1331 lose("no size function for object at %p (widetag 0x%x)\n",
1333 widetag_of(*where
));
1334 return 1; /* bogus return value to satisfy static type checking */
1342 #include "genesis/gc-tables.h"
1345 static lispobj
/* Locate the object enclosing POINTER by searching each Lisp space in
 * turn: dynamic, immobile (when the feature is enabled), static, then
 * read-only.  The success/failure tails of this function fall outside
 * this excerpt; per its callers, a miss yields NULL. */
*search_spaces(void *pointer
)
1348 if (((start
= search_dynamic_space(pointer
)) != NULL
) ||
1349 #ifdef LISP_FEATURE_IMMOBILE_SPACE
1350 ((start
= search_immobile_space(pointer
)) != NULL
) ||
1352 ((start
= search_static_space(pointer
)) != NULL
) ||
1353 ((start
= search_read_only_space(pointer
)) != NULL
))
1358 /* Find the code object for the given pc, or return NULL on
/* Find the code component containing PC: search all spaces for the
 * enclosing object and accept it only when it is a code object (the
 * return statements are outside this excerpt; the comment above says
 * NULL on failure). */
1361 component_ptr_from_pc(lispobj
*pc
)
1363 lispobj
*object
= search_spaces(pc
);
1365 if (object
!= NULL
&& widetag_of(*object
) == CODE_HEADER_WIDETAG
)
1371 /* Scan an area looking for an object which encloses the given pointer.
1372 * Return the object start on success, or NULL on failure. */
/* Linearly scan the heap area [START, LIMIT) for the object that
 * encloses POINTER, per the comment above.  Walks object by object,
 * using sizetab (or 2 words for a cons) to advance.  Contains two
 * loop variants: a debug variant that tolerates forwarding pointers
 * mid-scan, and the production variant that assumes none. */
1374 gc_search_space3(void *pointer
, lispobj
*start
, void *limit
)
/* Fast reject: pointer outside the area entirely. */
1376 if (pointer
< (void*)start
|| pointer
>= limit
) return NULL
;
1380 /* CAUTION: this code is _significantly_ slower than the production version
1381 due to the extra checks for forwarding. Only use it if debugging */
1382 for ( ; (void*)start
< limit
; start
+= count
) {
1383 lispobj
*forwarded_start
;
/* If this object has been forwarded, read header/size from its
 * new location but report the position in this space. */
1384 if (forwarding_pointer_p(start
))
1385 forwarded_start
= native_pointer(forwarding_pointer_value(start
));
1387 forwarded_start
= start
;
1388 lispobj thing
= *forwarded_start
;
/* A cons cell has no header: fixed 2-word size. */
1389 count
= is_cons_half(thing
) ? 2 : sizetab
[widetag_of(thing
)](forwarded_start
);
1390 /* Check whether the pointer is within this object. */
1391 if (pointer
< (void*)(start
+count
)) return start
;
/* Production variant: same walk without forwarding checks. */
1394 for ( ; (void*)start
< limit
; start
+= count
) {
1395 lispobj thing
= *start
;
1396 count
= is_cons_half(thing
) ? 2 : sizetab
[widetag_of(thing
)](start
);
1397 /* Check whether the pointer is within this object. */
1398 if (pointer
< (void*)(start
+count
)) return start
;
1404 /* Helper for valid_lisp_pointer_p (below) and
1405 * conservative_root_p (gencgc).
1407 * pointer is the pointer to check validity of,
1408 * and start_addr is the address of the enclosing object.
/* Decide whether THING is a properly tagged descriptor for the object
 * beginning at START_ADDR: its lowtag must be consistent with the
 * object's widetag and (for interior things like simple-funs and
 * LRAs) it must point at a legitimate position inside the object.
 * Used by valid_lisp_pointer_p and conservative_root_p, per the
 * comment above.  Several return statements and break statements are
 * elided from this excerpt. */
1411 properly_tagged_descriptor_p(void *thing
, lispobj
*start_addr
)
1413 lispobj pointer
= (lispobj
)thing
;
/* Immediates (fixnums, characters, ...) are not descriptors. */
1414 if (!is_lisp_pointer(pointer
)) {
1418 /* Check that the object pointed to is consistent with the pointer
1420 switch (lowtag_of(pointer
)) {
1421 case FUN_POINTER_LOWTAG
:
1422 /* Start_addr should be the enclosing code object, or a closure
1424 switch (widetag_of(*start_addr
)) {
1425 case CODE_HEADER_WIDETAG
:
1426 /* Make sure we actually point to a function in the code object,
1427 * as opposed to a random point there. */
1428 for_each_simple_fun(i
, function
, (struct code
*)start_addr
, 0, {
1429 if ((lispobj
)function
== pointer
-FUN_POINTER_LOWTAG
) return 1;
1432 case CLOSURE_HEADER_WIDETAG
:
1433 case FUNCALLABLE_INSTANCE_HEADER_WIDETAG
:
1434 return make_lispobj(start_addr
, FUN_POINTER_LOWTAG
) == pointer
;
1439 case LIST_POINTER_LOWTAG
:
/* Must point exactly at the cons, and both halves must look like
 * cons cells (no headered object in either slot). */
1440 return make_lispobj(start_addr
, LIST_POINTER_LOWTAG
) == pointer
1441 && is_cons_half(start_addr
[0]) // Is it plausible?
1442 && is_cons_half(start_addr
[1]);
1444 case INSTANCE_POINTER_LOWTAG
:
1445 return make_lispobj(start_addr
, INSTANCE_POINTER_LOWTAG
) == pointer
1446 && widetag_of(*start_addr
) == INSTANCE_HEADER_WIDETAG
;
1448 case OTHER_POINTER_LOWTAG
:
1450 #if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
1451 /* The all-architecture test below is good as far as it goes,
1452 * but an LRA object is similar to a FUN-POINTER: It is
1453 * embedded within a CODE-OBJECT pointed to by start_addr, and
1454 * cannot be found by simply walking the heap, therefore we
1455 * need to check for it. -- AB, 2010-Jun-04 */
1456 if ((widetag_of(start_addr
[0]) == CODE_HEADER_WIDETAG
)) {
1457 lispobj
*potential_lra
= native_pointer(pointer
);
1458 if ((widetag_of(potential_lra
[0]) == RETURN_PC_HEADER_WIDETAG
) &&
1459 ((potential_lra
- HeaderValue(potential_lra
[0])) == start_addr
)) {
1460 return 1; /* It's as good as we can verify. */
/* Otherwise it must point exactly at a headered object. */
1465 if (pointer
!= make_lispobj(start_addr
, OTHER_POINTER_LOWTAG
)
1466 || !other_immediate_lowtag_p(*start_addr
))
/* Reject widetags that can never be the target of an
 * OTHER_POINTER, then fall through to the accepted set. */
1469 switch (widetag_of(start_addr
[0])) {
1470 case UNBOUND_MARKER_WIDETAG
:
1471 case NO_TLS_VALUE_MARKER_WIDETAG
:
1472 case CHARACTER_WIDETAG
:
1473 #if N_WORD_BITS == 64
1474 case SINGLE_FLOAT_WIDETAG
:
1478 /* only pointed to by function pointers? */
1479 case CLOSURE_HEADER_WIDETAG
:
1480 case FUNCALLABLE_INSTANCE_HEADER_WIDETAG
:
1483 case INSTANCE_HEADER_WIDETAG
:
1486 /* the valid other immediate pointer objects */
1487 case SIMPLE_VECTOR_WIDETAG
:
1489 case COMPLEX_WIDETAG
:
1490 #ifdef COMPLEX_SINGLE_FLOAT_WIDETAG
1491 case COMPLEX_SINGLE_FLOAT_WIDETAG
:
1493 #ifdef COMPLEX_DOUBLE_FLOAT_WIDETAG
1494 case COMPLEX_DOUBLE_FLOAT_WIDETAG
:
1496 #ifdef COMPLEX_LONG_FLOAT_WIDETAG
1497 case COMPLEX_LONG_FLOAT_WIDETAG
:
1499 #ifdef SIMD_PACK_WIDETAG
1500 case SIMD_PACK_WIDETAG
:
1502 case SIMPLE_ARRAY_WIDETAG
:
1503 case COMPLEX_BASE_STRING_WIDETAG
:
1504 #ifdef COMPLEX_CHARACTER_STRING_WIDETAG
1505 case COMPLEX_CHARACTER_STRING_WIDETAG
:
1507 case COMPLEX_VECTOR_NIL_WIDETAG
:
1508 case COMPLEX_BIT_VECTOR_WIDETAG
:
1509 case COMPLEX_VECTOR_WIDETAG
:
1510 case COMPLEX_ARRAY_WIDETAG
:
1511 case VALUE_CELL_HEADER_WIDETAG
:
1512 case SYMBOL_HEADER_WIDETAG
:
1514 case CODE_HEADER_WIDETAG
:
1515 case BIGNUM_WIDETAG
:
1516 #if N_WORD_BITS != 64
1517 case SINGLE_FLOAT_WIDETAG
:
1519 case DOUBLE_FLOAT_WIDETAG
:
1520 #ifdef LONG_FLOAT_WIDETAG
1521 case LONG_FLOAT_WIDETAG
:
1523 #include "genesis/specialized-vectors.inc"
1525 case WEAK_POINTER_WIDETAG
:
1540 /* META: Note the ambiguous word "validate" in the comment below.
1541 * This means "Decide whether <x> is valid".
1542 * But when you see os_validate() elsewhere, that doesn't mean to ask
1543 * whether something is valid, it says to *make* it valid.
1544 * I think it would be nice if we could avoid using the word in the
1545 * sense in which os_validate() uses it, which would entail renaming
1546 * a bunch of stuff, which is harder than just explaining why
1547 * the comments can be deceptive */
1549 /* Used by the debugger to validate possibly bogus pointers before
1550 * calling MAKE-LISP-OBJ on them.
1552 * FIXME: We would like to make this perfect, because if the debugger
1553 constructs a reference to a bogus lisp object, and it ends up in a
1554 * location scavenged by the GC all hell breaks loose.
1556 * Whereas conservative_root_p has to be conservative
1557 * and return true for all valid pointers, this could actually be eager
1558 * and lie about a few pointers without bad results... but that should
1559 * be reflected in the name.
/* Debugger entry point (see the comment above): check that POINTER
 * both lies inside some Lisp space and is a properly tagged
 * descriptor for the object found there. */
1562 valid_lisp_pointer_p(lispobj pointer
)
1564 lispobj
*start
= search_spaces((void*)pointer
);
/* properly_tagged_descriptor_p handles a NULL start (not a pointer
 * into any space) by rejecting immediates/unknowns. */
1566 return properly_tagged_descriptor_p((void*)pointer
, start
);
/* Invoke the Lisp-side garbage collector (SUB-GC) from a signal
 * context, then run POST-GC hooks when safe.  Returns true when a GC
 * actually happened (gc_happened != NIL).  Several closing braces and
 * statements of the original are elided from this excerpt. */
1571 maybe_gc(os_context_t
*context
)
1573 lispobj gc_happened
;
1574 struct thread
*thread
= arch_os_get_current_thread();
/* Record whether we interrupted Lisp code (vs. foreign code). */
1575 boolean were_in_lisp
= !foreign_function_call_active_p(thread
);
/* Make the thread look like it is in a foreign call so GC can run. */
1578 fake_foreign_function_call(context
);
1581 /* SUB-GC may return without GCing if *GC-INHIBIT* is set, in
1582 * which case we will be running with no gc trigger barrier
1583 * thing for a while. But it shouldn't be long until the end
1586 * FIXME: It would be good to protect the end of dynamic space for
1587 * CheneyGC and signal a storage condition from there.
1590 /* Restore the signal mask from the interrupted context before
1591 * calling into Lisp if interrupts are enabled. Why not always?
1593 * Suppose there is a WITHOUT-INTERRUPTS block far, far out. If an
1594 * interrupt hits while in SUB-GC, it is deferred and the
1595 * os_context_sigmask of that interrupt is set to block further
1596 * deferrable interrupts (until the first one is
1597 * handled). Unfortunately, that context refers to this place and
1598 * when we return from here the signals will not be blocked.
1600 * A kludgy alternative is to propagate the sigmask change to the
1603 #if !(defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_SAFEPOINT))
1604 check_gc_signals_unblocked_or_lose(os_context_sigmask_addr(context
));
1605 unblock_gc_signals(0, 0);
1607 FSHOW((stderr
, "/maybe_gc: calling SUB_GC\n"));
1608 /* FIXME: Nothing must go wrong during GC else we end up running
1609 * the debugger, error handlers, and user code in general in a
1610 * potentially unsafe place. Running out of the control stack or
1611 * the heap in SUB-GC are ways to lose. Of course, deferrables
1612 * cannot be unblocked because there may be a pending handler, or
1613 * we may even be in a WITHOUT-INTERRUPTS. */
1614 gc_happened
= funcall0(StaticSymbolFunction(SUB_GC
));
1615 FSHOW((stderr
, "/maybe_gc: gc_happened=%s\n",
1616 (gc_happened
== NIL
)
1618 : ((gc_happened
== T
)
1621 /* gc_happened can take three values: T, NIL, 0.
1623 * T means that the thread managed to trigger a GC, and post-gc
1626 * NIL means that the thread is within without-gcing, and no GC
1629 * Finally, 0 means that *a* GC has occurred, but it wasn't
1630 * triggered by this thread; success, but post-gc doesn't have
1633 if ((gc_happened
== T
) &&
1634 /* See if interrupts are enabled or it's possible to enable
1635 * them. POST-GC has a similar check, but we don't want to
1636 * unlock deferrables in that case and get a pending interrupt
1638 ((SymbolValue(INTERRUPTS_ENABLED
,thread
) != NIL
) ||
1639 (SymbolValue(ALLOW_WITH_INTERRUPTS
,thread
) != NIL
))) {
1640 #ifndef LISP_FEATURE_WIN32
1641 sigset_t
*context_sigmask
= os_context_sigmask_addr(context
);
/* Only run POST-GC when the interrupted context would not have
 * deferrable signals blocked. */
1642 if (!deferrables_blocked_p(context_sigmask
)) {
1643 thread_sigmask(SIG_SETMASK
, context_sigmask
, 0);
1644 #ifndef LISP_FEATURE_SB_SAFEPOINT
1645 check_gc_signals_unblocked_or_lose(0);
1648 FSHOW((stderr
, "/maybe_gc: calling POST_GC\n"));
1649 funcall0(StaticSymbolFunction(POST_GC
));
1650 #ifndef LISP_FEATURE_WIN32
1652 FSHOW((stderr
, "/maybe_gc: punting on POST_GC due to blockage\n"));
/* Tear down the fake foreign-call frame set up above. */
1658 undo_fake_foreign_function_call(context
);
1660 /* Otherwise done by undo_fake_foreign_function_call. And
1661 something later wants them to be blocked. What a nice
1663 block_blockable_signals(0);
1666 FSHOW((stderr
, "/maybe_gc: returning\n"));
1667 return (gc_happened
!= NIL
);
/* Stack scrubbing granularity: zero in 4 KiB chunks, stopping once a
 * whole chunk boundary of already-zero words is reached (see the
 * FIXME below about the limits of that heuristic). */
1670 #define BYTES_ZERO_BEFORE_END (1<<12)
1672 /* There used to be a similar function called SCRUB-CONTROL-STACK in
1673 * Lisp and another called zero_stack() in cheneygc.c, but since it's
1674 * shorter to express in, and more often called from C, I keep only
1675 * the C one after fixing it. -- MG 2009-03-25 */
1677 /* Zero the unused portion of the control stack so that old objects
1678 * are not kept alive because of uninitialized stack variables.
1680 * "To summarize the problem, since not all allocated stack frame
1681 * slots are guaranteed to be written by the time you call an another
1682 * function or GC, there may be garbage pointers retained in your dead
1683 * stack locations. The stack scrubbing only affects the part of the
1684 * stack from the SP to the end of the allocated stack." - ram, on
1685 * cmucl-imp, Tue, 25 Sep 2001
1687 * So, as an (admittedly lame) workaround, from time to time we call
1688 * scrub-control-stack to zero out all the unused portion. This is
1689 * supposed to happen when the stack is mostly empty, so that we have
1690 * a chance of clearing more of it: callers are currently (2002.07.18)
1691 * REPL, SUB-GC and sig_stop_for_gc_handler. */
1693 /* Take care not to tread on the guard page and the hard guard page as
1694 * it would be unkind to sig_stop_for_gc_handler. Touching the return
1695 * guard page is not dangerous. For this to work the guard page must
1696 * be zeroed when protected. */
1698 /* FIXME: I think there is no guarantee that once
1699 * BYTES_ZERO_BEFORE_END bytes are zero the rest are also zero. This
1700 * may be what the "lame" adjective in the above comment is for. In
1701 * this case, exact gc may lose badly. */
/* Convenience wrapper: scrub the current thread's control stack. */
1703 scrub_control_stack()
1705 scrub_thread_control_stack(arch_os_get_current_thread());
/* Zero the unused portion of TH's control stack (see the long comment
 * above for rationale), taking care never to touch the guard pages.
 * On C-stack-is-control-stack targets the work is delegated to an
 * assembly routine.  Parts of the zeroing loops are elided from this
 * excerpt. */
1709 scrub_thread_control_stack(struct thread
*th
)
1711 os_vm_address_t guard_page_address
= CONTROL_STACK_GUARD_PAGE(th
);
1712 os_vm_address_t hard_guard_page_address
= CONTROL_STACK_HARD_GUARD_PAGE(th
);
1713 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
1714 /* On these targets scrubbing from C is a bad idea, so we punt to
1715 * a routine in $ARCH-assem.S. */
1716 extern void arch_scrub_control_stack(struct thread
*, os_vm_address_t
, os_vm_address_t
);
1717 arch_scrub_control_stack(th
, guard_page_address
, hard_guard_page_address
);
1719 lispobj
*sp
= access_control_stack_pointer(th
);
/* Refuse to scrub if SP currently sits inside the hard guard page,
 * or inside the (still-protected) ordinary guard page. */
1721 if ((((os_vm_address_t
)sp
< (hard_guard_page_address
+ os_vm_page_size
)) &&
1722 ((os_vm_address_t
)sp
>= hard_guard_page_address
)) ||
1723 (((os_vm_address_t
)sp
< (guard_page_address
+ os_vm_page_size
)) &&
1724 ((os_vm_address_t
)sp
>= guard_page_address
) &&
1725 (th
->control_stack_guard_page_protected
!= NIL
)))
1727 #ifdef LISP_FEATURE_STACK_GROWS_DOWNWARD_NOT_UPWARD
/* Downward-growing stack: walk SP downward, one chunk at a time. */
1730 } while (((uword_t
)sp
--) & (BYTES_ZERO_BEFORE_END
- 1));
1731 if ((os_vm_address_t
)sp
< (hard_guard_page_address
+ os_vm_page_size
))
1736 } while (((uword_t
)sp
--) & (BYTES_ZERO_BEFORE_END
- 1));
/* Upward-growing stack: walk SP upward instead. */
1740 } while (((uword_t
)++sp
) & (BYTES_ZERO_BEFORE_END
- 1));
1741 if ((os_vm_address_t
)sp
>= hard_guard_page_address
)
1746 } while (((uword_t
)++sp
) & (BYTES_ZERO_BEFORE_END
- 1));
1748 #endif /* LISP_FEATURE_C_STACK_IS_CONTROL_STACK */
1751 #if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
/* Scavenge TH's control stack word by word, fixing broken hearts
 * (forwarding pointers) in place rather than calling scavenge() over
 * the whole region — see the block comment below for why. */
1754 scavenge_control_stack(struct thread
*th
)
1756 lispobj
*object_ptr
;
1758 /* In order to properly support dynamic-extent allocation of
1759 * non-CONS objects, the control stack requires special handling.
1760 * Rather than calling scavenge() directly, grovel over it fixing
1761 * broken hearts, scavenging pointers to oldspace, and pitching a
1762 * fit when encountering unboxed data. This prevents stray object
1763 * headers from causing the scavenger to blow past the end of the
1764 * stack (an error case checked in scavenge()). We don't worry
1765 * about treating unboxed words as boxed or vice versa, because
1766 * the compiler isn't allowed to store unboxed objects on the
1767 * control stack. -- AB, 2011-Dec-02 */
1769 for (object_ptr
= th
->control_stack_start
;
1770 object_ptr
< access_control_stack_pointer(th
);
1773 lispobj object
= *object_ptr
;
1774 #ifdef LISP_FEATURE_GENCGC
/* A forwarding pointer stored ON the stack itself is a bug. */
1775 if (forwarding_pointer_p(object_ptr
))
1776 lose("unexpected forwarding pointer in scavenge_control_stack: %p, start=%p, end=%p\n",
1777 object_ptr
, th
->control_stack_start
, access_control_stack_pointer(th
));
1779 if (is_lisp_pointer(object
) && from_space_p(object
)) {
1780 /* It currently points to old space. Check for a
1781 * forwarding pointer. */
1782 lispobj
*ptr
= native_pointer(object
);
1783 if (forwarding_pointer_p(ptr
)) {
1784 /* Yes, there's a forwarding pointer. */
1785 *object_ptr
= LOW_WORD(forwarding_pointer_value(ptr
));
1787 /* Scavenge that pointer. */
1788 long n_words_scavenged
=
1789 (scavtab
[widetag_of(object
)])(object_ptr
, object
);
/* A pointer slot must scavenge to exactly one word. */
1790 gc_assert(n_words_scavenged
== 1);
/* Unboxed data on the control stack violates the invariant
 * described above — treat it as fatal. */
1792 } else if (scavtab
[widetag_of(object
)] == scav_lose
) {
1793 lose("unboxed object in scavenge_control_stack: %p->%x, start=%p, end=%p\n",
1794 object_ptr
, object
, th
->control_stack_start
, access_control_stack_pointer(th
));
1799 /* Scavenging Interrupt Contexts */
/* Per-architecture list of register numbers that hold boxed Lisp
 * values; BOXED_REGISTERS is defined by the target's header. */
1801 static int boxed_registers
[] = BOXED_REGISTERS
;
1803 /* The GC has a notion of an "interior pointer" register, an unboxed
1804 * register that typically contains a pointer to inside an object
1805 * referenced by another pointer. The most obvious of these is the
1806 * program counter, although many compiler backends define a "Lisp
1807 * Interior Pointer" register known to the runtime as reg_LIP, and
1808 * various CPU architectures have other registers that also partake of
1809 * the interior-pointer nature. As the code for pairing an interior
1810 * pointer value up with its "base" register, and fixing it up after
1811 * scavenging is complete is horribly repetitive, a few macros paper
1812 * over the monotony. --AB, 2010-Jul-14 */
1814 /* These macros are only ever used over a lexical environment which
1815 * defines a pointer to an os_context_t called context, thus we don't
1816 * bother to pass that context in as a parameter. */
1818 /* Define how to access a given interior pointer. */
1819 #define ACCESS_INTERIOR_POINTER_pc \
1820 *os_context_pc_addr(context)
1821 #define ACCESS_INTERIOR_POINTER_lip \
1822 *os_context_register_addr(context, reg_LIP)
1823 #define ACCESS_INTERIOR_POINTER_lr \
1824 *os_context_lr_addr(context)
1825 #define ACCESS_INTERIOR_POINTER_npc \
1826 *os_context_npc_addr(context)
1827 #define ACCESS_INTERIOR_POINTER_ctr \
1828 *os_context_ctr_addr(context)
/* Declare the per-pointer bookkeeping: its offset from the paired
 * base register, and which register that is (-1 when unpaired). */
1830 #define INTERIOR_POINTER_VARS(name) \
1831 uword_t name##_offset; \
1832 int name##_register_pair
/* Record the base-register pairing before scavenging. */
1834 #define PAIR_INTERIOR_POINTER(name) \
1835 pair_interior_pointer(context, \
1836 ACCESS_INTERIOR_POINTER_##name, \
1838 &name##_register_pair)
1840 /* One complexity here is that if a paired register is not found for
1841 * an interior pointer, then that pointer does not get updated.
1842 * Originally, there was some commentary about using an index of -1
1843 * when calling os_context_register_addr() on SPARC referring to the
1844 * program counter, but the real reason is to allow an interior
1845 * pointer register to point to the runtime, read-only space, or
1846 * static space without problems. */
1847 #define FIXUP_INTERIOR_POINTER(name) \
1849 if (name##_register_pair >= 0) { \
1850 ACCESS_INTERIOR_POINTER_##name = \
1851 (*os_context_register_addr(context, \
1852 name##_register_pair) \
/* For the interior pointer POINTER found in CONTEXT, find the boxed
 * register whose (untagged) value is closest to POINTER without
 * exceeding it, and record the pairing in *SAVED_OFFSET /
 * *REGISTER_PAIR.  *REGISTER_PAIR stays -1 when no boxed register
 * qualifies, in which case FIXUP_INTERIOR_POINTER leaves the pointer
 * alone. */
1860 pair_interior_pointer(os_context_t
*context
, uword_t pointer
,
1861 uword_t
*saved_offset
, int *register_pair
)
1866 * I (RLT) think this is trying to find the boxed register that is
1867 * closest to the LIP address, without going past it. Usually, it's
1868 * reg_CODE or reg_LRA. But sometimes, nothing can be found.
1870 /* 0x7FFFFFFF on 32-bit platforms;
1871 0x7FFFFFFFFFFFFFFF on 64-bit platforms */
1872 *saved_offset
= (((uword_t
)1) << (N_WORD_BITS
- 1)) - 1;
1873 *register_pair
= -1;
1874 for (i
= 0; i
< (sizeof(boxed_registers
) / sizeof(int)); i
++) {
1879 index
= boxed_registers
[i
];
1880 reg
= *os_context_register_addr(context
, index
);
1882 /* An interior pointer is never relative to a non-pointer
1883 * register (an oversight in the original implementation).
1884 * The simplest argument for why this is true is to consider
1885 * the fixnum that happens by coincidence to be the word-index in
1886 * memory of the header for some object plus two. This
1887 * happenstance would cause the register containing the fixnum
1888 * to be selected as the register_pair if the interior pointer
1889 * is to anywhere after the first two words of the object.
1890 * The fixnum won't be changed during GC, but the object might
1891 * move, thus destroying the interior pointer. --AB,
/* Candidate must hold a tagged pointer at or below POINTER. */
1894 if (is_lisp_pointer(reg
) &&
1895 ((reg
& ~LOWTAG_MASK
) <= pointer
)) {
1896 offset
= pointer
- (reg
& ~LOWTAG_MASK
);
/* Keep the closest base found so far. */
1897 if (offset
< *saved_offset
) {
1898 *saved_offset
= offset
;
1899 *register_pair
= index
;
/* Scavenge one saved interrupt CONTEXT: pair each interior-pointer
 * register (pc, lip, and the architecture-conditional lr/npc/ctr)
 * with a boxed base register, scavenge all boxed registers, then
 * recompute the interior pointers from their moved bases. */
1906 scavenge_interrupt_context(os_context_t
* context
)
1910 /* FIXME: The various #ifdef noise here is precisely that: noise.
1911 * Is it possible to fold it into the macrology so that we have
1912 * one set of #ifdefs and then INTERIOR_POINTER_VARS /et alia/
1913 * compile out for the registers that don't exist on a given
1916 INTERIOR_POINTER_VARS(pc
);
1918 INTERIOR_POINTER_VARS(lip
);
1920 #ifdef ARCH_HAS_LINK_REGISTER
1921 INTERIOR_POINTER_VARS(lr
);
1923 #ifdef ARCH_HAS_NPC_REGISTER
1924 INTERIOR_POINTER_VARS(npc
);
1926 #ifdef LISP_FEATURE_PPC
1927 INTERIOR_POINTER_VARS(ctr
);
/* Phase 1: record base-register pairings before anything moves. */
1930 PAIR_INTERIOR_POINTER(pc
);
1932 PAIR_INTERIOR_POINTER(lip
);
1934 #ifdef ARCH_HAS_LINK_REGISTER
1935 PAIR_INTERIOR_POINTER(lr
);
1937 #ifdef ARCH_HAS_NPC_REGISTER
1938 PAIR_INTERIOR_POINTER(npc
);
1940 #ifdef LISP_FEATURE_PPC
1941 PAIR_INTERIOR_POINTER(ctr
);
1944 /* Scavenge all boxed registers in the context. */
1945 for (i
= 0; i
< (sizeof(boxed_registers
) / sizeof(int)); i
++) {
1949 index
= boxed_registers
[i
];
1950 foo
= *os_context_register_addr(context
, index
);
1952 *os_context_register_addr(context
, index
) = foo
;
1954 /* this is unlikely to work as intended on bigendian
1955 * 64 bit platforms */
1957 scavenge((lispobj
*) os_context_register_addr(context
, index
), 1);
1960 /* Now that the scavenging is done, repair the various interior
/* Phase 3: re-derive each interior pointer from its moved base. */
1962 FIXUP_INTERIOR_POINTER(pc
);
1964 FIXUP_INTERIOR_POINTER(lip
);
1966 #ifdef ARCH_HAS_LINK_REGISTER
1967 FIXUP_INTERIOR_POINTER(lr
);
1969 #ifdef ARCH_HAS_NPC_REGISTER
1970 FIXUP_INTERIOR_POINTER(npc
);
1972 #ifdef LISP_FEATURE_PPC
1973 FIXUP_INTERIOR_POINTER(ctr
);
/* Scavenge every saved interrupt context of thread TH.  The count of
 * live contexts is kept in the thread-local symbol
 * FREE-INTERRUPT-CONTEXT-INDEX. */
1978 scavenge_interrupt_contexts(struct thread
*th
)
1981 os_context_t
*context
;
1983 index
= fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX
,th
));
1985 #if defined(DEBUG_PRINT_CONTEXT_INDEX)
1986 printf("Number of active contexts: %d\n", index
);
1989 for (i
= 0; i
< index
; i
++) {
1990 context
= th
->interrupt_contexts
[i
];
1991 scavenge_interrupt_context(context
);
/* Prepare UNPACKER to decode varints from INTEGER, which is either a
 * fixnum (decoded in place from the unpacker's own word) or a bignum
 * (decoded from the bignum's digits). */
1996 void varint_unpacker_init(struct varint_unpacker
* unpacker
, lispobj integer
)
1998 if (fixnump(integer
)) {
/* Fixnum case: copy the value into the unpacker and read its bytes. */
1999 unpacker
->word
= fixnum_value(integer
);
2000 unpacker
->limit
= N_WORD_BYTES
;
2001 unpacker
->data
= (char*)&unpacker
->word
;
/* Bignum case: untag the pointer and read directly from the digits;
 * the header value gives the digit count. */
2003 struct bignum
* bignum
= (struct bignum
*)(integer
- OTHER_POINTER_LOWTAG
);
2005 unpacker
->limit
= HeaderValue(bignum
->header
) * N_WORD_BYTES
;
2006 unpacker
->data
= (char*)bignum
->digits
;
/* Start decoding at the first byte in either case. */
2008 unpacker
->index
= 0;
2011 // Fetch the next varint from 'unpacker' into 'result'.
2012 // Because there is no length prefix on the number of varints encoded,
2013 // spurious trailing zeros might be observed. The data consumer can
2014 // circumvent that by storing a count as the first value in the series.
2015 // Return 1 for success, 0 for EOF.
2016 int varint_unpack(struct varint_unpacker
* unpacker
, int* result
)
2018 if (unpacker
->index
>= unpacker
->limit
) return 0;
2019 int accumulator
= 0;
2022 #ifdef LISP_FEATURE_LITTLE_ENDIAN
2023 int byte
= unpacker
->data
[unpacker
->index
];
2025 // bignums are little-endian in word order,
2026 // but machine-native within each word.
2027 // We could pack bytes MSB-to-LSB in the bigdigits,
2028 // but that seems less intuitive on the Lisp side.
2029 int word_index
= unpacker
->index
/ N_WORD_BYTES
;
2030 int byte_index
= unpacker
->index
% N_WORD_BYTES
;
2031 int byte
= (((unsigned int*)unpacker
->data
)[word_index
]
2032 >> (byte_index
* 8)) & 0xFF;
2035 accumulator
|= (byte
& 0x7F) << shift
;
2036 if (!(byte
& 0x80)) break;
2037 gc_assert(unpacker
->index
< unpacker
->limit
);
2040 *result
= accumulator
;