2 * Garbage Collection common functions for scavenging, moving and sizing
3 * objects. These are for use with both GC (stop & copy GC) and GENCGC
7 * This software is part of the SBCL system. See the README file for
10 * This software is derived from the CMU CL system, which was
11 * written at Carnegie Mellon University and released into the
12 * public domain. The software is in the public domain and is
13 * provided with absolutely no warranty. See the COPYING and CREDITS
14 * files for more information.
18 * For a review of garbage collection techniques (e.g. generational
19 * GC) and terminology (e.g. "scavenging") see Paul R. Wilson,
20 * "Uniprocessor Garbage Collection Techniques". As of 20000618, this
21 * had been accepted for _ACM Computing Surveys_ and was available
22 * as a PostScript preprint through
23 * <http://www.cs.utexas.edu/users/oops/papers.html>
25 * <ftp://ftp.cs.utexas.edu/pub/garbage/bigsurv.ps>.
28 #define _GNU_SOURCE /* for ffsl(3) from string.h */
38 #include "interrupt.h"
43 #include "genesis/primitive-objects.h"
44 #include "genesis/static-symbols.h"
45 #include "genesis/layout.h"
46 #include "genesis/hash-table.h"
47 #include "gc-internal.h"
48 #include "forwarding-ptr.h"
/* Element size, in words, of a long-float datum; used by the
 * vector_long_float scav/trans/size routines defined later. */
51 #ifdef LISP_FEATURE_SPARC
52 #define LONG_FLOAT_SIZE 4
53 #elif defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
54 #define LONG_FLOAT_SIZE 3
/* Runtime-configurable sizes of the dynamic space and per-thread
 * control stack.
 * NOTE(review): this extraction appears to have dropped lines here
 * (e.g. a closing #endif for the block above); verify against upstream. */
57 os_vm_size_t dynamic_space_size
= DEFAULT_DYNAMIC_SPACE_SIZE
;
58 os_vm_size_t thread_control_stack_size
= DEFAULT_CONTROL_STACK_SIZE
;
/* Widetag-indexed dispatch tables: scavenge, transport, and size. */
60 sword_t (*scavtab
[256])(lispobj
*where
, lispobj object
);
61 lispobj (*transother
[256])(lispobj object
);
62 sword_t (*sizetab
[256])(lispobj
*where
);
/* Head of the chain of weak pointers seen during this collection;
 * consumed by scan_weak_pointers() below. */
63 struct weak_pointer
*weak_pointers
;
/* Allocation budget between collections: 12 MiB by default. */
65 os_vm_size_t bytes_consed_between_gcs
= 12*1024*1024;
71 /* gc_general_copy_object is inline from gc-internal.h */
73 /* to copy a boxed object */
75 copy_object(lispobj object
, sword_t nwords
)
77 return gc_general_copy_object(object
, nwords
, BOXED_PAGE_FLAG
);
81 copy_code_object(lispobj object
, sword_t nwords
)
83 return gc_general_copy_object(object
, nwords
, CODE_PAGE_FLAG
);
/* Handler of last resort for unrecognized widetags; defined later in
 * the file (not visible in this chunk). */
86 static sword_t
scav_lose(lispobj
*where
, lispobj object
); /* forward decl */
88 #ifdef LISP_FEATURE_GENCGC
/* Pin-bitmap support: one bit per doubleword (2 words) of a GC card. */
89 static const int n_dwords_in_card
= GENCGC_CARD_BYTES
/ N_WORD_BYTES
/ 2;
90 extern uword_t
*page_table_pinned_dwords
;
92 static inline boolean
pinned_p(lispobj obj
, page_index_t page
)
94 if (!page_table
[page
].has_pin_map
) return 0;
95 int dword_num
= (obj
& (GENCGC_CARD_BYTES
-1)) >> (1+WORD_SHIFT
);
96 uword_t
*bits
= &page_table_pinned_dwords
[page
* (n_dwords_in_card
/N_WORD_BITS
)];
97 return (bits
[dword_num
/ N_WORD_BITS
] >> (dword_num
% N_WORD_BITS
)) & 1;
/* Scavenge the block of N_WORDS contiguous words starting at START:
 * pointers into from_space are forwarded or transported via scavtab[],
 * fixnums are skipped, and header objects advance the cursor by their
 * own size.
 * NOTE(review): this extraction has lost lines (e.g. the declarations
 * of 'object_ptr'/'page' and the FIX_POINTER() invocation sites);
 * verify the exact text against upstream before compiling. */
102 scavenge(lispobj
*start
, sword_t n_words
)
104 lispobj
*end
= start
+ n_words
;
108 // * With 32-bit words, is_lisp_pointer(object) returns true if object_ptr
109 // points to a forwarding pointer, so we need a sanity check inside the
110 // branch for is_lisp_pointer(). For maximum efficiency, check that only
111 // after from_space_p() returns false, so that valid pointers into
112 // from_space incur no extra test. This could be improved further by
113 // skipping the FP check if 'object' points within dynamic space, i.e.,
114 // when find_page_index() returns >= 0. That would entail injecting
115 // from_space_p() explicitly into the loop, so as to separate the
116 // "was a page found at all" condition from the page generation test.
118 // * With 64-bit words, is_lisp_pointer(object) is false when object_ptr
119 // points to a forwarding pointer, and the fixnump() test also returns
120 // false, so we'll indirect through scavtab[]. This will safely invoke
121 // scav_lose(), detecting corruption without any extra cost.
122 // The major difference between that and the explicit test is that you
123 // won't see 'start' and 'n_words', but if you need those, chances are
124 // you'll want to run under an external debugger in the first place.
125 // [And btw it sure would be nice to assert statically
126 // that is_lisp_pointer(0x01) is indeed false]
/* Either rewrite *object_ptr through a forwarding pointer, or dispatch
 * to the widetag-specific scavenger. */
128 #define FIX_POINTER() { \
129 lispobj *ptr = native_pointer(object); \
130 if (forwarding_pointer_p(ptr)) \
131 *object_ptr = LOW_WORD(forwarding_pointer_value(ptr)); \
132 else /* Scavenge that pointer. */ \
133 (void)scavtab[widetag_of(object)](object_ptr, object); \
136 for (object_ptr
= start
; object_ptr
< end
;) {
137 lispobj object
= *object_ptr
;
138 if (is_lisp_pointer(object
)) {
139 #ifdef LISP_FEATURE_IMMOBILE_SPACE
141 // It would be fine, though suboptimal, to use from_space_p() here.
142 // If it returns false, we don't want to call immobile_space_p()
143 // unless the pointer is *not* into dynamic space.
144 if ((page
= find_page_index((void*)object
)) >= 0) {
145 if (page_table
[page
].gen
== from_space
&& !pinned_p(object
, page
))
147 } else if (immobile_space_p(object
)) {
148 lispobj
*ptr
= native_pointer(object
);
149 if (immobile_obj_gen_bits(ptr
) == from_space
)
150 promote_immobile_obj(ptr
, 1);
153 if (from_space_p(object
)) {
156 #if (N_WORD_BITS == 32) && defined(LISP_FEATURE_GENCGC)
157 if (forwarding_pointer_p(object_ptr
))
158 lose("unexpected forwarding pointer in scavenge: %p, start=%p, n=%ld\n",
159 object_ptr
, start
, n_words
);
161 /* It points somewhere other than oldspace. Leave it
167 else if (fixnump(object
)) {
168 /* It's a fixnum: really easy.. */
171 /* It's some sort of header object or another. */
172 object_ptr
+= (scavtab
[widetag_of(object
)])(object_ptr
, object
);
175 // This assertion is usually the one that fails when something
176 // is subtly wrong with the heap, so definitely always do it.
177 gc_assert_verbose(object_ptr
== end
, "Final object pointer %p, start %p, end %p\n",
178 object_ptr
, start
, end
);
/* Forward declarations needed by scav_fun_pointer below. */
181 static lispobj
trans_fun_header(lispobj object
); /* forward decls */
182 static lispobj
trans_short_boxed(lispobj object
);
/* Scavenge a FUN_POINTER_LOWTAG reference: transport the underlying
 * simple-fun (through its code object) or closure/short-boxed object,
 * then plant a forwarding pointer if the object actually moved.
 * NOTE(review): the extraction appears to have dropped lines here
 * (e.g. the 'copy' declaration, the default switch case and the
 * return); verify against upstream. */
185 scav_fun_pointer(lispobj
*where
, lispobj object
)
187 lispobj
*first_pointer
;
190 gc_dcheck(lowtag_of(object
) == FUN_POINTER_LOWTAG
);
192 /* Object is a pointer into from_space - not a FP. */
193 first_pointer
= native_pointer(object
);
195 /* must transport object -- object may point to either a function
196 * header, a closure function header, or to a closure header. */
198 switch (widetag_of(*first_pointer
)) {
199 case SIMPLE_FUN_HEADER_WIDETAG
:
200 copy
= trans_fun_header(object
);
203 copy
= trans_short_boxed(object
);
207 if (copy
!= object
) {
208 /* Set forwarding pointer */
209 set_forwarding_pointer(first_pointer
,copy
);
212 CHECK_COPY_POSTCONDITIONS(copy
, FUN_POINTER_LOWTAG
);
/* Transport a code object: copy header + constants + instructions as
 * one unit, forward every embedded simple-fun header, fix self
 * pointers, and flush the instruction cache over the new code where
 * the platform requires it.
 * NOTE(review): this extraction has lost lines (e.g. the early-return
 * body for 'new_code == code', the self-pointer assignment around the
 * FUN_RAW_ADDR_OFFSET fragment, and several #endif lines); verify
 * against upstream before compiling. */
221 trans_code(struct code
*code
)
223 /* if object has already been transported, just return pointer */
224 if (forwarding_pointer_p((lispobj
*)code
)) {
226 printf("Was already transported\n");
228 return (struct code
*)native_pointer(forwarding_pointer_value((lispobj
*)code
));
231 gc_dcheck(widetag_of(code
->header
) == CODE_HEADER_WIDETAG
);
233 /* prepare to transport the code vector */
234 lispobj l_code
= (lispobj
) LOW_WORD(code
) | OTHER_POINTER_LOWTAG
;
235 sword_t nheader_words
= code_header_words(code
->header
);
236 sword_t ncode_words
= code_instruction_words(code
->code_size
);
237 sword_t nwords
= nheader_words
+ ncode_words
;
238 lispobj l_new_code
= copy_code_object(l_code
, nwords
);
239 struct code
*new_code
= (struct code
*) native_pointer(l_new_code
);
241 #if defined(DEBUG_CODE_GC)
242 printf("Old code object at 0x%08x, new code object at 0x%08x.\n",
243 (uword_t
) code
, (uword_t
) new_code
);
244 printf("Code object is %d words long.\n", nwords
);
247 #ifdef LISP_FEATURE_GENCGC
/* Large code may be promoted in place, in which case no fixups are
 * needed and the object did not move. */
248 if (new_code
== code
)
252 set_forwarding_pointer((lispobj
*)code
, l_new_code
);
254 /* set forwarding pointers for all the function headers in the */
255 /* code object. also fix all self pointers */
256 /* Do this by scanning the new code, since the old header is unusable */
258 uword_t displacement
= l_new_code
- l_code
;
260 for_each_simple_fun(i
, nfheaderp
, new_code
, 1, {
261 /* Calculate the old raw function pointer */
262 struct simple_fun
* fheaderp
=
263 (struct simple_fun
*)LOW_WORD((char*)nfheaderp
- displacement
);
264 /* Calculate the new lispobj */
265 lispobj nfheaderl
= make_lispobj(nfheaderp
, FUN_POINTER_LOWTAG
);
268 printf("fheaderp->header (at %x) <- %x\n",
269 &(fheaderp
->header
) , nfheaderl
);
271 set_forwarding_pointer((lispobj
*)fheaderp
, nfheaderl
);
273 /* fix self pointer. */
275 #if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
276 FUN_RAW_ADDR_OFFSET
+
280 #ifdef LISP_FEATURE_GENCGC
281 /* Cheneygc doesn't need this os_flush_icache, it flushes the whole
282 spaces once when all copying is done. */
283 os_flush_icache((os_vm_address_t
) (((sword_t
*)new_code
) + nheader_words
),
284 ncode_words
* sizeof(sword_t
));
288 #ifdef LISP_FEATURE_X86
289 gencgc_apply_code_fixups(code
, new_code
);
296 scav_code_header(lispobj
*where
, lispobj header
)
298 struct code
*code
= (struct code
*) where
;
299 sword_t n_header_words
= code_header_words(header
);
301 /* Scavenge the boxed section of the code data block. */
302 scavenge(where
+ 1, n_header_words
- 1);
304 /* Scavenge the boxed section of each function object in the
305 * code data block. */
306 for_each_simple_fun(i
, function_ptr
, code
, 1, {
307 scavenge(SIMPLE_FUN_SCAV_START(function_ptr
),
308 SIMPLE_FUN_SCAV_NWORDS(function_ptr
));
311 return n_header_words
+ code_instruction_words(code
->code_size
);
315 trans_code_header(lispobj object
)
319 ncode
= trans_code((struct code
*) native_pointer(object
));
320 return (lispobj
) LOW_WORD(ncode
) | OTHER_POINTER_LOWTAG
;
324 size_code_header(lispobj
*where
)
326 return code_header_words(((struct code
*)where
)->header
)
327 + code_instruction_words(((struct code
*)where
)->code_size
);
330 #ifdef RETURN_PC_HEADER_WIDETAG
332 scav_return_pc_header(lispobj
*where
, lispobj object
)
334 lose("attempted to scavenge a return PC header where=%p object=%#lx\n",
335 where
, (uword_t
) object
);
336 return 0; /* bogus return value to satisfy static type checking */
340 trans_return_pc_header(lispobj object
)
342 struct simple_fun
*return_pc
;
344 struct code
*code
, *ncode
;
346 return_pc
= (struct simple_fun
*) native_pointer(object
);
347 offset
= HeaderValue(return_pc
->header
) * N_WORD_BYTES
;
349 /* Transport the whole code object */
350 code
= (struct code
*) ((uword_t
) return_pc
- offset
);
351 ncode
= trans_code(code
);
353 return ((lispobj
) LOW_WORD(ncode
) + offset
) | OTHER_POINTER_LOWTAG
;
355 #endif /* RETURN_PC_HEADER_WIDETAG */
357 /* On the 386, closures hold a pointer to the raw address instead of the
358 * function object, so we can use CALL [$FDEFN+const] to invoke
359 * the function without loading it into a register. Given that code
360 * objects don't move, we don't need to update anything, but we do
361 * have to figure out that the function is still live. */
363 #if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
/* Scavenge a closure header on x86: untag the raw entry address back
 * to a simple-fun pointer, keep it live, and rewrite the raw address
 * if the function moved.
 * NOTE(review): lines are missing from this extraction (the 'fun'
 * declaration, the scavenge of 'fun', the return value and the closing
 * #endif); verify against upstream. */
365 scav_closure_header(lispobj
*where
, lispobj object
)
367 struct closure
*closure
;
370 closure
= (struct closure
*)where
;
371 fun
= closure
->fun
- FUN_RAW_ADDR_OFFSET
;
373 #ifdef LISP_FEATURE_GENCGC
374 /* The function may have moved so update the raw address. But
375 * don't write unnecessarily. */
376 if (closure
->fun
!= fun
+ FUN_RAW_ADDR_OFFSET
)
377 closure
->fun
= fun
+ FUN_RAW_ADDR_OFFSET
;
383 #if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
385 scav_fun_header(lispobj
*where
, lispobj object
)
387 lose("attempted to scavenge a function header where=%p object=%#lx\n",
388 where
, (uword_t
) object
);
389 return 0; /* bogus return value to satisfy static type checking */
391 #endif /* LISP_FEATURE_X86 */
394 trans_fun_header(lispobj object
)
396 struct simple_fun
*fheader
;
398 struct code
*code
, *ncode
;
400 fheader
= (struct simple_fun
*) native_pointer(object
);
401 offset
= HeaderValue(fheader
->header
) * N_WORD_BYTES
;
403 /* Transport the whole code object */
404 code
= (struct code
*) ((uword_t
) fheader
- offset
);
405 ncode
= trans_code(code
);
407 return ((lispobj
) LOW_WORD(ncode
) + offset
) | FUN_POINTER_LOWTAG
;
416 trans_instance(lispobj object
)
418 gc_dcheck(lowtag_of(object
) == INSTANCE_POINTER_LOWTAG
);
419 lispobj header
= *(lispobj
*)(object
- INSTANCE_POINTER_LOWTAG
);
420 return copy_object(object
, 1 + (instance_length(header
)|1));
/* Scavenge an INSTANCE_POINTER_LOWTAG reference: transport the
 * instance and leave a forwarding pointer behind.
 * NOTE(review): the tail of this function (updating *where and the
 * return value) is missing from this extraction; verify upstream. */
424 scav_instance_pointer(lispobj
*where
, lispobj object
)
426 lispobj copy
, *first_pointer
;
428 /* Object is a pointer into from space - not a FP. */
429 copy
= trans_instance(object
);
431 #ifdef LISP_FEATURE_GENCGC
432 gc_dcheck(copy
!= object
);
435 first_pointer
= native_pointer(object
);
436 set_forwarding_pointer(first_pointer
,copy
);
447 static lispobj
trans_list(lispobj object
);
/* Scavenge a LIST_POINTER_LOWTAG reference by transporting the cons
 * chain; trans_list() itself plants the forwarding pointers.
 * NOTE(review): the 'copy' declaration and the assignment of *where /
 * return appear to be missing from this extraction; verify upstream. */
450 scav_list_pointer(lispobj
*where
, lispobj object
)
453 gc_dcheck(lowtag_of(object
) == LIST_POINTER_LOWTAG
);
455 copy
= trans_list(object
);
456 gc_dcheck(copy
!= object
);
458 CHECK_COPY_POSTCONDITIONS(copy
, LIST_POINTER_LOWTAG
);
/* Transport a cons, then eagerly transport as much of its cdr chain as
 * still lies in from_space, laying the copies out consecutively so the
 * list ends up linearized in memory.
 * NOTE(review): some loop-control lines (e.g. the break after fixing
 * an already-forwarded cdr, and the advance of 'copy'/'cdr') are
 * missing from this extraction; verify upstream. */
466 trans_list(lispobj object
)
469 struct cons
*copy
= (struct cons
*)
470 gc_general_alloc(sizeof(struct cons
), BOXED_PAGE_FLAG
, ALLOC_QUICK
);
471 lispobj new_list_pointer
= make_lispobj(copy
, LIST_POINTER_LOWTAG
);
472 copy
->car
= CONS(object
)->car
;
473 /* Grab the cdr: set_forwarding_pointer will clobber it in GENCGC */
474 lispobj cdr
= CONS(object
)->cdr
;
475 set_forwarding_pointer((lispobj
*)CONS(object
), new_list_pointer
);
477 /* Try to linearize the list in the cdr direction to help reduce
479 while (lowtag_of(cdr
) == LIST_POINTER_LOWTAG
&& from_space_p(cdr
)) {
480 lispobj
* native_cdr
= (lispobj
*)CONS(cdr
);
481 if (forwarding_pointer_p(native_cdr
)) { // Might as well fix now.
482 cdr
= forwarding_pointer_value(native_cdr
);
486 struct cons
*cdr_copy
= (struct cons
*)
487 gc_general_alloc(sizeof(struct cons
), BOXED_PAGE_FLAG
, ALLOC_QUICK
);
488 cdr_copy
->car
= ((struct cons
*)native_cdr
)->car
;
489 /* Grab the cdr before it is clobbered. */
490 lispobj next
= ((struct cons
*)native_cdr
)->cdr
;
491 /* Set cdr of the predecessor, and store an FP. */
492 set_forwarding_pointer(native_cdr
,
493 copy
->cdr
= make_lispobj(cdr_copy
,
494 LIST_POINTER_LOWTAG
));
499 return new_list_pointer
;
504 * scavenging and transporting other pointers
/* Scavenge an OTHER_POINTER_LOWTAG reference by dispatching on the
 * widetag through transother[], forwarding only if the object moved.
 * NOTE(review): the bodies of the trailing #ifdef/#ifndef branches and
 * the return appear to be missing from this extraction. */
508 scav_other_pointer(lispobj
*where
, lispobj object
)
510 lispobj copy
, *first_pointer
;
512 gc_dcheck(lowtag_of(object
) == OTHER_POINTER_LOWTAG
);
514 /* Object is a pointer into from space - not FP. */
515 first_pointer
= (lispobj
*)(object
- OTHER_POINTER_LOWTAG
);
516 copy
= (transother
[widetag_of(*first_pointer
)])(object
);
518 // If the object was large, then instead of transporting it,
519 // gencgc might simply promote the pages and return the same pointer.
520 // That decision is made in general_copy_large_object().
521 if (copy
!= object
) {
522 set_forwarding_pointer(first_pointer
, copy
);
523 #ifdef LISP_FEATURE_GENCGC
527 #ifndef LISP_FEATURE_GENCGC
530 CHECK_COPY_POSTCONDITIONS(copy
, OTHER_POINTER_LOWTAG
);
535 * immediate, boxed, and unboxed objects
/* Immediate handlers. NOTE(review): the return-type lines and the
 * one-line bodies (scav_immediate/size_immediate/scav_boxed all return
 * a word count in upstream) are missing from this extraction. */
539 scav_immediate(lispobj
*where
, lispobj object
)
/* Immediates are never heap-allocated, so transporting one is a bug. */
545 trans_immediate(lispobj object
)
547 lose("trying to transport an immediate\n");
548 return NIL
; /* bogus return value to satisfy static type checking */
552 size_immediate(lispobj
*where
)
559 scav_boxed(lispobj
*where
, lispobj object
)
/* Return bit INDEX of the magnitude of non-negative BIGNUM, following
 * a forwarding pointer first if GC already moved the bignum. Bits past
 * the stored digits read as 0. */
564 boolean
positive_bignum_logbitp(int index
, struct bignum
* bignum
)
566 /* If the bignum in the layout has another pointer to it (besides the layout)
567 acting as a root, and which is scavenged first, then transporting the
568 bignum causes the layout to see a FP, as would copying an instance whose
569 layout that is. This is a nearly impossible scenario to create organically
570 in Lisp, because mostly nothing ever looks again at that exact (EQ) bignum
571 except for a few things that would cause it to be pinned anyway,
572 such as it being kept in a local variable during structure manipulation.
573 See 'interleaved-raw.impure.lisp' for a way to trigger this */
574 if (forwarding_pointer_p((lispobj
*)bignum
)) {
575 lispobj forwarded
= forwarding_pointer_value((lispobj
*)bignum
);
577 fprintf(stderr
, "GC bignum_logbitp(): fwd from %p to %p\n",
578 (void*)bignum
, (void*)forwarded
);
580 bignum
= (struct bignum
*)native_pointer(forwarded
);
583 int len
= HeaderValue(bignum
->header
);
584 int word_index
= index
/ N_WORD_BITS
;
585 int bit_index
= index
% N_WORD_BITS
;
586 if (word_index
>= len
) {
587 // just return 0 since the marking logic does not allow negative bignums
590 return (bignum
->digits
[word_index
] >> bit_index
) & 1;
/* Closure-like bundle passed through bitmap_scan: the slot vector base
 * plus the word-range callback to apply.
 * NOTE(review): the 'base' member line is missing from this
 * extraction; verify upstream. */
594 struct instance_scanner
{
596 void (*proc
)(lispobj
*, sword_t
);
599 // Helper function for helper function below, since lambda isn't a thing
600 static void instance_scan_range(void* arg
, int offset
, int nwords
)
602 struct instance_scanner
*scanner
= (struct instance_scanner
*)arg
;
603 scanner
->proc(scanner
->base
+ offset
, nwords
);
606 // Helper function for stepping through the tagged slots of an instance in
607 // scav_instance and verify_space.
/* Apply PROC to each slot of INSTANCE_SLOTS whose bit is set in
 * LAYOUT_BITMAP. A fixnum bitmap is shifted down and walked bit by
 * bit (its sign bit extends over all remaining slots); a bignum bitmap
 * is handed to bitmap_scan(), following a forwarding pointer first if
 * GC already moved the bignum.
 * NOTE(review): the 'index'/'n_words' declarations and the fixnum-case
 * bit test are missing from this extraction; verify upstream. */
609 instance_scan(void (*proc
)(lispobj
*, sword_t
),
610 lispobj
*instance_slots
,
612 lispobj layout_bitmap
)
616 /* This code might be made more efficient by run-length-encoding the ranges
617 of words to scan, but probably not by much */
619 if (fixnump(layout_bitmap
)) {
620 sword_t bitmap
= (sword_t
)layout_bitmap
>> N_FIXNUM_TAG_BITS
; // signed integer!
621 for (index
= 0; index
< n_words
; index
++, bitmap
>>= 1)
623 proc(instance_slots
+ index
, 1);
624 } else { /* huge bitmap */
625 struct bignum
* bitmap
;
626 bitmap
= (struct bignum
*)native_pointer(layout_bitmap
);
627 if (forwarding_pointer_p((lispobj
*)bitmap
))
628 bitmap
= (struct bignum
*)
629 native_pointer(forwarding_pointer_value((lispobj
*)bitmap
));
630 struct instance_scanner scanner
;
631 scanner
.base
= instance_slots
;
633 bitmap_scan((uword_t
*)bitmap
->digits
, HeaderValue(bitmap
->header
), 0,
634 instance_scan_range
, &scanner
);
/* Scan BITMAP (N_BITMAP_WORDS machine words), invoking PROC(arg,
 * start, len) for each maximal run of 1 bits; BIT_SCAN_INVERT scans
 * runs of 0s instead, and BIT_SCAN_CLEAR zeroes each word as read.
 * Uses ffsl() to find the first set bit of each word.
 * NOTE(review): several control-flow lines (the outer loop header,
 * 'shift' declaration/reset, breaks and closing braces) are missing
 * from this extraction; verify against upstream before compiling. */
638 void bitmap_scan(uword_t
* bitmap
, int n_bitmap_words
, int flags
,
639 void (*proc
)(void*, int, int), void* arg
)
641 uword_t sense
= (flags
& BIT_SCAN_INVERT
) ? ~0L : 0;
642 int start_word_index
= 0;
644 in_use_marker_t word
;
646 flags
= flags
& BIT_SCAN_CLEAR
;
648 // Rather than bzero'ing we can just clear each nonzero word as it's read,
650 #define BITMAP_REF(j) word = bitmap[j]; if(word && flags) bitmap[j] = 0; word ^= sense
653 int skip_bits
, start_bit
, start_position
, run_length
;
655 if (++start_word_index
>= n_bitmap_words
) break;
656 BITMAP_REF(start_word_index
);
660 // On each loop iteration, the lowest 1 bit is a "relative"
661 // bit index, since the word was already shifted. This is 'skip_bits'.
662 // Adding back in the total shift amount gives 'start_bit',
663 // the true absolute index within the current word.
664 // 'start_position' is absolute within the entire bitmap.
665 skip_bits
= ffsl(word
) - 1;
666 start_bit
= skip_bits
+ shift
;
667 start_position
= N_WORD_BITS
* start_word_index
+ start_bit
;
668 // Compute the number of consecutive 1s in the current word.
670 run_length
= ~word
? ffsl(~word
) - 1 : N_WORD_BITS
;
671 if (start_bit
+ run_length
< N_WORD_BITS
) { // Do not extend to additional words.
673 shift
+= skip_bits
+ run_length
;
675 int end_word_index
= ++start_word_index
;
677 if (end_word_index
>= n_bitmap_words
) {
679 run_length
+= (end_word_index
- start_word_index
) * N_WORD_BITS
;
682 BITMAP_REF(end_word_index
);
686 // end_word_index is the exclusive bound on contiguous
687 // words to include in the range. See if the low bits
688 // from the next word can extend the range.
689 shift
= ffsl(~word
) - 1;
691 run_length
+= (end_word_index
- start_word_index
) * N_WORD_BITS
696 start_word_index
= end_word_index
;
698 proc(arg
, start_position
, run_length
);
/* Scavenge an instance in place: keep its layout alive, then scavenge
 * the tagged slots — all of them when the layout bitmap is the fixnum
 * -1 (all-tagged fast path), else selectively via instance_scan().
 * Returns 1 + (length|1) words in upstream; the return and some #endif
 * lines are missing from this extraction — verify upstream. */
704 scav_instance(lispobj
*where
, lispobj header
)
706 lispobj
* layout
= (lispobj
*)instance_layout(where
);
709 layout
= native_pointer((lispobj
)layout
);
710 #ifdef LISP_FEATURE_COMPACT_INSTANCE_HEADER
711 if (__immobile_obj_gen_bits(layout
) == from_space
)
712 promote_immobile_obj(layout
, 1);
714 if (forwarding_pointer_p(layout
))
715 layout
= native_pointer(forwarding_pointer_value(layout
));
718 sword_t nslots
= instance_length(header
) | 1;
719 lispobj bitmap
= ((struct layout
*)layout
)->bitmap
;
720 if (bitmap
== make_fixnum(-1))
721 scavenge(where
+1, nslots
);
723 instance_scan(scavenge
, where
+1, nslots
, bitmap
);
728 #ifdef LISP_FEATURE_COMPACT_INSTANCE_HEADER
/* Funcallable instances share scav_instance when compact headers are
 * enabled; see the rationale below. */
730 scav_funinstance(lispobj
*where
, lispobj header
)
732 // This works because the layout is in the header word of all instances,
733 // ordinary and funcallable, when compact headers are enabled.
734 // The trampoline slot in the funcallable-instance is raw, but can be
735 // scavenged, because it points to readonly space, never oldspace.
736 // (And for certain backends it looks like a fixnum, not a pointer)
737 return scav_instance(where
, header
);
741 static lispobj
trans_boxed(lispobj object
)
743 gc_dcheck(is_lisp_pointer(object
));
744 sword_t length
= HeaderValue(*native_pointer(object
)) + 1;
745 return copy_object(object
, CEILING(length
, 2));
748 static sword_t
size_boxed(lispobj
*where
)
750 sword_t length
= HeaderValue(*where
) + 1;
751 return CEILING(length
, 2);
754 static lispobj
trans_short_boxed(lispobj object
) // Payload count expressed in 15 bits
756 sword_t length
= (HeaderValue(*native_pointer(object
)) & SHORT_HEADER_MAX_WORDS
) + 1;
757 return copy_object(object
, CEILING(length
, 2));
760 static sword_t
size_short_boxed(lispobj
*where
)
762 sword_t length
= (HeaderValue(*where
) & SHORT_HEADER_MAX_WORDS
) + 1;
763 return CEILING(length
, 2);
766 static lispobj
trans_tiny_boxed(lispobj object
) // Payload count expressed in 8 bits
768 sword_t length
= (HeaderValue(*native_pointer(object
)) & 0xFF) + 1;
769 return copy_object(object
, CEILING(length
, 2));
772 static sword_t
size_tiny_boxed(lispobj
*where
)
774 sword_t length
= (HeaderValue(*where
) & 0xFF) + 1;
775 return CEILING(length
, 2);
778 /* Note: on the sparc we don't have to do anything special for fdefns, */
779 /* 'cause the raw-addr has a function lowtag. */
780 #if !defined(LISP_FEATURE_SPARC) && !defined(LISP_FEATURE_ARM)
/* Scavenge an fdefn: the name and fun slots are ordinary boxed words;
 * the raw_addr slot encodes a code entry address that must be
 * re-derived if the callee moved.
 * NOTE(review): lines are missing from this extraction ('fdefn'
 * declaration, the x86-64 'new' temporary, return value, #endifs);
 * verify against upstream. */
782 scav_fdefn(lispobj
*where
, lispobj object
)
786 fdefn
= (struct fdefn
*)where
;
788 /* FSHOW((stderr, "scav_fdefn, function = %p, raw_addr = %p\n",
789 fdefn->fun, fdefn->raw_addr)); */
791 scavenge(where
+ 1, 2); // 'name' and 'fun'
792 #ifndef LISP_FEATURE_IMMOBILE_CODE
793 lispobj raw_fun
= (lispobj
)fdefn
->raw_addr
;
794 if (raw_fun
> READ_ONLY_SPACE_END
) {
795 lispobj simple_fun
= raw_fun
- FUN_RAW_ADDR_OFFSET
;
796 scavenge(&simple_fun
, 1);
797 /* Don't write unnecessarily. */
798 if (simple_fun
!= raw_fun
- FUN_RAW_ADDR_OFFSET
)
799 fdefn
->raw_addr
= (char *)simple_fun
+ FUN_RAW_ADDR_OFFSET
;
801 #elif defined(LISP_FEATURE_X86_64)
802 lispobj obj
= fdefn_raw_referent(fdefn
);
805 scavenge(&new, 1); // enliven
806 gc_dcheck(new == obj
); // must not move
809 # error "Need to implement scav_fdefn"
816 scav_unboxed(lispobj
*where
, lispobj object
)
818 sword_t length
= HeaderValue(object
) + 1;
819 return CEILING(length
, 2);
823 trans_unboxed(lispobj object
)
825 gc_dcheck(lowtag_of(object
) == OTHER_POINTER_LOWTAG
);
826 sword_t length
= HeaderValue(*native_pointer(object
)) + 1;
827 return copy_unboxed_object(object
, CEILING(length
, 2));
830 /* vector-like objects */
832 trans_vector(lispobj object
)
834 gc_dcheck(lowtag_of(object
) == OTHER_POINTER_LOWTAG
);
837 fixnum_value(((struct vector
*)native_pointer(object
))->length
);
838 return copy_large_object(object
, CEILING(length
+ 2, 2));
842 size_vector(lispobj
*where
)
844 sword_t length
= fixnum_value(((struct vector
*)where
)->length
);
845 return CEILING(length
+ 2, 2);
/* Generators for the scav/trans/size triple of each specialized vector
 * type; 'nwords' is an expression in 'length' giving the data size.
 * NOTE(review): the closing-brace lines of the three generated
 * functions are missing from this extraction; verify upstream. */
848 #define DEF_SCAV_TRANS_SIZE_UB(nbits) \
849 DEF_SPECIALIZED_VECTOR(vector_unsigned_byte_##nbits, NWORDS(length, nbits))
850 #define DEF_SPECIALIZED_VECTOR(name, nwords) \
851 static sword_t __attribute__((unused)) scav_##name(lispobj *where, lispobj header) { \
852 sword_t length = fixnum_value(((struct vector*)where)->length); \
853 return CEILING(nwords + 2, 2); \
855 static lispobj __attribute__((unused)) trans_##name(lispobj object) { \
856 gc_dcheck(lowtag_of(object) == OTHER_POINTER_LOWTAG); \
857 sword_t length = fixnum_value(((struct vector*)(object-OTHER_POINTER_LOWTAG))->length); \
858 return copy_large_unboxed_object(object, CEILING(nwords + 2, 2)); \
860 static sword_t __attribute__((unused)) size_##name(lispobj *where) { \
861 sword_t length = fixnum_value(((struct vector*)where)->length); \
862 return CEILING(nwords + 2, 2); \
865 DEF_SPECIALIZED_VECTOR(vector_nil
, 0*length
)
866 DEF_SPECIALIZED_VECTOR(vector_bit
, NWORDS(length
,1))
867 /* NOTE: strings contain one more element of data (a terminating '\0'
868 * to help interface with C functions) than indicated by the length slot.
869 * This is true even for UCS4 strings, despite that C APIs are unlikely
870 * to have a convention that expects 4 zero bytes. */
871 DEF_SPECIALIZED_VECTOR(base_string
, NWORDS((length
+1), 8))
872 DEF_SPECIALIZED_VECTOR(character_string
, NWORDS((length
+1), 32))
873 DEF_SCAV_TRANS_SIZE_UB(2)
874 DEF_SCAV_TRANS_SIZE_UB(4)
875 DEF_SCAV_TRANS_SIZE_UB(8)
876 DEF_SCAV_TRANS_SIZE_UB(16)
877 DEF_SCAV_TRANS_SIZE_UB(32)
878 DEF_SCAV_TRANS_SIZE_UB(64)
879 DEF_SCAV_TRANS_SIZE_UB(128)
880 #ifdef LONG_FLOAT_SIZE
881 DEF_SPECIALIZED_VECTOR(vector_long_float
, length
* LONG_FLOAT_SIZE
)
882 DEF_SPECIALIZED_VECTOR(vector_complex_long_float
, length
* (2 * LONG_FLOAT_SIZE
))
/* Transport a weak pointer; on cheneygc, also push breakable copies
 * onto the weak_pointers chain for the post-GC fixup pass.
 * NOTE(review): the 'copy' declaration, the assignment to
 * weak_pointers, the return, and closing #endif lines are missing from
 * this extraction; verify upstream. */
886 trans_weak_pointer(lispobj object
)
889 gc_dcheck(lowtag_of(object
) == OTHER_POINTER_LOWTAG
);
891 #if defined(DEBUG_WEAK)
892 printf("Transporting weak pointer from 0x%08x\n", object
);
895 /* Need to remember where all the weak pointers are that have */
896 /* been transported so they can be fixed up in a post-GC pass. */
898 copy
= copy_object(object
, WEAK_POINTER_NWORDS
);
899 #ifndef LISP_FEATURE_GENCGC
900 struct weak_pointer
*wp
= (struct weak_pointer
*) native_pointer(copy
);
902 gc_dcheck(widetag_of(wp
->header
)==WEAK_POINTER_WIDETAG
);
903 /* Push the weak pointer onto the list of weak pointers. */
904 if (weak_pointer_breakable_p(wp
)) {
905 wp
->next
= (struct weak_pointer
*)LOW_WORD(weak_pointers
);
/* Post-GC pass over the weak_pointers chain: update each weak pointer
 * whose referent survived (was forwarded), and break — store
 * UNBOUND_MARKER_WIDETAG into — those whose referent died.
 * NOTE(review): some lines (the fetch of next_wp, a break, closing
 * braces) are missing from this extraction; verify upstream. */
912 void scan_weak_pointers(void)
914 struct weak_pointer
*wp
, *next_wp
;
915 for (wp
= weak_pointers
, next_wp
= NULL
; wp
!= NULL
; wp
= next_wp
) {
916 gc_assert(widetag_of(wp
->header
)==WEAK_POINTER_WIDETAG
);
920 if (next_wp
== wp
) /* gencgc uses a ref to self for end of list */
923 gc_assert(is_lisp_pointer(wp
->value
));
924 lispobj
*value
= native_pointer(wp
->value
);
926 /* Now, we need to check whether the object has been forwarded. If
927 * it has been, the weak pointer is still good and needs to be
928 * updated. Otherwise, the weak pointer needs to be broken. */
930 if (from_space_p((lispobj
)value
)) {
931 wp
->value
= forwarding_pointer_p(value
) ?
932 LOW_WORD(forwarding_pointer_value(value
)) : UNBOUND_MARKER_WIDETAG
;
934 #ifdef LISP_FEATURE_IMMOBILE_SPACE
935 else if (immobile_space_p((lispobj
)value
) &&
936 immobile_obj_gen_bits(value
) == from_space
) {
937 wp
->value
= UNBOUND_MARKER_WIDETAG
;
941 lose("unbreakable pointer %p", wp
);
/* Mask that strips the tag bits from a pointer to form its EQ hash. */
948 #if N_WORD_BITS == 32
949 #define EQ_HASH_MASK 0x1fffffff
950 #elif N_WORD_BITS == 64
951 #define EQ_HASH_MASK 0x1fffffffffffffff
954 /* Compute the EQ-hash of KEY. This must match POINTER-HASH in
955 * target-hash-table.lisp. */
956 #define EQ_HASH(key) ((key) & EQ_HASH_MASK)
958 /* List of weak hash tables chained through their NEXT-WEAK-HASH-TABLE
959 * slot. Set to NULL at the end of a collection.
961 * This is not optimal because, when a table is tenured, it won't be
962 * processed automatically; only the youngest generation is GC'd by
963 * default. On the other hand, all applications will need an
964 * occasional full GC anyway, so it's not that bad either. */
965 struct hash_table
*weak_hash_tables
= NULL
;
967 /* Return true if OBJ has already survived the current GC. */
/* NOTE(review): the gencgc 'page_index < 0' early case and closing
 * #endif lines are missing from this extraction; verify upstream. */
968 static inline int pointer_survived_gc_yet(lispobj obj
)
970 #ifdef LISP_FEATURE_CHENEYGC
971 // This is the most straightforward definition.
972 return (!from_space_p(obj
) || forwarding_pointer_p(native_pointer(obj
)));
974 /* Check for a pointer to dynamic space before considering immobile space.
975 Based on the relative size of the spaces, this should be a win because
976 if the object is in the dynamic space and not the 'from' generation
977 we don't want to test immobile_space_p() at all.
978 Additionally, pinned_p() is both more expensive and less likely than
979 forwarding_pointer_p(), so we want to reverse those conditions, which
980 would not be possible with pinned_p() buried inside from_space_p(). */
981 page_index_t page_index
= find_page_index((void*)obj
);
983 return page_table
[page_index
].gen
!= from_space
||
984 forwarding_pointer_p(native_pointer(obj
)) ||
985 pinned_p(obj
, page_index
);
986 #ifdef LISP_FEATURE_IMMOBILE_SPACE
987 if (immobile_space_p(obj
))
988 return immobile_obj_gen_bits(native_pointer(obj
)) != from_space
;
994 #ifdef EMPTY_HT_SLOT /* only if it's a static symbol */
995 // "ish" because EMPTY_HT_SLOT is of course a pointer.
996 # define ht_cell_nonpointerish(x) (!is_lisp_pointer(x) || x==EMPTY_HT_SLOT)
998 # define ht_cell_nonpointerish(x) !is_lisp_pointer(x)
/* Liveness predicates for weak hash-table entries, one per weakness
 * kind: an entry is retained when the watched cell(s) survived GC. */
1001 static int survived_gc_yet_KEY(lispobj key
, lispobj value
) {
1002 return ht_cell_nonpointerish(key
) || pointer_survived_gc_yet(key
);
1004 static int survived_gc_yet_VALUE(lispobj key
, lispobj value
) {
1005 return ht_cell_nonpointerish(value
) || pointer_survived_gc_yet(value
);
1007 static int survived_gc_yet_AND(lispobj key
, lispobj value
) {
1008 int key_nonpointer
= ht_cell_nonpointerish(key
);
1009 int val_nonpointer
= ht_cell_nonpointerish(value
);
1010 if (key_nonpointer
&& val_nonpointer
) return 1;
1011 return (key_nonpointer
|| pointer_survived_gc_yet(key
))
1012 && (val_nonpointer
|| pointer_survived_gc_yet(value
));
1014 static int survived_gc_yet_OR(lispobj key
, lispobj value
) {
1015 int key_nonpointer
= ht_cell_nonpointerish(key
);
1016 int val_nonpointer
= ht_cell_nonpointerish(value
);
1017 if (key_nonpointer
|| val_nonpointer
) return 1;
1018 // Both MUST be pointers
1019 return pointer_survived_gc_yet(key
) || pointer_survived_gc_yet(value
);
/* Map a table's weakness symbol to the matching predicate; NIL means
 * the table is not weak (NULL => scavenge all entries). */
1022 static int (*weak_hash_entry_alivep_fun(lispobj weakness
))(lispobj
,lispobj
)
1025 case KEY
: return survived_gc_yet_KEY
;
1026 case VALUE
: return survived_gc_yet_VALUE
;
1027 case KEY_OR_VALUE
: return survived_gc_yet_OR
;
1028 case KEY_AND_VALUE
: return survived_gc_yet_AND
;
1029 case NIL
: return NULL
;
1030 default: lose("Bad hash table weakness");
1034 /* Return the beginning of data in ARRAY (skipping the header and the
1035 * length) or NULL if it isn't an array of the specified widetag after
/* NOTE(review): lines are missing from this extraction (the rest of
 * the comment above, the NULL-length guard, and the else branch that
 * returns NULL); verify against upstream. */
1037 static inline lispobj
*
1038 get_array_data (lispobj array
, int widetag
, uword_t
*length
)
1040 if (is_lisp_pointer(array
) && widetag_of(*native_pointer(array
)) == widetag
) {
1042 *length
= fixnum_value(native_pointer(array
)[1]);
1043 return native_pointer(array
) + 2;
1049 /* Only need to worry about scavenging the _real_ entries in the
1050 * table. Phantom entries such as the hash table itself at index 0 and
1051 * the empty marker at index 1 were scavenged by scav_vector that
1052 * either called this function directly or arranged for it to be
1053 * called later by pushing the hash table onto weak_hash_tables. */
/* Scavenge HASH_TABLE's key/value pairs.  For weak tables only the
 * entries whose aliveness predicate (chosen from the table's weakness)
 * holds are scavenged; when an EQ-based key moves, the table is
 * flagged via needs_rehash_p. */
1055 scav_hash_table_entries (struct hash_table
*hash_table
)
1059 lispobj
*index_vector
;
1061 lispobj
*next_vector
;
1062 uword_t next_vector_length
;
1063 lispobj
*hash_vector
;
1064 uword_t hash_vector_length
;
1065 lispobj empty_symbol
;
1066 lispobj weakness
= hash_table
->weakness
;
/* Fetch the backing vectors; any malformed one is fatal. */
1069 kv_vector
= get_array_data(hash_table
->table
,
1070 SIMPLE_VECTOR_WIDETAG
, &kv_length
);
1071 if (kv_vector
== NULL
)
1072 lose("invalid kv_vector %x\n", hash_table
->table
);
1074 index_vector
= get_array_data(hash_table
->index_vector
,
1075 SIMPLE_ARRAY_WORD_WIDETAG
, &length
);
1076 if (index_vector
== NULL
)
1077 lose("invalid index_vector %x\n", hash_table
->index_vector
);
1079 next_vector
= get_array_data(hash_table
->next_vector
,
1080 SIMPLE_ARRAY_WORD_WIDETAG
,
1081 &next_vector_length
);
1082 if (next_vector
== NULL
)
1083 lose("invalid next_vector %x\n", hash_table
->next_vector
);
/* hash_vector is optional: NULL here means a pure EQ-based table. */
1085 hash_vector
= get_array_data(hash_table
->hash_vector
,
1086 SIMPLE_ARRAY_WORD_WIDETAG
,
1087 &hash_vector_length
);
1088 if (hash_vector
!= NULL
)
1089 gc_assert(hash_vector_length
== next_vector_length
);
1091 /* These lengths could be different as the index_vector can be a
1092 * different length from the others, a larger index_vector could
1093 * help reduce collisions. */
1094 gc_assert(next_vector_length
*2 == kv_length
);
/* kv_vector[1] holds the marker symbol denoting an empty slot. */
1096 empty_symbol
= kv_vector
[1];
1097 /* fprintf(stderr,"* empty_symbol = %x\n", empty_symbol);*/
1098 if (widetag_of(*native_pointer(empty_symbol
)) != SYMBOL_HEADER_WIDETAG
) {
1099 lose("not a symbol where empty-hash-table-slot symbol expected: %x\n",
1100 *native_pointer(empty_symbol
));
1103 /* Work through the KV vector. */
/* NOTE(review): alivep_test is NULL for non-weak tables (see
 * weak_hash_entry_alivep_fun); presumably guarded before the
 * SCAV_ENTRIES invocation below -- confirm against full source. */
1104 int (*alivep_test
)(lispobj
,lispobj
) = weak_hash_entry_alivep_fun(weakness
)
;
1105 #define SCAV_ENTRIES(aliveness_predicate) \
1106 for (i = 1; i < next_vector_length; i++) { \
1107 lispobj old_key = kv_vector[2*i]; \
1108 lispobj __attribute__((unused)) value = kv_vector[2*i+1]; \
1109 if (aliveness_predicate) { \
1110 /* Scavenge the key and value. */ \
1111 scavenge(&kv_vector[2*i], 2); \
1112 /* If an EQ-based key has moved, mark the hash-table for rehash */ \
1113 if (!hash_vector || hash_vector[i] == MAGIC_HASH_VECTOR_VALUE) { \
1114 lispobj new_key = kv_vector[2*i]; \
1115 if (old_key != new_key && new_key != empty_symbol) \
1116 hash_table->needs_rehash_p = T; \
1119 SCAV_ENTRIES(alivep_test(old_key
, value
))
/* Scavenge a simple-vector that is flagged as a hash-table kv-vector.
 * Scavenges the table structure and marker slots, then either
 * scavenges the entries now (non-weak) or queues the table on
 * weak_hash_tables for deferred processing. */
1125 scav_vector (lispobj
*where
, lispobj object
)
1128 struct hash_table
*hash_table
;
1130 /* SB-VM:VECTOR-VALID-HASHING-SUBTYPE is set for EQ-based and weak
1131 * hash tables in the Lisp HASH-TABLE code to indicate need for
1132 * special GC support. */
/* An ordinary vector needs no special handling. */
1133 if ((HeaderValue(object
) & 0xFF) == subtype_VectorNormal
)
1136 kv_length
= fixnum_value(where
[1]);
1137 /*FSHOW((stderr,"/kv_length = %d\n", kv_length));*/
1139 /* Scavenge element 0, which may be a hash-table structure. */
1140 scavenge(where
+2, 1);
1141 if (!is_lisp_pointer(where
[2])) {
1142 /* This'll happen when REHASH clears the header of old-kv-vector
1143 * and fills it with zero, but some other thread simultaneously
1144 * sets the header in %%PUTHASH.
1147 "Warning: no pointer at %p in hash table: this indicates "
1148 "non-fatal corruption caused by concurrent access to a "
1149 "hash-table from multiple threads. Any accesses to "
1150 "hash-tables shared between threads should be protected "
1151 "by locks.\n", (void*)&where
[2]);
1152 // We've scavenged three words.
1155 hash_table
= (struct hash_table
*)native_pointer(where
[2]);
1156 /*FSHOW((stderr,"/hash_table = %x\n", hash_table));*/
1157 if (widetag_of(hash_table
->header
) != INSTANCE_HEADER_WIDETAG
) {
1158 lose("hash table not instance (%x at %x)\n",
1163 /* Scavenge element 1, which should be some internal symbol that
1164 * the hash table code reserves for marking empty slots. */
1165 scavenge(where
+3, 1);
1166 if (!is_lisp_pointer(where
[3])) {
1167 lose("not empty-hash-table-slot symbol pointer: %x\n", where
[3]);
1170 /* Scavenge hash table, which will fix the positions of the other
1171 * needed objects. */
1172 scavenge((lispobj
*)hash_table
,
1173 CEILING(sizeof(struct hash_table
) / sizeof(lispobj
), 2));
1175 /* Cross-check the kv_vector. */
1176 if (where
!= native_pointer(hash_table
->table
)) {
1177 lose("hash_table table!=this table %x\n", hash_table
->table
);
/* Non-weak tables are scavenged eagerly; weak ones are deferred. */
1180 if (hash_table
->weakness
== NIL
) {
1181 scav_hash_table_entries(hash_table
);
1183 /* Delay scavenging of this table by pushing it onto
1184 * weak_hash_tables (if it's not there already) for the weak
1186 if (hash_table
->next_weak_hash_table
== NIL
) {
1187 hash_table
->next_weak_hash_table
= (lispobj
)weak_hash_tables
;
1188 weak_hash_tables
= hash_table
;
/* Word count consumed: header + length + kv data, rounded to even. */
1192 return (CEILING(kv_length
+ 2, 2));
1196 scav_weak_hash_tables (void)
1198 struct hash_table
*table
;
1200 /* Scavenge entries whose triggers are known to survive. */
1201 for (table
= weak_hash_tables
; table
!= NULL
;
1202 table
= (struct hash_table
*)table
->next_weak_hash_table
) {
1203 scav_hash_table_entries(table
);
1207 /* Walk through the chain whose first element is *FIRST and remove
1208 * dead weak entries. */
/* PREV points at the bucket head (or a next_vector cell); dead
 * entries are unlinked from the chain and their kv cells cleared to
 * the empty marker, returning the slot to the free list. */
1210 scan_weak_hash_table_chain (struct hash_table
*hash_table
, lispobj
*prev
,
1211 lispobj
*kv_vector
, lispobj
*index_vector
,
1212 lispobj
*next_vector
, lispobj
*hash_vector
,
1213 lispobj empty_symbol
, int (*alivep_test
)(lispobj
,lispobj
))
1215 unsigned index
= *prev
;
/* NOTE(review): the loop header walking the chain is not visible in
 * this view; the statements below form that loop's body. */
1217 unsigned next
= next_vector
[index
];
1218 lispobj key
= kv_vector
[2 * index
];
1219 lispobj value
= kv_vector
[2 * index
+ 1];
/* Real entries never hold the empty-slot marker. */
1220 gc_assert(key
!= empty_symbol
);
1221 gc_assert(value
!= empty_symbol
);
/* Dead entry: unlink it, push the slot on the table's free list,
 * and clear the key/value cells. */
1222 if (!alivep_test(key
, value
)) {
1223 unsigned count
= fixnum_value(hash_table
->number_entries
);
1224 gc_assert(count
> 0);
1226 hash_table
->number_entries
= make_fixnum(count
- 1);
1227 next_vector
[index
] = fixnum_value(hash_table
->next_free_kv
);
1228 hash_table
->next_free_kv
= make_fixnum(index
);
1229 kv_vector
[2 * index
] = empty_symbol
;
1230 kv_vector
[2 * index
+ 1] = empty_symbol
;
/* Reset the stored hash so the recycled slot reads as EQ-based. */
1232 hash_vector
[index
] = MAGIC_HASH_VECTOR_VALUE
;
/* Live entry: advance PREV to this entry's chain link. */
1234 prev
= &next_vector
[index
];
/* Remove dead entries from one weak HASH_TABLE by scanning every
 * bucket chain rooted in its index_vector. */
1241 scan_weak_hash_table (struct hash_table
*hash_table
)
1244 lispobj
*index_vector
;
1245 uword_t length
= 0; /* prevent warning */
1246 lispobj
*next_vector
;
1247 uword_t next_vector_length
= 0; /* prevent warning */
1248 lispobj
*hash_vector
;
1249 lispobj empty_symbol
;
1250 lispobj weakness
= hash_table
->weakness
;
/* Lengths are not needed for the kv and hash vectors, hence NULL. */
1253 kv_vector
= get_array_data(hash_table
->table
,
1254 SIMPLE_VECTOR_WIDETAG
, NULL
);
1255 index_vector
= get_array_data(hash_table
->index_vector
,
1256 SIMPLE_ARRAY_WORD_WIDETAG
, &length
);
1257 next_vector
= get_array_data(hash_table
->next_vector
,
1258 SIMPLE_ARRAY_WORD_WIDETAG
,
1259 &next_vector_length
);
1260 hash_vector
= get_array_data(hash_table
->hash_vector
,
1261 SIMPLE_ARRAY_WORD_WIDETAG
, NULL
);
1262 empty_symbol
= kv_vector
[1];
/* Scan each bucket chain with the table's aliveness predicate. */
1264 for (i
= 0; i
< length
; i
++) {
1265 scan_weak_hash_table_chain(hash_table
, &index_vector
[i
],
1266 kv_vector
, index_vector
, next_vector
,
1267 hash_vector
, empty_symbol
,
1268 weak_hash_entry_alivep_fun(weakness
));
1272 /* Remove dead entries from weak hash tables. */
1274 scan_weak_hash_tables (void)
1276 struct hash_table
*table
, *next
;
1278 for (table
= weak_hash_tables
; table
!= NULL
; table
= next
) {
1279 next
= (struct hash_table
*)table
->next_weak_hash_table
;
1280 table
->next_weak_hash_table
= NIL
;
1281 scan_weak_hash_table(table
);
1284 weak_hash_tables
= NULL
;
/* Catch-all scavenge method: reaching it means the widetag has no
 * scavenger, i.e. the heap contains an unexpected object. */
1293 scav_lose(lispobj
*where
, lispobj object
)
1295 lose("no scavenge function for object %p (widetag 0x%x)\n",
1297 widetag_of(*where
));
1299 return 0; /* bogus return value to satisfy static type checking */
/* Catch-all transport method: no transporter for this widetag. */
1303 trans_lose(lispobj object
)
1305 lose("no transport function for object %p (widetag 0x%x)\n",
1307 widetag_of(*native_pointer(object
)));
1308 return NIL
; /* bogus return value to satisfy static type checking */
/* Catch-all size method: no sizer for this widetag. */
1312 size_lose(lispobj
*where
)
1314 lose("no size function for object at %p (widetag 0x%x)\n",
1316 widetag_of(*where
));
1317 return 1; /* bogus return value to satisfy static type checking */
1325 #include "genesis/gc-tables.h"
1328 static lispobj
*search_spaces(void *pointer
)
1331 if (((start
= search_dynamic_space(pointer
)) != NULL
) ||
1332 #ifdef LISP_FEATURE_IMMOBILE_SPACE
1333 ((start
= search_immobile_space(pointer
)) != NULL
) ||
1335 ((start
= search_static_space(pointer
)) != NULL
) ||
1336 ((start
= search_read_only_space(pointer
)) != NULL
))
1341 /* Find the code object for the given pc, or return NULL on
1344 component_ptr_from_pc(lispobj
*pc
)
1346 lispobj
*object
= search_spaces(pc
);
1348 if (object
!= NULL
&& widetag_of(*object
) == CODE_HEADER_WIDETAG
)
1354 /* Scan an area looking for an object which encloses the given pointer.
1355 * Return the object start on success, or NULL on failure. */
1357 gc_search_space3(void *pointer
, lispobj
*start
, void *limit
)
/* Quick reject: POINTER must lie inside [start, limit). */
1359 if (pointer
< (void*)start
|| pointer
>= limit
) return NULL
;
/* NOTE(review): the two loops below look like alternative debug and
 * production variants, presumably selected by a preprocessor
 * conditional not visible in this view -- confirm against full
 * source.  Both walk the heap object by object using sizetab. */
1363 /* CAUTION: this code is _significantly_ slower than the production version
1364 due to the extra checks for forwarding. Only use it if debugging */
1365 for ( ; (void*)start
< limit
; start
+= count
) {
1366 lispobj
*forwarded_start
;
/* Follow a forwarding pointer so the header can be read from the
 * object's new location. */
1367 if (forwarding_pointer_p(start
))
1368 forwarded_start
= native_pointer(forwarding_pointer_value(start
));
1370 forwarded_start
= start
;
1371 lispobj thing
= *forwarded_start
;
1372 count
= is_cons_half(thing
) ? 2 : sizetab
[widetag_of(thing
)](forwarded_start
);
1373 /* Check whether the pointer is within this object. */
1374 if (pointer
< (void*)(start
+count
)) return start
;
1377 for ( ; (void*)start
< limit
; start
+= count
) {
1378 lispobj thing
= *start
;
/* A cons occupies exactly two words; everything else asks sizetab. */
1379 count
= is_cons_half(thing
) ? 2 : sizetab
[widetag_of(thing
)](start
);
1380 /* Check whether the pointer is within this object. */
1381 if (pointer
< (void*)(start
+count
)) return start
;
1387 /* Helper for valid_lisp_pointer_p (below) and
1388 * conservative_root_p (gencgc).
1390 * pointer is the pointer to check validity of,
1391 * and start_addr is the address of the enclosing object.
/* Returns true when POINTER's lowtag is consistent with the header
 * of the object at START_ADDR.  START_ADDR must point at a real
 * object header; it is dereferenced unconditionally below. */
1394 properly_tagged_descriptor_p(void *thing
, lispobj
*start_addr
)
1396 lispobj pointer
= (lispobj
)thing
;
/* Immediates (fixnums, characters, ...) are not descriptors. */
1397 if (!is_lisp_pointer(pointer
)) {
1401 /* Check that the object pointed to is consistent with the pointer
1403 switch (lowtag_of(pointer
)) {
1404 case FUN_POINTER_LOWTAG
:
1405 /* Start_addr should be the enclosing code object, or a closure
1407 switch (widetag_of(*start_addr
)) {
1408 case CODE_HEADER_WIDETAG
:
1409 /* Make sure we actually point to a function in the code object,
1410 * as opposed to a random point there. */
1411 for_each_simple_fun(i
, function
, (struct code
*)start_addr
, 0, {
1412 if ((lispobj
)function
== pointer
-FUN_POINTER_LOWTAG
) return 1;
1415 case CLOSURE_HEADER_WIDETAG
:
1416 case FUNCALLABLE_INSTANCE_HEADER_WIDETAG
:
1417 return make_lispobj(start_addr
, FUN_POINTER_LOWTAG
) == pointer
;
1422 case LIST_POINTER_LOWTAG
:
/* Conses have no header; sanity-check both halves instead. */
1423 return make_lispobj(start_addr
, LIST_POINTER_LOWTAG
) == pointer
1424 && is_cons_half(start_addr
[0]) // Is it plausible?
1425 && is_cons_half(start_addr
[1]);
1427 case INSTANCE_POINTER_LOWTAG
:
1428 return make_lispobj(start_addr
, INSTANCE_POINTER_LOWTAG
) == pointer
1429 && widetag_of(*start_addr
) == INSTANCE_HEADER_WIDETAG
;
1431 case OTHER_POINTER_LOWTAG
:
1433 #if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
1434 /* The all-architecture test below is good as far as it goes,
1435 * but an LRA object is similar to a FUN-POINTER: It is
1436 * embedded within a CODE-OBJECT pointed to by start_addr, and
1437 * cannot be found by simply walking the heap, therefore we
1438 * need to check for it. -- AB, 2010-Jun-04 */
1439 if ((widetag_of(start_addr
[0]) == CODE_HEADER_WIDETAG
)) {
1440 lispobj
*potential_lra
= native_pointer(pointer
);
1441 if ((widetag_of(potential_lra
[0]) == RETURN_PC_HEADER_WIDETAG
) &&
1442 ((potential_lra
- HeaderValue(potential_lra
[0])) == start_addr
)) {
1443 return 1; /* It's as good as we can verify. */
/* Otherwise the pointer must address the header itself, and the
 * header must be a non-pointer (immediate-style) widetag. */
1448 if (pointer
!= make_lispobj(start_addr
, OTHER_POINTER_LOWTAG
)
1449 || !other_immediate_lowtag_p(*start_addr
))
/* Finally, reject widetags that are never the target of an
 * OTHER_POINTER (markers, characters, function headers, ...). */
1452 switch (widetag_of(start_addr
[0])) {
1453 case UNBOUND_MARKER_WIDETAG
:
1454 case NO_TLS_VALUE_MARKER_WIDETAG
:
1455 case CHARACTER_WIDETAG
:
1456 #if N_WORD_BITS == 64
1457 case SINGLE_FLOAT_WIDETAG
:
1461 /* only pointed to by function pointers? */
1462 case CLOSURE_HEADER_WIDETAG
:
1463 case FUNCALLABLE_INSTANCE_HEADER_WIDETAG
:
1466 case INSTANCE_HEADER_WIDETAG
:
1469 /* the valid other immediate pointer objects */
1470 case SIMPLE_VECTOR_WIDETAG
:
1472 case COMPLEX_WIDETAG
:
1473 #ifdef COMPLEX_SINGLE_FLOAT_WIDETAG
1474 case COMPLEX_SINGLE_FLOAT_WIDETAG
:
1476 #ifdef COMPLEX_DOUBLE_FLOAT_WIDETAG
1477 case COMPLEX_DOUBLE_FLOAT_WIDETAG
:
1479 #ifdef COMPLEX_LONG_FLOAT_WIDETAG
1480 case COMPLEX_LONG_FLOAT_WIDETAG
:
1482 #ifdef SIMD_PACK_WIDETAG
1483 case SIMD_PACK_WIDETAG
:
1485 case SIMPLE_ARRAY_WIDETAG
:
1486 case COMPLEX_BASE_STRING_WIDETAG
:
1487 #ifdef COMPLEX_CHARACTER_STRING_WIDETAG
1488 case COMPLEX_CHARACTER_STRING_WIDETAG
:
1490 case COMPLEX_VECTOR_NIL_WIDETAG
:
1491 case COMPLEX_BIT_VECTOR_WIDETAG
:
1492 case COMPLEX_VECTOR_WIDETAG
:
1493 case COMPLEX_ARRAY_WIDETAG
:
1494 case VALUE_CELL_HEADER_WIDETAG
:
1495 case SYMBOL_HEADER_WIDETAG
:
1497 case CODE_HEADER_WIDETAG
:
1498 case BIGNUM_WIDETAG
:
1499 #if N_WORD_BITS != 64
1500 case SINGLE_FLOAT_WIDETAG
:
1502 case DOUBLE_FLOAT_WIDETAG
:
1503 #ifdef LONG_FLOAT_WIDETAG
1504 case LONG_FLOAT_WIDETAG
:
1506 #include "genesis/specialized-vectors.inc"
1508 case WEAK_POINTER_WIDETAG
:
1523 /* META: Note the ambiguous word "validate" in the comment below.
1524 * This means "Decide whether <x> is valid".
1525 * But when you see os_validate() elsewhere, that doesn't mean to ask
1526 * whether something is valid, it says to *make* it valid.
1527 * I think it would be nice if we could avoid using the word in the
1528 * sense in which os_validate() uses it, which would entail renaming
1529 * a bunch of stuff, which is harder than just explaining why
1530 * the comments can be deceptive */
1532 /* Used by the debugger to validate possibly bogus pointers before
1533 * calling MAKE-LISP-OBJ on them.
1535 * FIXME: We would like to make this perfect, because if the debugger
1536 * constructs a reference to a bogus lisp object, and it ends up in a
1537 * location scavenged by the GC all hell breaks loose.
1539 * Whereas conservative_root_p has to be conservative
1540 * and return true for all valid pointers, this could actually be eager
1541 * and lie about a few pointers without bad results... but that should
1542 * be reflected in the name.
1545 valid_lisp_pointer_p(lispobj pointer
)
1547 lispobj
*start
= search_spaces((void*)pointer
);
1549 return properly_tagged_descriptor_p((void*)pointer
, start
);
/* Invoke SUB-GC from a signal context, handling the transition in
 * and out of Lisp, signal-mask restoration, and the optional POST-GC
 * call.  Returns true when a GC actually happened. */
1554 maybe_gc(os_context_t
*context
)
1556 lispobj gc_happened
;
1557 struct thread
*thread
= arch_os_get_current_thread();
1558 boolean were_in_lisp
= !foreign_function_call_active_p(thread
);
1561 fake_foreign_function_call(context
);
1564 /* SUB-GC may return without GCing if *GC-INHIBIT* is set, in
1565 * which case we will be running with no gc trigger barrier
1566 * thing for a while. But it shouldn't be long until the end
1569 * FIXME: It would be good to protect the end of dynamic space for
1570 * CheneyGC and signal a storage condition from there.
1573 /* Restore the signal mask from the interrupted context before
1574 * calling into Lisp if interrupts are enabled. Why not always?
1576 * Suppose there is a WITHOUT-INTERRUPTS block far, far out. If an
1577 * interrupt hits while in SUB-GC, it is deferred and the
1578 * os_context_sigmask of that interrupt is set to block further
1579 * deferrable interrupts (until the first one is
1580 * handled). Unfortunately, that context refers to this place and
1581 * when we return from here the signals will not be blocked.
1583 * A kludgy alternative is to propagate the sigmask change to the
1586 #if !(defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_SAFEPOINT))
1587 check_gc_signals_unblocked_or_lose(os_context_sigmask_addr(context
));
1588 unblock_gc_signals(0, 0);
1590 FSHOW((stderr
, "/maybe_gc: calling SUB_GC\n"));
1591 /* FIXME: Nothing must go wrong during GC else we end up running
1592 * the debugger, error handlers, and user code in general in a
1593 * potentially unsafe place. Running out of the control stack or
1594 * the heap in SUB-GC are ways to lose. Of course, deferrables
1595 * cannot be unblocked because there may be a pending handler, or
1596 * we may even be in a WITHOUT-INTERRUPTS. */
1597 gc_happened
= funcall0(StaticSymbolFunction(SUB_GC
));
1598 FSHOW((stderr
, "/maybe_gc: gc_happened=%s\n",
1599 (gc_happened
== NIL
)
1601 : ((gc_happened
== T
)
1604 /* gc_happened can take three values: T, NIL, 0.
1606 * T means that the thread managed to trigger a GC, and post-gc
1609 * NIL means that the thread is within without-gcing, and no GC
1612 * Finally, 0 means that *a* GC has occurred, but it wasn't
1613 * triggered by this thread; success, but post-gc doesn't have
1616 if ((gc_happened
== T
) &&
1617 /* See if interrupts are enabled or it's possible to enable
1618 * them. POST-GC has a similar check, but we don't want to
1619 * unlock deferrables in that case and get a pending interrupt
1621 ((SymbolValue(INTERRUPTS_ENABLED
,thread
) != NIL
) ||
1622 (SymbolValue(ALLOW_WITH_INTERRUPTS
,thread
) != NIL
))) {
1623 #ifndef LISP_FEATURE_WIN32
1624 sigset_t
*context_sigmask
= os_context_sigmask_addr(context
);
/* Only call POST-GC when the interrupted context was not already
 * blocking deferrable signals. */
1625 if (!deferrables_blocked_p(context_sigmask
)) {
1626 thread_sigmask(SIG_SETMASK
, context_sigmask
, 0);
1627 #ifndef LISP_FEATURE_SB_SAFEPOINT
1628 check_gc_signals_unblocked_or_lose(0);
1631 FSHOW((stderr
, "/maybe_gc: calling POST_GC\n"));
1632 funcall0(StaticSymbolFunction(POST_GC
));
1633 #ifndef LISP_FEATURE_WIN32
1635 FSHOW((stderr
, "/maybe_gc: punting on POST_GC due to blockage\n"));
1641 undo_fake_foreign_function_call(context
);
1643 /* Otherwise done by undo_fake_foreign_function_call. And
1644 something later wants them to be blocked. What a nice
1646 block_blockable_signals(0);
1649 FSHOW((stderr
, "/maybe_gc: returning\n"));
/* True iff some GC occurred (T or 0, i.e. anything but NIL). */
1650 return (gc_happened
!= NIL
);
1653 #define BYTES_ZERO_BEFORE_END (1<<12)
1655 /* There used to be a similar function called SCRUB-CONTROL-STACK in
1656 * Lisp and another called zero_stack() in cheneygc.c, but since it's
1657 * shorter to express in, and more often called from C, I keep only
1658 * the C one after fixing it. -- MG 2009-03-25 */
1660 /* Zero the unused portion of the control stack so that old objects
1661 * are not kept alive because of uninitialized stack variables.
1663 * "To summarize the problem, since not all allocated stack frame
1664 * slots are guaranteed to be written by the time you call an another
1665 * function or GC, there may be garbage pointers retained in your dead
1666 * stack locations. The stack scrubbing only affects the part of the
1667 * stack from the SP to the end of the allocated stack." - ram, on
1668 * cmucl-imp, Tue, 25 Sep 2001
1670 * So, as an (admittedly lame) workaround, from time to time we call
1671 * scrub-control-stack to zero out all the unused portion. This is
1672 * supposed to happen when the stack is mostly empty, so that we have
1673 * a chance of clearing more of it: callers are currently (2002.07.18)
1674 * REPL, SUB-GC and sig_stop_for_gc_handler. */
1676 /* Take care not to tread on the guard page and the hard guard page as
1677 * it would be unkind to sig_stop_for_gc_handler. Touching the return
1678 * guard page is not dangerous. For this to work the guard page must
1679 * be zeroed when protected. */
1681 /* FIXME: I think there is no guarantee that once
1682 * BYTES_ZERO_BEFORE_END bytes are zero the rest are also zero. This
1683 * may be what the "lame" adjective in the above comment is for. In
1684 * this case, exact gc may lose badly. */
/* Scrub (zero) the unused portion of the current thread's control
 * stack; see the discussion above. */
1686 scrub_control_stack()
1688 scrub_thread_control_stack(arch_os_get_current_thread());
/* Zero TH's control stack from the current stack pointer to the edge
 * of the guard pages, stopping early once a BYTES_ZERO_BEFORE_END
 * aligned region is found to be already zero. */
1692 scrub_thread_control_stack(struct thread
*th
)
1694 os_vm_address_t guard_page_address
= CONTROL_STACK_GUARD_PAGE(th
);
1695 os_vm_address_t hard_guard_page_address
= CONTROL_STACK_HARD_GUARD_PAGE(th
);
1696 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
1697 /* On these targets scrubbing from C is a bad idea, so we punt to
1698 * a routine in $ARCH-assem.S. */
1699 extern void arch_scrub_control_stack(struct thread
*, os_vm_address_t
, os_vm_address_t
);
1700 arch_scrub_control_stack(th
, guard_page_address
, hard_guard_page_address
);
1702 lispobj
*sp
= access_control_stack_pointer(th
);
/* If sp already sits inside a guard page (and, for the soft guard
 * page, protection is still on), there is nothing safe to scrub. */
1704 if ((((os_vm_address_t
)sp
< (hard_guard_page_address
+ os_vm_page_size
)) &&
1705 ((os_vm_address_t
)sp
>= hard_guard_page_address
)) ||
1706 (((os_vm_address_t
)sp
< (guard_page_address
+ os_vm_page_size
)) &&
1707 ((os_vm_address_t
)sp
>= guard_page_address
) &&
1708 (th
->control_stack_guard_page_protected
!= NIL
)))
/* NOTE(review): the do-loop bodies that perform the actual zeroing
 * are not visible in this view; only the loop terminators survive.
 * The two arms handle downward- and upward-growing stacks. */
1710 #ifdef LISP_FEATURE_STACK_GROWS_DOWNWARD_NOT_UPWARD
1713 } while (((uword_t
)sp
--) & (BYTES_ZERO_BEFORE_END
- 1));
1714 if ((os_vm_address_t
)sp
< (hard_guard_page_address
+ os_vm_page_size
))
1719 } while (((uword_t
)sp
--) & (BYTES_ZERO_BEFORE_END
- 1));
1723 } while (((uword_t
)++sp
) & (BYTES_ZERO_BEFORE_END
- 1));
1724 if ((os_vm_address_t
)sp
>= hard_guard_page_address
)
1729 } while (((uword_t
)++sp
) & (BYTES_ZERO_BEFORE_END
- 1));
1731 #endif /* LISP_FEATURE_C_STACK_IS_CONTROL_STACK */
1734 #if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
/* Precisely scavenge TH's control stack word by word (non-x86oid
 * targets keep only boxed data there, so this is exact, not
 * conservative). */
1737 scavenge_control_stack(struct thread
*th
)
1739 lispobj
*object_ptr
;
1741 /* In order to properly support dynamic-extent allocation of
1742 * non-CONS objects, the control stack requires special handling.
1743 * Rather than calling scavenge() directly, grovel over it fixing
1744 * broken hearts, scavenging pointers to oldspace, and pitching a
1745 * fit when encountering unboxed data. This prevents stray object
1746 * headers from causing the scavenger to blow past the end of the
1747 * stack (an error case checked in scavenge()). We don't worry
1748 * about treating unboxed words as boxed or vice versa, because
1749 * the compiler isn't allowed to store unboxed objects on the
1750 * control stack. -- AB, 2011-Dec-02 */
1752 for (object_ptr
= th
->control_stack_start
;
1753 object_ptr
< access_control_stack_pointer(th
);
1756 lispobj object
= *object_ptr
;
1757 #ifdef LISP_FEATURE_GENCGC
1758 if (forwarding_pointer_p(object_ptr
))
1759 lose("unexpected forwarding pointer in scavenge_control_stack: %p, start=%p, end=%p\n",
1760 object_ptr
, th
->control_stack_start
, access_control_stack_pointer(th
));
1762 if (is_lisp_pointer(object
) && from_space_p(object
)) {
1763 /* It currently points to old space. Check for a
1764 * forwarding pointer. */
1765 lispobj
*ptr
= native_pointer(object
);
1766 if (forwarding_pointer_p(ptr
)) {
1767 /* Yes, there's a forwarding pointer. */
1768 *object_ptr
= LOW_WORD(forwarding_pointer_value(ptr
));
1770 /* Scavenge that pointer. */
1771 long n_words_scavenged
=
1772 (scavtab
[widetag_of(object
)])(object_ptr
, object
);
/* Pointers on the stack are scavenged one word at a time. */
1773 gc_assert(n_words_scavenged
== 1);
1775 } else if (scavtab
[widetag_of(object
)] == scav_lose
) {
1776 lose("unboxed object in scavenge_control_stack: %p->%x, start=%p, end=%p\n",
1777 object_ptr
, object
, th
->control_stack_start
, access_control_stack_pointer(th
));
1782 /* Scavenging Interrupt Contexts */
1784 static int boxed_registers
[] = BOXED_REGISTERS
;
1786 /* The GC has a notion of an "interior pointer" register, an unboxed
1787 * register that typically contains a pointer to inside an object
1788 * referenced by another pointer. The most obvious of these is the
1789 * program counter, although many compiler backends define a "Lisp
1790 * Interior Pointer" register known to the runtime as reg_LIP, and
1791 * various CPU architectures have other registers that also partake of
1792 * the interior-pointer nature. As the code for pairing an interior
1793 * pointer value up with its "base" register, and fixing it up after
1794 * scavenging is complete is horribly repetitive, a few macros paper
1795 * over the monotony. --AB, 2010-Jul-14 */
1797 /* These macros are only ever used over a lexical environment which
1798 * defines a pointer to an os_context_t called context, thus we don't
1799 * bother to pass that context in as a parameter. */
1801 /* Define how to access a given interior pointer. */
1802 #define ACCESS_INTERIOR_POINTER_pc \
1803 *os_context_pc_addr(context)
1804 #define ACCESS_INTERIOR_POINTER_lip \
1805 *os_context_register_addr(context, reg_LIP)
1806 #define ACCESS_INTERIOR_POINTER_lr \
1807 *os_context_lr_addr(context)
1808 #define ACCESS_INTERIOR_POINTER_npc \
1809 *os_context_npc_addr(context)
1810 #define ACCESS_INTERIOR_POINTER_ctr \
1811 *os_context_ctr_addr(context)
1813 #define INTERIOR_POINTER_VARS(name) \
1814 uword_t name##_offset; \
1815 int name##_register_pair
1817 #define PAIR_INTERIOR_POINTER(name) \
1818 pair_interior_pointer(context, \
1819 ACCESS_INTERIOR_POINTER_##name, \
1821 &name##_register_pair)
1823 /* One complexity here is that if a paired register is not found for
1824 * an interior pointer, then that pointer does not get updated.
1825 * Originally, there was some commentary about using an index of -1
1826 * when calling os_context_register_addr() on SPARC referring to the
1827 * program counter, but the real reason is to allow an interior
1828 * pointer register to point to the runtime, read-only space, or
1829 * static space without problems. */
1830 #define FIXUP_INTERIOR_POINTER(name) \
1832 if (name##_register_pair >= 0) { \
1833 ACCESS_INTERIOR_POINTER_##name = \
1834 (*os_context_register_addr(context, \
1835 name##_register_pair) \
/* Find the boxed register whose (untagged) value is closest to
 * POINTER without exceeding it, storing the offset and register
 * index through SAVED_OFFSET / REGISTER_PAIR.  *register_pair is -1
 * when no suitable register exists. */
1843 pair_interior_pointer(os_context_t
*context
, uword_t pointer
,
1844 uword_t
*saved_offset
, int *register_pair
)
1849 * I (RLT) think this is trying to find the boxed register that is
1850 * closest to the LIP address, without going past it. Usually, it's
1851 * reg_CODE or reg_LRA. But sometimes, nothing can be found.
1853 /* 0x7FFFFFFF on 32-bit platforms;
1854 0x7FFFFFFFFFFFFFFF on 64-bit platforms */
1855 *saved_offset
= (((uword_t
)1) << (N_WORD_BITS
- 1)) - 1;
1856 *register_pair
= -1;
1857 for (i
= 0; i
< (sizeof(boxed_registers
) / sizeof(int)); i
++) {
1862 index
= boxed_registers
[i
];
1863 reg
= *os_context_register_addr(context
, index
);
1865 /* An interior pointer is never relative to a non-pointer
1866 * register (an oversight in the original implementation).
1867 * The simplest argument for why this is true is to consider
1868 * the fixnum that happens by coincidence to be the word-index in
1869 * memory of the header for some object plus two. This
1870 * happenstance would cause the register containing the fixnum
1871 * to be selected as the register_pair if the interior pointer
1872 * is to anywhere after the first two words of the object.
1873 * The fixnum won't be changed during GC, but the object might
1874 * move, thus destroying the interior pointer. --AB,
/* Keep the candidate with the smallest non-negative offset. */
1877 if (is_lisp_pointer(reg
) &&
1878 ((reg
& ~LOWTAG_MASK
) <= pointer
)) {
1879 offset
= pointer
- (reg
& ~LOWTAG_MASK
);
1880 if (offset
< *saved_offset
) {
1881 *saved_offset
= offset
;
1882 *register_pair
= index
;
/* Scavenge one saved interrupt CONTEXT: record each interior-pointer
 * register's offset from its paired boxed register, scavenge all
 * boxed registers, then recompute the interior pointers from the
 * (possibly moved) base registers. */
1889 scavenge_interrupt_context(os_context_t
* context
)
1893 /* FIXME: The various #ifdef noise here is precisely that: noise.
1894 * Is it possible to fold it into the macrology so that we have
1895 * one set of #ifdefs and then INTERIOR_POINTER_VARS /et alia/
1896 * compile out for the registers that don't exist on a given
1899 INTERIOR_POINTER_VARS(pc
);
1901 INTERIOR_POINTER_VARS(lip
);
1903 #ifdef ARCH_HAS_LINK_REGISTER
1904 INTERIOR_POINTER_VARS(lr
);
1906 #ifdef ARCH_HAS_NPC_REGISTER
1907 INTERIOR_POINTER_VARS(npc
);
1909 #ifdef LISP_FEATURE_PPC
1910 INTERIOR_POINTER_VARS(ctr
);
/* Pair every interior pointer with a boxed base register before
 * anything is allowed to move. */
1913 PAIR_INTERIOR_POINTER(pc
);
1915 PAIR_INTERIOR_POINTER(lip
);
1917 #ifdef ARCH_HAS_LINK_REGISTER
1918 PAIR_INTERIOR_POINTER(lr
);
1920 #ifdef ARCH_HAS_NPC_REGISTER
1921 PAIR_INTERIOR_POINTER(npc
);
1923 #ifdef LISP_FEATURE_PPC
1924 PAIR_INTERIOR_POINTER(ctr
);
1927 /* Scavenge all boxed registers in the context. */
1928 for (i
= 0; i
< (sizeof(boxed_registers
) / sizeof(int)); i
++) {
1932 index
= boxed_registers
[i
];
1933 foo
= *os_context_register_addr(context
, index
);
1935 *os_context_register_addr(context
, index
) = foo
;
1937 /* this is unlikely to work as intended on bigendian
1938 * 64 bit platforms */
1940 scavenge((lispobj
*) os_context_register_addr(context
, index
), 1);
1943 /* Now that the scavenging is done, repair the various interior
1945 FIXUP_INTERIOR_POINTER(pc
);
1947 FIXUP_INTERIOR_POINTER(lip
);
1949 #ifdef ARCH_HAS_LINK_REGISTER
1950 FIXUP_INTERIOR_POINTER(lr
);
1952 #ifdef ARCH_HAS_NPC_REGISTER
1953 FIXUP_INTERIOR_POINTER(npc
);
1955 #ifdef LISP_FEATURE_PPC
1956 FIXUP_INTERIOR_POINTER(ctr
);
/* Scavenge every active saved interrupt context of thread TH.  The
 * count of live contexts comes from FREE-INTERRUPT-CONTEXT-INDEX. */
1961 scavenge_interrupt_contexts(struct thread
*th
)
1964 os_context_t
*context
;
1966 index
= fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX
,th
));
1968 #if defined(DEBUG_PRINT_CONTEXT_INDEX)
1969 printf("Number of active contexts: %d\n", index
);
1972 for (i
= 0; i
< index
; i
++) {
1973 context
= th
->interrupt_contexts
[i
];
1974 scavenge_interrupt_context(context
);
1977 #endif /* x86oid targets */
/* Initialize UNPACKER to read varint-encoded bytes out of INTEGER,
 * which is either a fixnum (bytes live in the unpacker's own word)
 * or a bignum (bytes read in place from the bignum's digits). */
1979 void varint_unpacker_init(struct varint_unpacker
* unpacker
, lispobj integer
)
1981 if (fixnump(integer
)) {
/* Fixnum: copy the payload into the unpacker and point data at it. */
1982 unpacker
->word
= fixnum_value(integer
);
1983 unpacker
->limit
= N_WORD_BYTES
;
1984 unpacker
->data
= (char*)&unpacker
->word
;
/* Bignum: strip the lowtag and read the digit words directly. */
1986 struct bignum
* bignum
= (struct bignum
*)(integer
- OTHER_POINTER_LOWTAG
);
1988 unpacker
->limit
= HeaderValue(bignum
->header
) * N_WORD_BYTES
;
1989 unpacker
->data
= (char*)bignum
->digits
;
1991 unpacker
->index
= 0;
1994 // Fetch the next varint from 'unpacker' into 'result'.
1995 // Because there is no length prefix on the number of varints encoded,
1996 // spurious trailing zeros might be observed. The data consumer can
1997 // circumvent that by storing a count as the first value in the series.
1998 // Return 1 for success, 0 for EOF.
1999 int varint_unpack(struct varint_unpacker
* unpacker
, int* result
)
2001 if (unpacker
->index
>= unpacker
->limit
) return 0;
2002 int accumulator
= 0;
// Accumulate 7 payload bits per byte, LSB-group first, until a byte
// with its continuation bit (0x80) clear is seen.
2005 #ifdef LISP_FEATURE_LITTLE_ENDIAN
2006 int byte
= unpacker
->data
[unpacker
->index
];
2008 // bignums are little-endian in word order,
2009 // but machine-native within each word.
2010 // We could pack bytes MSB-to-LSB in the bigdigits,
2011 // but that seems less intuitive on the Lisp side.
2012 int word_index
= unpacker
->index
/ N_WORD_BYTES
;
2013 int byte_index
= unpacker
->index
% N_WORD_BYTES
;
2014 int byte
= (((unsigned int*)unpacker
->data
)[word_index
]
2015 >> (byte_index
* 8)) & 0xFF;
2018 accumulator
|= (byte
& 0x7F) << shift
;
2019 if (!(byte
& 0x80)) break;
// A set continuation bit promises at least one more byte.
2020 gc_assert(unpacker
->index
< unpacker
->limit
);
2023 *result
= accumulator
;