/*
 * Improve closure scavenging a tiny bit.
 * [sbcl.git] / src / runtime / gc-common.c
 * blob 3ca1cc0508c3ae59d6d3c0e434aa0846b8271347
 */
/*
 * Garbage Collection common functions for scavenging, moving and sizing
 * objects. These are for use with both GC (stop & copy GC) and GENCGC
 */

/*
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 */

/*
 * For a review of garbage collection techniques (e.g. generational
 * GC) and terminology (e.g. "scavenging") see Paul R. Wilson,
 * "Uniprocessor Garbage Collection Techniques". As of 20000618, this
 * had been accepted for _ACM Computing Surveys_ and was available
 * as a PostScript preprint through
 *   <http://www.cs.utexas.edu/users/oops/papers.html>
 * as
 *   <ftp://ftp.cs.utexas.edu/pub/garbage/bigsurv.ps>.
 */
28 #define _GNU_SOURCE /* for ffsl(3) from string.h */
30 #include <stdio.h>
31 #include <signal.h>
32 #include <string.h>
33 #include "sbcl.h"
34 #include "runtime.h"
35 #include "os.h"
36 #include "interr.h"
37 #include "globals.h"
38 #include "interrupt.h"
39 #include "validate.h"
40 #include "lispregs.h"
41 #include "arch.h"
42 #include "gc.h"
43 #include "genesis/primitive-objects.h"
44 #include "genesis/static-symbols.h"
45 #include "genesis/layout.h"
46 #include "genesis/hash-table.h"
47 #define WANT_SCAV_TRANS_SIZE_TABLES
48 #include "gc-internal.h"
49 #include "forwarding-ptr.h"
50 #include "var-io.h"
52 #ifdef LISP_FEATURE_SPARC
53 #define LONG_FLOAT_SIZE 4
54 #elif defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
55 #define LONG_FLOAT_SIZE 3
56 #endif
58 os_vm_size_t dynamic_space_size = DEFAULT_DYNAMIC_SPACE_SIZE;
59 os_vm_size_t thread_control_stack_size = DEFAULT_CONTROL_STACK_SIZE;
61 sword_t (*scavtab[256])(lispobj *where, lispobj object);
62 lispobj (*transother[256])(lispobj object);
63 sword_t (*sizetab[256])(lispobj *where);
64 struct weak_pointer *weak_pointers;
66 os_vm_size_t bytes_consed_between_gcs = 12*1024*1024;
68 /// These sizing macros return the number of *payload* words,
69 /// exclusive of the object header word. Payload length is always
70 /// an odd number so that total word count is an even number.
71 #define BOXED_NWORDS(obj) (HeaderValue(obj) | 1)
72 // Payload count expressed in 15 bits
73 #define SHORT_BOXED_NWORDS(obj) ((HeaderValue(obj) & SHORT_HEADER_MAX_WORDS) | 1)
74 // Payload count expressed in 8 bits
75 #define TINY_BOXED_NWORDS(obj) ((HeaderValue(obj) & 0xFF) | 1)
78 * copying objects
81 /* gc_general_copy_object is inline from gc-internal.h */
83 /* to copy a boxed object */
84 lispobj
85 copy_object(lispobj object, sword_t nwords)
87 return gc_general_copy_object(object, nwords, BOXED_PAGE_FLAG);
90 lispobj
91 copy_code_object(lispobj object, sword_t nwords)
93 return gc_general_copy_object(object, nwords, CODE_PAGE_FLAG);
96 static sword_t scav_lose(lispobj *where, lispobj object); /* forward decl */
98 static inline void scav1(lispobj* object_ptr, lispobj object)
100 // GENCGC only:
101 // * With 32-bit words, is_lisp_pointer(object) returns true if object_ptr
102 // points to a forwarding pointer, so we need a sanity check inside the
103 // branch for is_lisp_pointer(). For maximum efficiency, check that only
104 // after from_space_p() returns false, so that valid pointers into
105 // from_space incur no extra test. This could be improved further by
106 // skipping the FP check if 'object' points within dynamic space, i.e.,
107 // when find_page_index() returns >= 0. That would entail injecting
108 // from_space_p() explicitly into the loop, so as to separate the
109 // "was a page found at all" condition from the page generation test.
111 // * With 64-bit words, is_lisp_pointer(object) is false when object_ptr
112 // points to a forwarding pointer, and the fixnump() test also returns
113 // false, so we'll indirect through scavtab[]. This will safely invoke
114 // scav_lose(), detecting corruption without any extra cost.
115 // The major difference between that and the explicit test is that you
116 // won't see 'start' and 'n_words', but if you need those, chances are
117 // you'll want to run under an external debugger in the first place.
118 // [And btw it sure would be nice to assert statically
119 // that is_lisp_pointer(0x01) is indeed false]
121 #define FIX_POINTER() { \
122 lispobj *ptr = native_pointer(object); \
123 if (forwarding_pointer_p(ptr)) \
124 *object_ptr = LOW_WORD(forwarding_pointer_value(ptr)); \
125 else /* Scavenge that pointer. */ \
126 (void)scavtab[widetag_of(object)](object_ptr, object); \
128 #ifdef LISP_FEATURE_IMMOBILE_SPACE
129 page_index_t page;
130 // It would be fine, though suboptimal, to use from_space_p() here.
131 // If it returns false, we don't want to call immobile_space_p()
132 // unless the pointer is *not* into dynamic space.
133 if ((page = find_page_index((void*)object)) >= 0) {
134 if (page_table[page].gen == from_space && !pinned_p(object, page))
135 FIX_POINTER();
136 } else if (immobile_space_p(object)) {
137 lispobj *ptr = native_pointer(object);
138 if (immobile_obj_gen_bits(ptr) == from_space)
139 promote_immobile_obj(ptr, 1);
141 #else
142 if (from_space_p(object)) {
143 FIX_POINTER();
144 } else {
145 #if (N_WORD_BITS == 32) && defined(LISP_FEATURE_GENCGC)
146 if (forwarding_pointer_p(object_ptr))
147 lose("unexpected forwarding pointer in scavenge @ %p\n",
148 object_ptr);
149 #endif
150 /* It points somewhere other than oldspace. Leave it
151 * alone. */
153 #endif
156 // Scavenge a block of memory from 'start' to 'end'
157 // that may contain object headers.
158 void heap_scavenge(lispobj *start, lispobj *end)
160 lispobj *object_ptr;
162 for (object_ptr = start; object_ptr < end;) {
163 lispobj object = *object_ptr;
164 if (is_lisp_pointer(object)) {
165 scav1(object_ptr, object);
166 object_ptr++;
168 else if (fixnump(object)) {
169 /* It's a fixnum: really easy.. */
170 object_ptr++;
171 } else {
172 /* It's some sort of header object or another. */
173 object_ptr += (scavtab[widetag_of(object)])(object_ptr, object);
176 // This assertion is usually the one that fails when something
177 // is subtly wrong with the heap, so definitely always do it.
178 gc_assert_verbose(object_ptr == end, "Final object pointer %p, start %p, end %p\n",
179 object_ptr, start, end);
182 // Scavenge a block of memory from 'start' extending for 'n_words'
183 // that must not contain any object headers.
184 sword_t scavenge(lispobj *start, sword_t n_words)
186 lispobj *end = start + n_words;
187 lispobj *object_ptr;
188 for (object_ptr = start; object_ptr < end; object_ptr++) {
189 lispobj object = *object_ptr;
190 if (is_lisp_pointer(object)) scav1(object_ptr, object);
192 return n_words;
195 static lispobj trans_fun_header(lispobj object); /* forward decls */
196 static lispobj trans_short_boxed(lispobj object);
198 static sword_t
199 scav_fun_pointer(lispobj *where, lispobj object)
201 gc_dcheck(lowtag_of(object) == FUN_POINTER_LOWTAG);
203 /* Object is a pointer into from_space - not a FP. */
204 lispobj *first_pointer = native_pointer(object);
206 /* must transport object -- object may point to either a function
207 * header, a funcallable instance header, or a closure header. */
208 lispobj copy = widetag_of(*first_pointer) == SIMPLE_FUN_HEADER_WIDETAG
209 ? trans_fun_header(object) : trans_short_boxed(object);
211 if (copy != object) {
212 /* Set forwarding pointer */
213 set_forwarding_pointer(first_pointer,copy);
216 CHECK_COPY_POSTCONDITIONS(copy, FUN_POINTER_LOWTAG);
218 *where = copy;
220 return 1;
224 static struct code *
225 trans_code(struct code *code)
227 /* if object has already been transported, just return pointer */
228 if (forwarding_pointer_p((lispobj *)code)) {
229 #ifdef DEBUG_CODE_GC
230 printf("Was already transported\n");
231 #endif
232 return (struct code *)native_pointer(forwarding_pointer_value((lispobj*)code));
235 gc_dcheck(widetag_of(code->header) == CODE_HEADER_WIDETAG);
237 /* prepare to transport the code vector */
238 lispobj l_code = (lispobj) LOW_WORD(code) | OTHER_POINTER_LOWTAG;
239 sword_t nheader_words = code_header_words(code->header);
240 sword_t ncode_words = code_instruction_words(code->code_size);
241 sword_t nwords = nheader_words + ncode_words;
242 lispobj l_new_code = copy_code_object(l_code, nwords);
243 struct code *new_code = (struct code *) native_pointer(l_new_code);
245 #if defined(DEBUG_CODE_GC)
246 printf("Old code object at 0x%08x, new code object at 0x%08x.\n",
247 (uword_t) code, (uword_t) new_code);
248 printf("Code object is %d words long.\n", nwords);
249 #endif
251 #ifdef LISP_FEATURE_GENCGC
252 if (new_code == code)
253 return new_code;
254 #endif
256 set_forwarding_pointer((lispobj *)code, l_new_code);
258 /* set forwarding pointers for all the function headers in the */
259 /* code object. also fix all self pointers */
260 /* Do this by scanning the new code, since the old header is unusable */
262 uword_t displacement = l_new_code - l_code;
264 for_each_simple_fun(i, nfheaderp, new_code, 1, {
265 /* Calculate the old raw function pointer */
266 struct simple_fun* fheaderp =
267 (struct simple_fun*)LOW_WORD((char*)nfheaderp - displacement);
268 /* Calculate the new lispobj */
269 lispobj nfheaderl = make_lispobj(nfheaderp, FUN_POINTER_LOWTAG);
271 #ifdef DEBUG_CODE_GC
272 printf("fheaderp->header (at %x) <- %x\n",
273 &(fheaderp->header) , nfheaderl);
274 #endif
275 set_forwarding_pointer((lispobj *)fheaderp, nfheaderl);
277 /* fix self pointer. */
278 nfheaderp->self =
279 #if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
280 FUN_RAW_ADDR_OFFSET +
281 #endif
282 nfheaderl;
284 #ifdef LISP_FEATURE_GENCGC
285 /* Cheneygc doesn't need this os_flush_icache, it flushes the whole
286 spaces once when all copying is done. */
287 os_flush_icache((os_vm_address_t) (((sword_t *)new_code) + nheader_words),
288 ncode_words * sizeof(sword_t));
290 #endif
292 #ifdef LISP_FEATURE_X86
293 gencgc_apply_code_fixups(code, new_code);
294 #endif
296 return new_code;
299 static sword_t
300 scav_code_header(lispobj *where, lispobj header)
302 struct code *code = (struct code *) where;
303 sword_t n_header_words = code_header_words(header);
305 /* Scavenge the boxed section of the code data block. */
306 scavenge(where + 1, n_header_words - 1);
308 /* Scavenge the boxed section of each function object in the
309 * code data block. */
310 for_each_simple_fun(i, function_ptr, code, 1, {
311 scavenge(SIMPLE_FUN_SCAV_START(function_ptr),
312 SIMPLE_FUN_SCAV_NWORDS(function_ptr));
315 return n_header_words + code_instruction_words(code->code_size);
318 static lispobj
319 trans_code_header(lispobj object)
321 struct code *ncode = trans_code((struct code *) native_pointer(object));
322 return (lispobj) LOW_WORD(ncode) | OTHER_POINTER_LOWTAG;
325 static sword_t
326 size_code_header(lispobj *where)
328 return code_header_words(((struct code *)where)->header)
329 + code_instruction_words(((struct code *)where)->code_size);
#ifdef RETURN_PC_HEADER_WIDETAG
/* Return PC headers live inside code objects and are never scavenged
 * directly; reaching one here indicates heap corruption. */
static sword_t
scav_return_pc_header(lispobj *where, lispobj object)
{
    lose("attempted to scavenge a return PC header where=%p object=%#lx\n",
         where, (uword_t) object);
    return 0; /* bogus return value to satisfy static type checking */
}

/* Transport the code object containing this return PC, then re-derive
 * the interior pointer at the same offset into the copy. */
static lispobj
trans_return_pc_header(lispobj object)
{
    struct simple_fun *return_pc = (struct simple_fun *) native_pointer(object);
    uword_t offset = HeaderValue(return_pc->header) * N_WORD_BYTES;

    /* Transport the whole code object */
    struct code *code = (struct code *) ((uword_t) return_pc - offset);
    struct code *ncode = trans_code(code);

    return ((lispobj) LOW_WORD(ncode) + offset) | OTHER_POINTER_LOWTAG;
}
#endif /* RETURN_PC_HEADER_WIDETAG */
/* On the 386, closures hold a pointer to the raw address instead of the
 * function object, so we can use CALL [$FDEFN+const] to invoke
 * the function without loading it into a register. Given that code
 * objects don't move, we don't need to update anything, but we do
 * have to figure out that the function is still live. */

#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
static sword_t
scav_closure(lispobj *where, lispobj header)
{
    struct closure *closure = (struct closure *)where;
    int payload_words = SHORT_BOXED_NWORDS(header);
    /* Reconstitute the tagged function object from the raw entry address,
     * scavenge it, and write back the (possibly moved) raw address. */
    lispobj fun = closure->fun - FUN_RAW_ADDR_OFFSET;
    scavenge(&fun, 1);
#ifdef LISP_FEATURE_GENCGC
    /* The function may have moved so update the raw address. But
     * don't write unnecessarily. */
    if (closure->fun != fun + FUN_RAW_ADDR_OFFSET)
        closure->fun = fun + FUN_RAW_ADDR_OFFSET;
#endif
    // Payload includes 'fun' which was just looked at, so subtract it.
    scavenge(closure->info, payload_words - 1);
    return 1 + payload_words;
}
#endif
381 #if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
382 static sword_t
383 scav_fun_header(lispobj *where, lispobj object)
385 lose("attempted to scavenge a function header where=%p object=%#lx\n",
386 where, (uword_t) object);
387 return 0; /* bogus return value to satisfy static type checking */
389 #endif /* LISP_FEATURE_X86 */
391 static lispobj
392 trans_fun_header(lispobj object)
394 struct simple_fun *fheader = (struct simple_fun *) native_pointer(object);
395 uword_t offset = HeaderValue(fheader->header) * N_WORD_BYTES;
397 /* Transport the whole code object */
398 struct code *code = (struct code *) ((uword_t) fheader - offset);
399 struct code *ncode = trans_code(code);
401 return ((lispobj) LOW_WORD(ncode) + offset) | FUN_POINTER_LOWTAG;
406 * instances
409 static lispobj
410 trans_instance(lispobj object)
412 gc_dcheck(lowtag_of(object) == INSTANCE_POINTER_LOWTAG);
413 lispobj header = *(lispobj*)(object - INSTANCE_POINTER_LOWTAG);
414 return copy_object(object, 1 + (instance_length(header)|1));
417 static sword_t
418 scav_instance_pointer(lispobj *where, lispobj object)
420 /* Object is a pointer into from space - not a FP. */
421 lispobj copy = trans_instance(object);
423 gc_dcheck(copy != object);
425 set_forwarding_pointer(native_pointer(object), copy);
426 *where = copy;
428 return 1;
433 * lists and conses
436 static lispobj trans_list(lispobj object);
438 static sword_t
439 scav_list_pointer(lispobj *where, lispobj object)
441 gc_dcheck(lowtag_of(object) == LIST_POINTER_LOWTAG);
443 lispobj copy = trans_list(object);
444 gc_dcheck(copy != object);
446 CHECK_COPY_POSTCONDITIONS(copy, LIST_POINTER_LOWTAG);
448 *where = copy;
449 return 1;
453 static lispobj
454 trans_list(lispobj object)
456 /* Copy 'object'. */
457 struct cons *copy = (struct cons *)
458 gc_general_alloc(sizeof(struct cons), BOXED_PAGE_FLAG, ALLOC_QUICK);
459 lispobj new_list_pointer = make_lispobj(copy, LIST_POINTER_LOWTAG);
460 copy->car = CONS(object)->car;
461 /* Grab the cdr: set_forwarding_pointer will clobber it in GENCGC */
462 lispobj cdr = CONS(object)->cdr;
463 set_forwarding_pointer((lispobj *)CONS(object), new_list_pointer);
465 /* Try to linearize the list in the cdr direction to help reduce
466 * paging. */
467 while (lowtag_of(cdr) == LIST_POINTER_LOWTAG && from_space_p(cdr)) {
468 lispobj* native_cdr = (lispobj*)CONS(cdr);
469 if (forwarding_pointer_p(native_cdr)) { // Might as well fix now.
470 cdr = forwarding_pointer_value(native_cdr);
471 break;
473 /* Copy 'cdr'. */
474 struct cons *cdr_copy = (struct cons*)
475 gc_general_alloc(sizeof(struct cons), BOXED_PAGE_FLAG, ALLOC_QUICK);
476 cdr_copy->car = ((struct cons*)native_cdr)->car;
477 /* Grab the cdr before it is clobbered. */
478 lispobj next = ((struct cons*)native_cdr)->cdr;
479 /* Set cdr of the predecessor, and store an FP. */
480 set_forwarding_pointer(native_cdr,
481 copy->cdr = make_lispobj(cdr_copy,
482 LIST_POINTER_LOWTAG));
483 copy = cdr_copy;
484 cdr = next;
486 copy->cdr = cdr;
487 return new_list_pointer;
492 * scavenging and transporting other pointers
495 static sword_t
496 scav_other_pointer(lispobj *where, lispobj object)
498 gc_dcheck(lowtag_of(object) == OTHER_POINTER_LOWTAG);
500 /* Object is a pointer into from space - not FP. */
501 lispobj *first_pointer = (lispobj *)(object - OTHER_POINTER_LOWTAG);
502 lispobj copy = transother[widetag_of(*first_pointer)](object);
504 // If the object was large, then instead of transporting it,
505 // gencgc might simply promote the pages and return the same pointer.
506 // That decision is made in general_copy_large_object().
507 if (copy != object) {
508 set_forwarding_pointer(first_pointer, copy);
509 #ifdef LISP_FEATURE_GENCGC
510 *where = copy;
511 #endif
513 #ifndef LISP_FEATURE_GENCGC
514 *where = copy;
515 #endif
516 CHECK_COPY_POSTCONDITIONS(copy, OTHER_POINTER_LOWTAG);
517 return 1;
521 * immediate, boxed, and unboxed objects
524 static sword_t
525 scav_immediate(lispobj *where, lispobj object)
527 return 1;
530 static lispobj
531 trans_immediate(lispobj object)
533 lose("trying to transport an immediate\n");
534 return NIL; /* bogus return value to satisfy static type checking */
537 static sword_t
538 size_immediate(lispobj *where)
540 return 1;
544 boolean positive_bignum_logbitp(int index, struct bignum* bignum)
546 /* If the bignum in the layout has another pointer to it (besides the layout)
547 acting as a root, and which is scavenged first, then transporting the
548 bignum causes the layout to see a FP, as would copying an instance whose
549 layout that is. This is a nearly impossible scenario to create organically
550 in Lisp, because mostly nothing ever looks again at that exact (EQ) bignum
551 except for a few things that would cause it to be pinned anyway,
552 such as it being kept in a local variable during structure manipulation.
553 See 'interleaved-raw.impure.lisp' for a way to trigger this */
554 if (forwarding_pointer_p((lispobj*)bignum)) {
555 lispobj forwarded = forwarding_pointer_value((lispobj*)bignum);
556 #if 0
557 fprintf(stderr, "GC bignum_logbitp(): fwd from %p to %p\n",
558 (void*)bignum, (void*)forwarded);
559 #endif
560 bignum = (struct bignum*)native_pointer(forwarded);
563 int len = HeaderValue(bignum->header);
564 int word_index = index / N_WORD_BITS;
565 int bit_index = index % N_WORD_BITS;
566 if (word_index >= len) {
567 // just return 0 since the marking logic does not allow negative bignums
568 return 0;
569 } else {
570 return (bignum->digits[word_index] >> bit_index) & 1;
574 struct instance_scanner {
575 lispobj* base;
576 void (*proc)(lispobj*, sword_t);
579 // Helper function for helper function below, since lambda isn't a thing
580 static void instance_scan_range(void* arg, int offset, int nwords)
582 struct instance_scanner *scanner = (struct instance_scanner*)arg;
583 scanner->proc(scanner->base + offset, nwords);
586 // Helper function for stepping through the tagged slots of an instance in
587 // scav_instance and verify_space.
588 void
589 instance_scan(void (*proc)(lispobj*, sword_t),
590 lispobj *instance_slots,
591 sword_t n_words,
592 lispobj layout_bitmap)
594 sword_t index;
596 /* This code might be made more efficient by run-length-encoding the ranges
597 of words to scan, but probably not by much */
599 if (fixnump(layout_bitmap)) {
600 sword_t bitmap = (sword_t)layout_bitmap >> N_FIXNUM_TAG_BITS; // signed integer!
601 for (index = 0; index < n_words ; index++, bitmap >>= 1)
602 if (bitmap & 1)
603 proc(instance_slots + index, 1);
604 } else { /* huge bitmap */
605 struct bignum * bitmap;
606 bitmap = (struct bignum*)native_pointer(layout_bitmap);
607 if (forwarding_pointer_p((lispobj*)bitmap))
608 bitmap = (struct bignum*)
609 native_pointer(forwarding_pointer_value((lispobj*)bitmap));
610 struct instance_scanner scanner;
611 scanner.base = instance_slots;
612 scanner.proc = proc;
613 bitmap_scan((uword_t*)bitmap->digits, HeaderValue(bitmap->header), 0,
614 instance_scan_range, &scanner);
618 void bitmap_scan(uword_t* bitmap, int n_bitmap_words, int flags,
619 void (*proc)(void*, int, int), void* arg)
621 uword_t sense = (flags & BIT_SCAN_INVERT) ? ~0L : 0;
622 int start_word_index = 0;
623 int shift = 0;
624 in_use_marker_t word;
626 flags = flags & BIT_SCAN_CLEAR;
628 // Rather than bzero'ing we can just clear each nonzero word as it's read,
629 // if so specified.
630 #define BITMAP_REF(j) word = bitmap[j]; if(word && flags) bitmap[j] = 0; word ^= sense
631 BITMAP_REF(0);
632 while (1) {
633 int skip_bits, start_bit, start_position, run_length;
634 if (word == 0) {
635 if (++start_word_index >= n_bitmap_words) break;
636 BITMAP_REF(start_word_index);
637 shift = 0;
638 continue;
640 // On each loop iteration, the lowest 1 bit is a "relative"
641 // bit index, since the word was already shifted. This is 'skip_bits'.
642 // Adding back in the total shift amount gives 'start_bit',
643 // the true absolute index within the current word.
644 // 'start_position' is absolute within the entire bitmap.
645 skip_bits = ffsl(word) - 1;
646 start_bit = skip_bits + shift;
647 start_position = N_WORD_BITS * start_word_index + start_bit;
648 // Compute the number of consecutive 1s in the current word.
649 word >>= skip_bits;
650 run_length = ~word ? ffsl(~word) - 1 : N_WORD_BITS;
651 if (start_bit + run_length < N_WORD_BITS) { // Do not extend to additional words.
652 word >>= run_length;
653 shift += skip_bits + run_length;
654 } else {
655 int end_word_index = ++start_word_index;
656 while (1) {
657 if (end_word_index >= n_bitmap_words) {
658 word = 0;
659 run_length += (end_word_index - start_word_index) * N_WORD_BITS;
660 break;
662 BITMAP_REF(end_word_index);
663 if (~word == 0)
664 ++end_word_index;
665 else {
666 // end_word_index is the exclusive bound on contiguous
667 // words to include in the range. See if the low bits
668 // from the next word can extend the range.
669 shift = ffsl(~word) - 1;
670 word >>= shift;
671 run_length += (end_word_index - start_word_index) * N_WORD_BITS
672 + shift;
673 break;
676 start_word_index = end_word_index;
678 proc(arg, start_position, run_length);
680 #undef BITMAP_REF
683 static sword_t
684 scav_instance(lispobj *where, lispobj header)
686 lispobj* layout = (lispobj*)instance_layout(where);
687 lispobj lbitmap = make_fixnum(-1);
689 if (layout) {
690 layout = native_pointer((lispobj)layout);
691 #ifdef LISP_FEATURE_COMPACT_INSTANCE_HEADER
692 if (__immobile_obj_gen_bits(layout) == from_space)
693 promote_immobile_obj(layout, 1);
694 #else
695 if (forwarding_pointer_p(layout))
696 layout = native_pointer(forwarding_pointer_value(layout));
697 #endif
698 lbitmap = ((struct layout*)layout)->bitmap;
700 sword_t nslots = instance_length(header) | 1;
701 if (lbitmap == make_fixnum(-1))
702 scavenge(where+1, nslots);
703 else if (!fixnump(lbitmap))
704 instance_scan((void(*)(lispobj*,sword_t))scavenge,
705 where+1, nslots, lbitmap);
706 else {
707 sword_t bitmap = (sword_t)lbitmap >> N_FIXNUM_TAG_BITS; // signed integer!
708 sword_t n = nslots;
709 lispobj obj;
710 for ( ; n-- ; bitmap >>= 1) {
711 ++where;
712 if ((bitmap & 1) && is_lisp_pointer(obj = *where))
713 scav1(where, obj);
716 return 1 + nslots;
#ifdef LISP_FEATURE_COMPACT_INSTANCE_HEADER
static sword_t
scav_funinstance(lispobj *where, lispobj header)
{
    // This works because the layout is in the header word of all instances,
    // ordinary and funcallable, when compact headers are enabled.
    // The trampoline slot in the funcallable-instance is raw, but can be
    // scavenged, because it points to readonly space, never oldspace.
    // (And for certain backends it looks like a fixnum, not a pointer)
    return scav_instance(where, header);
}
#endif
732 //// Boxed object scav/trans/size functions
734 #define DEF_SCAV_BOXED(suffix, sizer) \
735 static sword_t __attribute__((unused)) \
736 scav_##suffix(lispobj *where, lispobj header) { \
737 return 1 + scavenge(where+1, sizer(header)); \
739 static lispobj trans_##suffix(lispobj object) { \
740 return copy_object(object, 1 + sizer(*native_pointer(object))); \
742 static sword_t size_##suffix(lispobj *where) { return 1 + sizer(*where); }
744 DEF_SCAV_BOXED(boxed, BOXED_NWORDS)
745 DEF_SCAV_BOXED(short_boxed, SHORT_BOXED_NWORDS)
746 DEF_SCAV_BOXED(tiny_boxed, TINY_BOXED_NWORDS)
748 /* Note: on the sparc we don't have to do anything special for fdefns, */
749 /* 'cause the raw-addr has a function lowtag. */
750 #if !defined(LISP_FEATURE_SPARC) && !defined(LISP_FEATURE_ARM)
751 static sword_t
752 scav_fdefn(lispobj *where, lispobj object)
754 struct fdefn *fdefn = (struct fdefn *)where;
756 /* FSHOW((stderr, "scav_fdefn, function = %p, raw_addr = %p\n",
757 fdefn->fun, fdefn->raw_addr)); */
759 scavenge(where + 1, 2); // 'name' and 'fun'
760 #ifndef LISP_FEATURE_IMMOBILE_CODE
761 lispobj raw_fun = (lispobj)fdefn->raw_addr;
762 if (raw_fun > READ_ONLY_SPACE_END) {
763 lispobj simple_fun = raw_fun - FUN_RAW_ADDR_OFFSET;
764 scavenge(&simple_fun, 1);
765 /* Don't write unnecessarily. */
766 if (simple_fun != raw_fun - FUN_RAW_ADDR_OFFSET)
767 fdefn->raw_addr = (char *)simple_fun + FUN_RAW_ADDR_OFFSET;
769 #elif defined(LISP_FEATURE_X86_64)
770 lispobj obj = fdefn_raw_referent(fdefn);
771 if (obj) {
772 lispobj new = obj;
773 scavenge(&new, 1); // enliven
774 gc_dcheck(new == obj); // must not move
776 #else
777 # error "Need to implement scav_fdefn"
778 #endif
779 return 4;
781 #endif
783 static sword_t
784 scav_unboxed(lispobj *where, lispobj object)
786 sword_t length = HeaderValue(object) + 1;
787 return CEILING(length, 2);
790 static lispobj
791 trans_unboxed(lispobj object)
793 gc_dcheck(lowtag_of(object) == OTHER_POINTER_LOWTAG);
794 sword_t length = HeaderValue(*native_pointer(object)) + 1;
795 return copy_unboxed_object(object, CEILING(length, 2));
798 /* vector-like objects */
799 static lispobj
800 trans_vector(lispobj object)
802 gc_dcheck(lowtag_of(object) == OTHER_POINTER_LOWTAG);
804 sword_t length =
805 fixnum_value(((struct vector*)native_pointer(object))->length);
806 return copy_large_object(object, CEILING(length + 2, 2));
809 static sword_t
810 size_vector(lispobj *where)
812 sword_t length = fixnum_value(((struct vector*)where)->length);
813 return CEILING(length + 2, 2);
816 #define DEF_SCAV_TRANS_SIZE_UB(nbits) \
817 DEF_SPECIALIZED_VECTOR(vector_unsigned_byte_##nbits, NWORDS(length, nbits))
818 #define DEF_SPECIALIZED_VECTOR(name, nwords) \
819 static sword_t __attribute__((unused)) scav_##name(lispobj *where, lispobj header) { \
820 sword_t length = fixnum_value(((struct vector*)where)->length); \
821 return CEILING(nwords + 2, 2); \
823 static lispobj __attribute__((unused)) trans_##name(lispobj object) { \
824 gc_dcheck(lowtag_of(object) == OTHER_POINTER_LOWTAG); \
825 sword_t length = fixnum_value(((struct vector*)(object-OTHER_POINTER_LOWTAG))->length); \
826 return copy_large_unboxed_object(object, CEILING(nwords + 2, 2)); \
828 static sword_t __attribute__((unused)) size_##name(lispobj *where) { \
829 sword_t length = fixnum_value(((struct vector*)where)->length); \
830 return CEILING(nwords + 2, 2); \
833 DEF_SPECIALIZED_VECTOR(vector_nil, 0*length)
834 DEF_SPECIALIZED_VECTOR(vector_bit, NWORDS(length,1))
835 /* NOTE: strings contain one more element of data (a terminating '\0'
836 * to help interface with C functions) than indicated by the length slot.
837 * This is true even for UCS4 strings, despite that C APIs are unlikely
838 * to have a convention that expects 4 zero bytes. */
839 DEF_SPECIALIZED_VECTOR(base_string, NWORDS((length+1), 8))
840 DEF_SPECIALIZED_VECTOR(character_string, NWORDS((length+1), 32))
841 DEF_SCAV_TRANS_SIZE_UB(2)
842 DEF_SCAV_TRANS_SIZE_UB(4)
843 DEF_SCAV_TRANS_SIZE_UB(8)
844 DEF_SCAV_TRANS_SIZE_UB(16)
845 DEF_SCAV_TRANS_SIZE_UB(32)
846 DEF_SCAV_TRANS_SIZE_UB(64)
847 DEF_SCAV_TRANS_SIZE_UB(128)
848 #ifdef LONG_FLOAT_SIZE
849 DEF_SPECIALIZED_VECTOR(vector_long_float, length * LONG_FLOAT_SIZE)
850 DEF_SPECIALIZED_VECTOR(vector_complex_long_float, length * (2 * LONG_FLOAT_SIZE))
851 #endif
853 static lispobj
854 trans_weak_pointer(lispobj object)
856 lispobj copy;
857 gc_dcheck(lowtag_of(object) == OTHER_POINTER_LOWTAG);
859 #if defined(DEBUG_WEAK)
860 printf("Transporting weak pointer from 0x%08x\n", object);
861 #endif
863 /* Need to remember where all the weak pointers are that have */
864 /* been transported so they can be fixed up in a post-GC pass. */
866 copy = copy_object(object, WEAK_POINTER_NWORDS);
867 #ifndef LISP_FEATURE_GENCGC
868 struct weak_pointer *wp = (struct weak_pointer *) native_pointer(copy);
870 gc_dcheck(widetag_of(wp->header)==WEAK_POINTER_WIDETAG);
871 /* Push the weak pointer onto the list of weak pointers. */
872 if (weak_pointer_breakable_p(wp)) {
873 wp->next = (struct weak_pointer *)LOW_WORD(weak_pointers);
874 weak_pointers = wp;
876 #endif
877 return copy;
880 void scan_weak_pointers(void)
882 struct weak_pointer *wp, *next_wp;
883 for (wp = weak_pointers, next_wp = NULL; wp != NULL; wp = next_wp) {
884 gc_assert(widetag_of(wp->header)==WEAK_POINTER_WIDETAG);
886 next_wp = wp->next;
887 wp->next = NULL;
888 if (next_wp == wp) /* gencgc uses a ref to self for end of list */
889 next_wp = NULL;
891 gc_assert(is_lisp_pointer(wp->value));
892 lispobj *value = native_pointer(wp->value);
894 /* Now, we need to check whether the object has been forwarded. If
895 * it has been, the weak pointer is still good and needs to be
896 * updated. Otherwise, the weak pointer needs to be broken. */
898 if (from_space_p((lispobj)value)) {
899 wp->value = forwarding_pointer_p(value) ?
900 LOW_WORD(forwarding_pointer_value(value)) : UNBOUND_MARKER_WIDETAG;
902 #ifdef LISP_FEATURE_IMMOBILE_SPACE
903 else if (immobile_space_p((lispobj)value) &&
904 immobile_obj_gen_bits(value) == from_space) {
905 wp->value = UNBOUND_MARKER_WIDETAG;
907 #endif
908 else
909 lose("unbreakable pointer %p", wp);
914 /* Hash tables */
916 #if N_WORD_BITS == 32
917 #define EQ_HASH_MASK 0x1fffffff
918 #elif N_WORD_BITS == 64
919 #define EQ_HASH_MASK 0x1fffffffffffffff
920 #endif
922 /* Compute the EQ-hash of KEY. This must match POINTER-HASH in
923 * target-hash-table.lisp. */
924 #define EQ_HASH(key) ((key) & EQ_HASH_MASK)
926 /* List of weak hash tables chained through their NEXT-WEAK-HASH-TABLE
927 * slot. Set to NULL at the end of a collection.
929 * This is not optimal because, when a table is tenured, it won't be
930 * processed automatically; only the yougest generation is GC'd by
931 * default. On the other hand, all applications will need an
932 * occasional full GC anyway, so it's not that bad either. */
933 struct hash_table *weak_hash_tables = NULL;
935 /* Return true if OBJ has already survived the current GC. */
936 static inline int pointer_survived_gc_yet(lispobj obj)
938 #ifdef LISP_FEATURE_CHENEYGC
939 // This is the most straightforward definition.
940 return (!from_space_p(obj) || forwarding_pointer_p(native_pointer(obj)));
941 #else
942 /* Check for a pointer to dynamic space before considering immobile space.
943 Based on the relative size of the spaces, this should be a win because
944 if the object is in the dynamic space and not the 'from' generation
945 we don't want to test immobile_space_p() at all.
946 Additionally, pinned_p() is both more expensive and less likely than
947 forwarding_pointer_p(), so we want to reverse those conditions, which
948 would not be possible with pinned_p() buried inside from_space_p(). */
949 page_index_t page_index = find_page_index((void*)obj);
950 if (page_index >= 0)
951 return page_table[page_index].gen != from_space ||
952 forwarding_pointer_p(native_pointer(obj)) ||
953 pinned_p(obj, page_index);
954 #ifdef LISP_FEATURE_IMMOBILE_SPACE
955 if (immobile_space_p(obj))
956 return immobile_obj_gen_bits(native_pointer(obj)) != from_space;
957 #endif
958 return 1;
959 #endif
/* True when a hash-table cell X needs no liveness consideration:
 * either it is not a lisp pointer at all, or it is the static
 * empty-slot marker. */
#ifdef EMPTY_HT_SLOT /* only if it's a static symbol */
// "ish" because EMPTY_HT_SLOT is of course a pointer.
# define ht_cell_nonpointerish(x) (!is_lisp_pointer(x) || x==EMPTY_HT_SLOT)
#else
# define ht_cell_nonpointerish(x) !is_lisp_pointer(x)
#endif
/* Aliveness predicate for :WEAKNESS :KEY tables: the entry is alive
 * iff the key survives (non-pointer keys trivially survive). */
static int survived_gc_yet_KEY(lispobj key, lispobj value) {
    return ht_cell_nonpointerish(key) || pointer_survived_gc_yet(key);
/* Aliveness predicate for :WEAKNESS :VALUE tables: the entry is alive
 * iff the value survives. */
static int survived_gc_yet_VALUE(lispobj key, lispobj value) {
    return ht_cell_nonpointerish(value) || pointer_survived_gc_yet(value);
/* Aliveness predicate for :WEAKNESS :KEY-AND-VALUE tables: both
 * halves must survive for the entry to be kept. */
static int survived_gc_yet_AND(lispobj key, lispobj value) {
    int key_nonpointer = ht_cell_nonpointerish(key);
    int val_nonpointer = ht_cell_nonpointerish(value);
    if (key_nonpointer && val_nonpointer) return 1;
    return (key_nonpointer || pointer_survived_gc_yet(key))
        && (val_nonpointer || pointer_survived_gc_yet(value));
/* Aliveness predicate for :WEAKNESS :KEY-OR-VALUE tables: either half
 * surviving keeps the entry. */
static int survived_gc_yet_OR(lispobj key, lispobj value) {
    int key_nonpointer = ht_cell_nonpointerish(key);
    int val_nonpointer = ht_cell_nonpointerish(value);
    if (key_nonpointer || val_nonpointer) return 1;
    // Both MUST be pointers
    return pointer_survived_gc_yet(key) || pointer_survived_gc_yet(value);
/* Map a table's WEAKNESS slot to the matching aliveness predicate,
 * or NULL for a non-weak (:WEAKNESS NIL) table. */
static int (*weak_hash_entry_alivep_fun(lispobj weakness))(lispobj,lispobj)
    switch (weakness) {
    case KEY: return survived_gc_yet_KEY;
    case VALUE: return survived_gc_yet_VALUE;
    case KEY_OR_VALUE: return survived_gc_yet_OR;
    case KEY_AND_VALUE: return survived_gc_yet_AND;
    case NIL: return NULL;
    default: lose("Bad hash table weakness");
/* Return the beginning of data in ARRAY (skipping the header and the
 * length) or NULL if it isn't an array of the specified widetag after
 * all. If LENGTH is non-NULL, the array's element count is stored
 * through it. */
static inline lispobj *
get_array_data (lispobj array, int widetag, uword_t *length)
    if (is_lisp_pointer(array) && widetag_of(*native_pointer(array)) == widetag) {
        if (length != NULL)
            *length = fixnum_value(native_pointer(array)[1]);
        /* Skip the header word and the length word. */
        return native_pointer(array) + 2;
    } else {
        return NULL;
/* Only need to worry about scavenging the _real_ entries in the
 * table. Phantom entries such as the hash table itself at index 0 and
 * the empty marker at index 1 were scavenged by scav_vector that
 * either called this function directly or arranged for it to be
 * called later by pushing the hash table onto weak_hash_tables. */
static void
scav_hash_table_entries (struct hash_table *hash_table)
    lispobj *kv_vector;
    uword_t kv_length;
    lispobj *index_vector;
    uword_t length;
    lispobj *next_vector;
    uword_t next_vector_length;
    lispobj *hash_vector;
    uword_t hash_vector_length;
    lispobj empty_symbol;
    lispobj weakness = hash_table->weakness;
    uword_t i;

    kv_vector = get_array_data(hash_table->table,
                               SIMPLE_VECTOR_WIDETAG, &kv_length);
    if (kv_vector == NULL)
        lose("invalid kv_vector %x\n", hash_table->table);

    index_vector = get_array_data(hash_table->index_vector,
                                  SIMPLE_ARRAY_WORD_WIDETAG, &length);
    if (index_vector == NULL)
        lose("invalid index_vector %x\n", hash_table->index_vector);

    next_vector = get_array_data(hash_table->next_vector,
                                 SIMPLE_ARRAY_WORD_WIDETAG,
                                 &next_vector_length);
    if (next_vector == NULL)
        lose("invalid next_vector %x\n", hash_table->next_vector);

    /* The hash vector may be absent; when present it parallels the
     * next vector. */
    hash_vector = get_array_data(hash_table->hash_vector,
                                 SIMPLE_ARRAY_WORD_WIDETAG,
                                 &hash_vector_length);
    if (hash_vector != NULL)
        gc_assert(hash_vector_length == next_vector_length);

    /* These lengths could be different as the index_vector can be a
     * different length from the others, a larger index_vector could
     * help reduce collisions. */
    gc_assert(next_vector_length*2 == kv_length);

    empty_symbol = kv_vector[1];
    /* fprintf(stderr,"* empty_symbol = %x\n", empty_symbol);*/
    if (widetag_of(*native_pointer(empty_symbol)) != SYMBOL_HEADER_WIDETAG) {
        lose("not a symbol where empty-hash-table-slot symbol expected: %x\n",
             *native_pointer(empty_symbol));

    /* Work through the KV vector. kv_vector[2*i] is the key of entry i,
     * kv_vector[2*i+1] its value. */
    int (*alivep_test)(lispobj,lispobj) = weak_hash_entry_alivep_fun(weakness);
#define SCAV_ENTRIES(aliveness_predicate) \
    for (i = 1; i < next_vector_length; i++) { \
        lispobj old_key = kv_vector[2*i]; \
        lispobj __attribute__((unused)) value = kv_vector[2*i+1]; \
        if (aliveness_predicate) { \
            /* Scavenge the key and value. */ \
            scavenge(&kv_vector[2*i], 2); \
            /* If an EQ-based key has moved, mark the hash-table for rehash */ \
            if (!hash_vector || hash_vector[i] == MAGIC_HASH_VECTOR_VALUE) { \
                lispobj new_key = kv_vector[2*i]; \
                if (old_key != new_key && new_key != empty_symbol) \
                    hash_table->needs_rehash_p = T; \
    if (alivep_test)
        SCAV_ENTRIES(alivep_test(old_key, value))
    else
        SCAV_ENTRIES(1)
/* Scavenge a simple-vector, giving hash-table backing vectors the
 * special treatment they need. */
sword_t
scav_vector (lispobj *where, lispobj object)
    uword_t kv_length;
    struct hash_table *hash_table;

    /* SB-VM:VECTOR-VALID-HASHING-SUBTYPE is set for EQ-based and weak
     * hash tables in the Lisp HASH-TABLE code to indicate need for
     * special GC support. */
    if ((HeaderValue(object) & 0xFF) == subtype_VectorNormal) {
        /* An ordinary vector: scavenge the payload and return the
         * object's size in (even) words. */
        sword_t length = fixnum_value(((struct vector*)where)->length);
        scavenge(where + 2, length);
        return CEILING(length + 2, 2);

    kv_length = fixnum_value(where[1]);
    /*FSHOW((stderr,"/kv_length = %d\n", kv_length));*/

    /* Scavenge element 0, which may be a hash-table structure. */
    scavenge(where+2, 1);
    if (!is_lisp_pointer(where[2])) {
        /* This'll happen when REHASH clears the header of old-kv-vector
         * and fills it with zero, but some other thread simultaneously
         * sets the header in %%PUTHASH.
         */
        fprintf(stderr,
                "Warning: no pointer at %p in hash table: this indicates "
                "non-fatal corruption caused by concurrent access to a "
                "hash-table from multiple threads. Any accesses to "
                "hash-tables shared between threads should be protected "
                "by locks.\n", (void*)&where[2]);
        // We've scavenged three words.
        return 3;

    hash_table = (struct hash_table *)native_pointer(where[2]);
    /*FSHOW((stderr,"/hash_table = %x\n", hash_table));*/
    if (widetag_of(hash_table->header) != INSTANCE_HEADER_WIDETAG) {
        lose("hash table not instance (%x at %x)\n",
             hash_table->header,
             hash_table);

    /* Scavenge element 1, which should be some internal symbol that
     * the hash table code reserves for marking empty slots. */
    scavenge(where+3, 1);
    if (!is_lisp_pointer(where[3])) {
        lose("not empty-hash-table-slot symbol pointer: %x\n", where[3]);

    /* Scavenge hash table, which will fix the positions of the other
     * needed objects. */
    scavenge((lispobj *)hash_table,
             CEILING(sizeof(struct hash_table) / sizeof(lispobj), 2));

    /* Cross-check the kv_vector. */
    if (where != native_pointer(hash_table->table)) {
        lose("hash_table table!=this table %x\n", hash_table->table);

    if (hash_table->weakness == NIL) {
        scav_hash_table_entries(hash_table);
    } else {
        /* Delay scavenging of this table by pushing it onto
         * weak_hash_tables (if it's not there already) for the weak
         * object phase. */
        if (hash_table->next_weak_hash_table == NIL) {
            hash_table->next_weak_hash_table = (lispobj)weak_hash_tables;
            weak_hash_tables = hash_table;

    return (CEILING(kv_length + 2, 2));
/* Scavenge (but do not cull) every table queued on weak_hash_tables:
 * entries whose triggers are now known alive get their key/value
 * cells scavenged. Culling happens later in scan_weak_hash_tables. */
void
scav_weak_hash_tables (void)
    struct hash_table *table;

    /* Scavenge entries whose triggers are known to survive. */
    for (table = weak_hash_tables; table != NULL;
         table = (struct hash_table *)table->next_weak_hash_table) {
        scav_hash_table_entries(table);
/* Walk through the chain whose first element is *FIRST and remove
 * dead weak entries. PREV points at the chain cell to rewrite when an
 * entry is unlinked. */
static inline void
scan_weak_hash_table_chain (struct hash_table *hash_table, lispobj *prev,
                            lispobj *kv_vector, lispobj *index_vector,
                            lispobj *next_vector, lispobj *hash_vector,
                            lispobj empty_symbol, int (*alivep_test)(lispobj,lispobj))
    unsigned index = *prev;
    while (index) {
        unsigned next = next_vector[index];
        lispobj key = kv_vector[2 * index];
        lispobj value = kv_vector[2 * index + 1];
        gc_assert(key != empty_symbol);
        gc_assert(value != empty_symbol);
        if (!alivep_test(key, value)) {
            /* Dead entry: unlink it from the bucket chain, push its
             * slot onto the table's free list, clear the KV cells and
             * decrement the live-entry count. */
            unsigned count = fixnum_value(hash_table->number_entries);
            gc_assert(count > 0);
            *prev = next;
            hash_table->number_entries = make_fixnum(count - 1);
            next_vector[index] = fixnum_value(hash_table->next_free_kv);
            hash_table->next_free_kv = make_fixnum(index);
            kv_vector[2 * index] = empty_symbol;
            kv_vector[2 * index + 1] = empty_symbol;
            if (hash_vector)
                hash_vector[index] = MAGIC_HASH_VECTOR_VALUE;
        } else {
            prev = &next_vector[index];
        index = next;
/* Remove dead entries from one weak HASH_TABLE by scanning every
 * bucket chain. */
static void
scan_weak_hash_table (struct hash_table *hash_table)
    lispobj *kv_vector;
    lispobj *index_vector;
    uword_t length = 0; /* prevent warning */
    lispobj *next_vector;
    uword_t next_vector_length = 0; /* prevent warning */
    lispobj *hash_vector;
    lispobj empty_symbol;
    lispobj weakness = hash_table->weakness;
    uword_t i;

    kv_vector = get_array_data(hash_table->table,
                               SIMPLE_VECTOR_WIDETAG, NULL);
    index_vector = get_array_data(hash_table->index_vector,
                                  SIMPLE_ARRAY_WORD_WIDETAG, &length);
    next_vector = get_array_data(hash_table->next_vector,
                                 SIMPLE_ARRAY_WORD_WIDETAG,
                                 &next_vector_length);
    hash_vector = get_array_data(hash_table->hash_vector,
                                 SIMPLE_ARRAY_WORD_WIDETAG, NULL);
    empty_symbol = kv_vector[1];

    /* One chain per index-vector bucket. */
    for (i = 0; i < length; i++) {
        scan_weak_hash_table_chain(hash_table, &index_vector[i],
                                   kv_vector, index_vector, next_vector,
                                   hash_vector, empty_symbol,
                                   weak_hash_entry_alivep_fun(weakness));
/* Remove dead entries from weak hash tables. */
void
scan_weak_hash_tables (void)
    struct hash_table *table, *next;

    for (table = weak_hash_tables; table != NULL; table = next) {
        next = (struct hash_table *)table->next_weak_hash_table;
        /* Unlink each table so the chain is empty for the next GC. */
        table->next_weak_hash_table = NIL;
        scan_weak_hash_table(table);

    weak_hash_tables = NULL;
1260 * initialization
/* Fallback scavenge method for widetags with no real scavenger:
 * always fatal. */
static sword_t
scav_lose(lispobj *where, lispobj object)
    lose("no scavenge function for object %p (widetag 0x%x)\n",
         (uword_t)object,
         widetag_of(*where));

    return 0; /* bogus return value to satisfy static type checking */
/* Fallback transport method for widetags with no real transporter:
 * always fatal. */
static lispobj
trans_lose(lispobj object)
    lose("no transport function for object %p (widetag 0x%x)\n",
         (void*)object,
         widetag_of(*native_pointer(object)));
    return NIL; /* bogus return value to satisfy static type checking */
/* Fallback size method for widetags with no real sizer: always fatal. */
static sword_t
size_lose(lispobj *where)
    lose("no size function for object at %p (widetag 0x%x)\n",
         (void*)where,
         widetag_of(*where));
    return 1; /* bogus return value to satisfy static type checking */
1293 * initialization
1296 #include "genesis/gc-tables.h"
/* Search each heap space for the object enclosing POINTER, returning
 * its start address or NULL. Dynamic space is tried first as the most
 * likely hit. */
static lispobj *search_spaces(void *pointer)
    lispobj *start;
    if (((start = search_dynamic_space(pointer)) != NULL) ||
#ifdef LISP_FEATURE_IMMOBILE_SPACE
        ((start = search_immobile_space(pointer)) != NULL) ||
#endif
        ((start = search_static_space(pointer)) != NULL) ||
        ((start = search_read_only_space(pointer)) != NULL))
        return start;
    return NULL;
/* Find the code object for the given pc, or return NULL on
   failure. */
lispobj *
component_ptr_from_pc(lispobj *pc)
    lispobj *object = search_spaces(pc);

    /* Only a code-component counts; any other enclosing object means
     * PC does not point into compiled code. */
    if (object != NULL && widetag_of(*object) == CODE_HEADER_WIDETAG)
        return object;

    return NULL;
/* Scan an area looking for an object which encloses the given pointer.
 * Return the object start on success, or NULL on failure.
 * START/LIMIT delimit the area; objects are walked by size. */
lispobj *
gc_search_space3(void *pointer, lispobj *start, void *limit)
    if (pointer < (void*)start || pointer >= limit) return NULL;

    size_t count;
#if 0
    /* CAUTION: this code is _significantly_ slower than the production version
       due to the extra checks for forwarding. Only use it if debugging */
    for ( ; (void*)start < limit ; start += count) {
        lispobj *forwarded_start;
        if (forwarding_pointer_p(start))
            forwarded_start = native_pointer(forwarding_pointer_value(start));
        else
            forwarded_start = start;
        lispobj thing = *forwarded_start;
        count = is_cons_half(thing) ? 2 : sizetab[widetag_of(thing)](forwarded_start);
        /* Check whether the pointer is within this object. */
        if (pointer < (void*)(start+count)) return start;
#else
    for ( ; (void*)start < limit ; start += count) {
        lispobj thing = *start;
        count = is_cons_half(thing) ? 2 : sizetab[widetag_of(thing)](start);
        /* Check whether the pointer is within this object. */
        if (pointer < (void*)(start+count)) return start;
#endif
    return NULL;
/* Helper for valid_lisp_pointer_p (below) and
 * conservative_root_p (gencgc).
 *
 * pointer is the pointer to check validity of,
 * and start_addr is the address of the enclosing object.
 *
 * This is actually quite simple to check: because the heap state is assumed
 * consistent, and 'start_addr' is known good, having come from
 * gc_search_space(), only the 'pointer' argument is dubious.
 * So make 'start_addr' into a tagged pointer and see if that matches 'pointer'.
 * If it does, then 'pointer' is valid.
 */
properly_tagged_p_internal(lispobj pointer, lispobj *start_addr)
    // If a headerless object, confirm that 'pointer' is a list pointer.
    // Given the precondition that the heap is in a valid state,
    // it may be assumed that one check of is_cons_half() suffices;
    // we don't need to check the other half.
    lispobj header = *start_addr;
    if (is_cons_half(header))
        return make_lispobj(start_addr, LIST_POINTER_LOWTAG) == pointer;

    // Because this heap object was not deemed to be a cons,
    // it must be an object header. Don't need a check except when paranoid.
    gc_dcheck(other_immediate_lowtag_p(header));

    // The space of potential widetags has 64 elements, not 256,
    // because of the constant low 2 bits.
    int widetag = widetag_of(header);
    int lowtag = lowtag_for_widetag[widetag>>2];
    if (lowtag && make_lispobj(start_addr, lowtag) == pointer)
        return 1; // instant win

    if (widetag == CODE_HEADER_WIDETAG) {
        // Check for RETURN_PC_HEADER first since it's quicker.
        // Then consider the embedded simple-funs.
#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
        /* The all-architecture test below is good as far as it goes,
         * but an LRA object is similar to a FUN-POINTER: It is
         * embedded within a CODE-OBJECT pointed to by start_addr, and
         * cannot be found by simply walking the heap, therefore we
         * need to check for it. -- AB, 2010-Jun-04 */
        if (lowtag_of(pointer) == OTHER_POINTER_LOWTAG) {
            lispobj *potential_lra = native_pointer(pointer);
            if ((widetag_of(potential_lra[0]) == RETURN_PC_HEADER_WIDETAG) &&
                ((potential_lra - HeaderValue(potential_lra[0])) == start_addr)) {
                return 1; /* It's as good as we can verify. */
#endif
        // A function pointer is valid only if it names one of the
        // simple-funs embedded in this code component.
        if (lowtag_of(pointer) == FUN_POINTER_LOWTAG) {
            struct simple_fun *pfun =
                (struct simple_fun*)(pointer-FUN_POINTER_LOWTAG);
            for_each_simple_fun(i, function, (struct code*)start_addr, 0, {
                if (pfun == function) return 1;

    return 0; // no good
1420 /* META: Note the ambiguous word "validate" in the comment below.
1421 * This means "Decide whether <x> is valid".
1422 * But when you see os_validate() elsewhere, that doesn't mean to ask
1423 * whether something is valid, it says to *make* it valid.
1424 * I think it would be nice if we could avoid using the word in the
1425 * sense in which os_validate() uses it, which would entail renaming
1426 * a bunch of stuff, which is harder than just explaining why
1427 * the comments can be deceptive */
1429 /* Used by the debugger to validate possibly bogus pointers before
1430 * calling MAKE-LISP-OBJ on them.
1432 * FIXME: We would like to make this perfect, because if the debugger
 * constructs a reference to a bogus lisp object, and it ends up in a
1434 * location scavenged by the GC all hell breaks loose.
1436 * Whereas conservative_root_p has to be conservative
1437 * and return true for all valid pointers, this could actually be eager
1438 * and lie about a few pointers without bad results... but that should
1439 * be reflected in the name.
valid_lisp_pointer_p(lispobj pointer)
    /* Locate the enclosing object, then decide by checking POINTER's
     * tag against that object. Pointers outside all spaces are
     * invalid. */
    lispobj *start = search_spaces((void*)pointer);
    if (start != NULL)
        return properly_tagged_descriptor_p((void*)pointer, start);
    return 0;
/* Attempt a garbage collection via Lisp SUB-GC, taking care of the
 * fake-foreign-function-call and signal-mask bookkeeping around it.
 * Returns true when a GC actually occurred (SUB-GC returned T or 0),
 * false when it was inhibited (NIL). */
boolean
maybe_gc(os_context_t *context)
    lispobj gc_happened;
    struct thread *thread = arch_os_get_current_thread();
    boolean were_in_lisp = !foreign_function_call_active_p(thread);

    if (were_in_lisp) {
        fake_foreign_function_call(context);

    /* SUB-GC may return without GCing if *GC-INHIBIT* is set, in
     * which case we will be running with no gc trigger barrier
     * thing for a while. But it shouldn't be long until the end
     * of WITHOUT-GCING.
     *
     * FIXME: It would be good to protect the end of dynamic space for
     * CheneyGC and signal a storage condition from there.
     */

    /* Restore the signal mask from the interrupted context before
     * calling into Lisp if interrupts are enabled. Why not always?
     *
     * Suppose there is a WITHOUT-INTERRUPTS block far, far out. If an
     * interrupt hits while in SUB-GC, it is deferred and the
     * os_context_sigmask of that interrupt is set to block further
     * deferrable interrupts (until the first one is
     * handled). Unfortunately, that context refers to this place and
     * when we return from here the signals will not be blocked.
     *
     * A kludgy alternative is to propagate the sigmask change to the
     * outer context.
     */
#if !(defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_SAFEPOINT))
    check_gc_signals_unblocked_or_lose(os_context_sigmask_addr(context));
    unblock_gc_signals(0, 0);
#endif
    FSHOW((stderr, "/maybe_gc: calling SUB_GC\n"));
    /* FIXME: Nothing must go wrong during GC else we end up running
     * the debugger, error handlers, and user code in general in a
     * potentially unsafe place. Running out of the control stack or
     * the heap in SUB-GC are ways to lose. Of course, deferrables
     * cannot be unblocked because there may be a pending handler, or
     * we may even be in a WITHOUT-INTERRUPTS. */
    gc_happened = funcall0(StaticSymbolFunction(SUB_GC));
    FSHOW((stderr, "/maybe_gc: gc_happened=%s\n",
           (gc_happened == NIL)
           ? "NIL"
           : ((gc_happened == T)
              ? "T"
              : "0")));
    /* gc_happened can take three values: T, NIL, 0.
     *
     * T means that the thread managed to trigger a GC, and post-gc
     * must be called.
     *
     * NIL means that the thread is within without-gcing, and no GC
     * has occurred.
     *
     * Finally, 0 means that *a* GC has occurred, but it wasn't
     * triggered by this thread; success, but post-gc doesn't have
     * to be called.
     */
    if ((gc_happened == T) &&
        /* See if interrupts are enabled or it's possible to enable
         * them. POST-GC has a similar check, but we don't want to
         * unlock deferrables in that case and get a pending interrupt
         * here. */
        ((SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) ||
         (SymbolValue(ALLOW_WITH_INTERRUPTS,thread) != NIL))) {
#ifndef LISP_FEATURE_WIN32
        sigset_t *context_sigmask = os_context_sigmask_addr(context);
        if (!deferrables_blocked_p(context_sigmask)) {
            thread_sigmask(SIG_SETMASK, context_sigmask, 0);
#ifndef LISP_FEATURE_SB_SAFEPOINT
            check_gc_signals_unblocked_or_lose(0);
#endif
#endif
            FSHOW((stderr, "/maybe_gc: calling POST_GC\n"));
            funcall0(StaticSymbolFunction(POST_GC));
#ifndef LISP_FEATURE_WIN32
        } else {
            FSHOW((stderr, "/maybe_gc: punting on POST_GC due to blockage\n"));
#endif

    if (were_in_lisp) {
        undo_fake_foreign_function_call(context);
    } else {
        /* Otherwise done by undo_fake_foreign_function_call. And
           something later wants them to be blocked. What a nice
           interface.*/
        block_blockable_signals(0);

    FSHOW((stderr, "/maybe_gc: returning\n"));
    return (gc_happened != NIL);
/* Chunk size (4096 bytes) used by the stack scrubber below: it zeroes
 * and rechecks the stack in units of this many bytes. */
#define BYTES_ZERO_BEFORE_END (1<<12)
1552 /* There used to be a similar function called SCRUB-CONTROL-STACK in
1553 * Lisp and another called zero_stack() in cheneygc.c, but since it's
1554 * shorter to express in, and more often called from C, I keep only
1555 * the C one after fixing it. -- MG 2009-03-25 */
1557 /* Zero the unused portion of the control stack so that old objects
1558 * are not kept alive because of uninitialized stack variables.
1560 * "To summarize the problem, since not all allocated stack frame
1561 * slots are guaranteed to be written by the time you call an another
1562 * function or GC, there may be garbage pointers retained in your dead
1563 * stack locations. The stack scrubbing only affects the part of the
1564 * stack from the SP to the end of the allocated stack." - ram, on
1565 * cmucl-imp, Tue, 25 Sep 2001
1567 * So, as an (admittedly lame) workaround, from time to time we call
1568 * scrub-control-stack to zero out all the unused portion. This is
1569 * supposed to happen when the stack is mostly empty, so that we have
1570 * a chance of clearing more of it: callers are currently (2002.07.18)
1571 * REPL, SUB-GC and sig_stop_for_gc_handler. */
1573 /* Take care not to tread on the guard page and the hard guard page as
1574 * it would be unkind to sig_stop_for_gc_handler. Touching the return
1575 * guard page is not dangerous. For this to work the guard page must
1576 * be zeroed when protected. */
1578 /* FIXME: I think there is no guarantee that once
1579 * BYTES_ZERO_BEFORE_END bytes are zero the rest are also zero. This
1580 * may be what the "lame" adjective in the above comment is for. In
1581 * this case, exact gc may lose badly. */
/* Scrub the control stack of the current thread. */
void
scrub_control_stack()
    scrub_thread_control_stack(arch_os_get_current_thread());
/* Zero the unused portion of TH's control stack, taking care not to
 * touch the guard pages (see the commentary above). */
void
scrub_thread_control_stack(struct thread *th)
    os_vm_address_t guard_page_address = CONTROL_STACK_GUARD_PAGE(th);
    os_vm_address_t hard_guard_page_address = CONTROL_STACK_HARD_GUARD_PAGE(th);
#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
    /* On these targets scrubbing from C is a bad idea, so we punt to
     * a routine in $ARCH-assem.S. */
    extern void arch_scrub_control_stack(struct thread *, os_vm_address_t, os_vm_address_t);
    arch_scrub_control_stack(th, guard_page_address, hard_guard_page_address);
#else
    lispobj *sp = access_control_stack_pointer(th);
 scrub:
    /* Stop before either guard page (the normal guard page only while
     * it is still protected). */
    if ((((os_vm_address_t)sp < (hard_guard_page_address + os_vm_page_size)) &&
         ((os_vm_address_t)sp >= hard_guard_page_address)) ||
        (((os_vm_address_t)sp < (guard_page_address + os_vm_page_size)) &&
         ((os_vm_address_t)sp >= guard_page_address) &&
         (th->control_stack_guard_page_protected != NIL)))
        return;
#ifdef LISP_FEATURE_STACK_GROWS_DOWNWARD_NOT_UPWARD
    /* Zero one chunk, then keep going only while nonzero words are
     * still being found. */
    do {
        *sp = 0;
    } while (((uword_t)sp--) & (BYTES_ZERO_BEFORE_END - 1));
    if ((os_vm_address_t)sp < (hard_guard_page_address + os_vm_page_size))
        return;
    do {
        if (*sp)
            goto scrub;
    } while (((uword_t)sp--) & (BYTES_ZERO_BEFORE_END - 1));
#else
    do {
        *sp = 0;
    } while (((uword_t)++sp) & (BYTES_ZERO_BEFORE_END - 1));
    if ((os_vm_address_t)sp >= hard_guard_page_address)
        return;
    do {
        if (*sp)
            goto scrub;
    } while (((uword_t)++sp) & (BYTES_ZERO_BEFORE_END - 1));
#endif
#endif /* LISP_FEATURE_C_STACK_IS_CONTROL_STACK */
1631 #if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
/* Scavenge TH's control stack word by word (precise-stack targets
 * only; see the enclosing #if). */
void
scavenge_control_stack(struct thread *th)
    lispobj *object_ptr;

    /* In order to properly support dynamic-extent allocation of
     * non-CONS objects, the control stack requires special handling.
     * Rather than calling scavenge() directly, grovel over it fixing
     * broken hearts, scavenging pointers to oldspace, and pitching a
     * fit when encountering unboxed data. This prevents stray object
     * headers from causing the scavenger to blow past the end of the
     * stack (an error case checked in scavenge()). We don't worry
     * about treating unboxed words as boxed or vice versa, because
     * the compiler isn't allowed to store unboxed objects on the
     * control stack. -- AB, 2011-Dec-02 */

    for (object_ptr = th->control_stack_start;
         object_ptr < access_control_stack_pointer(th);
         object_ptr++) {

        lispobj object = *object_ptr;
#ifdef LISP_FEATURE_GENCGC
        if (forwarding_pointer_p(object_ptr))
            lose("unexpected forwarding pointer in scavenge_control_stack: %p, start=%p, end=%p\n",
                 object_ptr, th->control_stack_start, access_control_stack_pointer(th));
#endif
        if (is_lisp_pointer(object) && from_space_p(object)) {
            /* It currently points to old space. Check for a
             * forwarding pointer. */
            lispobj *ptr = native_pointer(object);
            if (forwarding_pointer_p(ptr)) {
                /* Yes, there's a forwarding pointer. */
                *object_ptr = LOW_WORD(forwarding_pointer_value(ptr));
            } else {
                /* Scavenge that pointer. */
                long n_words_scavenged =
                    (scavtab[widetag_of(object)])(object_ptr, object);
                gc_assert(n_words_scavenged == 1);
        } else if (scavtab[widetag_of(object)] == scav_lose) {
            lose("unboxed object in scavenge_control_stack: %p->%x, start=%p, end=%p\n",
                 object_ptr, object, th->control_stack_start, access_control_stack_pointer(th));
/* Scavenging Interrupt Contexts */

/* Context registers that hold boxed lisp objects and therefore must
 * be scavenged; the list is architecture-provided. */
static int boxed_registers[] = BOXED_REGISTERS;

/* The GC has a notion of an "interior pointer" register, an unboxed
 * register that typically contains a pointer to inside an object
 * referenced by another pointer. The most obvious of these is the
 * program counter, although many compiler backends define a "Lisp
 * Interior Pointer" register known to the runtime as reg_LIP, and
 * various CPU architectures have other registers that also partake of
 * the interior-pointer nature. As the code for pairing an interior
 * pointer value up with its "base" register, and fixing it up after
 * scavenging is complete is horribly repetitive, a few macros paper
 * over the monotony. --AB, 2010-Jul-14 */

/* These macros are only ever used over a lexical environment which
 * defines a pointer to an os_context_t called context, thus we don't
 * bother to pass that context in as a parameter. */

/* Define how to access a given interior pointer. */
#define ACCESS_INTERIOR_POINTER_pc \
    *os_context_pc_addr(context)
#define ACCESS_INTERIOR_POINTER_lip \
    *os_context_register_addr(context, reg_LIP)
#define ACCESS_INTERIOR_POINTER_lr \
    *os_context_lr_addr(context)
#define ACCESS_INTERIOR_POINTER_npc \
    *os_context_npc_addr(context)
#define ACCESS_INTERIOR_POINTER_ctr \
    *os_context_ctr_addr(context)

/* Per-pointer bookkeeping: the offset from the paired register's
 * untagged value, and which register was paired (-1 when none). */
#define INTERIOR_POINTER_VARS(name) \
    uword_t name##_offset; \
    int name##_register_pair

#define PAIR_INTERIOR_POINTER(name) \
    pair_interior_pointer(context, \
                          ACCESS_INTERIOR_POINTER_##name, \
                          &name##_offset, \
                          &name##_register_pair)

/* One complexity here is that if a paired register is not found for
 * an interior pointer, then that pointer does not get updated.
 * Originally, there was some commentary about using an index of -1
 * when calling os_context_register_addr() on SPARC referring to the
 * program counter, but the real reason is to allow an interior
 * pointer register to point to the runtime, read-only space, or
 * static space without problems. */
#define FIXUP_INTERIOR_POINTER(name) \
    do { \
        if (name##_register_pair >= 0) { \
            ACCESS_INTERIOR_POINTER_##name = \
                (*os_context_register_addr(context, \
                                           name##_register_pair) \
                 & ~LOWTAG_MASK) \
                + name##_offset; \
    } while (0)
/* Find the boxed register whose untagged value is closest below
 * POINTER, storing the pairing through SAVED_OFFSET and REGISTER_PAIR
 * (-1 when no suitable register exists). */
static void
pair_interior_pointer(os_context_t *context, uword_t pointer,
                      uword_t *saved_offset, int *register_pair)
    unsigned int i;

    /*
     * I (RLT) think this is trying to find the boxed register that is
     * closest to the LIP address, without going past it. Usually, it's
     * reg_CODE or reg_LRA. But sometimes, nothing can be found.
     */
    /* 0x7FFFFFFF on 32-bit platforms;
       0x7FFFFFFFFFFFFFFF on 64-bit platforms */
    *saved_offset = (((uword_t)1) << (N_WORD_BITS - 1)) - 1;
    *register_pair = -1;
    for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
        uword_t reg;
        uword_t offset;
        int index;

        index = boxed_registers[i];
        reg = *os_context_register_addr(context, index);

        /* An interior pointer is never relative to a non-pointer
         * register (an oversight in the original implementation).
         * The simplest argument for why this is true is to consider
         * the fixnum that happens by coincidence to be the word-index in
         * memory of the header for some object plus two. This
         * happenstance would cause the register containing the fixnum
         * to be selected as the register_pair if the interior pointer
         * is to anywhere after the first two words of the object.
         * The fixnum won't be changed during GC, but the object might
         * move, thus destroying the interior pointer. --AB,
         * 2010-Jul-14 */

        if (is_lisp_pointer(reg) &&
            ((reg & ~LOWTAG_MASK) <= pointer)) {
            offset = pointer - (reg & ~LOWTAG_MASK);
            if (offset < *saved_offset) {
                *saved_offset = offset;
                *register_pair = index;
/* Scavenge one interrupt context: pair each interior-pointer register
 * with a base register, scavenge all boxed registers, then repair the
 * interior pointers from the (possibly moved) bases. */
static void
scavenge_interrupt_context(os_context_t * context)
    unsigned int i;

    /* FIXME: The various #ifdef noise here is precisely that: noise.
     * Is it possible to fold it into the macrology so that we have
     * one set of #ifdefs and then INTERIOR_POINTER_VARS /et alia/
     * compile out for the registers that don't exist on a given
     * platform? */

    INTERIOR_POINTER_VARS(pc);
#ifdef reg_LIP
    INTERIOR_POINTER_VARS(lip);
#endif
#ifdef ARCH_HAS_LINK_REGISTER
    INTERIOR_POINTER_VARS(lr);
#endif
#ifdef ARCH_HAS_NPC_REGISTER
    INTERIOR_POINTER_VARS(npc);
#endif
#ifdef LISP_FEATURE_PPC
    INTERIOR_POINTER_VARS(ctr);
#endif

    PAIR_INTERIOR_POINTER(pc);
#ifdef reg_LIP
    PAIR_INTERIOR_POINTER(lip);
#endif
#ifdef ARCH_HAS_LINK_REGISTER
    PAIR_INTERIOR_POINTER(lr);
#endif
#ifdef ARCH_HAS_NPC_REGISTER
    PAIR_INTERIOR_POINTER(npc);
#endif
#ifdef LISP_FEATURE_PPC
    PAIR_INTERIOR_POINTER(ctr);
#endif

    /* Scavenge all boxed registers in the context. */
    for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
        int index;
        lispobj foo;

        index = boxed_registers[i];
        foo = *os_context_register_addr(context, index);
        scavenge(&foo, 1);
        *os_context_register_addr(context, index) = foo;

        /* this is unlikely to work as intended on bigendian
         * 64 bit platforms */

        scavenge((lispobj *) os_context_register_addr(context, index), 1);

    /* Now that the scavenging is done, repair the various interior
     * pointers. */
    FIXUP_INTERIOR_POINTER(pc);
#ifdef reg_LIP
    FIXUP_INTERIOR_POINTER(lip);
#endif
#ifdef ARCH_HAS_LINK_REGISTER
    FIXUP_INTERIOR_POINTER(lr);
#endif
#ifdef ARCH_HAS_NPC_REGISTER
    FIXUP_INTERIOR_POINTER(npc);
#endif
#ifdef LISP_FEATURE_PPC
    FIXUP_INTERIOR_POINTER(ctr);
#endif
1857 void
1858 scavenge_interrupt_contexts(struct thread *th)
1860 int i, index;
1861 os_context_t *context;
1863 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
1865 #if defined(DEBUG_PRINT_CONTEXT_INDEX)
1866 printf("Number of active contexts: %d\n", index);
1867 #endif
1869 for (i = 0; i < index; i++) {
1870 context = th->interrupt_contexts[i];
1871 scavenge_interrupt_context(context);
1874 #endif /* x86oid targets */
1876 void varint_unpacker_init(struct varint_unpacker* unpacker, lispobj integer)
1878 if (fixnump(integer)) {
1879 unpacker->word = fixnum_value(integer);
1880 unpacker->limit = N_WORD_BYTES;
1881 unpacker->data = (char*)&unpacker->word;
1882 } else {
1883 struct bignum* bignum = (struct bignum*)(integer - OTHER_POINTER_LOWTAG);
1884 unpacker->word = 0;
1885 unpacker->limit = HeaderValue(bignum->header) * N_WORD_BYTES;
1886 unpacker->data = (char*)bignum->digits;
1888 unpacker->index = 0;
1891 // Fetch the next varint from 'unpacker' into 'result'.
1892 // Because there is no length prefix on the number of varints encoded,
1893 // spurious trailing zeros might be observed. The data consumer can
1894 // circumvent that by storing a count as the first value in the series.
1895 // Return 1 for success, 0 for EOF.
1896 int varint_unpack(struct varint_unpacker* unpacker, int* result)
1898 if (unpacker->index >= unpacker->limit) return 0;
1899 int accumulator = 0;
1900 int shift = 0;
1901 while (1) {
1902 #ifdef LISP_FEATURE_LITTLE_ENDIAN
1903 int byte = unpacker->data[unpacker->index];
1904 #else
1905 // bignums are little-endian in word order,
1906 // but machine-native within each word.
1907 // We could pack bytes MSB-to-LSB in the bigdigits,
1908 // but that seems less intuitive on the Lisp side.
1909 int word_index = unpacker->index / N_WORD_BYTES;
1910 int byte_index = unpacker->index % N_WORD_BYTES;
1911 int byte = (((unsigned int*)unpacker->data)[word_index]
1912 >> (byte_index * 8)) & 0xFF;
1913 #endif
1914 ++unpacker->index;
1915 accumulator |= (byte & 0x7F) << shift;
1916 if (!(byte & 0x80)) break;
1917 gc_assert(unpacker->index < unpacker->limit);
1918 shift += 7;
1920 *result = accumulator;
1921 return 1;