1 /*
2 * GENerational Conservative Garbage Collector for SBCL
3 */
5 /*
6 * This software is part of the SBCL system. See the README file for
7 * more information.
9 * This software is derived from the CMU CL system, which was
10 * written at Carnegie Mellon University and released into the
11 * public domain. The software is in the public domain and is
12 * provided with absolutely no warranty. See the COPYING and CREDITS
13 * files for more information.
17 * For a review of garbage collection techniques (e.g. generational
18 * GC) and terminology (e.g. "scavenging") see Paul R. Wilson,
19 * "Uniprocessor Garbage Collection Techniques". As of 20000618, this
20 * had been accepted for _ACM Computing Surveys_ and was available
21 * as a PostScript preprint through
22 * <http://www.cs.utexas.edu/users/oops/papers.html>
23 * as
24 * <ftp://ftp.cs.utexas.edu/pub/garbage/bigsurv.ps>.
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <errno.h>
30 #include <string.h>
31 #include "sbcl.h"
32 #if defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD)
33 #include "pthreads_win32.h"
34 #else
35 #include <signal.h>
36 #endif
37 #include "runtime.h"
38 #include "os.h"
39 #include "interr.h"
40 #include "globals.h"
41 #include "interrupt.h"
42 #include "validate.h"
43 #include "lispregs.h"
44 #include "arch.h"
45 #include "gc.h"
46 #include "gc-internal.h"
47 #include "thread.h"
48 #include "pseudo-atomic.h"
49 #include "alloc.h"
50 #include "genesis/vector.h"
51 #include "genesis/weak-pointer.h"
52 #include "genesis/fdefn.h"
53 #include "genesis/simple-fun.h"
54 #include "save.h"
55 #include "genesis/hash-table.h"
56 #include "genesis/instance.h"
57 #include "genesis/layout.h"
58 #include "gencgc.h"
59 #if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
60 #include "genesis/cons.h"
61 #endif
62 #ifdef LISP_FEATURE_X86
63 #include "forwarding-ptr.h"
64 #endif
66 /* forward declarations */
67 page_index_t gc_find_freeish_pages(page_index_t *restart_page_ptr, sword_t nbytes,
68 int page_type_flag);
72 * GC parameters
75 /* As usually configured, generations 0-5 are normal collected generations,
76    6 is pseudo-static (its objects are never moved nor reclaimed),
77    and 7 is scratch space used when collecting a generation without promotion:
78    the generation being collected is moved to generation 7 and then back again.
80 enum {
81 SCRATCH_GENERATION = PSEUDO_STATIC_GENERATION+1,
82 NUM_GENERATIONS
85 /* Should we use page protection to help avoid the scavenging of pages
86 * that don't have pointers to younger generations? */
87 boolean enable_page_protection = 1;
89 /* Largest allocation seen since last GC. */
90 os_vm_size_t large_allocation = 0;
94 * debugging
97 /* the verbosity level. All non-error messages are disabled at level 0;
98 * and only a few rare messages are printed at level 1. */
99 #if QSHOW == 2
100 boolean gencgc_verbose = 1;
101 #else
102 boolean gencgc_verbose = 0;
103 #endif
105 /* FIXME: At some point enable the various error-checking things below
106 * and see what they say. */
108 /* We hunt for pointers to old-space, when GCing generations >= verify_gen.
109 * Set verify_gens to HIGHEST_NORMAL_GENERATION + 1 to disable this kind of
110 * check. */
111 generation_index_t verify_gens = HIGHEST_NORMAL_GENERATION + 1;
113 /* Should we do a pre-scan verify of generation 0 before it's GCed? */
114 boolean pre_verify_gen_0 = 0;
116 /* Should we print a note when code objects are found in the dynamic space
117 * during a heap verify? */
118 boolean verify_dynamic_code_check = 0;
120 #ifdef LISP_FEATURE_X86
121 /* Should we check code objects for fixup errors after they are transported? */
122 boolean check_code_fixups = 0;
123 #endif
125 /* Should we check that newly allocated regions are zero filled? */
126 boolean gencgc_zero_check = 0;
128 /* Should we check that the free space is zero filled? */
129 boolean gencgc_enable_verify_zero_fill = 0;
131 /* When loading a core, don't do a full scan of the memory for the
132 * memory region boundaries. (Set to true by coreparse.c if the core
133 * contained a pagetable entry).
135 boolean gencgc_partial_pickup = 0;
137 /* If defined, free pages are read-protected to ensure that nothing
138 * accesses them.
141 /* #define READ_PROTECT_FREE_PAGES */
145 * GC structures and variables
148 /* the total bytes allocated. These are seen by Lisp DYNAMIC-USAGE. */
149 os_vm_size_t bytes_allocated = 0;
150 os_vm_size_t auto_gc_trigger = 0;
152 /* the source and destination generations. These are set before a GC starts
153 * scavenging. */
154 generation_index_t from_space;
155 generation_index_t new_space;
157 /* Set to 1 when in GC */
158 boolean gc_active_p = 0;
160 /* Should the GC be conservative about the stack? If false (only right before
161  * saving a core), don't scan the stack or mark pages dont_move. */
162 static boolean conservative_stack = 1;
164 /* An array of page structures is allocated on gc initialization.
165 * This helps to quickly map between an address and its page structure.
166 * page_table_pages is set from the size of the dynamic space. */
167 page_index_t page_table_pages;
168 struct page *page_table;
170 in_use_marker_t *page_table_pinned_dwords;
171 size_t pins_map_size_in_bytes;
173 /* In GC cards that have conservative pointers to them, should we wipe out
174  * dwords in there that are not used, so that they do not act as false
175  * roots to other things in the heap from then on? This is a new feature,
176  * but in testing it is reliable and causes no noticeable slowdown. */
177 int do_wipe_p = 1;
179 static inline boolean page_allocated_p(page_index_t page) {
180 return (page_table[page].allocated != FREE_PAGE_FLAG);
183 static inline boolean page_no_region_p(page_index_t page) {
184 return !(page_table[page].allocated & OPEN_REGION_PAGE_FLAG);
187 static inline boolean page_allocated_no_region_p(page_index_t page) {
188 return ((page_table[page].allocated & (UNBOXED_PAGE_FLAG | BOXED_PAGE_FLAG))
189 && page_no_region_p(page));
192 static inline boolean page_free_p(page_index_t page) {
193 return (page_table[page].allocated == FREE_PAGE_FLAG);
196 static inline boolean page_boxed_p(page_index_t page) {
197 return (page_table[page].allocated & BOXED_PAGE_FLAG);
200 static inline boolean page_boxed_no_region_p(page_index_t page) {
201 return page_boxed_p(page) && page_no_region_p(page);
204 static inline boolean page_unboxed_p(page_index_t page) {
205 /* Both flags set == boxed code page */
206 return ((page_table[page].allocated & UNBOXED_PAGE_FLAG)
207 && !page_boxed_p(page));
210 static inline boolean protect_page_p(page_index_t page, generation_index_t generation) {
211 return (page_boxed_no_region_p(page)
212 && (page_table[page].bytes_used != 0)
213 && !page_table[page].dont_move
214 && (page_table[page].gen == generation));
217 /* To map addresses to page structures the address of the first page
218 * is needed. */
219 void *heap_base = NULL;
221 /* Calculate the start address for the given page number. */
222 inline void *
223 page_address(page_index_t page_num)
225 return (heap_base + (page_num * GENCGC_CARD_BYTES));
228 /* Calculate the address where the allocation region associated with
229 * the page starts. */
230 static inline void *
231 page_scan_start(page_index_t page_index)
233 return page_address(page_index)-page_table[page_index].scan_start_offset;
236 /* True if the page starts a contiguous block. */
237 static inline boolean
238 page_starts_contiguous_block_p(page_index_t page_index)
240 return page_table[page_index].scan_start_offset == 0;
243 /* True if the page is the last page in a contiguous block. */
244 static inline boolean
245 page_ends_contiguous_block_p(page_index_t page_index, generation_index_t gen)
247 return (/* page doesn't fill block */
248 (page_table[page_index].bytes_used < GENCGC_CARD_BYTES)
249 /* page is last allocated page */
250 || ((page_index + 1) >= last_free_page)
251 /* next page free */
252 || page_free_p(page_index + 1)
253 /* next page contains no data */
254 || (page_table[page_index + 1].bytes_used == 0)
255 /* next page is in different generation */
256 || (page_table[page_index + 1].gen != gen)
257 /* next page starts its own contiguous block */
258 || (page_starts_contiguous_block_p(page_index + 1)));
261 /// External function for calling from Lisp.
262 page_index_t ext_find_page_index(void *addr) { return find_page_index(addr); }
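/* The wrapper above calls find_page_index(), which is declared elsewhere in
 * the runtime rather than in this file.  A minimal sketch of the mapping it
 * performs (the inverse of page_address() above) is shown here; the helper
 * name is illustrative only and not one the runtime defines. */
static inline page_index_t
example_find_page_index(void *addr)
{
    if (addr >= heap_base) {
        page_index_t index = ((pointer_sized_uint_t)addr -
                              (pointer_sized_uint_t)heap_base) / GENCGC_CARD_BYTES;
        if (index < page_table_pages)
            return index;
    }
    return -1;
}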
264 static os_vm_size_t
265 npage_bytes(page_index_t npages)
267 gc_assert(npages>=0);
268 return ((os_vm_size_t)npages)*GENCGC_CARD_BYTES;
271 /* Check that X is a higher address than Y and return offset from Y to
272 * X in bytes. */
273 static inline os_vm_size_t
274 void_diff(void *x, void *y)
276 gc_assert(x >= y);
277 return (pointer_sized_uint_t)x - (pointer_sized_uint_t)y;
280 /* a structure to hold the state of a generation
282 * CAUTION: If you modify this, make sure to touch up the alien
283  * definition in src/code/gc.lisp accordingly. ...or better yet,
284 * deal with the FIXME there...
286 struct generation {
288 /* the first page that gc_alloc() checks on its next call */
289 page_index_t alloc_start_page;
291 /* the first page that gc_alloc_unboxed() checks on its next call */
292 page_index_t alloc_unboxed_start_page;
294 /* the first page that gc_alloc_large (boxed) considers on its next
295 * call. (Although it always allocates after the boxed_region.) */
296 page_index_t alloc_large_start_page;
298 /* the first page that gc_alloc_large (unboxed) considers on its
299 * next call. (Although it always allocates after the
300 * current_unboxed_region.) */
301 page_index_t alloc_large_unboxed_start_page;
303 /* the bytes allocated to this generation */
304 os_vm_size_t bytes_allocated;
306 /* the number of bytes at which to trigger a GC */
307 os_vm_size_t gc_trigger;
309 /* to calculate a new level for gc_trigger */
310 os_vm_size_t bytes_consed_between_gc;
312 /* the number of GCs since the last raise */
313 int num_gc;
315 /* the number of GCs to run on the generations before raising objects to the
316 * next generation */
317 int number_of_gcs_before_promotion;
319 /* the cumulative sum of the bytes allocated to this generation. It is
320  * cleared after a GC on this generation, and updated before new
321 * objects are added from a GC of a younger generation. Dividing by
322 * the bytes_allocated will give the average age of the memory in
323 * this generation since its last GC. */
324 os_vm_size_t cum_sum_bytes_allocated;
326 /* a minimum average memory age before a GC will occur helps
327 * prevent a GC when a large number of new live objects have been
328 * added, in which case a GC could be a waste of time */
329 double minimum_age_before_gc;
332 /* an array of generation structures. There needs to be one more
333 * generation structure than actual generations as the oldest
334 * generation is temporarily raised then lowered. */
335 struct generation generations[NUM_GENERATIONS];
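/* A minimal sketch, not a function defined in this file, of how the fields
 * above are typically combined when a generation's trigger is reset after a
 * collection: the next trigger is the current occupancy plus the configured
 * bytes_consed_between_gc allowance.  The helper name is illustrative. */
static inline void
example_reset_gc_trigger(generation_index_t gen)
{
    generations[gen].gc_trigger =
        generations[gen].bytes_allocated
        + generations[gen].bytes_consed_between_gc;
}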
337 /* the oldest generation that will currently be GCed by default.
338 * Valid values are: 0, 1, ... HIGHEST_NORMAL_GENERATION
340 * The default of HIGHEST_NORMAL_GENERATION enables GC on all generations.
342 * Setting this to 0 effectively disables the generational nature of
343 * the GC. In some applications generational GC may not be useful
344 * because there are no long-lived objects.
346 * An intermediate value could be handy after moving long-lived data
347 * into an older generation so an unnecessary GC of this long-lived
348 * data can be avoided. */
349 generation_index_t gencgc_oldest_gen_to_gc = HIGHEST_NORMAL_GENERATION;
351 /* META: Is nobody aside from me bothered by this especially misleading
352 * use of the word "last"? It could mean either "ultimate" or "prior",
353 * but in fact means neither. It is the *FIRST* page that should be grabbed
354 * for more space, so it is min free page, or 1+ the max used page. */
355 /* The maximum free page in the heap is maintained and used to update
356 * ALLOCATION_POINTER which is used by the room function to limit its
357 * search of the heap. XX Gencgc obviously needs to be better
358 * integrated with the Lisp code. */
360 page_index_t last_free_page;
362 #ifdef LISP_FEATURE_SB_THREAD
363 /* This lock is to prevent multiple threads from simultaneously
364 * allocating new regions which overlap each other. Note that the
365 * majority of GC is single-threaded, but alloc() may be called from
366 * >1 thread at a time and must be thread-safe. This lock must be
367 * seized before all accesses to generations[] or to parts of
368 * page_table[] that other threads may want to see */
369 static pthread_mutex_t free_pages_lock = PTHREAD_MUTEX_INITIALIZER;
370 /* This lock is used to protect non-thread-local allocation. */
371 static pthread_mutex_t allocation_lock = PTHREAD_MUTEX_INITIALIZER;
372 #endif
374 extern os_vm_size_t gencgc_release_granularity;
375 os_vm_size_t gencgc_release_granularity = GENCGC_RELEASE_GRANULARITY;
377 extern os_vm_size_t gencgc_alloc_granularity;
378 os_vm_size_t gencgc_alloc_granularity = GENCGC_ALLOC_GRANULARITY;
382 * miscellaneous heap functions
385 /* Count the number of pages which are write-protected within the
386 * given generation. */
387 static page_index_t
388 count_write_protect_generation_pages(generation_index_t generation)
390 page_index_t i, count = 0;
392 for (i = 0; i < last_free_page; i++)
393 if (page_allocated_p(i)
394 && (page_table[i].gen == generation)
395 && (page_table[i].write_protected == 1))
396 count++;
397 return count;
400 /* Count the number of pages within the given generation. */
401 static page_index_t
402 count_generation_pages(generation_index_t generation)
404 page_index_t i;
405 page_index_t count = 0;
407 for (i = 0; i < last_free_page; i++)
408 if (page_allocated_p(i)
409 && (page_table[i].gen == generation))
410 count++;
411 return count;
414 #if QSHOW
415 static page_index_t
416 count_dont_move_pages(void)
418 page_index_t i;
419 page_index_t count = 0;
420 for (i = 0; i < last_free_page; i++) {
421 if (page_allocated_p(i)
422 && (page_table[i].dont_move != 0)) {
423 ++count;
426 return count;
428 #endif /* QSHOW */
430 /* Work through the pages and add up the number of bytes used for the
431 * given generation. */
432 static __attribute__((unused)) os_vm_size_t
433 count_generation_bytes_allocated (generation_index_t gen)
435 page_index_t i;
436 os_vm_size_t result = 0;
437 for (i = 0; i < last_free_page; i++) {
438 if (page_allocated_p(i)
439 && (page_table[i].gen == gen))
440 result += page_table[i].bytes_used;
442 return result;
445 /* Return the average age of the memory in a generation. */
446 extern double
447 generation_average_age(generation_index_t gen)
449 if (generations[gen].bytes_allocated == 0)
450 return 0.0;
452 return
453 ((double)generations[gen].cum_sum_bytes_allocated)
454 / ((double)generations[gen].bytes_allocated);
457 #ifdef LISP_FEATURE_X86
458 extern void fpu_save(void *);
459 extern void fpu_restore(void *);
460 #endif
462 extern void
463 write_generation_stats(FILE *file)
465 generation_index_t i;
467 #ifdef LISP_FEATURE_X86
468 int fpu_state[27];
470 /* Can end up here after calling alloc_tramp which doesn't prepare
471 * the x87 state, and the C ABI uses a different mode */
472 fpu_save(fpu_state);
473 #endif
475 /* Print the heap stats. */
476 fprintf(file,
477 " Gen StaPg UbSta LaSta LUbSt Boxed Unboxed LB LUB !move Alloc Waste Trig WP GCs Mem-age\n");
479 for (i = 0; i < SCRATCH_GENERATION; i++) {
480 page_index_t j;
481 page_index_t boxed_cnt = 0;
482 page_index_t unboxed_cnt = 0;
483 page_index_t large_boxed_cnt = 0;
484 page_index_t large_unboxed_cnt = 0;
485 page_index_t pinned_cnt=0;
487 for (j = 0; j < last_free_page; j++)
488 if (page_table[j].gen == i) {
490 /* Count the number of boxed pages within the given
491 * generation. */
492 if (page_boxed_p(j)) {
493 if (page_table[j].large_object)
494 large_boxed_cnt++;
495 else
496 boxed_cnt++;
498 if(page_table[j].dont_move) pinned_cnt++;
499 /* Count the number of unboxed pages within the given
500 * generation. */
501 if (page_unboxed_p(j)) {
502 if (page_table[j].large_object)
503 large_unboxed_cnt++;
504 else
505 unboxed_cnt++;
509 gc_assert(generations[i].bytes_allocated
510 == count_generation_bytes_allocated(i));
511 fprintf(file,
512 " %1d: %5ld %5ld %5ld %5ld",
514 (long)generations[i].alloc_start_page,
515 (long)generations[i].alloc_unboxed_start_page,
516 (long)generations[i].alloc_large_start_page,
517 (long)generations[i].alloc_large_unboxed_start_page);
518 fprintf(file,
519 " %5"PAGE_INDEX_FMT" %5"PAGE_INDEX_FMT" %5"PAGE_INDEX_FMT
520 " %5"PAGE_INDEX_FMT" %5"PAGE_INDEX_FMT,
521 boxed_cnt, unboxed_cnt, large_boxed_cnt,
522 large_unboxed_cnt, pinned_cnt);
523 fprintf(file,
524 " %8"OS_VM_SIZE_FMT
525 " %5"OS_VM_SIZE_FMT
526 " %8"OS_VM_SIZE_FMT
527 " %4"PAGE_INDEX_FMT" %3d %7.4f\n",
528 generations[i].bytes_allocated,
529 (npage_bytes(count_generation_pages(i)) - generations[i].bytes_allocated),
530 generations[i].gc_trigger,
531 count_write_protect_generation_pages(i),
532 generations[i].num_gc,
533 generation_average_age(i));
535 fprintf(file," Total bytes allocated = %"OS_VM_SIZE_FMT"\n", bytes_allocated);
536 fprintf(file," Dynamic-space-size bytes = %"OS_VM_SIZE_FMT"\n", dynamic_space_size);
538 #ifdef LISP_FEATURE_X86
539 fpu_restore(fpu_state);
540 #endif
543 extern void
544 write_heap_exhaustion_report(FILE *file, long available, long requested,
545 struct thread *thread)
547 fprintf(file,
548 "Heap exhausted during %s: %ld bytes available, %ld requested.\n",
549 gc_active_p ? "garbage collection" : "allocation",
550 available,
551 requested);
552 write_generation_stats(file);
553 fprintf(file, "GC control variables:\n");
554 fprintf(file, " *GC-INHIBIT* = %s\n *GC-PENDING* = %s\n",
555 SymbolValue(GC_INHIBIT,thread)==NIL ? "false" : "true",
556 (SymbolValue(GC_PENDING, thread) == T) ?
557 "true" : ((SymbolValue(GC_PENDING, thread) == NIL) ?
558 "false" : "in progress"));
559 #ifdef LISP_FEATURE_SB_THREAD
560 fprintf(file, " *STOP-FOR-GC-PENDING* = %s\n",
561 SymbolValue(STOP_FOR_GC_PENDING,thread)==NIL ? "false" : "true");
562 #endif
565 extern void
566 print_generation_stats(void)
568 write_generation_stats(stderr);
571 extern char* gc_logfile;
572 char * gc_logfile = NULL;
574 extern void
575 log_generation_stats(char *logfile, char *header)
577 if (logfile) {
578 FILE * log = fopen(logfile, "a");
579 if (log) {
580 fprintf(log, "%s\n", header);
581 write_generation_stats(log);
582 fclose(log);
583 } else {
584 fprintf(stderr, "Could not open gc logfile: %s\n", logfile);
585 fflush(stderr);
590 extern void
591 report_heap_exhaustion(long available, long requested, struct thread *th)
593 if (gc_logfile) {
594 FILE * log = fopen(gc_logfile, "a");
595 if (log) {
596 write_heap_exhaustion_report(log, available, requested, th);
597 fclose(log);
598 } else {
599 fprintf(stderr, "Could not open gc logfile: %s\n", gc_logfile);
600 fflush(stderr);
603 /* Always to stderr as well. */
604 write_heap_exhaustion_report(stderr, available, requested, th);
608 #if defined(LISP_FEATURE_X86)
609 void fast_bzero(void*, size_t); /* in <arch>-assem.S */
610 #endif
612 /* Zero the pages from START to END (inclusive), but use mmap/munmap instead
613  * of zeroing them ourselves, i.e. in practice give the memory back to the
614 * OS. Generally done after a large GC.
616 void zero_pages_with_mmap(page_index_t start, page_index_t end) {
617 page_index_t i;
618 void *addr = page_address(start), *new_addr;
619 os_vm_size_t length = npage_bytes(1+end-start);
621 if (start > end)
622 return;
624 gc_assert(length >= gencgc_release_granularity);
625 gc_assert((length % gencgc_release_granularity) == 0);
627 #ifdef LISP_FEATURE_LINUX
628 extern os_vm_address_t anon_dynamic_space_start;
629 // We use MADV_DONTNEED only on Linux due to differing semantics from BSD.
630 // Linux treats it as a demand that the memory be 0-filled, or refreshed
631 // from a file that backs the range. BSD takes it as a hint that you don't
632 // care if the memory has to be brought in from swap when next accessed,
633 // i.e. it's not a request to make a user-visible alteration to memory.
634 // So in theory this can bring a page in from the core file, if we happen
635 // to hit a page that resides in the portion of memory mapped by coreparse.
636 // In practice this should not happen because objects from a core file can't
637 // become garbage. Except in save-lisp-and-die they can, and we must be
638 // cautious not to resurrect bytes that originally came from the file.
639 if ((os_vm_address_t)addr >= anon_dynamic_space_start) {
640 if (madvise(addr, length, MADV_DONTNEED) != 0)
641 lose("madvise failed\n");
642 } else
643 #endif
645 os_invalidate(addr, length);
646 new_addr = os_validate(addr, length);
647 if (new_addr == NULL || new_addr != addr) {
648 lose("remap_free_pages: page moved, 0x%08x ==> 0x%08x",
649 start, new_addr);
653 for (i = start; i <= end; i++) {
654 page_table[i].need_to_zero = 0;
658 /* Zero the pages from START to END (inclusive). Generally done just after
659 * a new region has been allocated.
661 static void
662 zero_pages(page_index_t start, page_index_t end) {
663 if (start > end)
664 return;
666 #if defined(LISP_FEATURE_X86)
667 fast_bzero(page_address(start), npage_bytes(1+end-start));
668 #else
669 bzero(page_address(start), npage_bytes(1+end-start));
670 #endif
674 static void
675 zero_and_mark_pages(page_index_t start, page_index_t end) {
676 page_index_t i;
678 zero_pages(start, end);
679 for (i = start; i <= end; i++)
680 page_table[i].need_to_zero = 0;
683 /* Zero the pages from START to END (inclusive), except for those
684  * pages that are known to be already zeroed. Mark all pages in the
685  * range as non-zeroed.
687 static void
688 zero_dirty_pages(page_index_t start, page_index_t end) {
689 page_index_t i, j;
691 for (i = start; i <= end; i++) {
692 if (!page_table[i].need_to_zero) continue;
693 for (j = i+1; (j <= end) && (page_table[j].need_to_zero); j++);
694 zero_pages(i, j-1);
695 i = j;
698 for (i = start; i <= end; i++) {
699 page_table[i].need_to_zero = 1;
705 * To support quick and inline allocation, regions of memory can be
706 * allocated and then allocated from with just a free pointer and a
707 * check against an end address.
709 * Since objects can be allocated to spaces with different properties
710 * e.g. boxed/unboxed, generation, ages; there may need to be many
711 * allocation regions.
713 * Each allocation region may start within a partly used page. Many
714 * features of memory use are noted on a page wise basis, e.g. the
715 * generation; so if a region starts within an existing allocated page
716 * it must be consistent with this page.
718 * During the scavenging of the newspace, objects will be transported
719 * into an allocation region, and pointers updated to point to this
720 * allocation region. It is possible that these pointers will be
721 * scavenged again before the allocation region is closed, e.g. due to
722  * trans_list, which jumps all over the place to clean up the list. It
723 * is important to be able to determine properties of all objects
724  * pointed to when scavenging, e.g. to detect pointers to the oldspace.
725 * Thus it's important that the allocation regions have the correct
726 * properties set when allocated, and not just set when closed. The
727 * region allocation routines return regions with the specified
728 * properties, and grab all the pages, setting their properties
729 * appropriately, except that the amount used is not known.
731 * These regions are used to support quicker allocation using just a
732 * free pointer. The actual space used by the region is not reflected
733 * in the pages tables until it is closed. It can't be scavenged until
734 * closed.
736  * When finished with the region it should be closed, which will
737  * update the page tables for the actual space used, returning any unused
738  * space. Further, the closed region may be noted in the new areas, which is
739  * necessary when scavenging the newspace.
741 * Large objects may be allocated directly without an allocation
742 * region, the page tables are updated immediately.
744 * Unboxed objects don't contain pointers to other objects and so
745 * don't need scavenging. Further they can't contain pointers to
746 * younger generations so WP is not needed. By allocating pages to
747 * unboxed objects the whole page never needs scavenging or
748 * write-protecting. */
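/* A minimal sketch of the inline allocation fast path described above, using
 * only the alloc_region fields referenced elsewhere in this file
 * (free_pointer, end_addr).  The helper name is illustrative; the real code
 * path is gc_alloc_with_region() further down plus the inline allocator on
 * the Lisp side. */
static inline void *
example_region_bump_alloc(struct alloc_region *region, sword_t nbytes)
{
    void *new_free_pointer = region->free_pointer + nbytes;
    if (new_free_pointer <= region->end_addr) {
        /* Fits: hand out the old free pointer and bump it. */
        void *new_obj = region->free_pointer;
        region->free_pointer = new_free_pointer;
        return new_obj;
    }
    /* Region exhausted: the caller must close this region and open a new one. */
    return NULL;
}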
750 /* We are only using two regions at present. Both are for the current
751 * newspace generation. */
752 struct alloc_region boxed_region;
753 struct alloc_region unboxed_region;
755 /* The generation currently being allocated to. */
756 static generation_index_t gc_alloc_generation;
758 static inline page_index_t
759 generation_alloc_start_page(generation_index_t generation, int page_type_flag, int large)
761 if (large) {
762 if (UNBOXED_PAGE_FLAG == page_type_flag) {
763 return generations[generation].alloc_large_unboxed_start_page;
764 } else if (BOXED_PAGE_FLAG & page_type_flag) {
765 /* Both code and data. */
766 return generations[generation].alloc_large_start_page;
767 } else {
768 lose("bad page type flag: %d", page_type_flag);
770 } else {
771 if (UNBOXED_PAGE_FLAG == page_type_flag) {
772 return generations[generation].alloc_unboxed_start_page;
773 } else if (BOXED_PAGE_FLAG & page_type_flag) {
774 /* Both code and data. */
775 return generations[generation].alloc_start_page;
776 } else {
777 lose("bad page_type_flag: %d", page_type_flag);
782 static inline void
783 set_generation_alloc_start_page(generation_index_t generation, int page_type_flag, int large,
784 page_index_t page)
786 if (large) {
787 if (UNBOXED_PAGE_FLAG == page_type_flag) {
788 generations[generation].alloc_large_unboxed_start_page = page;
789 } else if (BOXED_PAGE_FLAG & page_type_flag) {
790 /* Both code and data. */
791 generations[generation].alloc_large_start_page = page;
792 } else {
793 lose("bad page type flag: %d", page_type_flag);
795 } else {
796 if (UNBOXED_PAGE_FLAG == page_type_flag) {
797 generations[generation].alloc_unboxed_start_page = page;
798 } else if (BOXED_PAGE_FLAG & page_type_flag) {
799 /* Both code and data. */
800 generations[generation].alloc_start_page = page;
801 } else {
802 lose("bad page type flag: %d", page_type_flag);
807 const int n_dwords_in_card = GENCGC_CARD_BYTES / N_WORD_BYTES / 2;
808 in_use_marker_t *
809 pinned_dwords(page_index_t page)
811 if (page_table[page].has_pin_map)
812 return &page_table_pinned_dwords[page * (n_dwords_in_card/N_WORD_BITS)];
813 return NULL;
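/* A hedged sketch of how a single dword's pin bit could be tested, assuming
 * the natural packing implied above (n_dwords_in_card bits per page, stored
 * N_WORD_BITS to a word).  The exact bit ordering is an assumption, and the
 * helper name is illustrative rather than part of the runtime. */
static inline boolean
example_dword_pinned_p(page_index_t page, int dword_index)
{
    in_use_marker_t *marks = pinned_dwords(page);
    if (!marks)
        return 0;
    return (marks[dword_index / N_WORD_BITS] >> (dword_index % N_WORD_BITS)) & 1;
}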
816 /* Find a new region with room for at least the given number of bytes.
818  * It starts looking at the current generation's alloc_start_page, so it
819  * may pick up from the previous region if there is enough space. This
820 * keeps the allocation contiguous when scavenging the newspace.
822 * The alloc_region should have been closed by a call to
823 * gc_alloc_update_page_tables(), and will thus be in an empty state.
825 * To assist the scavenging functions write-protected pages are not
826 * used. Free pages should not be write-protected.
828 * It is critical to the conservative GC that the start of regions be
829 * known. To help achieve this only small regions are allocated at a
830 * time.
832 * During scavenging, pointers may be found to within the current
833 * region and the page generation must be set so that pointers to the
834 * from space can be recognized. Therefore the generation of pages in
835  * the region is set to gc_alloc_generation. To prevent another
836 * allocation call using the same pages, all the pages in the region
837 * are allocated, although they will initially be empty.
839 static void
840 gc_alloc_new_region(sword_t nbytes, int page_type_flag, struct alloc_region *alloc_region)
842 page_index_t first_page;
843 page_index_t last_page;
844 os_vm_size_t bytes_found;
845 page_index_t i;
846 int ret;
849 FSHOW((stderr,
850 "/alloc_new_region for %d bytes from gen %d\n",
851 nbytes, gc_alloc_generation));
854 /* Check that the region is in a reset state. */
855 gc_assert((alloc_region->first_page == 0)
856 && (alloc_region->last_page == -1)
857 && (alloc_region->free_pointer == alloc_region->end_addr));
858 ret = thread_mutex_lock(&free_pages_lock);
859 gc_assert(ret == 0);
860 first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0);
861 last_page=gc_find_freeish_pages(&first_page, nbytes, page_type_flag);
862 bytes_found=(GENCGC_CARD_BYTES - page_table[first_page].bytes_used)
863 + npage_bytes(last_page-first_page);
865 /* Set up the alloc_region. */
866 alloc_region->first_page = first_page;
867 alloc_region->last_page = last_page;
868 alloc_region->start_addr = page_table[first_page].bytes_used
869 + page_address(first_page);
870 alloc_region->free_pointer = alloc_region->start_addr;
871 alloc_region->end_addr = alloc_region->start_addr + bytes_found;
873 /* Set up the pages. */
875 /* The first page may have already been in use. */
876 if (page_table[first_page].bytes_used == 0) {
877 page_table[first_page].allocated = page_type_flag;
878 page_table[first_page].gen = gc_alloc_generation;
879 page_table[first_page].large_object = 0;
880 page_table[first_page].scan_start_offset = 0;
881 // wiping should have free()ed and :=NULL
882 gc_assert(pinned_dwords(first_page) == NULL);
885 gc_assert(page_table[first_page].allocated == page_type_flag);
886 page_table[first_page].allocated |= OPEN_REGION_PAGE_FLAG;
888 gc_assert(page_table[first_page].gen == gc_alloc_generation);
889 gc_assert(page_table[first_page].large_object == 0);
891 for (i = first_page+1; i <= last_page; i++) {
892 page_table[i].allocated = page_type_flag;
893 page_table[i].gen = gc_alloc_generation;
894 page_table[i].large_object = 0;
895 /* This may not be necessary for unboxed regions (think it was
896 * broken before!) */
897 page_table[i].scan_start_offset =
898 void_diff(page_address(i),alloc_region->start_addr);
899 page_table[i].allocated |= OPEN_REGION_PAGE_FLAG ;
901 /* Bump up last_free_page. */
902 if (last_page+1 > last_free_page) {
903 last_free_page = last_page+1;
904 /* do we only want to call this on special occasions? like for
905 * boxed_region? */
906 set_alloc_pointer((lispobj)page_address(last_free_page));
908 ret = thread_mutex_unlock(&free_pages_lock);
909 gc_assert(ret == 0);
911 #ifdef READ_PROTECT_FREE_PAGES
912 os_protect(page_address(first_page),
913 npage_bytes(1+last_page-first_page),
914 OS_VM_PROT_ALL);
915 #endif
917 /* If the first page was only partial, don't check whether it's
918 * zeroed (it won't be) and don't zero it (since the parts that
919 * we're interested in are guaranteed to be zeroed).
921 if (page_table[first_page].bytes_used) {
922 first_page++;
925 zero_dirty_pages(first_page, last_page);
927 /* we can do this after releasing free_pages_lock */
928 if (gencgc_zero_check) {
929 word_t *p;
930 for (p = (word_t *)alloc_region->start_addr;
931 p < (word_t *)alloc_region->end_addr; p++) {
932 if (*p != 0) {
933 lose("The new region is not zero at %p (start=%p, end=%p).\n",
934 p, alloc_region->start_addr, alloc_region->end_addr);
940 /* If the record_new_objects flag is 2 then all new regions created
941 * are recorded.
943  * If it's 1 then it is only recorded if the first page of the
944  * current region is <= new_areas_ignore_page. This helps avoid
945  * unnecessary recording when doing a full scavenge pass.
947 * The new_object structure holds the page, byte offset, and size of
948 * new regions of objects. Each new area is placed in the array of
949  * these structures pointed to by new_areas. new_areas_index holds the
950 * offset into new_areas.
952 * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
953 * later code must detect this and handle it, probably by doing a full
954 * scavenge of a generation. */
955 #define NUM_NEW_AREAS 512
956 static int record_new_objects = 0;
957 static page_index_t new_areas_ignore_page;
958 struct new_area {
959 page_index_t page;
960 size_t offset;
961 size_t size;
963 static struct new_area (*new_areas)[];
964 static size_t new_areas_index;
965 size_t max_new_areas;
967 /* Add a new area to new_areas. */
968 static void
969 add_new_area(page_index_t first_page, size_t offset, size_t size)
971 size_t new_area_start, c;
972 ssize_t i;
974 /* Ignore if full. */
975 if (new_areas_index >= NUM_NEW_AREAS)
976 return;
978 switch (record_new_objects) {
979 case 0:
980 return;
981 case 1:
982 if (first_page > new_areas_ignore_page)
983 return;
984 break;
985 case 2:
986 break;
987 default:
988 gc_abort();
991 new_area_start = npage_bytes(first_page) + offset;
993 /* Search backwards for a prior area that this follows from. If
994        found, this will save adding a new area. */
995 for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) {
996 size_t area_end =
997 npage_bytes((*new_areas)[i].page)
998 + (*new_areas)[i].offset
999 + (*new_areas)[i].size;
1000 /*FSHOW((stderr,
1001 "/add_new_area S1 %d %d %d %d\n",
1002 i, c, new_area_start, area_end));*/
1003 if (new_area_start == area_end) {
1004 /*FSHOW((stderr,
1005 "/adding to [%d] %d %d %d with %d %d %d:\n",
1007 (*new_areas)[i].page,
1008 (*new_areas)[i].offset,
1009 (*new_areas)[i].size,
1010 first_page,
1011 offset,
1012 size);*/
1013 (*new_areas)[i].size += size;
1014 return;
1018 (*new_areas)[new_areas_index].page = first_page;
1019 (*new_areas)[new_areas_index].offset = offset;
1020 (*new_areas)[new_areas_index].size = size;
1021 /*FSHOW((stderr,
1022 "/new_area %d page %d offset %d size %d\n",
1023 new_areas_index, first_page, offset, size));*/
1024 new_areas_index++;
1026 /* Note the max new_areas used. */
1027 if (new_areas_index > max_new_areas)
1028 max_new_areas = new_areas_index;
1031 /* Update the tables for the alloc_region. The region may be added to
1032 * the new_areas.
1034 * When done the alloc_region is set up so that the next quick alloc
1035 * will fail safely and thus a new region will be allocated. Further
1036 * it is safe to try to re-update the page table of this reset
1037 * alloc_region. */
1038 void
1039 gc_alloc_update_page_tables(int page_type_flag, struct alloc_region *alloc_region)
1041 boolean more;
1042 page_index_t first_page;
1043 page_index_t next_page;
1044 os_vm_size_t bytes_used;
1045 os_vm_size_t region_size;
1046 os_vm_size_t byte_cnt;
1047 page_bytes_t orig_first_page_bytes_used;
1048 int ret;
1051 first_page = alloc_region->first_page;
1053 /* Catch an unused alloc_region. */
1054 if ((first_page == 0) && (alloc_region->last_page == -1))
1055 return;
1057 next_page = first_page+1;
1059 ret = thread_mutex_lock(&free_pages_lock);
1060 gc_assert(ret == 0);
1061 if (alloc_region->free_pointer != alloc_region->start_addr) {
1062 /* some bytes were allocated in the region */
1063 orig_first_page_bytes_used = page_table[first_page].bytes_used;
1065 gc_assert(alloc_region->start_addr ==
1066 (page_address(first_page)
1067 + page_table[first_page].bytes_used));
1069 /* All the pages used need to be updated */
1071 /* Update the first page. */
1073 /* If the page was free then set up the gen, and
1074 * scan_start_offset. */
1075 if (page_table[first_page].bytes_used == 0)
1076 gc_assert(page_starts_contiguous_block_p(first_page));
1077 page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
1079 gc_assert(page_table[first_page].allocated & page_type_flag);
1080 gc_assert(page_table[first_page].gen == gc_alloc_generation);
1081 gc_assert(page_table[first_page].large_object == 0);
1083 byte_cnt = 0;
1085 /* Calculate the number of bytes used in this page. This is not
1086 * always the number of new bytes, unless it was free. */
1087 more = 0;
1088 if ((bytes_used = void_diff(alloc_region->free_pointer,
1089 page_address(first_page)))
1090 >GENCGC_CARD_BYTES) {
1091 bytes_used = GENCGC_CARD_BYTES;
1092 more = 1;
1094 page_table[first_page].bytes_used = bytes_used;
1095 byte_cnt += bytes_used;
1098 /* All the rest of the pages should be free. We need to set
1099 * their scan_start_offset pointer to the start of the
1100 * region, and set the bytes_used. */
1101 while (more) {
1102 page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
1103 gc_assert(page_table[next_page].allocated & page_type_flag);
1104 gc_assert(page_table[next_page].bytes_used == 0);
1105 gc_assert(page_table[next_page].gen == gc_alloc_generation);
1106 gc_assert(page_table[next_page].large_object == 0);
1108 gc_assert(page_table[next_page].scan_start_offset ==
1109 void_diff(page_address(next_page),
1110 alloc_region->start_addr));
1112 /* Calculate the number of bytes used in this page. */
1113 more = 0;
1114 if ((bytes_used = void_diff(alloc_region->free_pointer,
1115 page_address(next_page)))>GENCGC_CARD_BYTES) {
1116 bytes_used = GENCGC_CARD_BYTES;
1117 more = 1;
1119 page_table[next_page].bytes_used = bytes_used;
1120 byte_cnt += bytes_used;
1122 next_page++;
1125 region_size = void_diff(alloc_region->free_pointer,
1126 alloc_region->start_addr);
1127 bytes_allocated += region_size;
1128 generations[gc_alloc_generation].bytes_allocated += region_size;
1130 gc_assert((byte_cnt- orig_first_page_bytes_used) == region_size);
1132         /* Set the generation's alloc restart page to the last page of
1133 * the region. */
1134 set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0, next_page-1);
1136 /* Add the region to the new_areas if requested. */
1137 if (BOXED_PAGE_FLAG & page_type_flag)
1138 add_new_area(first_page,orig_first_page_bytes_used, region_size);
1141 FSHOW((stderr,
1142 "/gc_alloc_update_page_tables update %d bytes to gen %d\n",
1143 region_size,
1144 gc_alloc_generation));
1146 } else {
1147 /* There are no bytes allocated. Unallocate the first_page if
1148 * there are 0 bytes_used. */
1149 page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
1150 if (page_table[first_page].bytes_used == 0)
1151 page_table[first_page].allocated = FREE_PAGE_FLAG;
1154 /* Unallocate any unused pages. */
1155 while (next_page <= alloc_region->last_page) {
1156 gc_assert(page_table[next_page].bytes_used == 0);
1157 page_table[next_page].allocated = FREE_PAGE_FLAG;
1158 next_page++;
1160 ret = thread_mutex_unlock(&free_pages_lock);
1161 gc_assert(ret == 0);
1163 /* alloc_region is per-thread, we're ok to do this unlocked */
1164 gc_set_region_empty(alloc_region);
1167 /* Allocate a possibly large object. */
1168 void *
1169 gc_alloc_large(sword_t nbytes, int page_type_flag, struct alloc_region *alloc_region)
1171 boolean more;
1172 page_index_t first_page, next_page, last_page;
1173 page_bytes_t orig_first_page_bytes_used;
1174 os_vm_size_t byte_cnt;
1175 os_vm_size_t bytes_used;
1176 int ret;
1178 ret = thread_mutex_lock(&free_pages_lock);
1179 gc_assert(ret == 0);
1181 first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 1);
1182 if (first_page <= alloc_region->last_page) {
1183 first_page = alloc_region->last_page+1;
1186 last_page=gc_find_freeish_pages(&first_page,nbytes, page_type_flag);
1188 gc_assert(first_page > alloc_region->last_page);
1190 set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 1, last_page);
1192 /* Set up the pages. */
1193 orig_first_page_bytes_used = page_table[first_page].bytes_used;
1195 /* If the first page was free then set up the gen, and
1196 * scan_start_offset. */
1197 if (page_table[first_page].bytes_used == 0) {
1198 page_table[first_page].allocated = page_type_flag;
1199 page_table[first_page].gen = gc_alloc_generation;
1200 page_table[first_page].scan_start_offset = 0;
1201 page_table[first_page].large_object = 1;
1204 gc_assert(page_table[first_page].allocated == page_type_flag);
1205 gc_assert(page_table[first_page].gen == gc_alloc_generation);
1206 gc_assert(page_table[first_page].large_object == 1);
1208 byte_cnt = 0;
1210 /* Calc. the number of bytes used in this page. This is not
1211 * always the number of new bytes, unless it was free. */
1212 more = 0;
1213 if ((bytes_used = nbytes+orig_first_page_bytes_used) > GENCGC_CARD_BYTES) {
1214 bytes_used = GENCGC_CARD_BYTES;
1215 more = 1;
1217 page_table[first_page].bytes_used = bytes_used;
1218 byte_cnt += bytes_used;
1220 next_page = first_page+1;
1222 /* All the rest of the pages should be free. We need to set their
1223 * scan_start_offset pointer to the start of the region, and set
1224 * the bytes_used. */
1225 while (more) {
1226 gc_assert(page_free_p(next_page));
1227 gc_assert(page_table[next_page].bytes_used == 0);
1228 page_table[next_page].allocated = page_type_flag;
1229 page_table[next_page].gen = gc_alloc_generation;
1230 page_table[next_page].large_object = 1;
1232 page_table[next_page].scan_start_offset =
1233 npage_bytes(next_page-first_page) - orig_first_page_bytes_used;
1235 /* Calculate the number of bytes used in this page. */
1236 more = 0;
1237 bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt;
1238 if (bytes_used > GENCGC_CARD_BYTES) {
1239 bytes_used = GENCGC_CARD_BYTES;
1240 more = 1;
1242 page_table[next_page].bytes_used = bytes_used;
1243 page_table[next_page].write_protected=0;
1244 page_table[next_page].dont_move=0;
1245 byte_cnt += bytes_used;
1246 next_page++;
1249 gc_assert((byte_cnt-orig_first_page_bytes_used) == (size_t)nbytes);
1251 bytes_allocated += nbytes;
1252 generations[gc_alloc_generation].bytes_allocated += nbytes;
1254 /* Add the region to the new_areas if requested. */
1255 if (BOXED_PAGE_FLAG & page_type_flag)
1256 add_new_area(first_page,orig_first_page_bytes_used,nbytes);
1258 /* Bump up last_free_page */
1259 if (last_page+1 > last_free_page) {
1260 last_free_page = last_page+1;
1261 set_alloc_pointer((lispobj)(page_address(last_free_page)));
1263 ret = thread_mutex_unlock(&free_pages_lock);
1264 gc_assert(ret == 0);
1266 #ifdef READ_PROTECT_FREE_PAGES
1267 os_protect(page_address(first_page),
1268 npage_bytes(1+last_page-first_page),
1269 OS_VM_PROT_ALL);
1270 #endif
1272 zero_dirty_pages(first_page, last_page);
1274 return page_address(first_page);
1277 static page_index_t gencgc_alloc_start_page = -1;
1279 void
1280 gc_heap_exhausted_error_or_lose (sword_t available, sword_t requested)
1282 struct thread *thread = arch_os_get_current_thread();
1283 /* Write basic information before doing anything else: if we don't
1284      * call into Lisp this is a must, and even if we do there is always
1285 * the danger that we bounce back here before the error has been
1286 * handled, or indeed even printed.
1288 report_heap_exhaustion(available, requested, thread);
1289 if (gc_active_p || (available == 0)) {
1290 /* If we are in GC, or totally out of memory there is no way
1291 * to sanely transfer control to the lisp-side of things.
1293 lose("Heap exhausted, game over.");
1295 else {
1296 /* FIXME: assert free_pages_lock held */
1297 (void)thread_mutex_unlock(&free_pages_lock);
1298 #if !(defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD))
1299 gc_assert(get_pseudo_atomic_atomic(thread));
1300 clear_pseudo_atomic_atomic(thread);
1301 if (get_pseudo_atomic_interrupted(thread))
1302 do_pending_interrupt();
1303 #endif
1304 /* Another issue is that signalling HEAP-EXHAUSTED error leads
1305 * to running user code at arbitrary places, even in a
1306 * WITHOUT-INTERRUPTS which may lead to a deadlock without
1307 * running out of the heap. So at this point all bets are
1308 * off. */
1309 if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL)
1310 corruption_warning_and_maybe_lose
1311 ("Signalling HEAP-EXHAUSTED in a WITHOUT-INTERRUPTS.");
1312 /* available and requested should be double word aligned, thus
1313            they can be passed as fixnums and shifted later. */
1314 funcall2(StaticSymbolFunction(HEAP_EXHAUSTED_ERROR), available, requested);
1315 lose("HEAP-EXHAUSTED-ERROR fell through");
1319 page_index_t
1320 gc_find_freeish_pages(page_index_t *restart_page_ptr, sword_t bytes,
1321 int page_type_flag)
1323 page_index_t most_bytes_found_from = 0, most_bytes_found_to = 0;
1324 page_index_t first_page, last_page, restart_page = *restart_page_ptr;
1325 os_vm_size_t nbytes = bytes;
1326 os_vm_size_t nbytes_goal = nbytes;
1327 os_vm_size_t bytes_found = 0;
1328 os_vm_size_t most_bytes_found = 0;
1329 boolean small_object = nbytes < GENCGC_CARD_BYTES;
1330 /* FIXME: assert(free_pages_lock is held); */
1332 if (nbytes_goal < gencgc_alloc_granularity)
1333 nbytes_goal = gencgc_alloc_granularity;
1335 /* Toggled by gc_and_save for heap compaction, normally -1. */
1336 if (gencgc_alloc_start_page != -1) {
1337 restart_page = gencgc_alloc_start_page;
1340 /* FIXME: This is on bytes instead of nbytes pending cleanup of
1341 * long from the interface. */
1342 gc_assert(bytes>=0);
1343 /* Search for a page with at least nbytes of space. We prefer
1344 * not to split small objects on multiple pages, to reduce the
1345      * number of contiguous allocation regions spanning multiple
1346      * pages: this helps avoid excessive conservatism.
1348 * For other objects, we guarantee that they start on their own
1349 * page boundary.
1351 first_page = restart_page;
1352 while (first_page < page_table_pages) {
1353 bytes_found = 0;
1354 if (page_free_p(first_page)) {
1355 gc_assert(0 == page_table[first_page].bytes_used);
1356 bytes_found = GENCGC_CARD_BYTES;
1357 } else if (small_object &&
1358 (page_table[first_page].allocated == page_type_flag) &&
1359 (page_table[first_page].large_object == 0) &&
1360 (page_table[first_page].gen == gc_alloc_generation) &&
1361 (page_table[first_page].write_protected == 0) &&
1362 (page_table[first_page].dont_move == 0)) {
1363 bytes_found = GENCGC_CARD_BYTES - page_table[first_page].bytes_used;
1364 if (bytes_found < nbytes) {
1365 if (bytes_found > most_bytes_found)
1366 most_bytes_found = bytes_found;
1367 first_page++;
1368 continue;
1370 } else {
1371 first_page++;
1372 continue;
1375 gc_assert(page_table[first_page].write_protected == 0);
1376 for (last_page = first_page+1;
1377 ((last_page < page_table_pages) &&
1378 page_free_p(last_page) &&
1379 (bytes_found < nbytes_goal));
1380 last_page++) {
1381 bytes_found += GENCGC_CARD_BYTES;
1382 gc_assert(0 == page_table[last_page].bytes_used);
1383 gc_assert(0 == page_table[last_page].write_protected);
1386 if (bytes_found > most_bytes_found) {
1387 most_bytes_found = bytes_found;
1388 most_bytes_found_from = first_page;
1389 most_bytes_found_to = last_page;
1391 if (bytes_found >= nbytes_goal)
1392 break;
1394 first_page = last_page;
1397 bytes_found = most_bytes_found;
1398 restart_page = first_page + 1;
1400 /* Check for a failure */
1401 if (bytes_found < nbytes) {
1402 gc_assert(restart_page >= page_table_pages);
1403 gc_heap_exhausted_error_or_lose(most_bytes_found, nbytes);
1406 gc_assert(most_bytes_found_to);
1407 *restart_page_ptr = most_bytes_found_from;
1408 return most_bytes_found_to-1;
1411 /* Allocate bytes. All the rest of the special-purpose allocation
1412 * functions will eventually call this */
1414 void *
1415 gc_alloc_with_region(sword_t nbytes,int page_type_flag, struct alloc_region *my_region,
1416 int quick_p)
1418 void *new_free_pointer;
1420 if (nbytes>=LARGE_OBJECT_SIZE)
1421 return gc_alloc_large(nbytes, page_type_flag, my_region);
1423 /* Check whether there is room in the current alloc region. */
1424 new_free_pointer = my_region->free_pointer + nbytes;
1426 /* fprintf(stderr, "alloc %d bytes from %p to %p\n", nbytes,
1427 my_region->free_pointer, new_free_pointer); */
1429 if (new_free_pointer <= my_region->end_addr) {
1430 /* If so then allocate from the current alloc region. */
1431 void *new_obj = my_region->free_pointer;
1432 my_region->free_pointer = new_free_pointer;
1434 /* Unless a `quick' alloc was requested, check whether the
1435 alloc region is almost empty. */
1436 if (!quick_p &&
1437 void_diff(my_region->end_addr,my_region->free_pointer) <= 32) {
1438 /* If so, finished with the current region. */
1439 gc_alloc_update_page_tables(page_type_flag, my_region);
1440 /* Set up a new region. */
1441 gc_alloc_new_region(32 /*bytes*/, page_type_flag, my_region);
1444 return((void *)new_obj);
1447 /* Else not enough free space in the current region: retry with a
1448 * new region. */
1450 gc_alloc_update_page_tables(page_type_flag, my_region);
1451 gc_alloc_new_region(nbytes, page_type_flag, my_region);
1452 return gc_alloc_with_region(nbytes, page_type_flag, my_region,0);
1455 /* Copy a large object. If the object is in a large object region then
1456 * it is simply promoted, else it is copied. If it's large enough then
1457 * it's copied to a large object region.
1459 * Bignums and vectors may have shrunk. If the object is not copied
1460 * the space needs to be reclaimed, and the page_tables corrected. */
1461 static lispobj
1462 general_copy_large_object(lispobj object, word_t nwords, boolean boxedp)
1464 lispobj *new;
1465 page_index_t first_page;
1467 CHECK_COPY_PRECONDITIONS(object, nwords);
1469 if ((nwords > 1024*1024) && gencgc_verbose) {
1470 FSHOW((stderr, "/general_copy_large_object: %d bytes\n",
1471 nwords*N_WORD_BYTES));
1474 /* Check whether it's a large object. */
1475 first_page = find_page_index((void *)object);
1476 gc_assert(first_page >= 0);
1478 if (page_table[first_page].large_object) {
1479 /* Promote the object. Note: Unboxed objects may have been
1480 * allocated to a BOXED region so it may be necessary to
1481 * change the region to UNBOXED. */
1482 os_vm_size_t remaining_bytes;
1483 os_vm_size_t bytes_freed;
1484 page_index_t next_page;
1485 page_bytes_t old_bytes_used;
1487 /* FIXME: This comment is somewhat stale.
1489 * Note: Any page write-protection must be removed, else a
1490 * later scavenge_newspace may incorrectly not scavenge these
1491 * pages. This would not be necessary if they are added to the
1492 * new areas, but let's do it for them all (they'll probably
1493 * be written anyway?). */
1495 gc_assert(page_starts_contiguous_block_p(first_page));
1496 next_page = first_page;
1497 remaining_bytes = nwords*N_WORD_BYTES;
1499 while (remaining_bytes > GENCGC_CARD_BYTES) {
1500 gc_assert(page_table[next_page].gen == from_space);
1501 gc_assert(page_table[next_page].large_object);
1502 gc_assert(page_table[next_page].scan_start_offset ==
1503 npage_bytes(next_page-first_page));
1504 gc_assert(page_table[next_page].bytes_used == GENCGC_CARD_BYTES);
1505 /* Should have been unprotected by unprotect_oldspace()
1506 * for boxed objects, and after promotion unboxed ones
1507 * should not be on protected pages at all. */
1508 gc_assert(!page_table[next_page].write_protected);
1510 if (boxedp)
1511 gc_assert(page_boxed_p(next_page));
1512 else {
1513 gc_assert(page_allocated_no_region_p(next_page));
1514 page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
1516 page_table[next_page].gen = new_space;
1518 remaining_bytes -= GENCGC_CARD_BYTES;
1519 next_page++;
1522 /* Now only one page remains, but the object may have shrunk so
1523 * there may be more unused pages which will be freed. */
1525 /* Object may have shrunk but shouldn't have grown - check. */
1526 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1528 page_table[next_page].gen = new_space;
1530 if (boxedp)
1531 gc_assert(page_boxed_p(next_page));
1532 else
1533 page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
1535 /* Adjust the bytes_used. */
1536 old_bytes_used = page_table[next_page].bytes_used;
1537 page_table[next_page].bytes_used = remaining_bytes;
1539 bytes_freed = old_bytes_used - remaining_bytes;
1541 /* Free any remaining pages; needs care. */
1542 next_page++;
1543 while ((old_bytes_used == GENCGC_CARD_BYTES) &&
1544 (page_table[next_page].gen == from_space) &&
1545 /* FIXME: It is not obvious to me why this is necessary
1546 * as a loop condition: it seems to me that the
1547 * scan_start_offset test should be sufficient, but
1548 * experimentally that is not the case. --NS
1549 * 2011-11-28 */
1550 (boxedp ?
1551 page_boxed_p(next_page) :
1552 page_allocated_no_region_p(next_page)) &&
1553 page_table[next_page].large_object &&
1554 (page_table[next_page].scan_start_offset ==
1555 npage_bytes(next_page - first_page))) {
1556             /* Checks out OK, free the page. Don't need to bother zeroing
1557              * the pages, as this should have been done before shrinking the
1558              * object. These pages shouldn't be write-protected; even if
1559              * boxed they should be zero filled. */
1560 gc_assert(page_table[next_page].write_protected == 0);
1562 old_bytes_used = page_table[next_page].bytes_used;
1563 page_table[next_page].allocated = FREE_PAGE_FLAG;
1564 page_table[next_page].bytes_used = 0;
1565 bytes_freed += old_bytes_used;
1566 next_page++;
1569 if ((bytes_freed > 0) && gencgc_verbose) {
1570 FSHOW((stderr,
1571 "/general_copy_large_object bytes_freed=%"OS_VM_SIZE_FMT"\n",
1572 bytes_freed));
1575 generations[from_space].bytes_allocated -= nwords*N_WORD_BYTES
1576 + bytes_freed;
1577 generations[new_space].bytes_allocated += nwords*N_WORD_BYTES;
1578 bytes_allocated -= bytes_freed;
1580 /* Add the region to the new_areas if requested. */
1581 if (boxedp)
1582 add_new_area(first_page,0,nwords*N_WORD_BYTES);
1584 return(object);
1586 } else {
1587 /* Allocate space. */
1588 new = gc_general_alloc(nwords*N_WORD_BYTES,
1589 (boxedp ? BOXED_PAGE_FLAG : UNBOXED_PAGE_FLAG),
1590 ALLOC_QUICK);
1592 /* Copy the object. */
1593 memcpy(new,native_pointer(object),nwords*N_WORD_BYTES);
1595 /* Return Lisp pointer of new object. */
1596 return make_lispobj(new, lowtag_of(object));
1600 lispobj
1601 copy_large_object(lispobj object, sword_t nwords)
1603 return general_copy_large_object(object, nwords, 1);
1606 lispobj
1607 copy_large_unboxed_object(lispobj object, sword_t nwords)
1609 return general_copy_large_object(object, nwords, 0);
1612 /* to copy unboxed objects */
1613 lispobj
1614 copy_unboxed_object(lispobj object, sword_t nwords)
1616 return gc_general_copy_object(object, nwords, UNBOXED_PAGE_FLAG);
1621 * code and code-related objects
1624 static lispobj trans_fun_header(lispobj object);
1625 static lispobj trans_boxed(lispobj object);
1628 /* Scan a x86 compiled code object, looking for possible fixups that
1629 * have been missed after a move.
1631 * Two types of fixups are needed:
1632 * 1. Absolute fixups to within the code object.
1633 * 2. Relative fixups to outside the code object.
1635 * Currently only absolute fixups to the constant vector, or to the
1636 * code area are checked. */
1637 #ifdef LISP_FEATURE_X86
1638 void
1639 sniff_code_object(struct code *code, os_vm_size_t displacement)
1641 sword_t nheader_words, ncode_words, nwords;
1642 os_vm_address_t constants_start_addr = NULL, constants_end_addr, p;
1643 os_vm_address_t code_start_addr, code_end_addr;
1644 os_vm_address_t code_addr = (os_vm_address_t)code;
1645 int fixup_found = 0;
1647 if (!check_code_fixups)
1648 return;
1650 FSHOW((stderr, "/sniffing code: %p, %lu\n", code, displacement));
1652 ncode_words = code_instruction_words(code->code_size);
1653 nheader_words = code_header_words(*(lispobj *)code);
1654 nwords = ncode_words + nheader_words;
1656 constants_start_addr = code_addr + 5*N_WORD_BYTES;
1657 constants_end_addr = code_addr + nheader_words*N_WORD_BYTES;
1658 code_start_addr = code_addr + nheader_words*N_WORD_BYTES;
1659 code_end_addr = code_addr + nwords*N_WORD_BYTES;
1661 /* Work through the unboxed code. */
1662 for (p = code_start_addr; p < code_end_addr; p++) {
1663 void *data = *(void **)p;
1664 unsigned d1 = *((unsigned char *)p - 1);
1665 unsigned d2 = *((unsigned char *)p - 2);
1666 unsigned d3 = *((unsigned char *)p - 3);
1667 unsigned d4 = *((unsigned char *)p - 4);
1668 #if QSHOW
1669 unsigned d5 = *((unsigned char *)p - 5);
1670 unsigned d6 = *((unsigned char *)p - 6);
1671 #endif
1673 /* Check for code references. */
1674 /* Check for a 32 bit word that looks like an absolute
1675            reference to within the code area of the code object. */
1676 if ((data >= (void*)(code_start_addr-displacement))
1677 && (data < (void*)(code_end_addr-displacement))) {
1678 /* function header */
1679 if ((d4 == 0x5e)
1680 && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) ==
1681 (unsigned)code)) {
1682 /* Skip the function header */
1683 p += 6*4 - 4 - 1;
1684 continue;
1686 /* the case of PUSH imm32 */
1687 if (d1 == 0x68) {
1688 fixup_found = 1;
1689 FSHOW((stderr,
1690 "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1691 p, d6, d5, d4, d3, d2, d1, data));
1692 FSHOW((stderr, "/PUSH $0x%.8x\n", data));
1694 /* the case of MOV [reg-8],imm32 */
1695 if ((d3 == 0xc7)
1696 && (d2==0x40 || d2==0x41 || d2==0x42 || d2==0x43
1697 || d2==0x45 || d2==0x46 || d2==0x47)
1698 && (d1 == 0xf8)) {
1699 fixup_found = 1;
1700 FSHOW((stderr,
1701 "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1702 p, d6, d5, d4, d3, d2, d1, data));
1703 FSHOW((stderr, "/MOV [reg-8],$0x%.8x\n", data));
1705 /* the case of LEA reg,[disp32] */
1706 if ((d2 == 0x8d) && ((d1 & 0xc7) == 5)) {
1707 fixup_found = 1;
1708 FSHOW((stderr,
1709 "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1710 p, d6, d5, d4, d3, d2, d1, data));
1711 FSHOW((stderr,"/LEA reg,[$0x%.8x]\n", data));
1715 /* Check for constant references. */
1716 /* Check for a 32 bit word that looks like an absolute
1717 reference to within the constant vector. Constant references
1718 will be aligned. */
1719 if ((data >= (void*)(constants_start_addr-displacement))
1720 && (data < (void*)(constants_end_addr-displacement))
1721 && (((unsigned)data & 0x3) == 0)) {
1722 /* Mov eax,m32 */
1723 if (d1 == 0xa1) {
1724 fixup_found = 1;
1725 FSHOW((stderr,
1726 "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1727 p, d6, d5, d4, d3, d2, d1, data));
1728 FSHOW((stderr,"/MOV eax,0x%.8x\n", data));
1731 /* the case of MOV m32,EAX */
1732 if (d1 == 0xa3) {
1733 fixup_found = 1;
1734 FSHOW((stderr,
1735 "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1736 p, d6, d5, d4, d3, d2, d1, data));
1737 FSHOW((stderr, "/MOV 0x%.8x,eax\n", data));
1740 /* the case of CMP m32,imm32 */
1741 if ((d1 == 0x3d) && (d2 == 0x81)) {
1742 fixup_found = 1;
1743 FSHOW((stderr,
1744 "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1745 p, d6, d5, d4, d3, d2, d1, data));
1746 /* XX Check this */
1747 FSHOW((stderr, "/CMP 0x%.8x,immed32\n", data));
1750 /* Check for a mod=00, r/m=101 byte. */
1751 if ((d1 & 0xc7) == 5) {
1752 /* Cmp m32,reg */
1753 if (d2 == 0x39) {
1754 fixup_found = 1;
1755 FSHOW((stderr,
1756 "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1757 p, d6, d5, d4, d3, d2, d1, data));
1758 FSHOW((stderr,"/CMP 0x%.8x,reg\n", data));
1760 /* the case of CMP reg32,m32 */
1761 if (d2 == 0x3b) {
1762 fixup_found = 1;
1763 FSHOW((stderr,
1764 "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1765 p, d6, d5, d4, d3, d2, d1, data));
1766 FSHOW((stderr, "/CMP reg32,0x%.8x\n", data));
1768 /* the case of MOV m32,reg32 */
1769 if (d2 == 0x89) {
1770 fixup_found = 1;
1771 FSHOW((stderr,
1772 "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1773 p, d6, d5, d4, d3, d2, d1, data));
1774 FSHOW((stderr, "/MOV 0x%.8x,reg32\n", data));
1776 /* the case of MOV reg32,m32 */
1777 if (d2 == 0x8b) {
1778 fixup_found = 1;
1779 FSHOW((stderr,
1780 "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1781 p, d6, d5, d4, d3, d2, d1, data));
1782 FSHOW((stderr, "/MOV reg32,0x%.8x\n", data));
1784 /* the case of LEA reg32,m32 */
1785 if (d2 == 0x8d) {
1786 fixup_found = 1;
1787 FSHOW((stderr,
1788 "abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1789 p, d6, d5, d4, d3, d2, d1, data));
1790 FSHOW((stderr, "/LEA reg32,0x%.8x\n", data));
1796 /* If anything was found, print some information on the code
1797 * object. */
1798 if (fixup_found) {
1799 FSHOW((stderr,
1800 "/compiled code object at %x: header words = %d, code words = %d\n",
1801 code, nheader_words, ncode_words));
1802 FSHOW((stderr,
1803 "/const start = %x, end = %x\n",
1804 constants_start_addr, constants_end_addr));
1805 FSHOW((stderr,
1806 "/code start = %x, end = %x\n",
1807 code_start_addr, code_end_addr));
1810 #endif
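/* A hedged worked example of the fixup arithmetic in
 * gencgc_apply_code_fixups() below: suppose the code object moved up by
 * displacement D. A fixup location whose current value lies inside the
 * old object is treated as an absolute reference into the object itself
 * and is rewritten to old_value + D; a value outside the old object is
 * taken to be the saved form of a relative reference, whose encoded
 * distance shrinks by D when the referencing instruction moves up, so
 * it is rewritten to old_value - D. */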
1812 #ifdef LISP_FEATURE_X86
1813 void
1814 gencgc_apply_code_fixups(struct code *old_code, struct code *new_code)
1816 sword_t nheader_words, ncode_words, nwords;
1817 os_vm_address_t __attribute__((unused)) constants_start_addr, constants_end_addr;
1818 os_vm_address_t __attribute__((unused)) code_start_addr, code_end_addr;
1819 os_vm_address_t code_addr = (os_vm_address_t)new_code;
1820 os_vm_address_t old_addr = (os_vm_address_t)old_code;
1821 os_vm_size_t displacement = code_addr - old_addr;
1822 lispobj fixups = NIL;
1823 struct vector *fixups_vector;
1825 ncode_words = code_instruction_words(new_code->code_size);
1826 nheader_words = code_header_words(*(lispobj *)new_code);
1827 nwords = ncode_words + nheader_words;
1828 /* FSHOW((stderr,
1829 "/compiled code object at %x: header words = %d, code words = %d\n",
1830 new_code, nheader_words, ncode_words)); */
1831 constants_start_addr = code_addr + 5*N_WORD_BYTES;
1832 constants_end_addr = code_addr + nheader_words*N_WORD_BYTES;
1833 code_start_addr = code_addr + nheader_words*N_WORD_BYTES;
1834 code_end_addr = code_addr + nwords*N_WORD_BYTES;
1836 FSHOW((stderr,
1837 "/const start = %x, end = %x\n",
1838 constants_start_addr,constants_end_addr));
1839 FSHOW((stderr,
1840 "/code start = %x; end = %x\n",
1841 code_start_addr,code_end_addr));
1844 fixups = new_code->fixups;
1845 /* It will be a Lisp vector if valid, or 0 if there are no fixups */
1846 if (fixups == 0 || !is_lisp_pointer(fixups)) {
1847 /* Check for possible errors. */
1848 if (check_code_fixups)
1849 sniff_code_object(new_code, displacement);
1851 return;
1854 fixups_vector = (struct vector *)native_pointer(fixups);
1856 /* Could be pointing to a forwarding pointer. */
1857 /* This is extremely unlikely, because the only referent of the fixups
1858 is usually the code itself; so scavenging the vector won't occur
1859 until after the code object is known to be live. As we're just now
1860 enlivening the code, the fixups shouldn't have been forwarded.
1861 Maybe the vector is on the special binding stack though ... */
1862 if (is_lisp_pointer(fixups) &&
1863 (find_page_index((void*)fixups_vector) != -1) &&
1864 forwarding_pointer_p((lispobj*)fixups_vector)) {
1865 /* If so, then follow it. */
1866 /*SHOW("following pointer to a forwarding pointer");*/
1867 fixups_vector = (struct vector *)
1868 native_pointer(forwarding_pointer_value((lispobj*)fixups_vector));
1871 /*SHOW("got fixups");*/
1873 if (widetag_of(fixups_vector->header) == SIMPLE_ARRAY_WORD_WIDETAG) {
1874 /* Got the fixups for the code block. Now work through the vector,
1875 and apply a fixup at each address. */
1876 sword_t length = fixnum_value(fixups_vector->length);
1877 sword_t i;
1878 for (i = 0; i < length; i++) {
1879 long offset = fixups_vector->data[i];
1880 /* Now check the current value of offset. */
1881 os_vm_address_t old_value = *(os_vm_address_t *)(code_start_addr + offset);
1883 /* If it's within the old_code object then it must be an
1884 * absolute fixup (relative ones are not saved) */
1885 if ((old_value >= old_addr)
1886 && (old_value < (old_addr + nwords*N_WORD_BYTES)))
1887 /* So add the displacement. */
1888 *(os_vm_address_t *)(code_start_addr + offset) =
1889 old_value + displacement;
1890 else
1891 /* It is outside the old code object so it must be a
1892 * relative fixup (absolute fixups are not saved). So
1893 * subtract the displacement. */
1894 *(os_vm_address_t *)(code_start_addr + offset) =
1895 old_value - displacement;
1897 } else {
1898 /* This used to just print a note to stderr, but a bogus fixup seems to
1899 * indicate real heap corruption, so a hard failure is in order. */
1900 lose("fixup vector %p has a bad widetag: %d\n",
1901 fixups_vector, widetag_of(fixups_vector->header));
1904 /* Check for possible errors. */
1905 if (check_code_fixups) {
1906 sniff_code_object(new_code,displacement);
1909 #endif
1911 static lispobj
1912 trans_boxed_large(lispobj object)
1914 gc_assert(is_lisp_pointer(object));
1915 return copy_large_object(object,
1916 (HeaderValue(*native_pointer(object)) | 1) + 1);
1920 * weak pointers
1923 /* XX This is a hack adapted from cgc.c. These don't work too
1924 * efficiently with the gencgc, as a list of the weak pointers is
1925 * maintained within the objects, which causes writes to the pages. A
1926 * limited attempt is made to avoid unnecessary writes, but this needs
1927 * a re-think. */
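/* A small illustration of the 'next' encoding used below (restating the
 * comment in scav_weak_pointer, not new behavior):
 *
 *   wp->next == NULL   not yet on the weak_pointers chain
 *   wp->next == wp     on the chain, as its last entry
 *   otherwise          on the chain, wp->next being the following entry
 *
 * Pushing wp onto an empty chain therefore stores wp itself in the next
 * slot rather than NULL, keeping NULL unambiguous. */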
1928 static sword_t
1929 scav_weak_pointer(lispobj *where, lispobj object)
1931 /* Since we overwrite the 'next' field, we have to make
1932 * sure not to do so for pointers already in the list.
1933 * Instead of searching the list of weak_pointers each
1934 * time, we ensure that next is always NULL when the weak
1935 * pointer isn't in the list, and not NULL otherwise.
1936 * Since we can't use NULL to denote end of list, we
1937 * use a pointer back to the same weak_pointer.
1939 struct weak_pointer * wp = (struct weak_pointer*)where;
1941 if (NULL == wp->next) {
1942 wp->next = weak_pointers;
1943 weak_pointers = wp;
1944 if (NULL == wp->next)
1945 wp->next = wp;
1948 /* Do not let GC scavenge the value slot of the weak pointer.
1949 * (That is why it is a weak pointer.) */
1951 return WEAK_POINTER_NWORDS;
1955 lispobj *
1956 search_read_only_space(void *pointer)
1958 lispobj *start = (lispobj *) READ_ONLY_SPACE_START;
1959 lispobj *end = (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0);
1960 if ((pointer < (void *)start) || (pointer >= (void *)end))
1961 return NULL;
1962 return gc_search_space(start, pointer);
1965 lispobj *
1966 search_static_space(void *pointer)
1968 lispobj *start = (lispobj *)STATIC_SPACE_START;
1969 lispobj *end = (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0);
1970 if ((pointer < (void *)start) || (pointer >= (void *)end))
1971 return NULL;
1972 return gc_search_space(start, pointer);
1975 /* a faster version for searching the dynamic space. This will work even
1976 * if the object is in a current allocation region. */
1977 lispobj *
1978 search_dynamic_space(void *pointer)
1980 page_index_t page_index = find_page_index(pointer);
1981 lispobj *start;
1983 /* The address may be invalid, so do some checks. */
1984 if ((page_index == -1) || page_free_p(page_index))
1985 return NULL;
1986 start = (lispobj *)page_scan_start(page_index);
1987 return gc_search_space(start, pointer);
1990 // Return the starting address of the object containing 'addr'
1991 // if and only if the object is one which would be evacuated from 'from_space'
1992 // were it allowed to be either discarded as garbage or moved.
1993 // 'addr_page_index' is the page containing 'addr' and must not be -1.
1994 // Return 0 if there is no such object - that is, if addr is past the
1995 // end of the used bytes, or its pages are not in 'from_space' etc.
1996 static lispobj*
1997 conservative_root_p(void *addr, page_index_t addr_page_index)
1999 #ifdef GENCGC_IS_PRECISE
2000 /* If we're in precise gencgc (non-x86oid as of this writing) then
2001 * we are only called on valid object pointers in the first place,
2002 * so we just have to do a bounds-check against the heap, a
2003 * generation check, and the already-pinned check. */
2004 if ((page_table[addr_page_index].gen != from_space)
2005 || (page_table[addr_page_index].dont_move != 0))
2006 return 0;
2007 return (lispobj*)1;
2008 #else
2009 /* quick check 1: Address is quite likely to have been invalid. */
2010 if (page_free_p(addr_page_index)
2011 || (page_table[addr_page_index].bytes_used == 0)
2012 || (page_table[addr_page_index].gen != from_space))
2013 return 0;
2014 gc_assert(!(page_table[addr_page_index].allocated&OPEN_REGION_PAGE_FLAG));
2016 /* quick check 2: Check the offset within the page.
2019 if (((uword_t)addr & (GENCGC_CARD_BYTES - 1)) >
2020 page_table[addr_page_index].bytes_used)
2021 return 0;
2023 /* Filter out anything which can't be a pointer to a Lisp object
2024 * (or, as a special case which also requires dont_move, a return
2025 * address referring to something in a CodeObject). This is
2026 * expensive but important, since it vastly reduces the
2027 * probability that random garbage will be bogusly interpreted as
2028 * a pointer which prevents a page from moving. */
2029 lispobj* object_start = search_dynamic_space(addr);
2030 if (!object_start) return 0;
2032 /* If the containing object is a code object and 'addr' points
2033 * anywhere beyond the boxed words,
2034 * presume it to be a valid unboxed return address. */
2035 if (instruction_ptr_p(addr, object_start))
2036 return object_start;
2038 /* Large object pages only contain ONE object, and it will never
2039 * be a CONS. However, arrays and bignums can be allocated larger
2040 * than necessary and then shrunk to fit, leaving what look like
2041 * (0 . 0) CONSes at the end. These appear valid to
2042 * properly_tagged_descriptor_p(), so pick them off here. */
2043 if (((lowtag_of((lispobj)addr) == LIST_POINTER_LOWTAG) &&
2044 page_table[addr_page_index].large_object)
2045 || !properly_tagged_descriptor_p(addr, object_start))
2046 return 0;
2048 return object_start;
2049 #endif
2052 /* Adjust large bignum and vector objects. This will adjust the
2053 * allocated region if the size has shrunk, and move unboxed objects
2054 * into unboxed pages. The pages are not promoted here, and the
2055 * promoted region is not added to the new_regions; this is really
2056 * only designed to be called from preserve_pointer(). Shouldn't fail
2057 * if this is missed, just may delay the moving of objects to unboxed
2058 * pages, and the freeing of pages. */
2059 static void
2060 maybe_adjust_large_object(lispobj *where)
2062 page_index_t first_page;
2063 page_index_t next_page;
2064 sword_t nwords;
2066 uword_t remaining_bytes;
2067 uword_t bytes_freed;
2068 uword_t old_bytes_used;
2070 int boxed;
2072 /* Check whether it's a vector or bignum object. */
2073 switch (widetag_of(where[0])) {
2074 case SIMPLE_VECTOR_WIDETAG:
2075 boxed = BOXED_PAGE_FLAG;
2076 break;
2077 case BIGNUM_WIDETAG:
2078 #include "genesis/specialized-vectors.inc"
2079 boxed = UNBOXED_PAGE_FLAG;
2080 break;
2081 default:
2082 return;
2085 /* Find its current size. */
2086 nwords = (sizetab[widetag_of(where[0])])(where);
2088 first_page = find_page_index((void *)where);
2089 gc_assert(first_page >= 0);
2091 /* Note: Any page write-protection must be removed, else a later
2092 * scavenge_newspace may incorrectly not scavenge these pages.
2093 * This would not be necessary if they are added to the new areas,
2094 * but let's do it for them all (they'll probably be written
2095 * anyway?). */
2097 gc_assert(page_starts_contiguous_block_p(first_page));
2099 next_page = first_page;
2100 remaining_bytes = nwords*N_WORD_BYTES;
2101 while (remaining_bytes > GENCGC_CARD_BYTES) {
2102 gc_assert(page_table[next_page].gen == from_space);
2103 gc_assert(page_allocated_no_region_p(next_page));
2104 gc_assert(page_table[next_page].large_object);
2105 gc_assert(page_table[next_page].scan_start_offset ==
2106 npage_bytes(next_page-first_page));
2107 gc_assert(page_table[next_page].bytes_used == GENCGC_CARD_BYTES);
2109 page_table[next_page].allocated = boxed;
2111 /* Shouldn't be write-protected at this stage. Essential that the
2112 * pages aren't. */
2113 gc_assert(!page_table[next_page].write_protected);
2114 remaining_bytes -= GENCGC_CARD_BYTES;
2115 next_page++;
2118 /* Now only one page remains, but the object may have shrunk so
2119 * there may be more unused pages which will be freed. */
2121 /* Object may have shrunk but shouldn't have grown - check. */
2122 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2124 page_table[next_page].allocated = boxed;
2125 gc_assert(page_table[next_page].allocated ==
2126 page_table[first_page].allocated);
2128 /* Adjust the bytes_used. */
2129 old_bytes_used = page_table[next_page].bytes_used;
2130 page_table[next_page].bytes_used = remaining_bytes;
2132 bytes_freed = old_bytes_used - remaining_bytes;
2134 /* Free any remaining pages; needs care. */
2135 next_page++;
2136 while ((old_bytes_used == GENCGC_CARD_BYTES) &&
2137 (page_table[next_page].gen == from_space) &&
2138 page_allocated_no_region_p(next_page) &&
2139 page_table[next_page].large_object &&
2140 (page_table[next_page].scan_start_offset ==
2141 npage_bytes(next_page - first_page))) {
2142 /* It checks out OK, free the page. We don't need to bother zeroing
2143 * pages as this should have been done before shrinking the
2144 * object. These pages shouldn't be write protected as they
2145 * should be zero filled. */
2146 gc_assert(page_table[next_page].write_protected == 0);
2148 old_bytes_used = page_table[next_page].bytes_used;
2149 page_table[next_page].allocated = FREE_PAGE_FLAG;
2150 page_table[next_page].bytes_used = 0;
2151 bytes_freed += old_bytes_used;
2152 next_page++;
2155 if ((bytes_freed > 0) && gencgc_verbose) {
2156 FSHOW((stderr,
2157 "/maybe_adjust_large_object() freed %d\n",
2158 bytes_freed));
2161 generations[from_space].bytes_allocated -= bytes_freed;
2162 bytes_allocated -= bytes_freed;
2164 return;
2168 * Why is this restricted to protected objects only?
2169 * Because the rest of the page has been scavenged already,
2170 * and since that leaves forwarding pointers in the unprotected
2171 * areas you cannot scavenge it again until those are gone.
2173 static void
2174 scavenge_pinned_range(void* page_base, int start, int count)
2176 // 'start' and 'count' are expressed in units of dwords
2177 scavenge((lispobj*)page_base + 2*start, 2*count);
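/* For example, since a dword here is a pair of lispobjs, a call with
 * start == 3 and count == 2 scavenges the 4 words starting at word
 * offset 6 from page_base (just a hedged illustration of the 2x
 * scaling above). */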
2180 static void
2181 scavenge_pinned_ranges()
2183 page_index_t page;
2184 for (page = 0; page < last_free_page; page++) {
2185 in_use_marker_t* bitmap = pinned_dwords(page);
2186 if (bitmap)
2187 bitmap_scan(bitmap,
2188 GENCGC_CARD_BYTES / (2*N_WORD_BYTES) / N_WORD_BITS,
2189 0, scavenge_pinned_range, page_address(page));
2193 static void wipe_range(void* page_base, int start, int count)
2195 bzero((lispobj*)page_base + 2*start, count*2*N_WORD_BYTES);
2198 static void
2199 wipe_nonpinned_words()
2201 page_index_t i;
2202 in_use_marker_t* bitmap;
2204 for (i = 0; i < last_free_page; i++) {
2205 if (page_table[i].dont_move && (bitmap = pinned_dwords(i)) != 0) {
2206 bitmap_scan(bitmap,
2207 GENCGC_CARD_BYTES / (2*N_WORD_BYTES) / N_WORD_BITS,
2208 BIT_SCAN_INVERT | BIT_SCAN_CLEAR,
2209 wipe_range, page_address(i));
2210 page_table[i].has_pin_map = 0;
2211 // move the page to newspace
2212 generations[new_space].bytes_allocated += page_table[i].bytes_used;
2213 generations[page_table[i].gen].bytes_allocated -= page_table[i].bytes_used;
2214 page_table[i].gen = new_space;
2217 #ifndef LISP_FEATURE_WIN32
2218 madvise(page_table_pinned_dwords, pins_map_size_in_bytes, MADV_DONTNEED);
2219 #endif
2222 static void __attribute__((unused))
2223 pin_words(page_index_t pageindex, lispobj *mark_which_pointer)
2225 struct page *page = &page_table[pageindex];
2226 gc_assert(mark_which_pointer);
2227 if (!page->has_pin_map) {
2228 page->has_pin_map = 1;
2229 #ifdef DEBUG
2231 int i;
2232 in_use_marker_t* map = pinned_dwords(pageindex);
2233 for (i=0; i<n_dwords_in_card/N_WORD_BITS; ++i)
2234 gc_assert(map[i] == 0);
2236 #endif
2238 lispobj *page_base = page_address(pageindex);
2239 unsigned int begin_dword_index = (mark_which_pointer - page_base) / 2;
2240 in_use_marker_t *bitmap = pinned_dwords(pageindex);
2241 if (bitmap[begin_dword_index/N_WORD_BITS]
2242 & ((uword_t)1 << (begin_dword_index % N_WORD_BITS)))
2243 return; // already seen this object
2245 lispobj header = *mark_which_pointer;
2246 int size = 2;
2247 // Don't bother calling a sizing function for cons cells.
2248 if (!is_cons_half(header))
2249 size = (sizetab[widetag_of(header)])(mark_which_pointer);
2250 gc_assert(size % 2 == 0);
2251 unsigned int end_dword_index = begin_dword_index + size / 2;
2252 unsigned int index;
2253 for (index = begin_dword_index; index < end_dword_index; index++)
2254 bitmap[index/N_WORD_BITS] |= (uword_t)1 << (index % N_WORD_BITS);
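/* Bitmap indexing sketch (a hedged example of the loop above): with
 * N_WORD_BITS == 64, an object starting at dword index 70 of its page
 * sets bits in bitmap[1] beginning at bit 6, one bit per dword the
 * object occupies. */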
2257 /* Take a possible pointer to a Lisp object and mark its page in the
2258 * page_table so that it will not be relocated during a GC.
2260 * This involves locating the page it points to, then backing up to
2261 * the start of its region, then marking all pages dont_move from there
2262 * up to the first page that's not full or has a different generation
2264 * It is assumed that all the page static flags have been cleared at
2265 * the start of a GC.
2267 * It is also assumed that the current gc_alloc() region has been
2268 * flushed and the tables updated. */
2270 // TODO: there's probably a way to be a little more efficient here.
2271 // As things are, we start by finding the object that encloses 'addr',
2272 // then we see if 'addr' was a "valid" Lisp pointer to that object
2273 // - meaning we expect the correct lowtag on the pointer - except
2274 // that for code objects we don't require a correct lowtag
2275 // and we allow a pointer to anywhere in the object.
2277 // It should be possible to avoid calling search_dynamic_space
2278 // more of the time. First, check if the page pointed to might hold code.
2279 // If it does, then we continue regardless of the pointer's lowtag
2280 // (because of the special allowance). If the page definitely does *not*
2281 // hold code, then we require up front that the lowtag makes sense,
2282 // by doing the same checks that are in properly_tagged_descriptor_p.
2284 // Problem: when code is allocated from a per-thread region,
2285 // does it ensure that the occupied pages are flagged as having code?
2287 static void
2288 preserve_pointer(void *addr)
2290 #ifdef LISP_FEATURE_IMMOBILE_SPACE
2291 /* Immobile space MUST be lower than dynamic space,
2292 or else this test needs to be revised */
2293 if (addr < (void*)IMMOBILE_SPACE_END) {
2294 extern void immobile_space_preserve_pointer(void*);
2295 immobile_space_preserve_pointer(addr);
2296 return;
2298 #endif
2299 page_index_t addr_page_index = find_page_index(addr);
2300 lispobj *object_start;
2302 if (addr_page_index == -1
2303 || (object_start = conservative_root_p(addr, addr_page_index)) == 0)
2304 return;
2306 /* (Now that we know that addr_page_index is in range, it's
2307 * safe to index into page_table[] with it.) */
2308 unsigned int region_allocation = page_table[addr_page_index].allocated;
2310 /* Find the beginning of the region. Note that there may be
2311 * objects in the region preceding the one that we were passed a
2312 * pointer to: if this is the case, we will write-protect all the
2313 * previous objects' pages too. */
2315 #if 0
2316 /* I think this'd work just as well, but without the assertions.
2317 * -dan 2004.01.01 */
2318 page_index_t first_page = find_page_index(page_scan_start(addr_page_index))
2319 #else
2320 page_index_t first_page = addr_page_index;
2321 while (!page_starts_contiguous_block_p(first_page)) {
2322 --first_page;
2323 /* Do some checks. */
2324 gc_assert(page_table[first_page].bytes_used == GENCGC_CARD_BYTES);
2325 gc_assert(page_table[first_page].gen == from_space);
2326 gc_assert(page_table[first_page].allocated == region_allocation);
2328 #endif
2330 /* Adjust any large objects before promotion as they won't be
2331 * copied after promotion. */
2332 if (page_table[first_page].large_object) {
2333 maybe_adjust_large_object(page_address(first_page));
2334 /* It may have moved to unboxed pages. */
2335 region_allocation = page_table[first_page].allocated;
2338 /* Now work forward until the end of this contiguous area is found,
2339 * marking all pages as dont_move. */
2340 page_index_t i;
2341 for (i = first_page; ;i++) {
2342 gc_assert(page_table[i].allocated == region_allocation);
2344 /* Mark the page static. */
2345 page_table[i].dont_move = 1;
2347 /* It is essential that the pages are not write protected as
2348 * they may have pointers into the old-space which need
2349 * scavenging. They shouldn't be write protected at this
2350 * stage. */
2351 gc_assert(!page_table[i].write_protected);
2353 /* Check whether this is the last page in this contiguous block. */
2354 if (page_ends_contiguous_block_p(i, from_space))
2355 break;
2358 #if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
2359 /* Do not do this for multi-page objects. Those pages do not need
2360 * object wipeout anyway.
2362 if (do_wipe_p && i == first_page) // single-page object
2363 pin_words(first_page, object_start);
2364 #endif
2366 /* Check that the page is now static. */
2367 gc_assert(page_table[addr_page_index].dont_move != 0);
2370 /* If the given page is not write-protected, then scan it for pointers
2371 * to younger generations or the top temp. generation; if no
2372 * suspicious pointers are found, the page is write-protected.
2374 * Care is taken to check for pointers to the current gc_alloc()
2375 * region if it is a younger generation or the temp. generation. This
2376 * frees the caller from doing a gc_alloc_update_page_tables(). Actually
2377 * the gc_alloc_generation does not need to be checked as this is only
2378 * called from scavenge_generation() when the gc_alloc generation is
2379 * younger, so it just checks if there is a pointer to the current
2380 * region.
2382 * We return 1 if the page was write-protected, else 0. */
2383 static int
2384 update_page_write_prot(page_index_t page)
2386 generation_index_t gen = page_table[page].gen;
2387 sword_t j;
2388 int wp_it = 1;
2389 void **page_addr = (void **)page_address(page);
2390 sword_t num_words = page_table[page].bytes_used / N_WORD_BYTES;
2392 /* Shouldn't be a free page. */
2393 gc_assert(page_allocated_p(page));
2394 gc_assert(page_table[page].bytes_used != 0);
2396 /* Skip if it's already write-protected, pinned, or unboxed */
2397 if (page_table[page].write_protected
2398 /* FIXME: What's the reason for not write-protecting pinned pages? */
2399 || page_table[page].dont_move
2400 || page_unboxed_p(page))
2401 return (0);
2403 /* Scan the page for pointers to younger generations or the
2404 * top temp. generation. */
2406 /* This is conservative: any word satisfying is_lisp_pointer() is
2407 * assumed to be a pointer. To do otherwise would require a family
2408 * of scavenge-like functions. */
2409 for (j = 0; j < num_words; j++) {
2410 void *ptr = *(page_addr+j);
2411 page_index_t index;
2412 lispobj __attribute__((unused)) header;
2414 if (!is_lisp_pointer((lispobj)ptr))
2415 continue;
2416 /* Check that it's in the dynamic space */
2417 if ((index = find_page_index(ptr)) != -1) {
2418 if (/* Does it point to a younger or the temp. generation? */
2419 (page_allocated_p(index)
2420 && (page_table[index].bytes_used != 0)
2421 && ((page_table[index].gen < gen)
2422 || (page_table[index].gen == SCRATCH_GENERATION)))
2424 /* Or does it point within a current gc_alloc() region? */
2425 || ((boxed_region.start_addr <= ptr)
2426 && (ptr <= boxed_region.free_pointer))
2427 || ((unboxed_region.start_addr <= ptr)
2428 && (ptr <= unboxed_region.free_pointer))) {
2429 wp_it = 0;
2430 break;
2433 #ifdef LISP_FEATURE_IMMOBILE_SPACE
2434 else if ((index = find_immobile_page_index(ptr)) >= 0 &&
2435 other_immediate_lowtag_p(header = *native_pointer((lispobj)ptr))) {
2436 // This is *possibly* a pointer to an object in immobile space,
2437 // given that above two conditions were satisfied.
2438 // But unlike in the dynamic space case, we need to read a byte
2439 // from the object to determine its generation, which requires care.
2440 // Consider an unboxed word that looks like a pointer to a word that
2441 // looks like fun-header-widetag. We can't naively back up to the
2442 // underlying code object since the alleged header might not be one.
2443 int obj_gen = gen; // Make comparison fail if we fall through
2444 if (lowtag_of((lispobj)ptr) != FUN_POINTER_LOWTAG) {
2445 obj_gen = __immobile_obj_generation(native_pointer((lispobj)ptr));
2446 } else if (widetag_of(header) == SIMPLE_FUN_HEADER_WIDETAG) {
2447 struct code* code =
2448 code_obj_from_simple_fun((struct simple_fun *)
2449 ((lispobj)ptr - FUN_POINTER_LOWTAG));
2450 // This is a heuristic, since we're not actually looking for
2451 // an object boundary. Precise scanning of 'page' would obviate
2452 // the guard conditions here.
2453 if ((lispobj)code >= IMMOBILE_VARYOBJ_SUBSPACE_START
2454 && widetag_of(code->header) == CODE_HEADER_WIDETAG)
2455 obj_gen = __immobile_obj_generation((lispobj*)code);
2457 // A bogus generation number implies a not-really-pointer,
2458 // but it won't cause misbehavior.
2459 if (obj_gen < gen || obj_gen == SCRATCH_GENERATION) {
2460 wp_it = 0;
2461 break;
2464 #endif
2467 if (wp_it == 1) {
2468 /* Write-protect the page. */
2469 /*FSHOW((stderr, "/write-protecting page %d gen %d\n", page, gen));*/
2471 os_protect((void *)page_addr,
2472 GENCGC_CARD_BYTES,
2473 OS_VM_PROT_READ|OS_VM_PROT_EXECUTE);
2475 /* Note the page as protected in the page tables. */
2476 page_table[page].write_protected = 1;
2479 return (wp_it);
2482 /* Scavenge all generations from FROM to TO, inclusive, except for
2483 * new_space which needs special handling, as new objects may be
2484 * added which are not checked here - use scavenge_newspace generation.
2486 * Write-protected pages should not have any pointers to the
2487 * from_space so do not need scavenging; thus write-protected pages are
2488 * not always scavenged. There is some code to check that these pages
2489 * are not written; but to check fully the write-protected pages need
2490 * to be scavenged by disabling the code to skip them.
2492 * Under the current scheme when a generation is GCed the younger
2493 * generations will be empty. So, when a generation is being GCed it
2494 * is only necessary to scavenge the older generations for pointers
2495 * not the younger. So a page that does not have pointers to younger
2496 * generations does not need to be scavenged.
2498 * The write-protection can be used to note pages that don't have
2499 * pointers to younger pages. But pages can be written without having
2500 * pointers to younger generations. After the pages are scavenged here
2501 * they can be scanned for pointers to younger generations and if
2502 * there are none the page can be write-protected.
2504 * One complication is when the newspace is the top temp. generation.
2506 * Enabling SC_GEN_CK scavenges the write-protected pages and checks
2507 * that none were written, which they shouldn't be as they should have
2508 * no pointers to younger generations. This breaks down for weak
2509 * pointers as the objects contain a link to the next and are written
2510 * if a weak pointer is scavenged. Still it's a useful check. */
2511 static void
2512 scavenge_generations(generation_index_t from, generation_index_t to)
2514 page_index_t i;
2515 page_index_t num_wp = 0;
2517 #define SC_GEN_CK 0
2518 #if SC_GEN_CK
2519 /* Clear the write_protected_cleared flags on all pages. */
2520 for (i = 0; i < page_table_pages; i++)
2521 page_table[i].write_protected_cleared = 0;
2522 #endif
2524 for (i = 0; i < last_free_page; i++) {
2525 generation_index_t generation = page_table[i].gen;
2526 if (page_boxed_p(i)
2527 && (page_table[i].bytes_used != 0)
2528 && (generation != new_space)
2529 && (generation >= from)
2530 && (generation <= to)) {
2531 page_index_t last_page,j;
2532 int write_protected=1;
2534 /* This should be the start of a region */
2535 gc_assert(page_starts_contiguous_block_p(i));
2537 /* Now work forward until the end of the region */
2538 for (last_page = i; ; last_page++) {
2539 write_protected =
2540 write_protected && page_table[last_page].write_protected;
2541 if (page_ends_contiguous_block_p(last_page, generation))
2542 break;
2544 if (!write_protected) {
2545 scavenge(page_address(i),
2546 ((uword_t)(page_table[last_page].bytes_used
2547 + npage_bytes(last_page-i)))
2548 /N_WORD_BYTES);
2550 /* Now scan the pages and write protect those that
2551 * don't have pointers to younger generations. */
2552 if (enable_page_protection) {
2553 for (j = i; j <= last_page; j++) {
2554 num_wp += update_page_write_prot(j);
2557 if ((gencgc_verbose > 1) && (num_wp != 0)) {
2558 FSHOW((stderr,
2559 "/write protected %d pages within generation %d\n",
2560 num_wp, generation));
2563 i = last_page;
2567 #if SC_GEN_CK
2568 /* Check that none of the write_protected pages in this generation
2569 * have been written to. */
2570 for (i = 0; i < page_table_pages; i++) {
2571 if (page_allocated_p(i)
2572 && (page_table[i].bytes_used != 0)
2573 && (page_table[i].gen == generation)
2574 && (page_table[i].write_protected_cleared != 0)) {
2575 FSHOW((stderr, "/scavenge_generation() %d\n", generation));
2576 FSHOW((stderr,
2577 "/page bytes_used=%d scan_start_offset=%lu dont_move=%d\n",
2578 page_table[i].bytes_used,
2579 page_table[i].scan_start_offset,
2580 page_table[i].dont_move));
2581 lose("write to protected page %d in scavenge_generation()\n", i);
2584 #endif
2588 /* Scavenge a newspace generation. As it is scavenged new objects may
2589 * be allocated to it; these will also need to be scavenged. This
2590 * repeats until there are no more objects unscavenged in the
2591 * newspace generation.
2593 * To help improve the efficiency, areas written are recorded by
2594 * gc_alloc() and only these scavenged. Sometimes a little more will be
2595 * scavenged, but this causes no harm. An easy check is done that the
2596 * scavenged bytes equals the number allocated in the previous
2597 * scavenge.
2599 * Write-protected pages are not scanned except if they are marked
2600 * dont_move in which case they may have been promoted and still have
2601 * pointers to the from space.
2603 * Write-protected pages could potentially be written by alloc; however,
2604 * to avoid having to handle the re-scavenging of write-protected pages,
2605 * gc_alloc() does not write to write-protected pages.
2607 * New areas of objects allocated are recorded alternatively in the two
2608 * new_areas arrays below. */
2609 static struct new_area new_areas_1[NUM_NEW_AREAS];
2610 static struct new_area new_areas_2[NUM_NEW_AREAS];
2612 #ifdef LISP_FEATURE_IMMOBILE_SPACE
2613 extern unsigned int immobile_scav_queue_count;
2614 extern void
2615 gc_init_immobile(),
2616 update_immobile_nursery_bits(),
2617 scavenge_immobile_roots(generation_index_t,generation_index_t),
2618 scavenge_immobile_newspace(),
2619 sweep_immobile_space(int raise),
2620 write_protect_immobile_space();
2621 #else
2622 #define immobile_scav_queue_count 0
2623 #endif
2625 /* Do one full scan of the new space generation. This is not enough to
2626 * complete the job as new objects may be added to the generation in
2627 * the process which are not scavenged. */
2628 static void
2629 scavenge_newspace_generation_one_scan(generation_index_t generation)
2631 page_index_t i;
2633 FSHOW((stderr,
2634 "/starting one full scan of newspace generation %d\n",
2635 generation));
2636 for (i = 0; i < last_free_page; i++) {
2637 /* Note that this skips over open regions when it encounters them. */
2638 if (page_boxed_p(i)
2639 && (page_table[i].bytes_used != 0)
2640 && (page_table[i].gen == generation)
2641 && ((page_table[i].write_protected == 0)
2642 /* (This may be redundant as write_protected is now
2643 * cleared before promotion.) */
2644 || (page_table[i].dont_move == 1))) {
2645 page_index_t last_page;
2646 int all_wp=1;
2648 /* The scavenge will start at the scan_start_offset of
2649 * page i.
2651 * We need to find the full extent of this contiguous
2652 * block in case objects span pages.
2654 * Now work forward until the end of this contiguous area
2655 * is found. A small area is preferred as there is a
2656 * better chance of its pages being write-protected. */
2657 for (last_page = i; ;last_page++) {
2658 /* If all pages are write-protected and movable,
2659 * then no need to scavenge */
2660 all_wp=all_wp && page_table[last_page].write_protected &&
2661 !page_table[last_page].dont_move;
2663 /* Check whether this is the last page in this
2664 * contiguous block */
2665 if (page_ends_contiguous_block_p(last_page, generation))
2666 break;
2669 /* Do a limited check for write-protected pages. */
2670 if (!all_wp) {
2671 sword_t nwords = (((uword_t)
2672 (page_table[last_page].bytes_used
2673 + npage_bytes(last_page-i)
2674 + page_table[i].scan_start_offset))
2675 / N_WORD_BYTES);
2676 new_areas_ignore_page = last_page;
2678 scavenge(page_scan_start(i), nwords);
2681 i = last_page;
2684 FSHOW((stderr,
2685 "/done with one full scan of newspace generation %d\n",
2686 generation));
2689 /* Do a complete scavenge of the newspace generation. */
2690 static void
2691 scavenge_newspace_generation(generation_index_t generation)
2693 size_t i;
2695 /* the new_areas array currently being written to by gc_alloc() */
2696 struct new_area (*current_new_areas)[] = &new_areas_1;
2697 size_t current_new_areas_index;
2699 /* the new_areas created by the previous scavenge cycle */
2700 struct new_area (*previous_new_areas)[] = NULL;
2701 size_t previous_new_areas_index;
2703 /* Flush the current regions updating the tables. */
2704 gc_alloc_update_all_page_tables(0);
2706 /* Turn on the recording of new areas by gc_alloc(). */
2707 new_areas = current_new_areas;
2708 new_areas_index = 0;
2710 /* Don't need to record new areas that get scavenged anyway during
2711 * scavenge_newspace_generation_one_scan. */
2712 record_new_objects = 1;
2714 /* Start with a full scavenge. */
2715 scavenge_newspace_generation_one_scan(generation);
2717 /* Record all new areas now. */
2718 record_new_objects = 2;
2720 /* Give a chance to weak hash tables to make other objects live.
2721 * FIXME: The algorithm implemented here for weak hash table gcing
2722 * is O(W^2+N) as Bruno Haible warns in
2723 * http://www.haible.de/bruno/papers/cs/weak/WeakDatastructures-writeup.html
2724 * see "Implementation 2". */
2725 scav_weak_hash_tables();
2727 /* Flush the current regions updating the tables. */
2728 gc_alloc_update_all_page_tables(0);
2730 /* Grab new_areas_index. */
2731 current_new_areas_index = new_areas_index;
2733 /*FSHOW((stderr,
2734 "The first scan is finished; current_new_areas_index=%d.\n",
2735 current_new_areas_index));*/
2737 while (current_new_areas_index > 0 || immobile_scav_queue_count) {
2738 /* Move the current to the previous new areas */
2739 previous_new_areas = current_new_areas;
2740 previous_new_areas_index = current_new_areas_index;
2742 /* Scavenge all the areas in previous new areas. Any new areas
2743 * allocated are saved in current_new_areas. */
2745 /* Allocate an array for current_new_areas; alternating between
2746 * new_areas_1 and 2 */
2747 if (previous_new_areas == &new_areas_1)
2748 current_new_areas = &new_areas_2;
2749 else
2750 current_new_areas = &new_areas_1;
2752 /* Set up for gc_alloc(). */
2753 new_areas = current_new_areas;
2754 new_areas_index = 0;
2756 #ifdef LISP_FEATURE_IMMOBILE_SPACE
2757 scavenge_immobile_newspace();
2758 #endif
2759 /* Check whether previous_new_areas had overflowed. */
2760 if (previous_new_areas_index >= NUM_NEW_AREAS) {
2762 /* New areas of objects allocated have been lost so need to do a
2763 * full scan to be sure! If this becomes a problem try
2764 * increasing NUM_NEW_AREAS. */
2765 if (gencgc_verbose) {
2766 SHOW("new_areas overflow, doing full scavenge");
2769 /* Don't need to record new areas that get scavenged
2770 * anyway during scavenge_newspace_generation_one_scan. */
2771 record_new_objects = 1;
2773 scavenge_newspace_generation_one_scan(generation);
2775 /* Record all new areas now. */
2776 record_new_objects = 2;
2778 scav_weak_hash_tables();
2780 /* Flush the current regions updating the tables. */
2781 gc_alloc_update_all_page_tables(0);
2783 } else {
2785 /* Work through previous_new_areas. */
2786 for (i = 0; i < previous_new_areas_index; i++) {
2787 page_index_t page = (*previous_new_areas)[i].page;
2788 size_t offset = (*previous_new_areas)[i].offset;
2789 size_t size = (*previous_new_areas)[i].size / N_WORD_BYTES;
2790 gc_assert((*previous_new_areas)[i].size % N_WORD_BYTES == 0);
2791 scavenge(page_address(page)+offset, size);
2794 scav_weak_hash_tables();
2796 /* Flush the current regions updating the tables. */
2797 gc_alloc_update_all_page_tables(0);
2800 current_new_areas_index = new_areas_index;
2802 /*FSHOW((stderr,
2803 "The re-scan has finished; current_new_areas_index=%d.\n",
2804 current_new_areas_index));*/
2807 /* Turn off recording of areas allocated by gc_alloc(). */
2808 record_new_objects = 0;
2810 #if SC_NS_GEN_CK
2812 page_index_t i;
2813 /* Check that none of the write_protected pages in this generation
2814 * have been written to. */
2815 for (i = 0; i < page_table_pages; i++) {
2816 if (page_allocated_p(i)
2817 && (page_table[i].bytes_used != 0)
2818 && (page_table[i].gen == generation)
2819 && (page_table[i].write_protected_cleared != 0)
2820 && (page_table[i].dont_move == 0)) {
2821 lose("write protected page %d written to in scavenge_newspace_generation\ngeneration=%d dont_move=%d\n",
2822 i, generation, page_table[i].dont_move);
2826 #endif
2829 /* Un-write-protect all the pages in from_space. This is done at the
2830 * start of a GC, else there may be many page faults while scavenging
2831 * the newspace (I've seen it drive the system time to 99%). These pages
2832 * would need to be unprotected anyway before unmapping in
2833 * free_oldspace; not sure what effect this has on paging. */
2834 static void
2835 unprotect_oldspace(void)
2837 page_index_t i;
2838 void *region_addr = 0;
2839 void *page_addr = 0;
2840 uword_t region_bytes = 0;
2842 for (i = 0; i < last_free_page; i++) {
2843 if (page_allocated_p(i)
2844 && (page_table[i].bytes_used != 0)
2845 && (page_table[i].gen == from_space)) {
2847 /* Remove any write-protection. We should be able to rely
2848 * on the write-protect flag to avoid redundant calls. */
2849 if (page_table[i].write_protected) {
2850 page_table[i].write_protected = 0;
2851 page_addr = page_address(i);
2852 if (!region_addr) {
2853 /* First region. */
2854 region_addr = page_addr;
2855 region_bytes = GENCGC_CARD_BYTES;
2856 } else if (region_addr + region_bytes == page_addr) {
2857 /* Region continues. */
2858 region_bytes += GENCGC_CARD_BYTES;
2859 } else {
2860 /* Unprotect previous region. */
2861 os_protect(region_addr, region_bytes, OS_VM_PROT_ALL);
2862 /* First page in new region. */
2863 region_addr = page_addr;
2864 region_bytes = GENCGC_CARD_BYTES;
2869 if (region_addr) {
2870 /* Unprotect last region. */
2871 os_protect(region_addr, region_bytes, OS_VM_PROT_ALL);
2875 /* Work through all the pages and free any in from_space. This
2876 * assumes that all objects have been copied or promoted to an older
2877 * generation. Bytes_allocated and the generation bytes_allocated
2878 * counter are updated. The number of bytes freed is returned. */
2879 static uword_t
2880 free_oldspace(void)
2882 uword_t bytes_freed = 0;
2883 page_index_t first_page, last_page;
2885 first_page = 0;
2887 do {
2888 /* Find a first page for the next region of pages. */
2889 while ((first_page < last_free_page)
2890 && (page_free_p(first_page)
2891 || (page_table[first_page].bytes_used == 0)
2892 || (page_table[first_page].gen != from_space)))
2893 first_page++;
2895 if (first_page >= last_free_page)
2896 break;
2898 /* Find the last page of this region. */
2899 last_page = first_page;
2901 do {
2902 /* Free the page. */
2903 bytes_freed += page_table[last_page].bytes_used;
2904 generations[page_table[last_page].gen].bytes_allocated -=
2905 page_table[last_page].bytes_used;
2906 page_table[last_page].allocated = FREE_PAGE_FLAG;
2907 page_table[last_page].bytes_used = 0;
2908 /* Should already be unprotected by unprotect_oldspace(). */
2909 gc_assert(!page_table[last_page].write_protected);
2910 last_page++;
2912 while ((last_page < last_free_page)
2913 && page_allocated_p(last_page)
2914 && (page_table[last_page].bytes_used != 0)
2915 && (page_table[last_page].gen == from_space));
2917 #ifdef READ_PROTECT_FREE_PAGES
2918 os_protect(page_address(first_page),
2919 npage_bytes(last_page-first_page),
2920 OS_VM_PROT_NONE);
2921 #endif
2922 first_page = last_page;
2923 } while (first_page < last_free_page);
2925 bytes_allocated -= bytes_freed;
2926 return bytes_freed;
2929 #if 0
2930 /* Print some information about a pointer at the given address. */
2931 static void
2932 print_ptr(lispobj *addr)
2934 /* If addr is in the dynamic space then print out the page information. */
2935 page_index_t pi1 = find_page_index((void*)addr);
2937 if (pi1 != -1)
2938 fprintf(stderr," %p: page %d alloc %d gen %d bytes_used %d offset %lu dont_move %d\n",
2939 addr,
2940 pi1,
2941 page_table[pi1].allocated,
2942 page_table[pi1].gen,
2943 page_table[pi1].bytes_used,
2944 page_table[pi1].scan_start_offset,
2945 page_table[pi1].dont_move);
2946 fprintf(stderr," %x %x %x %x (%x) %x %x %x %x\n",
2947 *(addr-4),
2948 *(addr-3),
2949 *(addr-2),
2950 *(addr-1),
2951 *(addr-0),
2952 *(addr+1),
2953 *(addr+2),
2954 *(addr+3),
2955 *(addr+4));
2957 #endif
2959 static int
2960 is_in_stack_space(lispobj ptr)
2962 /* For space verification: Pointers can be valid if they point
2963 * to a thread stack space. This would be faster if the thread
2964 * structures had page-table entries as if they were part of
2965 * the heap space. */
2966 struct thread *th;
2967 for_each_thread(th) {
2968 if ((th->control_stack_start <= (lispobj *)ptr) &&
2969 (th->control_stack_end >= (lispobj *)ptr)) {
2970 return 1;
2973 return 0;
2976 // NOTE: This function can produce false failure indications,
2977 // usually related to dynamic space pointing to the stack of a
2978 // dead thread, but there may be other reasons as well.
2979 static void
2980 verify_space(lispobj *start, size_t words)
2982 extern int valid_lisp_pointer_p(lispobj);
2983 int is_in_dynamic_space = (find_page_index((void*)start) != -1);
2984 int is_in_readonly_space =
2985 (READ_ONLY_SPACE_START <= (uword_t)start &&
2986 (uword_t)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
2987 #ifdef LISP_FEATURE_IMMOBILE_SPACE
2988 int is_in_immobile_space =
2989 (IMMOBILE_SPACE_START <= (uword_t)start &&
2990 (uword_t)start < SymbolValue(IMMOBILE_SPACE_FREE_POINTER,0));
2991 #endif
2993 while (words > 0) {
2994 size_t count = 1;
2995 lispobj thing = *start;
2996 lispobj __attribute__((unused)) pointee;
2998 if (is_lisp_pointer(thing)) {
2999 page_index_t page_index = find_page_index((void*)thing);
3000 sword_t to_readonly_space =
3001 (READ_ONLY_SPACE_START <= thing &&
3002 thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
3003 sword_t to_static_space =
3004 (STATIC_SPACE_START <= thing &&
3005 thing < SymbolValue(STATIC_SPACE_FREE_POINTER,0));
3006 #ifdef LISP_FEATURE_IMMOBILE_SPACE
3007 sword_t to_immobile_space =
3008 (IMMOBILE_SPACE_START <= thing &&
3009 thing < SymbolValue(IMMOBILE_FIXEDOBJ_FREE_POINTER,0)) ||
3010 (IMMOBILE_VARYOBJ_SUBSPACE_START <= thing &&
3011 thing < SymbolValue(IMMOBILE_SPACE_FREE_POINTER,0));
3012 #endif
3014 /* Does it point to the dynamic space? */
3015 if (page_index != -1) {
3016 /* If it's within the dynamic space it should point to a used page. */
3017 if (!page_allocated_p(page_index))
3018 lose ("Ptr %p @ %p sees free page.\n", thing, start);
3019 if ((char*)thing - (char*)page_address(page_index)
3020 >= page_table[page_index].bytes_used)
3021 lose ("Ptr %p @ %p sees unallocated space.\n", thing, start);
3022 /* Check that it doesn't point to a forwarding pointer! */
3023 if (*native_pointer(thing) == 0x01) {
3024 lose("Ptr %p @ %p sees forwarding ptr.\n", thing, start);
3026 /* Check that it's not in the RO space, as it would then be a
3027 * pointer from the RO to the dynamic space. */
3028 if (is_in_readonly_space) {
3029 lose("ptr to dynamic space %p from RO space %x\n",
3030 thing, start);
3032 #ifdef LISP_FEATURE_IMMOBILE_SPACE
3033 // verify all immobile space -> dynamic space pointers
3034 if (is_in_immobile_space && !valid_lisp_pointer_p(thing)) {
3035 lose("Ptr %p @ %p sees junk.\n", thing, start);
3037 #endif
3038 /* Does it point to a plausible object? This check slows
3039 * it down a lot (so it's commented out).
3041 * "a lot" is serious: it ate 50 minutes cpu time on
3042 * my duron 950 before I came back from lunch and
3043 * killed it.
3045 * FIXME: Add a variable to enable this
3046 * dynamically. */
3048 if (!valid_lisp_pointer_p(thing)) {
3049 lose("ptr %p to invalid object %p\n", thing, start);
3052 #ifdef LISP_FEATURE_IMMOBILE_SPACE
3053 } else if (to_immobile_space) {
3054 // the object pointed to must not have been discarded as garbage
3055 if (!other_immediate_lowtag_p(*native_pointer(thing))
3056 || immobile_filler_p(native_pointer(thing)))
3057 lose("Ptr %p @ %p sees trashed object.\n", (void*)thing, start);
3058 // verify all pointers to immobile space
3059 if (!valid_lisp_pointer_p(thing))
3060 lose("Ptr %p @ %p sees junk.\n", thing, start);
3061 #endif
3062 } else {
3063 extern char __attribute__((unused)) funcallable_instance_tramp;
3064 /* Verify that it points to another valid space. */
3065 if (!to_readonly_space && !to_static_space
3066 && !is_in_stack_space(thing)) {
3067 lose("Ptr %p @ %p sees junk.\n", thing, start);
3070 } else {
3071 if (!(fixnump(thing))) {
3072 /* skip fixnums */
3073 switch(widetag_of(*start)) {
3075 /* boxed objects */
3076 case SIMPLE_VECTOR_WIDETAG:
3077 case RATIO_WIDETAG:
3078 case COMPLEX_WIDETAG:
3079 case SIMPLE_ARRAY_WIDETAG:
3080 case COMPLEX_BASE_STRING_WIDETAG:
3081 #ifdef COMPLEX_CHARACTER_STRING_WIDETAG
3082 case COMPLEX_CHARACTER_STRING_WIDETAG:
3083 #endif
3084 case COMPLEX_VECTOR_NIL_WIDETAG:
3085 case COMPLEX_BIT_VECTOR_WIDETAG:
3086 case COMPLEX_VECTOR_WIDETAG:
3087 case COMPLEX_ARRAY_WIDETAG:
3088 case CLOSURE_HEADER_WIDETAG:
3089 case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
3090 case VALUE_CELL_HEADER_WIDETAG:
3091 case SYMBOL_HEADER_WIDETAG:
3092 case CHARACTER_WIDETAG:
3093 #if N_WORD_BITS == 64
3094 case SINGLE_FLOAT_WIDETAG:
3095 #endif
3096 case UNBOUND_MARKER_WIDETAG:
3097 break;
3098 case FDEFN_WIDETAG:
3099 #ifdef LISP_FEATURE_IMMOBILE_CODE
3100 verify_space(start + 1, 2);
3101 pointee = fdefn_raw_referent((struct fdefn*)start);
3102 verify_space(&pointee, 1);
3103 count = 4;
3104 #endif
3105 break;
3107 case INSTANCE_HEADER_WIDETAG:
3108 if (instance_layout(start)) {
3109 lispobj bitmap =
3110 ((struct layout*)
3111 native_pointer(instance_layout(start)))->bitmap;
3112 sword_t nslots = instance_length(thing) | 1;
3113 instance_scan(verify_space, start+1, nslots, bitmap);
3114 count = 1 + nslots;
3116 break;
3117 case CODE_HEADER_WIDETAG:
3119 /* Check that it's not in the dynamic space.
3120 * FIXME: Isn't it supposed to be OK for code
3121 * objects to be in the dynamic space these days? */
3122 /* It is for byte compiled code, but there's
3123 * no byte compilation in SBCL anymore. */
3124 if (is_in_dynamic_space
3125 /* Only when enabled */
3126 && verify_dynamic_code_check) {
3127 FSHOW((stderr,
3128 "/code object at %p in the dynamic space\n",
3129 start));
3132 struct code *code = (struct code *) start;
3133 sword_t nheader_words = code_header_words(code->header);
3134 /* Scavenge the boxed section of the code data block */
3135 verify_space(start + 1, nheader_words - 1);
3137 /* Scavenge the boxed section of each function
3138 * object in the code data block. */
3139 for_each_simple_fun(i, fheaderp, code, 1, {
3140 verify_space(SIMPLE_FUN_SCAV_START(fheaderp),
3141 SIMPLE_FUN_SCAV_NWORDS(fheaderp)); });
3142 count = nheader_words + code_instruction_words(code->code_size);
3143 break;
3146 /* unboxed objects */
3147 case BIGNUM_WIDETAG:
3148 #if N_WORD_BITS != 64
3149 case SINGLE_FLOAT_WIDETAG:
3150 #endif
3151 case DOUBLE_FLOAT_WIDETAG:
3152 #ifdef COMPLEX_LONG_FLOAT_WIDETAG
3153 case LONG_FLOAT_WIDETAG:
3154 #endif
3155 #ifdef COMPLEX_SINGLE_FLOAT_WIDETAG
3156 case COMPLEX_SINGLE_FLOAT_WIDETAG:
3157 #endif
3158 #ifdef COMPLEX_DOUBLE_FLOAT_WIDETAG
3159 case COMPLEX_DOUBLE_FLOAT_WIDETAG:
3160 #endif
3161 #ifdef COMPLEX_LONG_FLOAT_WIDETAG
3162 case COMPLEX_LONG_FLOAT_WIDETAG:
3163 #endif
3164 #ifdef SIMD_PACK_WIDETAG
3165 case SIMD_PACK_WIDETAG:
3166 #endif
3167 #include "genesis/specialized-vectors.inc"
3168 case SAP_WIDETAG:
3169 case WEAK_POINTER_WIDETAG:
3170 #ifdef NO_TLS_VALUE_MARKER_WIDETAG
3171 case NO_TLS_VALUE_MARKER_WIDETAG:
3172 #endif
3173 count = (sizetab[widetag_of(*start)])(start);
3174 break;
3176 default:
3177 lose("Unhandled widetag %p at %p\n",
3178 widetag_of(*start), start);
3182 start += count;
3183 words -= count;
3187 static void verify_dynamic_space();
3189 static void
3190 verify_gc(void)
3192 /* FIXME: It would be nice to make names consistent so that
3193 * foo_size meant size *in* *bytes* instead of size in some
3194 * arbitrary units. (Yes, this caused a bug, how did you guess?:-)
3195 * Some counts of lispobjs are called foo_count; it might be good
3196 * to grep for all foo_size and rename the appropriate ones to
3197 * foo_count. */
3198 #ifdef LISP_FEATURE_IMMOBILE_SPACE
3199 # ifdef __linux__
3200 // Try this verification if marknsweep was compiled with extra debugging.
3201 // But weak symbols don't work on macOS.
3202 extern void __attribute__((weak)) check_varyobj_pages();
3203 if (&check_varyobj_pages) check_varyobj_pages();
3204 # endif
3205 verify_space((lispobj*)IMMOBILE_SPACE_START,
3206 (lispobj*)SymbolValue(IMMOBILE_FIXEDOBJ_FREE_POINTER,0)
3207 - (lispobj*)IMMOBILE_SPACE_START);
3208 verify_space((lispobj*)IMMOBILE_VARYOBJ_SUBSPACE_START,
3209 (lispobj*)SymbolValue(IMMOBILE_SPACE_FREE_POINTER,0)
3210 - (lispobj*)IMMOBILE_VARYOBJ_SUBSPACE_START);
3211 #endif
3212 sword_t read_only_space_size =
3213 (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0)
3214 - (lispobj*)READ_ONLY_SPACE_START;
3215 sword_t static_space_size =
3216 (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER,0)
3217 - (lispobj*)STATIC_SPACE_START;
3218 struct thread *th;
3219 for_each_thread(th) {
3220 sword_t binding_stack_size =
3221 (lispobj*)get_binding_stack_pointer(th)
3222 - (lispobj*)th->binding_stack_start;
3223 verify_space(th->binding_stack_start, binding_stack_size);
3225 verify_space((lispobj*)READ_ONLY_SPACE_START, read_only_space_size);
3226 verify_space((lispobj*)STATIC_SPACE_START , static_space_size);
3227 verify_dynamic_space();
3230 void
3231 walk_generation(void (*proc)(lispobj*,size_t),
3232 generation_index_t generation)
3234 page_index_t i;
3235 int genmask = generation >= 0 ? 1 << generation : ~0;
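/* e.g. generation 2 yields the mask 1<<2, while a negative argument
 * selects every generation (an illustrative note on the mask above). */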
3237 for (i = 0; i < last_free_page; i++) {
3238 if (page_allocated_p(i)
3239 && (page_table[i].bytes_used != 0)
3240 && ((1 << page_table[i].gen) & genmask)) {
3241 page_index_t last_page;
3243 /* This should be the start of a contiguous block */
3244 gc_assert(page_starts_contiguous_block_p(i));
3246 /* Need to find the full extent of this contiguous block in case
3247 objects span pages. */
3249 /* Now work forward until the end of this contiguous area is
3250 found. */
3251 for (last_page = i; ;last_page++)
3252 /* Check whether this is the last page in this contiguous
3253 * block. */
3254 if (page_ends_contiguous_block_p(last_page, page_table[i].gen))
3255 break;
3257 proc(page_address(i),
3258 ((uword_t)(page_table[last_page].bytes_used
3259 + npage_bytes(last_page-i)))
3260 / N_WORD_BYTES);
3261 i = last_page;
3265 static void verify_generation(generation_index_t generation)
3267 walk_generation(verify_space, generation);
3270 /* Check that all the free space is zero filled. */
3271 static void
3272 verify_zero_fill(void)
3274 page_index_t page;
3276 for (page = 0; page < last_free_page; page++) {
3277 if (page_free_p(page)) {
3278 /* The whole page should be zero filled. */
3279 sword_t *start_addr = (sword_t *)page_address(page);
3280 sword_t i;
3281 for (i = 0; i < (sword_t)GENCGC_CARD_BYTES/N_WORD_BYTES; i++) {
3282 if (start_addr[i] != 0) {
3283 lose("free page not zero at %x\n", start_addr + i);
3286 } else {
3287 sword_t free_bytes = GENCGC_CARD_BYTES - page_table[page].bytes_used;
3288 if (free_bytes > 0) {
3289 sword_t *start_addr = (sword_t *)((uword_t)page_address(page)
3290 + page_table[page].bytes_used);
3291 sword_t size = free_bytes / N_WORD_BYTES;
3292 sword_t i;
3293 for (i = 0; i < size; i++) {
3294 if (start_addr[i] != 0) {
3295 lose("free region not zero at %x\n", start_addr + i);
3303 /* External entry point for verify_zero_fill */
3304 void
3305 gencgc_verify_zero_fill(void)
3307 /* Flush the alloc regions updating the tables. */
3308 gc_alloc_update_all_page_tables(1);
3309 SHOW("verifying zero fill");
3310 verify_zero_fill();
3313 static void
3314 verify_dynamic_space(void)
3316 verify_generation(-1);
3317 if (gencgc_enable_verify_zero_fill)
3318 verify_zero_fill();
3321 /* Write-protect all the dynamic boxed pages in the given generation. */
3322 static void
3323 write_protect_generation_pages(generation_index_t generation)
3325 page_index_t start;
3327 gc_assert(generation < SCRATCH_GENERATION);
3329 for (start = 0; start < last_free_page; start++) {
3330 if (protect_page_p(start, generation)) {
3331 void *page_start;
3332 page_index_t last;
3334 /* Note the page as protected in the page tables. */
3335 page_table[start].write_protected = 1;
3337 for (last = start + 1; last < last_free_page; last++) {
3338 if (!protect_page_p(last, generation))
3339 break;
3340 page_table[last].write_protected = 1;
3343 page_start = (void *)page_address(start);
3345 os_protect(page_start,
3346 npage_bytes(last - start),
3347 OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
3349 start = last;
3353 if (gencgc_verbose > 1) {
3354 FSHOW((stderr,
3355 "/write protected %d of %d pages in generation %d\n",
3356 count_write_protect_generation_pages(generation),
3357 count_generation_pages(generation),
3358 generation));
3362 #if defined(LISP_FEATURE_SB_THREAD) && (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
3363 static void
3364 preserve_context_registers (os_context_t *c)
3366 void **ptr;
3367 /* On Darwin the signal context isn't a contiguous block of memory,
3368 * so just calling preserve_pointer() on its contents won't be sufficient.
3370 #if defined(LISP_FEATURE_DARWIN)||defined(LISP_FEATURE_WIN32)
3371 #if defined LISP_FEATURE_X86
3372 preserve_pointer((void*)*os_context_register_addr(c,reg_EAX));
3373 preserve_pointer((void*)*os_context_register_addr(c,reg_ECX));
3374 preserve_pointer((void*)*os_context_register_addr(c,reg_EDX));
3375 preserve_pointer((void*)*os_context_register_addr(c,reg_EBX));
3376 preserve_pointer((void*)*os_context_register_addr(c,reg_ESI));
3377 preserve_pointer((void*)*os_context_register_addr(c,reg_EDI));
3378 preserve_pointer((void*)*os_context_pc_addr(c));
3379 #elif defined LISP_FEATURE_X86_64
3380 preserve_pointer((void*)*os_context_register_addr(c,reg_RAX));
3381 preserve_pointer((void*)*os_context_register_addr(c,reg_RCX));
3382 preserve_pointer((void*)*os_context_register_addr(c,reg_RDX));
3383 preserve_pointer((void*)*os_context_register_addr(c,reg_RBX));
3384 preserve_pointer((void*)*os_context_register_addr(c,reg_RSI));
3385 preserve_pointer((void*)*os_context_register_addr(c,reg_RDI));
3386 preserve_pointer((void*)*os_context_register_addr(c,reg_R8));
3387 preserve_pointer((void*)*os_context_register_addr(c,reg_R9));
3388 preserve_pointer((void*)*os_context_register_addr(c,reg_R10));
3389 preserve_pointer((void*)*os_context_register_addr(c,reg_R11));
3390 preserve_pointer((void*)*os_context_register_addr(c,reg_R12));
3391 preserve_pointer((void*)*os_context_register_addr(c,reg_R13));
3392 preserve_pointer((void*)*os_context_register_addr(c,reg_R14));
3393 preserve_pointer((void*)*os_context_register_addr(c,reg_R15));
3394 preserve_pointer((void*)*os_context_pc_addr(c));
3395 #else
3396 #error "preserve_context_registers needs to be tweaked for non-x86 Darwin"
3397 #endif
3398 #endif
3399 #if !defined(LISP_FEATURE_WIN32)
3400 for(ptr = ((void **)(c+1))-1; ptr>=(void **)c; ptr--) {
3401 preserve_pointer(*ptr);
3403 #endif
3405 #endif
3407 static void
3408 move_pinned_pages_to_newspace()
3410 page_index_t i;
3412 /* scavenge() will evacuate all oldspace pages, but no newspace
3413 * pages. Pinned pages are precisely those pages which must not
3414 * be evacuated, so move them to newspace directly. */
3416 for (i = 0; i < last_free_page; i++) {
3417 if (page_table[i].dont_move &&
3418 /* dont_move is cleared lazily, so validate the space as well. */
3419 page_table[i].gen == from_space) {
3420 if (pinned_dwords(i) && do_wipe_p) {
3421 // do not move to newspace after all, this will be word-wiped
3422 continue;
3424 page_table[i].gen = new_space;
3425 /* And since we're moving the pages wholesale, also adjust
3426 * the generation allocation counters. */
3427 generations[new_space].bytes_allocated += page_table[i].bytes_used;
3428 generations[from_space].bytes_allocated -= page_table[i].bytes_used;
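/* Editor's illustrative sketch, not compiled: the dont_move flags consulted
 * above can also be summarized, e.g. to report how many bytes of from_space
 * ended up pinned by conservative roots in the current collection.  The
 * example_* name is hypothetical. */
#if 0
static os_vm_size_t
example_pinned_bytes_in_from_space(void)
{
    page_index_t i;
    os_vm_size_t bytes = 0;
    for (i = 0; i < last_free_page; i++)
        if (page_table[i].dont_move && page_table[i].gen == from_space)
            bytes += page_table[i].bytes_used;
    return bytes;
}
#endif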
3433 /* Garbage collect a generation. If raise is 0 then the remains of the
3434 * generation are not raised to the next generation. */
3435 static void
3436 garbage_collect_generation(generation_index_t generation, int raise)
3438 page_index_t i;
3439 uword_t static_space_size;
3440 struct thread *th;
3442 gc_assert(generation <= HIGHEST_NORMAL_GENERATION);
3444 /* The oldest generation can't be raised. */
3445 gc_assert((generation != HIGHEST_NORMAL_GENERATION) || (raise == 0));
3447 /* Check if weak hash tables were processed in the previous GC. */
3448 gc_assert(weak_hash_tables == NULL);
3450 /* Initialize the weak pointer list. */
3451 weak_pointers = NULL;
3453 /* When a generation is not being raised it is transported to a
3454 * temporary generation (NUM_GENERATIONS), and lowered when
3455 * done. Set up this new generation. There should be no pages
3456 * allocated to it yet. */
3457 if (!raise) {
3458 gc_assert(generations[SCRATCH_GENERATION].bytes_allocated == 0);
3461 /* Set the global src and dest. generations */
3462 from_space = generation;
3463 if (raise)
3464 new_space = generation+1;
3465 else
3466 new_space = SCRATCH_GENERATION;
3468 /* Change to a new space for allocation, resetting the alloc_start_page */
3469 gc_alloc_generation = new_space;
3470 generations[new_space].alloc_start_page = 0;
3471 generations[new_space].alloc_unboxed_start_page = 0;
3472 generations[new_space].alloc_large_start_page = 0;
3473 generations[new_space].alloc_large_unboxed_start_page = 0;
3475 /* Before any pointers are preserved, the dont_move flags on the
3476 * pages need to be cleared. */
3477 for (i = 0; i < last_free_page; i++)
3478 if(page_table[i].gen==from_space) {
3479 page_table[i].dont_move = 0;
3480 gc_assert(pinned_dwords(i) == NULL);
3483 /* Un-write-protect the old-space pages. This is essential for the
3484 * promoted pages as they may contain pointers into the old-space
3485 * which need to be scavenged. It also helps avoid unnecessary page
3486 * faults as forwarding pointers are written into them. They need to
3487 * be un-protected anyway before unmapping later. */
3488 unprotect_oldspace();
3490 /* Scavenge the stacks' conservative roots. */
3492 /* there are potentially two stacks for each thread: the main
3493 * stack, which may contain Lisp pointers, and the alternate stack.
3494 * We don't ever run Lisp code on the altstack, but it may
3495 * host a sigcontext with lisp objects in it */
3497 /* what we need to do: (1) find the stack pointer for the main
3498 * stack and scavenge it; (2) find the interrupt context on the
3499 * alternate stack that might contain lisp values, and scavenge
3500 * that */
3502 /* we assume that none of the preceding applies to the thread that
3503 * initiates GC. If you ever call GC from inside an altstack
3504 * handler, you will lose. */
3506 #if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
3507 /* And if we're saving a core, there's no point in being conservative. */
3508 if (conservative_stack) {
3509 for_each_thread(th) {
3510 void **ptr;
3511 void **esp=(void **)-1;
3512 if (th->state == STATE_DEAD)
3513 continue;
3514 # if defined(LISP_FEATURE_SB_SAFEPOINT)
3515 /* Conservative collect_garbage is always invoked with a
3516 * foreign C call or an interrupt handler on top of every
3517 * existing thread, so the stored SP in each thread
3518 * structure is valid, no matter which thread we are looking
3519 * at. For threads that were running Lisp code, the pitstop
3520 * and edge functions maintain this value within the
3521 * interrupt or exception handler. */
3522 esp = os_get_csp(th);
3523 assert_on_stack(th, esp);
3525 /* In addition to pointers on the stack, also preserve the
3526 * return PC, the only value from the context that we need
3527 * in addition to the SP. The return PC gets saved by the
3528 * foreign call wrapper, and removed from the control stack
3529 * into a register. */
3530 preserve_pointer(th->pc_around_foreign_call);
3532 /* And on platforms with interrupts: scavenge ctx registers. */
3534 /* Disabled on Windows, because it does not have an explicit
3535 * stack of `interrupt_contexts'. The reported CSP has been
3536 * chosen so that the current context on the stack is
3537 * covered by the stack scan. See also set_csp_from_context(). */
3538 # ifndef LISP_FEATURE_WIN32
3539 if (th != arch_os_get_current_thread()) {
3540 long k = fixnum_value(
3541 SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
3542 while (k > 0)
3543 preserve_context_registers(th->interrupt_contexts[--k]);
3545 # endif
3546 # elif defined(LISP_FEATURE_SB_THREAD)
3547 sword_t i,free;
3548 if(th==arch_os_get_current_thread()) {
3549 /* Somebody is going to burn in hell for this, but casting
3550 * it in two steps shuts gcc up about strict aliasing. */
3551 esp = (void **)((void *)&raise);
3552 } else {
3553 void **esp1;
3554 free=fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
3555 for(i=free-1;i>=0;i--) {
3556 os_context_t *c=th->interrupt_contexts[i];
3557 esp1 = (void **) *os_context_register_addr(c,reg_SP);
3558 if (esp1>=(void **)th->control_stack_start &&
3559 esp1<(void **)th->control_stack_end) {
3560 if(esp1<esp) esp=esp1;
3561 preserve_context_registers(c);
3565 # else
3566 esp = (void **)((void *)&raise);
3567 # endif
3568 if (!esp || esp == (void*) -1)
3569 lose("garbage_collect: no SP known for thread %x (OS %x)",
3570 th, th->os_thread);
3571 for (ptr = ((void **)th->control_stack_end)-1; ptr >= esp; ptr--) {
3572 preserve_pointer(*ptr);
3576 #else
3577 /* Non-x86oid systems don't have "conservative roots" as such, but
3578 * the same mechanism is used for objects pinned for use by alien
3579 * code. */
3580 for_each_thread(th) {
3581 lispobj pin_list = SymbolTlValue(PINNED_OBJECTS,th);
3582 while (pin_list != NIL) {
3583 struct cons *list_entry =
3584 (struct cons *)native_pointer(pin_list);
3585 preserve_pointer((void*)list_entry->car);
3586 pin_list = list_entry->cdr;
3589 #endif
3591 #if QSHOW
3592 if (gencgc_verbose > 1) {
3593 sword_t num_dont_move_pages = count_dont_move_pages();
3594 fprintf(stderr,
3595 "/non-movable pages due to conservative pointers = %ld (%lu bytes)\n",
3596 num_dont_move_pages,
3597 npage_bytes(num_dont_move_pages));
3599 #endif
3601 /* Now that all of the pinned (dont_move) pages are known, and
3602 * before we start to scavenge (and thus relocate) objects,
3603 * relocate the pinned pages to newspace, so that the scavenger
3604 * will not attempt to relocate their contents. */
3605 move_pinned_pages_to_newspace();
3607 /* Scavenge all the rest of the roots. */
3609 #if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
3611 * If not x86, we need to scavenge the interrupt context(s) and the
3612 * control stack.
3615 struct thread *th;
3616 for_each_thread(th) {
3617 scavenge_interrupt_contexts(th);
3618 scavenge_control_stack(th);
3621 # ifdef LISP_FEATURE_SB_SAFEPOINT
3622 /* In this case, scrub all stacks right here from the GCing thread
3623 * instead of doing what the comment below says. Suboptimal, but
3624 * easier. */
3625 for_each_thread(th)
3626 scrub_thread_control_stack(th);
3627 # else
3628 /* Scrub the unscavenged control stack space, so that we can't run
3629 * into any stale pointers in a later GC (this is done by the
3630 * stop-for-gc handler in the other threads). */
3631 scrub_control_stack();
3632 # endif
3634 #endif
3636 /* Scavenge the Lisp functions of the interrupt handlers, taking
3637 * care to avoid SIG_DFL and SIG_IGN. */
3638 for (i = 0; i < NSIG; i++) {
3639 union interrupt_handler handler = interrupt_handlers[i];
3640 if (!ARE_SAME_HANDLER(handler.c, SIG_IGN) &&
3641 !ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
3642 scavenge((lispobj *)(interrupt_handlers + i), 1);
3645 /* Scavenge the binding stacks. */
3647 struct thread *th;
3648 for_each_thread(th) {
3649 sword_t len= (lispobj *)get_binding_stack_pointer(th) -
3650 th->binding_stack_start;
3651 scavenge((lispobj *) th->binding_stack_start,len);
3652 #ifdef LISP_FEATURE_SB_THREAD
3653 /* do the tls as well */
3654 len=(SymbolValue(FREE_TLS_INDEX,0) >> WORD_SHIFT) -
3655 (sizeof (struct thread))/(sizeof (lispobj));
3656 scavenge((lispobj *) (th+1),len);
3657 #endif
3661 /* Scavenge static space. */
3662 static_space_size =
3663 (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0) -
3664 (lispobj *)STATIC_SPACE_START;
3665 if (gencgc_verbose > 1) {
3666 FSHOW((stderr,
3667 "/scavenge static space: %d bytes\n",
3668 static_space_size * sizeof(lispobj)));
3670 scavenge( (lispobj *) STATIC_SPACE_START, static_space_size);
3672 /* All generations but the generation being GCed need to be
3673 * scavenged. The new_space generation needs special handling as
3674 * objects may be moved in - it is handled separately below. */
3675 #ifdef LISP_FEATURE_IMMOBILE_SPACE
3676 scavenge_immobile_roots(generation+1, SCRATCH_GENERATION);
3677 #endif
3678 scavenge_generations(generation+1, PSEUDO_STATIC_GENERATION);
3680 scavenge_pinned_ranges();
3682 /* Finally scavenge the new_space generation. Keep going until no
3683 * more objects are moved into the new generation */
3684 scavenge_newspace_generation(new_space);
3686 /* FIXME: I tried reenabling this check when debugging unrelated
3687 * GC weirdness ca. sbcl-0.6.12.45, and it failed immediately.
3688 * Since the current GC code seems to work well, I'm guessing that
3689 * this debugging code is just stale, but I haven't tried to
3690 * figure it out. It should be figured out and then either made to
3691 * work or just deleted. */
3693 #define RESCAN_CHECK 0
3694 #if RESCAN_CHECK
3695 /* As a check re-scavenge the newspace once; no new objects should
3696 * be found. */
3698 os_vm_size_t old_bytes_allocated = bytes_allocated;
3699 os_vm_size_t bytes_allocated;
3701 /* Start with a full scavenge. */
3702 scavenge_newspace_generation_one_scan(new_space);
3704 /* Flush the current regions, updating the tables. */
3705 gc_alloc_update_all_page_tables(1);
3707 bytes_allocated = bytes_allocated - old_bytes_allocated;
3709 if (bytes_allocated != 0) {
3710 lose("Rescan of new_space allocated %d more bytes.\n",
3711 bytes_allocated);
3714 #endif
3716 scan_weak_hash_tables();
3717 scan_weak_pointers();
3718 wipe_nonpinned_words();
3719 #ifdef LISP_FEATURE_IMMOBILE_SPACE
3720 // Do this last, because until wipe_nonpinned_words() happens,
3721 // not all page table entries have the 'gen' value updated,
3722 // which we need to correctly find all old->young pointers.
3723 sweep_immobile_space(raise);
3724 #endif
3726 /* Flush the current regions, updating the tables. */
3727 gc_alloc_update_all_page_tables(0);
3729 /* Free the pages in oldspace, but not those marked dont_move. */
3730 free_oldspace();
3732 /* If the GC is not raising the age then lower the generation back
3733 * to its normal generation number */
3734 if (!raise) {
3735 for (i = 0; i < last_free_page; i++)
3736 if ((page_table[i].bytes_used != 0)
3737 && (page_table[i].gen == SCRATCH_GENERATION))
3738 page_table[i].gen = generation;
3739 gc_assert(generations[generation].bytes_allocated == 0);
3740 generations[generation].bytes_allocated =
3741 generations[SCRATCH_GENERATION].bytes_allocated;
3742 generations[SCRATCH_GENERATION].bytes_allocated = 0;
3745 /* Reset the alloc_start_page for generation. */
3746 generations[generation].alloc_start_page = 0;
3747 generations[generation].alloc_unboxed_start_page = 0;
3748 generations[generation].alloc_large_start_page = 0;
3749 generations[generation].alloc_large_unboxed_start_page = 0;
3751 if (generation >= verify_gens) {
3752 if (gencgc_verbose) {
3753 SHOW("verifying");
3755 verify_gc();
3758 /* Set the new gc trigger for the GCed generation. */
3759 generations[generation].gc_trigger =
3760 generations[generation].bytes_allocated
3761 + generations[generation].bytes_consed_between_gc;
3763 if (raise)
3764 generations[generation].num_gc = 0;
3765 else
3766 ++generations[generation].num_gc;
3770 /* Update last_free_page, then SymbolValue(ALLOCATION_POINTER). */
3771 sword_t
3772 update_dynamic_space_free_pointer(void)
3774 page_index_t last_page = -1, i;
3776 for (i = 0; i < last_free_page; i++)
3777 if (page_allocated_p(i) && (page_table[i].bytes_used != 0))
3778 last_page = i;
3780 last_free_page = last_page+1;
3782 set_alloc_pointer((lispobj)(page_address(last_free_page)));
3783 return 0; /* dummy value: return something ... */
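/* Editor's illustrative sketch, not compiled: bytes_allocated is maintained
 * both globally and per generation, so a cheap consistency check can compare
 * the two.  Assumes it runs only while no allocation is in flight
 * (e.g. right after gc_alloc_update_all_page_tables()). */
#if 0
static void
example_check_bytes_allocated(void)
{
    generation_index_t g;
    os_vm_size_t total = 0;
    for (g = 0; g < NUM_GENERATIONS; g++)
        total += generations[g].bytes_allocated;
    gc_assert(total == bytes_allocated);
}
#endif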
3786 static void
3787 remap_page_range (page_index_t from, page_index_t to)
3789 /* There's a mysterious Solaris/x86 problem with using mmap
3790 * tricks for memory zeroing. See sbcl-devel thread
3791 * "Re: patch: standalone executable redux".
3793 #if defined(LISP_FEATURE_SUNOS)
3794 zero_and_mark_pages(from, to);
3795 #else
3796 const page_index_t
3797 release_granularity = gencgc_release_granularity/GENCGC_CARD_BYTES,
3798 release_mask = release_granularity-1,
3799 end = to+1,
3800 aligned_from = (from+release_mask)&~release_mask,
3801 aligned_end = (end&~release_mask);
3803 if (aligned_from < aligned_end) {
3804 zero_pages_with_mmap(aligned_from, aligned_end-1);
3805 if (aligned_from != from)
3806 zero_and_mark_pages(from, aligned_from-1);
3807 if (aligned_end != end)
3808 zero_and_mark_pages(aligned_end, end-1);
3809 } else {
3810 zero_and_mark_pages(from, to);
3812 #endif
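/* Editor's illustrative sketch, not compiled: the alignment arithmetic above
 * trims [from,to] to whole release-granularity units so that only complete
 * units are handed to zero_pages_with_mmap(), with the ragged edges zeroed
 * in place.  For example, with a granularity of 4 pages, from = 5 and
 * to = 14 give aligned_from = 8 and aligned_end = 12: pages 8..11 are
 * remapped while 5..7 and 12..14 are zeroed directly. */
#if 0
static void
example_release_alignment(page_index_t from, page_index_t to,
                          page_index_t *aligned_from, page_index_t *aligned_end)
{
    /* gencgc_release_granularity is assumed to be a power-of-two multiple
     * of GENCGC_CARD_BYTES, as in remap_page_range() above. */
    const page_index_t granularity =
        gencgc_release_granularity/GENCGC_CARD_BYTES;
    const page_index_t mask = granularity - 1;
    *aligned_from = (from + mask) & ~mask;   /* round up to a unit boundary   */
    *aligned_end  = (to + 1) & ~mask;        /* round down (end is exclusive) */
}
#endif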
3815 static void
3816 remap_free_pages (page_index_t from, page_index_t to, int forcibly)
3818 page_index_t first_page, last_page;
3820 if (forcibly)
3821 return remap_page_range(from, to);
3823 for (first_page = from; first_page <= to; first_page++) {
3824 if (page_allocated_p(first_page) ||
3825 (page_table[first_page].need_to_zero == 0))
3826 continue;
3828 last_page = first_page + 1;
3829 while (page_free_p(last_page) &&
3830 (last_page <= to) &&
3831 (page_table[last_page].need_to_zero == 1))
3832 last_page++;
3834 remap_page_range(first_page, last_page-1);
3836 first_page = last_page;
3840 generation_index_t small_generation_limit = 1;
3842 /* GC all generations newer than last_gen, raising the objects in each
3843 * to the next older generation - we finish when all generations below
3844 * last_gen are empty. Then if last_gen is due for a GC, or if
3845 * last_gen==NUM_GENERATIONS (the scratch generation? eh?) we GC that
3846 * too. The valid range for last_gen is: 0,1,...,NUM_GENERATIONS.
3848 * We stop collecting at gencgc_oldest_gen_to_gc, even if this is less than
3849 * last_gen (oh, and note that by default it is NUM_GENERATIONS-1) */
3850 void
3851 collect_garbage(generation_index_t last_gen)
3853 generation_index_t gen = 0, i;
3854 int raise, more = 0;
3855 int gen_to_wp;
3856 /* The largest value of last_free_page seen since the time
3857 * remap_free_pages was called. */
3858 static page_index_t high_water_mark = 0;
3860 FSHOW((stderr, "/entering collect_garbage(%d)\n", last_gen));
3861 log_generation_stats(gc_logfile, "=== GC Start ===");
3863 gc_active_p = 1;
3865 if (last_gen > HIGHEST_NORMAL_GENERATION+1) {
3866 FSHOW((stderr,
3867 "/collect_garbage: last_gen = %d, doing a level 0 GC\n",
3868 last_gen));
3869 last_gen = 0;
3872 /* Flush the alloc regions updating the tables. */
3873 gc_alloc_update_all_page_tables(1);
3875 /* Verify the new objects created by Lisp code. */
3876 if (pre_verify_gen_0) {
3877 FSHOW((stderr, "pre-checking generation 0\n"));
3878 verify_generation(0);
3881 if (gencgc_verbose > 1)
3882 print_generation_stats();
3884 #ifdef LISP_FEATURE_IMMOBILE_SPACE
3885 /* Immobile space generation bits are lazily updated for gen0
3886 (not touched on every object allocation) so do it now */
3887 update_immobile_nursery_bits();
3888 #endif
3890 do {
3891 /* Collect the generation. */
3893 if (more || (gen >= gencgc_oldest_gen_to_gc)) {
3894 /* Never raise the oldest generation. Never raise the extra generation
3895 * collected due to more-flag. */
3896 raise = 0;
3897 more = 0;
3898 } else {
3899 raise =
3900 (gen < last_gen)
3901 || (generations[gen].num_gc >= generations[gen].number_of_gcs_before_promotion);
3902 /* If we would not normally raise this one, but we're
3903 * running low on space in comparison to the object-sizes
3904 * we've been seeing, raise it and collect the next one
3905 * too. */
3906 if (!raise && gen == last_gen) {
3907 more = (2*large_allocation) >= (dynamic_space_size - bytes_allocated);
3908 raise = more;
3912 if (gencgc_verbose > 1) {
3913 FSHOW((stderr,
3914 "starting GC of generation %d with raise=%d alloc=%d trig=%d GCs=%d\n",
3915 gen,
3916 raise,
3917 generations[gen].bytes_allocated,
3918 generations[gen].gc_trigger,
3919 generations[gen].num_gc));
3922 /* If an older generation is being filled, then update its
3923 * memory age. */
3924 if (raise == 1) {
3925 generations[gen+1].cum_sum_bytes_allocated +=
3926 generations[gen+1].bytes_allocated;
3929 garbage_collect_generation(gen, raise);
3931 /* Reset the memory age cum_sum. */
3932 generations[gen].cum_sum_bytes_allocated = 0;
3934 if (gencgc_verbose > 1) {
3935 FSHOW((stderr, "GC of generation %d finished:\n", gen));
3936 print_generation_stats();
3939 gen++;
3940 } while ((gen <= gencgc_oldest_gen_to_gc)
3941 && ((gen < last_gen)
3942 || more
3943 || (raise
3944 && (generations[gen].bytes_allocated
3945 > generations[gen].gc_trigger)
3946 && (generation_average_age(gen)
3947 > generations[gen].minimum_age_before_gc))));
3949 /* Now if gen-1 was raised all generations before gen are empty.
3950 * If it wasn't raised then all generations before gen-1 are empty.
3952 * Now objects within this gen's pages cannot point to younger
3953 * generations unless they are written to. This can be exploited
3954 * by write-protecting the pages of gen; then when younger
3955 * generations are GCed only the pages which have been written
3956 * need scanning. */
3957 if (raise)
3958 gen_to_wp = gen;
3959 else
3960 gen_to_wp = gen - 1;
3962 /* There's not much point in WPing pages in generation 0 as it is
3963 * never scavenged (except promoted pages). */
3964 if ((gen_to_wp > 0) && enable_page_protection) {
3965 /* Check that they are all empty. */
3966 for (i = 0; i < gen_to_wp; i++) {
3967 if (generations[i].bytes_allocated)
3968 lose("trying to write-protect gen. %d when gen. %d nonempty\n",
3969 gen_to_wp, i);
3971 write_protect_generation_pages(gen_to_wp);
3973 #ifdef LISP_FEATURE_IMMOBILE_SPACE
3974 write_protect_immobile_space();
3975 #endif
3977 /* Set gc_alloc() back to generation 0. The current regions should
3978 * be flushed after the above GCs. */
3979 gc_assert((boxed_region.free_pointer - boxed_region.start_addr) == 0);
3980 gc_alloc_generation = 0;
3982 /* Save the high-water mark before updating last_free_page */
3983 if (last_free_page > high_water_mark)
3984 high_water_mark = last_free_page;
3986 update_dynamic_space_free_pointer();
3988 /* Update auto_gc_trigger. Make sure we trigger the next GC before
3989 * running out of heap! */
3990 if (bytes_consed_between_gcs <= (dynamic_space_size - bytes_allocated))
3991 auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
3992 else
3993 auto_gc_trigger = bytes_allocated + (dynamic_space_size - bytes_allocated)/2;
3995 if(gencgc_verbose)
3996 fprintf(stderr,"Next gc when %"OS_VM_SIZE_FMT" bytes have been consed\n",
3997 auto_gc_trigger);
3999 /* If we did a big GC (arbitrarily defined as gen > 1), release memory
4000 * back to the OS.
4002 if (gen > small_generation_limit) {
4003 if (last_free_page > high_water_mark)
4004 high_water_mark = last_free_page;
4005 remap_free_pages(0, high_water_mark, 0);
4006 high_water_mark = 0;
4009 gc_active_p = 0;
4010 large_allocation = 0;
4012 log_generation_stats(gc_logfile, "=== GC End ===");
4013 SHOW("returning from collect_garbage");
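/* Editor's illustrative sketch, not compiled: the auto_gc_trigger update at
 * the end of collect_garbage() is simply "current usage plus one nursery",
 * falling back to half of the remaining headroom when a full nursery no
 * longer fits in dynamic space. */
#if 0
static os_vm_size_t
example_next_gc_trigger(os_vm_size_t allocated,   /* bytes_allocated          */
                        os_vm_size_t nursery,     /* bytes_consed_between_gcs */
                        os_vm_size_t space_size)  /* dynamic_space_size       */
{
    os_vm_size_t headroom = space_size - allocated;
    return allocated + (nursery <= headroom ? nursery : headroom/2);
}
#endif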
4016 void
4017 gc_init(void)
4019 page_index_t i;
4021 #if defined(LISP_FEATURE_SB_SAFEPOINT)
4022 alloc_gc_page();
4023 #endif
4025 /* Compute the number of pages needed for the dynamic space.
4026 * Dynamic space size should be aligned on page size. */
4027 page_table_pages = dynamic_space_size/GENCGC_CARD_BYTES;
4028 gc_assert(dynamic_space_size == npage_bytes(page_table_pages));
4030 /* Default nursery size to 5% of the total dynamic space size,
4031 * min 1Mb. */
4032 bytes_consed_between_gcs = dynamic_space_size/(os_vm_size_t)20;
4033 if (bytes_consed_between_gcs < (1024*1024))
4034 bytes_consed_between_gcs = 1024*1024;
4036 /* The page_table must be allocated using "calloc" to initialize
4037 * the page structures correctly. There used to be a separate
4038 * initialization loop (now commented out; see below) but that was
4039 * unnecessary and did hurt startup time. */
4040 page_table = calloc(page_table_pages, sizeof(struct page));
4041 gc_assert(page_table);
4042 #ifdef LISP_FEATURE_IMMOBILE_SPACE
4043 gc_init_immobile();
4044 #endif
4046 size_t pins_map_size_in_bytes =
4047 (n_dwords_in_card / N_WORD_BITS) * sizeof (uword_t) * page_table_pages;
4048 /* We use mmap directly here so that we can use a minimum of
4049 system calls per page during GC.
4050 All we need here now is a madvise(DONTNEED) at the end of GC. */
4051 page_table_pinned_dwords
4052 = (in_use_marker_t*)os_validate(NULL, pins_map_size_in_bytes);
4053 /* We do not need to zero it: os_validate() returns zero-filled memory. */
4054 gc_assert(page_table_pinned_dwords);
4056 scavtab[WEAK_POINTER_WIDETAG] = scav_weak_pointer;
4057 transother[SIMPLE_ARRAY_WIDETAG] = trans_boxed_large;
4059 heap_base = (void*)DYNAMIC_SPACE_START;
4061 /* The page structures are initialized implicitly when page_table
4062 * is allocated with "calloc" above. Formerly we had the following
4063 * explicit initialization here (comments converted to C99 style
4064 * for readability as C's block comments don't nest):
4066 * // Initialize each page structure.
4067 * for (i = 0; i < page_table_pages; i++) {
4068 * // Initialize all pages as free.
4069 * page_table[i].allocated = FREE_PAGE_FLAG;
4070 * page_table[i].bytes_used = 0;
4072 * // Pages are not write-protected at startup.
4073 * page_table[i].write_protected = 0;
4076 * Without this loop the image starts up much faster when dynamic
4077 * space is large -- which it is on 64-bit platforms already by
4078 * default -- and when "calloc" for large arrays is implemented
4079 * using copy-on-write of a page of zeroes -- which it is at least
4080 * on Linux. In this case the pages that page_table is stored
4081 * in are not mapped and cleared before the corresponding part of
4082 * dynamic space is used. For example, this saves clearing 16 MB of
4083 * memory at startup if the page size is 4 KB and the size of
4084 * dynamic space is 4 GB.
4085 * FREE_PAGE_FLAG must be 0 for this to work correctly which is
4086 * asserted below: */
4088 /* Compile time assertion: If triggered, declares an array
4089 * of dimension -1 forcing a syntax error. The intent of the
4090 * assignment is to avoid an "unused variable" warning. */
4091 char assert_free_page_flag_0[(FREE_PAGE_FLAG) ? -1 : 1];
4092 assert_free_page_flag_0[0] = assert_free_page_flag_0[0];
4095 bytes_allocated = 0;
4097 /* Initialize the generations. */
4098 for (i = 0; i < NUM_GENERATIONS; i++) {
4099 generations[i].alloc_start_page = 0;
4100 generations[i].alloc_unboxed_start_page = 0;
4101 generations[i].alloc_large_start_page = 0;
4102 generations[i].alloc_large_unboxed_start_page = 0;
4103 generations[i].bytes_allocated = 0;
4104 generations[i].gc_trigger = 2000000;
4105 generations[i].num_gc = 0;
4106 generations[i].cum_sum_bytes_allocated = 0;
4107 /* the tune-able parameters */
4108 generations[i].bytes_consed_between_gc
4109 = bytes_consed_between_gcs/(os_vm_size_t)HIGHEST_NORMAL_GENERATION;
4110 generations[i].number_of_gcs_before_promotion = 1;
4111 generations[i].minimum_age_before_gc = 0.75;
4114 /* Initialize gc_alloc. */
4115 gc_alloc_generation = 0;
4116 gc_set_region_empty(&boxed_region);
4117 gc_set_region_empty(&unboxed_region);
4119 last_free_page = 0;
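/* Editor's illustrative sketch, not compiled: the FREE_PAGE_FLAG check in
 * gc_init() is the classic negative-array-size compile-time assertion.  The
 * same idiom can be packaged as a macro; EXAMPLE_CT_ASSERT is a hypothetical
 * name, not something the runtime defines. */
#if 0
#define EXAMPLE_CT_ASSERT(expr) \
    do { char ct_assert_[(expr) ? 1 : -1]; ct_assert_[0] = ct_assert_[0]; } while (0)

/* usage, inside any function body:
 *   EXAMPLE_CT_ASSERT(FREE_PAGE_FLAG == 0);
 */
#endif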
4122 /* Pick up the dynamic space from after a core load.
4124 * The ALLOCATION_POINTER points to the end of the dynamic space.
4127 static void
4128 gencgc_pickup_dynamic(void)
4130 page_index_t page = 0;
4131 void *alloc_ptr = (void *)get_alloc_pointer();
4132 lispobj *prev=(lispobj *)page_address(page);
4133 generation_index_t gen = PSEUDO_STATIC_GENERATION;
4135 bytes_allocated = 0;
4137 do {
4138 lispobj *first,*ptr= (lispobj *)page_address(page);
4140 if (!gencgc_partial_pickup || page_allocated_p(page)) {
4141 /* It is possible, though rare, for the saved page table
4142 * to contain free pages below alloc_ptr. */
4143 page_table[page].gen = gen;
4144 page_table[page].bytes_used = GENCGC_CARD_BYTES;
4145 page_table[page].large_object = 0;
4146 page_table[page].write_protected = 0;
4147 page_table[page].write_protected_cleared = 0;
4148 page_table[page].dont_move = 0;
4149 page_table[page].need_to_zero = 1;
4151 bytes_allocated += GENCGC_CARD_BYTES;
4154 if (!gencgc_partial_pickup) {
4155 page_table[page].allocated = BOXED_PAGE_FLAG;
4156 first = gc_search_space3(ptr, prev, (ptr+2));
4157 if(ptr == first)
4158 prev=ptr;
4159 page_table[page].scan_start_offset =
4160 page_address(page) - (void *)prev;
4162 page++;
4163 } while (page_address(page) < alloc_ptr);
4165 last_free_page = page;
4167 generations[gen].bytes_allocated = bytes_allocated;
4169 gc_alloc_update_all_page_tables(1);
4170 write_protect_generation_pages(gen);
4173 void
4174 gc_initialize_pointers(void)
4176 gencgc_pickup_dynamic();
4180 /* alloc(..) is the external interface for memory allocation. It
4181 * allocates to generation 0. It is not called from within the garbage
4182 * collector as it is only external uses that need the check for heap
4183 * size (GC trigger) and to disable the interrupts (interrupts are
4184 * always disabled during a GC).
4186 * The vops that call alloc(..) assume that the returned space is zero-filled.
4187 * (E.g. the most significant word of a 2-word bignum in MOVE-FROM-UNSIGNED.)
4189 * The check for a GC trigger is only performed when the current
4190 * region is full, so in most cases it's not needed. */
4192 static inline lispobj *
4193 general_alloc_internal(sword_t nbytes, int page_type_flag, struct alloc_region *region,
4194 struct thread *thread)
4196 #ifndef LISP_FEATURE_WIN32
4197 lispobj alloc_signal;
4198 #endif
4199 void *new_obj;
4200 void *new_free_pointer;
4201 os_vm_size_t trigger_bytes = 0;
4203 gc_assert(nbytes > 0);
4205 /* Check for alignment allocation problems. */
4206 gc_assert((((uword_t)region->free_pointer & LOWTAG_MASK) == 0)
4207 && ((nbytes & LOWTAG_MASK) == 0));
4209 #if !(defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD))
4210 /* Must be inside a PA section. */
4211 gc_assert(get_pseudo_atomic_atomic(thread));
4212 #endif
4214 if ((os_vm_size_t) nbytes > large_allocation)
4215 large_allocation = nbytes;
4217 /* maybe we can do this quickly ... */
4218 new_free_pointer = region->free_pointer + nbytes;
4219 if (new_free_pointer <= region->end_addr) {
4220 new_obj = (void*)(region->free_pointer);
4221 region->free_pointer = new_free_pointer;
4222 return(new_obj); /* yup */
4225 /* We don't want to count nbytes against auto_gc_trigger unless we
4226 * have to: doing so speeds up the tenuring of objects and slows down
4227 * allocation. However, unless we do so when allocating _very_
4228 * large objects we are in danger of exhausting the heap without
4229 * running sufficient GCs.
4231 if ((os_vm_size_t) nbytes >= bytes_consed_between_gcs)
4232 trigger_bytes = nbytes;
4234 /* we have to go the long way around, it seems. Check whether we
4235 * should GC in the near future
4237 if (auto_gc_trigger && (bytes_allocated+trigger_bytes > auto_gc_trigger)) {
4238 /* Don't flood the system with interrupts if the need to gc is
4239 * already noted. This can happen for example when SUB-GC
4240 * allocates or after a gc triggered in a WITHOUT-GCING. */
4241 if (SymbolValue(GC_PENDING,thread) == NIL) {
4242 /* set things up so that GC happens when we finish the PA
4243 * section */
4244 SetSymbolValue(GC_PENDING,T,thread);
4245 if (SymbolValue(GC_INHIBIT,thread) == NIL) {
4246 #ifdef LISP_FEATURE_SB_SAFEPOINT
4247 thread_register_gc_trigger();
4248 #else
4249 set_pseudo_atomic_interrupted(thread);
4250 #ifdef GENCGC_IS_PRECISE
4251 /* PPC calls alloc() from a trap;
4252 * look up the saved allocation trap context if it's from a trap. */
4254 os_context_t *context =
4255 thread->interrupt_data->allocation_trap_context;
4256 maybe_save_gc_mask_and_block_deferrables
4257 (context ? os_context_sigmask_addr(context) : NULL);
4259 #else
4260 maybe_save_gc_mask_and_block_deferrables(NULL);
4261 #endif
4262 #endif
4266 new_obj = gc_alloc_with_region(nbytes, page_type_flag, region, 0);
4268 #ifndef LISP_FEATURE_WIN32
4269 /* for sb-prof, and not supported on Windows yet */
4270 alloc_signal = SymbolValue(ALLOC_SIGNAL,thread);
4271 if ((alloc_signal & FIXNUM_TAG_MASK) == 0) {
4272 if ((sword_t) alloc_signal <= 0) {
4273 SetSymbolValue(ALLOC_SIGNAL, T, thread);
4274 raise(SIGPROF);
4275 } else {
4276 SetSymbolValue(ALLOC_SIGNAL,
4277 alloc_signal - (1 << N_FIXNUM_TAG_BITS),
4278 thread);
4281 #endif
4283 return (new_obj);
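/* Editor's illustrative sketch, not compiled: stripped of the GC-trigger and
 * alloc-signal bookkeeping, the fast path of general_alloc_internal() is
 * plain pointer-bump allocation within the current region.  Returning NULL
 * here stands in for "take the slow path"; the example_* name is
 * hypothetical. */
#if 0
static inline void *
example_bump_alloc(struct alloc_region *region, sword_t nbytes)
{
    void *new_free_pointer = region->free_pointer + nbytes;
    if (new_free_pointer > region->end_addr)
        return NULL;                     /* region full: caller must refill */
    void *new_obj = region->free_pointer;
    region->free_pointer = new_free_pointer;
    return new_obj;
}
#endif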
4286 lispobj *
4287 general_alloc(sword_t nbytes, int page_type_flag)
4289 struct thread *thread = arch_os_get_current_thread();
4290 /* Select correct region, and call general_alloc_internal with it.
4291 * For other than boxed allocation we must lock first, since the
4292 * region is shared. */
4293 if (BOXED_PAGE_FLAG & page_type_flag) {
4294 #ifdef LISP_FEATURE_SB_THREAD
4295 struct alloc_region *region = (thread ? &(thread->alloc_region) : &boxed_region);
4296 #else
4297 struct alloc_region *region = &boxed_region;
4298 #endif
4299 return general_alloc_internal(nbytes, page_type_flag, region, thread);
4300 } else if (UNBOXED_PAGE_FLAG == page_type_flag) {
4301 lispobj * obj;
4302 int result;
4303 result = thread_mutex_lock(&allocation_lock);
4304 gc_assert(!result);
4305 obj = general_alloc_internal(nbytes, page_type_flag, &unboxed_region, thread);
4306 result = thread_mutex_unlock(&allocation_lock);
4307 gc_assert(!result);
4308 return obj;
4309 } else {
4310 lose("bad page type flag: %d", page_type_flag);
4314 lispobj AMD64_SYSV_ABI *
4315 alloc(sword_t nbytes)
4317 #ifdef LISP_FEATURE_SB_SAFEPOINT_STRICTLY
4318 struct thread *self = arch_os_get_current_thread();
4319 int was_pseudo_atomic = get_pseudo_atomic_atomic(self);
4320 if (!was_pseudo_atomic)
4321 set_pseudo_atomic_atomic(self);
4322 #else
4323 gc_assert(get_pseudo_atomic_atomic(arch_os_get_current_thread()));
4324 #endif
4326 lispobj *result = general_alloc(nbytes, BOXED_PAGE_FLAG);
4328 #ifdef LISP_FEATURE_SB_SAFEPOINT_STRICTLY
4329 if (!was_pseudo_atomic)
4330 clear_pseudo_atomic_atomic(self);
4331 #endif
4333 return result;
4337 * shared support for the OS-dependent signal handlers which
4338 * catch GENCGC-related write-protect violations
4340 void unhandled_sigmemoryfault(void* addr);
4342 /* Depending on which OS we're running under, different signals might
4343 * be raised for a violation of write protection in the heap. This
4344 * function factors out the common generational GC magic which needs
4345 * to be invoked in this case, and should be called from whatever signal
4346 * handler is appropriate for the OS we're running under.
4348 * Return true if this signal is a normal generational GC thing that
4349 * we were able to handle, or false if it was abnormal and control
4350 * should fall through to the general SIGSEGV/SIGBUS/whatever logic.
4352 * We have two control flags for this: one causes us to ignore faults
4353 * on unprotected pages completely, and the second complains to stderr
4354 * but allows us to continue without losing.
4356 extern boolean ignore_memoryfaults_on_unprotected_pages;
4357 boolean ignore_memoryfaults_on_unprotected_pages = 0;
4359 extern boolean continue_after_memoryfault_on_unprotected_pages;
4360 boolean continue_after_memoryfault_on_unprotected_pages = 0;
4363 gencgc_handle_wp_violation(void* fault_addr)
4365 page_index_t page_index = find_page_index(fault_addr);
4367 #if QSHOW_SIGNALS
4368 FSHOW((stderr,
4369 "heap WP violation? fault_addr=%p, page_index=%"PAGE_INDEX_FMT"\n",
4370 fault_addr, page_index));
4371 #endif
4373 /* Check whether the fault is within the dynamic space. */
4374 if (page_index == (-1)) {
4375 #ifdef LISP_FEATURE_IMMOBILE_SPACE
4376 extern int immobile_space_handle_wp_violation(void*);
4377 if (immobile_space_handle_wp_violation(fault_addr))
4378 return 1;
4379 #endif
4381 /* It can be helpful to be able to put a breakpoint on this
4382 * case to help diagnose low-level problems. */
4383 unhandled_sigmemoryfault(fault_addr);
4385 /* not within the dynamic space -- not our responsibility */
4386 return 0;
4388 } else {
4389 int ret;
4390 ret = thread_mutex_lock(&free_pages_lock);
4391 gc_assert(ret == 0);
4392 if (page_table[page_index].write_protected) {
4393 /* Unprotect the page. */
4394 os_protect(page_address(page_index), GENCGC_CARD_BYTES, OS_VM_PROT_ALL);
4395 page_table[page_index].write_protected_cleared = 1;
4396 page_table[page_index].write_protected = 0;
4397 } else if (!ignore_memoryfaults_on_unprotected_pages) {
4398 /* The only acceptable reason for this signal on a heap
4399 * access is that GENCGC write-protected the page.
4400 * However, if two CPUs hit a wp page near-simultaneously,
4401 * we had better not have the second one lose here if it
4402 * does this test after the first one has already set wp=0
4404 if(page_table[page_index].write_protected_cleared != 1) {
4405 void lisp_backtrace(int frames);
4406 lisp_backtrace(10);
4407 fprintf(stderr,
4408 "Fault @ %p, page %"PAGE_INDEX_FMT" not marked as write-protected:\n"
4409 " boxed_region.first_page: %"PAGE_INDEX_FMT","
4410 " boxed_region.last_page %"PAGE_INDEX_FMT"\n"
4411 " page.scan_start_offset: %"OS_VM_SIZE_FMT"\n"
4412 " page.bytes_used: %"PAGE_BYTES_FMT"\n"
4413 " page.allocated: %d\n"
4414 " page.write_protected: %d\n"
4415 " page.write_protected_cleared: %d\n"
4416 " page.generation: %d\n",
4417 fault_addr,
4418 page_index,
4419 boxed_region.first_page,
4420 boxed_region.last_page,
4421 page_table[page_index].scan_start_offset,
4422 page_table[page_index].bytes_used,
4423 page_table[page_index].allocated,
4424 page_table[page_index].write_protected,
4425 page_table[page_index].write_protected_cleared,
4426 page_table[page_index].gen);
4427 if (!continue_after_memoryfault_on_unprotected_pages)
4428 lose("Feh.\n");
4431 ret = thread_mutex_unlock(&free_pages_lock);
4432 gc_assert(ret == 0);
4433 /* Don't worry, we can handle it. */
4434 return 1;
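/* Editor's illustrative sketch, not compiled: as the comment above says, the
 * OS-specific SIGSEGV/SIGBUS handler is expected to offer the fault to
 * gencgc_handle_wp_violation() first and only fall through to its general
 * fault handling when 0 is returned.  The handler name and the way the
 * fault address is extracted are assumptions, not the actual per-OS code. */
#if 0
static void
example_sigsegv_handler(int sig, siginfo_t *info, void *void_context)
{
    void *fault_addr = (void *)info->si_addr;
    (void)sig; (void)void_context;

    if (gencgc_handle_wp_violation(fault_addr))
        return;   /* just the write barrier; the faulting write is retried */

    /* Not a GENCGC write-protect fault: defer to the port's general
     * SIGSEGV/SIGBUS machinery (not shown here). */
}
#endif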
4437 /* This is to be called when we catch a SIGSEGV/SIGBUS, determine that
4438 * it's not just a case of the program hitting the write barrier, and
4439 * are about to let Lisp deal with it. It's basically just a
4440 * convenient place to set a gdb breakpoint. */
4441 void
4442 unhandled_sigmemoryfault(void *addr)
4445 static void
4446 update_thread_page_tables(struct thread *th)
4448 gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->alloc_region);
4449 #if defined(LISP_FEATURE_SB_SAFEPOINT_STRICTLY) && !defined(LISP_FEATURE_WIN32)
4450 gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->sprof_alloc_region);
4451 #endif
4454 /* GC is single-threaded and all memory allocations during a
4455 collection happen in the GC thread, so it is sufficient to update
4456 all the page tables once at the beginning of a collection and
4457 update only the page tables of the GC thread during the collection. */
4458 void gc_alloc_update_all_page_tables(int for_all_threads)
4460 /* Flush the alloc regions updating the tables. */
4461 struct thread *th;
4462 if (for_all_threads) {
4463 for_each_thread(th) {
4464 update_thread_page_tables(th);
4467 else {
4468 th = arch_os_get_current_thread();
4469 if (th) {
4470 update_thread_page_tables(th);
4473 gc_alloc_update_page_tables(UNBOXED_PAGE_FLAG, &unboxed_region);
4474 gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &boxed_region);
4477 void
4478 gc_set_region_empty(struct alloc_region *region)
4480 region->first_page = 0;
4481 region->last_page = -1;
4482 region->start_addr = page_address(0);
4483 region->free_pointer = page_address(0);
4484 region->end_addr = page_address(0);
4487 static void
4488 zero_all_free_pages()
4490 page_index_t i;
4492 for (i = 0; i < last_free_page; i++) {
4493 if (page_free_p(i)) {
4494 #ifdef READ_PROTECT_FREE_PAGES
4495 os_protect(page_address(i),
4496 GENCGC_CARD_BYTES,
4497 OS_VM_PROT_ALL);
4498 #endif
4499 zero_pages(i, i);
4504 /* Things to do before doing a final GC before saving a core (without
4505 * purify).
4507 * + Pages in large_object pages aren't moved by the GC, so we need to
4508 * unset that flag from all pages.
4509 * + The pseudo-static generation isn't normally collected, but it seems
4510 * reasonable to collect it at least when saving a core. So move the
4511 * pages to a normal generation.
4513 static void
4514 prepare_for_final_gc ()
4516 page_index_t i;
4518 #ifdef LISP_FEATURE_IMMOBILE_SPACE
4519 extern void prepare_immobile_space_for_final_gc();
4520 prepare_immobile_space_for_final_gc ();
4521 #endif
4522 do_wipe_p = 0;
4523 for (i = 0; i < last_free_page; i++) {
4524 page_table[i].large_object = 0;
4525 if (page_table[i].gen == PSEUDO_STATIC_GENERATION) {
4526 int used = page_table[i].bytes_used;
4527 page_table[i].gen = HIGHEST_NORMAL_GENERATION;
4528 generations[PSEUDO_STATIC_GENERATION].bytes_allocated -= used;
4529 generations[HIGHEST_NORMAL_GENERATION].bytes_allocated += used;
4535 /* Do a non-conservative GC, and then save a core with the initial
4536 * function being set to the value of the static symbol
4537 * SB!VM:RESTART-LISP-FUNCTION */
4538 void
4539 gc_and_save(char *filename, boolean prepend_runtime,
4540 boolean save_runtime_options, boolean compressed,
4541 int compression_level, int application_type)
4543 FILE *file;
4544 void *runtime_bytes = NULL;
4545 size_t runtime_size;
4547 file = prepare_to_save(filename, prepend_runtime, &runtime_bytes,
4548 &runtime_size);
4549 if (file == NULL)
4550 return;
4552 conservative_stack = 0;
4554 /* The filename might come from Lisp, and be moved by the now
4555 * non-conservative GC. */
4556 filename = strdup(filename);
4558 /* Collect twice: once into relatively high memory, and then back
4559 * into low memory. This compacts the retained data into the lower
4560 * pages, minimizing the size of the core file.
4562 prepare_for_final_gc();
4563 gencgc_alloc_start_page = last_free_page;
4564 collect_garbage(HIGHEST_NORMAL_GENERATION+1);
4566 prepare_for_final_gc();
4567 gencgc_alloc_start_page = -1;
4568 collect_garbage(HIGHEST_NORMAL_GENERATION+1);
4570 if (prepend_runtime)
4571 save_runtime_to_filehandle(file, runtime_bytes, runtime_size,
4572 application_type);
4574 /* The dumper doesn't know that pages need to be zeroed before use. */
4575 zero_all_free_pages();
4576 save_to_filehandle(file, filename, SymbolValue(RESTART_LISP_FUNCTION,0),
4577 prepend_runtime, save_runtime_options,
4578 compressed ? compression_level : COMPRESSION_LEVEL_NONE);
4579 /* Oops. Save still managed to fail. Since we've mangled the stack
4580 * beyond hope, there's not much we can do.
4581 * (beyond FUNCALLing RESTART_LISP_FUNCTION, but I suspect that's
4582 * going to be rather unsatisfactory too...) */
4583 lose("Attempt to save core after non-conservative GC failed.\n");