/*
 * GENerational Conservative Garbage Collector for SBCL
 */

/*
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 */

/*
 * For a review of garbage collection techniques (e.g. generational
 * GC) and terminology (e.g. "scavenging") see Paul R. Wilson,
 * "Uniprocessor Garbage Collection Techniques". As of 20000618, this
 * had been accepted for _ACM Computing Surveys_ and was available
 * as a PostScript preprint through
 *   <http://www.cs.utexas.edu/users/oops/papers.html>
 * as
 *   <ftp://ftp.cs.utexas.edu/pub/garbage/bigsurv.ps>.
 */
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <inttypes.h>
#include "sbcl.h"
#if defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD)
#include "pthreads_win32.h"
#else
#include <signal.h>
#endif
#include "runtime.h"
#include "os.h"
#include "interr.h"
#include "globals.h"
#include "interrupt.h"
#include "validate.h"
#include "lispregs.h"
#include "arch.h"
#include "gc.h"
#include "gc-internal.h"
#include "thread.h"
#include "pseudo-atomic.h"
#include "alloc.h"
#include "genesis/gc-tables.h"
#include "genesis/vector.h"
#include "genesis/weak-pointer.h"
#include "genesis/fdefn.h"
#include "genesis/simple-fun.h"
#include "save.h"
#include "genesis/hash-table.h"
#include "genesis/instance.h"
#include "genesis/layout.h"
#include "gencgc.h"
#include "hopscotch.h"
#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
#include "genesis/cons.h"
#endif
#include "forwarding-ptr.h"
/* forward declarations */
page_index_t gc_find_freeish_pages(page_index_t *restart_page_ptr, sword_t nbytes,
                                   int page_type_flag);
/*
 * GC parameters
 */
/* As usually configured, generations 0-5 are normal collected generations,
   6 is pseudo-static (the objects in which are never moved nor reclaimed),
   and 7 is scratch space used when collecting a generation without promotion,
   wherein it is moved to generation 7 and back again.
 */
enum {
    SCRATCH_GENERATION = PSEUDO_STATIC_GENERATION+1,
    NUM_GENERATIONS
};
/* Should we use page protection to help avoid the scavenging of pages
 * that don't have pointers to younger generations? */
boolean enable_page_protection = 1;

/* Largest allocation seen since last GC. */
os_vm_size_t large_allocation = 0;
/*
 * debugging
 */

/* the verbosity level. All non-error messages are disabled at level 0,
 * and only a few rare messages are printed at level 1. */
#if QSHOW == 2
boolean gencgc_verbose = 1;
#else
boolean gencgc_verbose = 0;
#endif
/* FIXME: At some point enable the various error-checking things below
 * and see what they say. */

/* We hunt for pointers to old-space, when GCing generations >= verify_gens.
 * Set verify_gens to HIGHEST_NORMAL_GENERATION + 1 to disable this kind of
 * check. */
generation_index_t verify_gens = HIGHEST_NORMAL_GENERATION + 1;

/* Should we do a pre-scan verify of generation 0 before it's GCed? */
boolean pre_verify_gen_0 = 0;

#ifdef LISP_FEATURE_X86
/* Should we check code objects for fixup errors after they are transported? */
boolean check_code_fixups = 0;
#endif

/* Should we check that newly allocated regions are zero filled? */
boolean gencgc_zero_check = 0;

/* Should we check that the free space is zero filled? */
boolean gencgc_enable_verify_zero_fill = 0;

/* When loading a core, don't do a full scan of the memory for the
 * memory region boundaries. (Set to true by coreparse.c if the core
 * contained a pagetable entry).
 */
boolean gencgc_partial_pickup = 0;
/* If defined, free pages are read-protected to ensure that nothing
 * accesses them.
 */

/* #define READ_PROTECT_FREE_PAGES */
/*
 * GC structures and variables
 */

/* the total bytes allocated. These are seen by Lisp DYNAMIC-USAGE. */
os_vm_size_t bytes_allocated = 0;
os_vm_size_t auto_gc_trigger = 0;

/* the source and destination generations. These are set before a GC starts
 * scavenging. */
generation_index_t from_space;
generation_index_t new_space;

/* Set to 1 when in GC */
boolean gc_active_p = 0;
/* Should the GC be conservative about the stack? If false (only right
 * before saving a core), don't scan the stack / mark pages dont_move. */
static boolean conservative_stack = 1;
/* An array of page structures is allocated on gc initialization.
 * This helps to quickly map between an address and its page structure.
 * page_table_pages is set from the size of the dynamic space. */
page_index_t page_table_pages;
struct page *page_table;
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
struct hopscotch_table pinned_objects;
#endif
/* In GC cards that have conservative pointers to them, should we wipe out
 * dwords in there that are not used, so that they do not act as false
 * roots to other things in the heap from then on? This is a new feature
 * but in testing it has been both reliable and a source of no noticeable
 * slowdown. */
int do_wipe_p = 1;
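
/* A minimal sketch of the idea behind do_wipe_p; the helper below is
 * illustrative only, not part of the real interface (the actual wiping
 * is finer-grained, zeroing unused words between live objects too).
 * Zeroing the unused tail of a pinned card means stale bit patterns
 * there can no longer masquerade as live pointers on a later
 * conservative scan. */
static void __attribute__((unused))
wipe_unused_tail_sketch(page_index_t page)
{
    /* Same address computation as page_address(), defined below. */
    char *page_start = (char*)(DYNAMIC_SPACE_START + (page * GENCGC_CARD_BYTES));
    /* Zero everything between the last used byte and the end of the card. */
    memset(page_start + page_bytes_used(page), 0,
           GENCGC_CARD_BYTES - page_bytes_used(page));
}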
/// Constants defined in gc-internal:
///   #define BOXED_PAGE_FLAG 1
///   #define UNBOXED_PAGE_FLAG 2
///   #define OPEN_REGION_PAGE_FLAG 4

/// Return true if 'allocated' bits are: {001, 010, 011}, false if 1zz or 000.
/// (XORing with OPEN_REGION_PAGE_FLAG sends {1,2,3} above 4 and {0,4..7} below.)
static inline boolean page_allocated_no_region_p(page_index_t page) {
    return (page_table[page].allocated ^ OPEN_REGION_PAGE_FLAG) > OPEN_REGION_PAGE_FLAG;
}
static inline boolean page_free_p(page_index_t page) {
    return (page_table[page].allocated == FREE_PAGE_FLAG);
}

static inline boolean page_boxed_p(page_index_t page) {
    return (page_table[page].allocated & BOXED_PAGE_FLAG);
}

/// Return true if 'allocated' bits are: {001, 011}, false otherwise.
/// i.e. true of pages which could hold boxed or partially boxed objects.
static inline boolean page_boxed_no_region_p(page_index_t page) {
    return (page_table[page].allocated & 5) == BOXED_PAGE_FLAG;
}

/// Return true if page MUST NOT hold boxed objects (including code).
static inline boolean page_unboxed_p(page_index_t page) {
    /* Both flags set == boxed code page */
    return (page_table[page].allocated & 3) == UNBOXED_PAGE_FLAG;
}

static inline boolean protect_page_p(page_index_t page, generation_index_t generation) {
    return (page_boxed_no_region_p(page)
            && (page_bytes_used(page) != 0)
            && !page_table[page].dont_move
            && (page_table[page].gen == generation));
}
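
/* A minimal sketch of how protect_page_p() is meant to be used; this
 * helper is illustrative only, the real write-protection pass lives in
 * the generation-GC code further on. Qualifying pages of an old
 * generation get hardware write protection, so the write-barrier fault
 * handler can record exactly which protected pages are later mutated. */
static void __attribute__((unused))
write_protect_generation_sketch(generation_index_t generation)
{
    page_index_t i;
    for (i = 0; i < last_free_page; i++)
        if (protect_page_p(i, generation)) {
            /* Same address computation as page_address(), defined below. */
            os_protect((void*)(DYNAMIC_SPACE_START + (i * GENCGC_CARD_BYTES)),
                       GENCGC_CARD_BYTES,
                       OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
            page_table[i].write_protected = 1;
        }
}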
/* Calculate the start address for the given page number. */
inline void *
page_address(page_index_t page_num)
{
    return (void*)(DYNAMIC_SPACE_START + (page_num * GENCGC_CARD_BYTES));
}

/* Calculate the address where the allocation region associated with
 * the page starts. */
static inline void *
page_scan_start(page_index_t page_index)
{
    return page_address(page_index)-page_scan_start_offset(page_index);
}

/* True if the page starts a contiguous block. */
static inline boolean
page_starts_contiguous_block_p(page_index_t page_index)
{
    // Don't use the preprocessor macro: 0 means 0.
    return page_table[page_index].scan_start_offset_ == 0;
}

/* True if the page is the last page in a contiguous block. */
static inline boolean
page_ends_contiguous_block_p(page_index_t page_index, generation_index_t gen)
{
    return (/* page doesn't fill block */
            (page_bytes_used(page_index) < GENCGC_CARD_BYTES)
            /* page is last allocated page */
            || ((page_index + 1) >= last_free_page)
            /* next page free */
            || page_free_p(page_index + 1)
            /* next page contains no data */
            || (page_bytes_used(page_index + 1) == 0)
            /* next page is in different generation */
            || (page_table[page_index + 1].gen != gen)
            /* next page starts its own contiguous block */
            || (page_starts_contiguous_block_p(page_index + 1)));
}
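
/* A minimal sketch of why scan_start matters to the conservative
 * collector; the helper name is illustrative, not the real interface.
 * An arbitrary interior pointer is mapped to its page, and
 * page_scan_start() then yields a known object boundary from which a
 * scan can walk forward, object by object, to the object containing
 * the pointer. */
static void * __attribute__((unused))
conservative_scan_start_sketch(void *interior_pointer)
{
    page_index_t page = find_page_index(interior_pointer);
    if (page < 0 || page_free_p(page))
        return NULL;   /* not in the dynamic space, or on a free page */
    /* A known-good place to start parsing objects covering this page. */
    return page_scan_start(page);
}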
/// External function for calling from Lisp.
page_index_t ext_find_page_index(void *addr) { return find_page_index(addr); }

static os_vm_size_t
npage_bytes(page_index_t npages)
{
    gc_assert(npages>=0);
    return ((os_vm_size_t)npages)*GENCGC_CARD_BYTES;
}

/* Check that X is an address no lower than Y, and return the offset from
 * Y to X in bytes. */
static inline os_vm_size_t
void_diff(void *x, void *y)
{
    gc_assert(x >= y);
    return (pointer_sized_uint_t)x - (pointer_sized_uint_t)y;
}
/* a structure to hold the state of a generation
 *
 * CAUTION: If you modify this, make sure to touch up the alien
 * definition in src/code/gc.lisp accordingly. ...or better yet,
 * deal with the FIXME there... */
struct generation {

    /* the first page that gc_alloc() checks on its next call */
    page_index_t alloc_start_page;

    /* the first page that gc_alloc_unboxed() checks on its next call */
    page_index_t alloc_unboxed_start_page;

    /* the first page that gc_alloc_large (boxed) considers on its next
     * call. (Although it always allocates after the boxed_region.) */
    page_index_t alloc_large_start_page;

    /* the first page that gc_alloc_large (unboxed) considers on its
     * next call. (Although it always allocates after the
     * current_unboxed_region.) */
    page_index_t alloc_large_unboxed_start_page;

    /* the bytes allocated to this generation */
    os_vm_size_t bytes_allocated;

    /* the number of bytes at which to trigger a GC */
    os_vm_size_t gc_trigger;

    /* to calculate a new level for gc_trigger */
    os_vm_size_t bytes_consed_between_gc;

    /* the number of GCs since the last raise */
    int num_gc;

    /* the number of GCs to run on the generations before raising objects to the
     * next generation */
    int number_of_gcs_before_promotion;

    /* the cumulative sum of the bytes allocated to this generation. It is
     * cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation. Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC. */
    os_vm_size_t cum_sum_bytes_allocated;

    /* a minimum average memory age before a GC will occur helps
     * prevent a GC when a large number of new live objects have been
     * added, in which case a GC could be a waste of time */
    double minimum_age_before_gc;
};

/* an array of generation structures. There needs to be one more
 * generation structure than actual generations as the oldest
 * generation is temporarily raised then lowered. */
struct generation generations[NUM_GENERATIONS];
/* the oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... HIGHEST_NORMAL_GENERATION
 *
 * The default of HIGHEST_NORMAL_GENERATION enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided. */
generation_index_t gencgc_oldest_gen_to_gc = HIGHEST_NORMAL_GENERATION;

/* META: Is nobody aside from me bothered by this especially misleading
 * use of the word "last"? It could mean either "ultimate" or "prior",
 * but in fact means neither. It is the *FIRST* page that should be grabbed
 * for more space, so it is min free page, or 1+ the max used page. */
/* The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the Lisp code. */

page_index_t last_free_page;
#ifdef LISP_FEATURE_SB_THREAD
/* This lock is to prevent multiple threads from simultaneously
 * allocating new regions which overlap each other. Note that the
 * majority of GC is single-threaded, but alloc() may be called from
 * >1 thread at a time and must be thread-safe. This lock must be
 * seized before all accesses to generations[] or to parts of
 * page_table[] that other threads may want to see */
static pthread_mutex_t free_pages_lock = PTHREAD_MUTEX_INITIALIZER;

/* This lock is used to protect non-thread-local allocation. */
static pthread_mutex_t allocation_lock = PTHREAD_MUTEX_INITIALIZER;
#endif

extern os_vm_size_t gencgc_release_granularity;
os_vm_size_t gencgc_release_granularity = GENCGC_RELEASE_GRANULARITY;

extern os_vm_size_t gencgc_alloc_granularity;
os_vm_size_t gencgc_alloc_granularity = GENCGC_ALLOC_GRANULARITY;
/*
 * miscellaneous heap functions
 */

/* Count the number of pages which are write-protected within the
 * given generation. */
static page_index_t
count_write_protect_generation_pages(generation_index_t generation)
{
    page_index_t i, count = 0;

    for (i = 0; i < last_free_page; i++)
        if (!page_free_p(i)
            && (page_table[i].gen == generation)
            && (page_table[i].write_protected == 1))
            count++;
    return count;
}

/* Count the number of pages within the given generation. */
static page_index_t
count_generation_pages(generation_index_t generation)
{
    page_index_t i;
    page_index_t count = 0;

    for (i = 0; i < last_free_page; i++)
        if (!page_free_p(i)
            && (page_table[i].gen == generation))
            count++;
    return count;
}
#if QSHOW
static page_index_t
count_dont_move_pages(void)
{
    page_index_t i;
    page_index_t count = 0;
    for (i = 0; i < last_free_page; i++) {
        if (!page_free_p(i)
            && (page_table[i].dont_move != 0)) {
            ++count;
        }
    }
    return count;
}
#endif /* QSHOW */
/* Work through the pages and add up the number of bytes used for the
 * given generation. */
static __attribute__((unused)) os_vm_size_t
count_generation_bytes_allocated (generation_index_t gen)
{
    page_index_t i;
    os_vm_size_t result = 0;
    for (i = 0; i < last_free_page; i++) {
        if (!page_free_p(i)
            && (page_table[i].gen == gen))
            result += page_bytes_used(i);
    }
    return result;
}

/* Return the average age of the memory in a generation. */
extern double
generation_average_age(generation_index_t gen)
{
    if (generations[gen].bytes_allocated == 0)
        return 0.0;

    return
        ((double)generations[gen].cum_sum_bytes_allocated)
        / ((double)generations[gen].bytes_allocated);
}
#ifdef LISP_FEATURE_X86
extern void fpu_save(void *);
extern void fpu_restore(void *);
#endif

#define PAGE_INDEX_FMT PRIdPTR
extern void
write_generation_stats(FILE *file)
{
    generation_index_t i;

#ifdef LISP_FEATURE_X86
    int fpu_state[27];

    /* Can end up here after calling alloc_tramp which doesn't prepare
     * the x87 state, and the C ABI uses a different mode */
    fpu_save(fpu_state);
#endif

    /* Print the heap stats. */
    fprintf(file,
            " Gen StaPg UbSta LaSta LUbSt Boxed Unboxed LB LUB !move Alloc Waste Trig WP GCs Mem-age\n");

    for (i = 0; i <= SCRATCH_GENERATION; i++) {
        page_index_t j;
        page_index_t boxed_cnt = 0;
        page_index_t unboxed_cnt = 0;
        page_index_t large_boxed_cnt = 0;
        page_index_t large_unboxed_cnt = 0;
        page_index_t pinned_cnt=0;

        for (j = 0; j < last_free_page; j++)
            if (page_table[j].gen == i) {

                /* Count the number of boxed pages within the given
                 * generation. */
                if (page_boxed_p(j)) {
                    if (page_table[j].large_object)
                        large_boxed_cnt++;
                    else
                        boxed_cnt++;
                }
                if(page_table[j].dont_move) pinned_cnt++;
                /* Count the number of unboxed pages within the given
                 * generation. */
                if (page_unboxed_p(j)) {
                    if (page_table[j].large_object)
                        large_unboxed_cnt++;
                    else
                        unboxed_cnt++;
                }
            }

        gc_assert(generations[i].bytes_allocated
                  == count_generation_bytes_allocated(i));
        fprintf(file,
                " %1d: %5ld %5ld %5ld %5ld",
                i,
                (long)generations[i].alloc_start_page,
                (long)generations[i].alloc_unboxed_start_page,
                (long)generations[i].alloc_large_start_page,
                (long)generations[i].alloc_large_unboxed_start_page);
        fprintf(file,
                " %5"PAGE_INDEX_FMT" %5"PAGE_INDEX_FMT" %5"PAGE_INDEX_FMT
                " %5"PAGE_INDEX_FMT" %5"PAGE_INDEX_FMT,
                boxed_cnt, unboxed_cnt, large_boxed_cnt,
                large_unboxed_cnt, pinned_cnt);
        fprintf(file,
                " %8"OS_VM_SIZE_FMT
                " %5"OS_VM_SIZE_FMT
                " %8"OS_VM_SIZE_FMT
                " %4"PAGE_INDEX_FMT" %3d %7.4f\n",
                generations[i].bytes_allocated,
                (npage_bytes(count_generation_pages(i)) - generations[i].bytes_allocated),
                generations[i].gc_trigger,
                count_write_protect_generation_pages(i),
                generations[i].num_gc,
                generation_average_age(i));
    }
    fprintf(file," Total bytes allocated = %"OS_VM_SIZE_FMT"\n", bytes_allocated);
    fprintf(file," Dynamic-space-size bytes = %"OS_VM_SIZE_FMT"\n", dynamic_space_size);

#ifdef LISP_FEATURE_X86
    fpu_restore(fpu_state);
#endif
}
extern void
write_heap_exhaustion_report(FILE *file, long available, long requested,
                             struct thread *thread)
{
    fprintf(file,
            "Heap exhausted during %s: %ld bytes available, %ld requested.\n",
            gc_active_p ? "garbage collection" : "allocation",
            available,
            requested);
    write_generation_stats(file);
    fprintf(file, "GC control variables:\n");
    fprintf(file, " *GC-INHIBIT* = %s\n *GC-PENDING* = %s\n",
            SymbolValue(GC_INHIBIT,thread)==NIL ? "false" : "true",
            (SymbolValue(GC_PENDING, thread) == T) ?
            "true" : ((SymbolValue(GC_PENDING, thread) == NIL) ?
                      "false" : "in progress"));
#ifdef LISP_FEATURE_SB_THREAD
    fprintf(file, " *STOP-FOR-GC-PENDING* = %s\n",
            SymbolValue(STOP_FOR_GC_PENDING,thread)==NIL ? "false" : "true");
#endif
}
extern void
print_generation_stats(void)
{
    write_generation_stats(stderr);
}

extern char* gc_logfile;
char * gc_logfile = NULL;

extern void
log_generation_stats(char *logfile, char *header)
{
    if (logfile) {
        FILE * log = fopen(logfile, "a");
        if (log) {
            fprintf(log, "%s\n", header);
            write_generation_stats(log);
            fclose(log);
        } else {
            fprintf(stderr, "Could not open gc logfile: %s\n", logfile);
            fflush(stderr);
        }
    }
}
extern void
report_heap_exhaustion(long available, long requested, struct thread *th)
{
    if (gc_logfile) {
        FILE * log = fopen(gc_logfile, "a");
        if (log) {
            write_heap_exhaustion_report(log, available, requested, th);
            fclose(log);
        } else {
            fprintf(stderr, "Could not open gc logfile: %s\n", gc_logfile);
            fflush(stderr);
        }
    }
    /* Always to stderr as well. */
    write_heap_exhaustion_report(stderr, available, requested, th);
}
#if defined(LISP_FEATURE_X86)
void fast_bzero(void*, size_t); /* in <arch>-assem.S */
#endif
/* Zero the pages from START to END (inclusive), but use mmap/munmap instead
 * of zeroing them ourselves, i.e. in practice give the memory back to the
 * OS. Generally done after a large GC.
 */
void zero_pages_with_mmap(page_index_t start, page_index_t end) {
    page_index_t i;
    void *addr = page_address(start), *new_addr;
    os_vm_size_t length = npage_bytes(1+end-start);

    if (start > end)
        return;

    gc_assert(length >= gencgc_release_granularity);
    gc_assert((length % gencgc_release_granularity) == 0);

#ifdef LISP_FEATURE_LINUX
    extern os_vm_address_t anon_dynamic_space_start;
    // We use MADV_DONTNEED only on Linux due to differing semantics from BSD.
    // Linux treats it as a demand that the memory be 0-filled, or refreshed
    // from a file that backs the range. BSD takes it as a hint that you don't
    // care if the memory has to be brought in from swap when next accessed,
    // i.e. it's not a request to make a user-visible alteration to memory.
    // So in theory this can bring a page in from the core file, if we happen
    // to hit a page that resides in the portion of memory mapped by coreparse.
    // In practice this should not happen because objects from a core file can't
    // become garbage. Except in save-lisp-and-die they can, and we must be
    // cautious not to resurrect bytes that originally came from the file.
    if ((os_vm_address_t)addr >= anon_dynamic_space_start) {
        if (madvise(addr, length, MADV_DONTNEED) != 0)
            lose("madvise failed\n");
    } else
#endif
    {
        os_invalidate(addr, length);
        new_addr = os_validate(addr, length);
        if (new_addr == NULL || new_addr != addr) {
            lose("remap_free_pages: page moved, 0x%08x ==> 0x%08x",
                 start, new_addr);
        }
    }

    for (i = start; i <= end; i++)
        set_page_need_to_zero(i, 0);
}
/* Zero the pages from START to END (inclusive). Generally done just after
 * a new region has been allocated.
 */
static void
zero_pages(page_index_t start, page_index_t end) {
    if (start > end)
        return;

#if defined(LISP_FEATURE_X86)
    fast_bzero(page_address(start), npage_bytes(1+end-start));
#else
    bzero(page_address(start), npage_bytes(1+end-start));
#endif
}
static void
zero_and_mark_pages(page_index_t start, page_index_t end) {
    page_index_t i;

    zero_pages(start, end);
    for (i = start; i <= end; i++)
        set_page_need_to_zero(i, 0);
}
/* Zero the pages from START to END (inclusive), except for those
 * pages which are known to be already zeroed. Mark all pages in the
 * range as non-zeroed.
 */
static void
zero_dirty_pages(page_index_t start, page_index_t end) {
    page_index_t i, j;

    for (i = start; i <= end; i++) {
        if (!page_need_to_zero(i)) continue;
        for (j = i+1; (j <= end) && page_need_to_zero(j) ; j++)
            ; /* empty body */
        zero_pages(i, j-1);
        i = j;
    }

    for (i = start; i <= end; i++) {
        set_page_need_to_zero(i, 1);
    }
}
/*
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties
 * e.g. boxed/unboxed, generation, ages; there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page. Many
 * features of memory use are noted on a page-wise basis, e.g. the
 * generation; so if a region starts within an existing allocated page
 * it must be consistent with this page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region. It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list which jumps all over the place to clean up the list. It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g. to detect pointers to the oldspace.
 * Thus it's important that the allocation regions have the correct
 * properties set when allocated, and not just set when closed. The
 * region allocation routines return regions with the specified
 * properties, and grab all the pages, setting their properties
 * appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer. The actual space used by the region is not reflected
 * in the page tables until it is closed. It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used, returning unused
 * space. Further, the region may be noted in new_areas, which is
 * necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region, the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects and so
 * don't need scavenging. Further they can't contain pointers to
 * younger generations so WP is not needed. By allocating pages to
 * unboxed objects the whole page never needs scavenging or
 * write-protecting. */
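
/* The fast path that this design enables amounts to a pointer bump
 * plus a limit check. A minimal sketch (illustrative only; the real
 * version, which also refills the region on overflow, is
 * gc_alloc_with_region() below):
 */
static void * __attribute__((unused))
region_alloc_fast_path_sketch(struct alloc_region *region, sword_t nbytes)
{
    void *new_free_pointer = (char*)region->free_pointer + nbytes;
    if (new_free_pointer <= region->end_addr) {
        /* It fits: bump the free pointer and hand out the old value. */
        void *new_obj = region->free_pointer;
        region->free_pointer = new_free_pointer;
        return new_obj;
    }
    return NULL; /* region exhausted; a new region must be opened */
}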
/* We are only using two regions at present. Both are for the current
 * newspace generation. */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

/* The generation currently being allocated to. */
static generation_index_t gc_alloc_generation;
static inline page_index_t
generation_alloc_start_page(generation_index_t generation, int page_type_flag, int large)
{
    if (large) {
        if (UNBOXED_PAGE_FLAG == page_type_flag) {
            return generations[generation].alloc_large_unboxed_start_page;
        } else if (BOXED_PAGE_FLAG & page_type_flag) {
            /* Both code and data. */
            return generations[generation].alloc_large_start_page;
        } else {
            lose("bad page type flag: %d", page_type_flag);
        }
    } else {
        if (UNBOXED_PAGE_FLAG == page_type_flag) {
            return generations[generation].alloc_unboxed_start_page;
        } else if (BOXED_PAGE_FLAG & page_type_flag) {
            /* Both code and data. */
            return generations[generation].alloc_start_page;
        } else {
            lose("bad page_type_flag: %d", page_type_flag);
        }
    }
}
static inline void
set_generation_alloc_start_page(generation_index_t generation, int page_type_flag, int large,
                                page_index_t page)
{
    if (large) {
        if (UNBOXED_PAGE_FLAG == page_type_flag) {
            generations[generation].alloc_large_unboxed_start_page = page;
        } else if (BOXED_PAGE_FLAG & page_type_flag) {
            /* Both code and data. */
            generations[generation].alloc_large_start_page = page;
        } else {
            lose("bad page type flag: %d", page_type_flag);
        }
    } else {
        if (UNBOXED_PAGE_FLAG == page_type_flag) {
            generations[generation].alloc_unboxed_start_page = page;
        } else if (BOXED_PAGE_FLAG & page_type_flag) {
            /* Both code and data. */
            generations[generation].alloc_start_page = page;
        } else {
            lose("bad page type flag: %d", page_type_flag);
        }
    }
}
/* Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page. So
 * may pick up from the previous region if there is enough space. This
 * keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables(), and will thus be in an empty state.
 *
 * To assist the scavenging functions write-protected pages are not
 * used. Free pages should not be write-protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found to within the current
 * region and the page generation must be set so that pointers to the
 * from space can be recognized. Therefore the generation of pages in
 * the region are set to gc_alloc_generation. To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void
gc_alloc_new_region(sword_t nbytes, int page_type_flag, struct alloc_region *alloc_region)
{
    page_index_t first_page;
    page_index_t last_page;
    page_index_t i;
    int ret;

    /*
    FSHOW((stderr,
           "/alloc_new_region for %d bytes from gen %d\n",
           nbytes, gc_alloc_generation));
    */

    /* Check that the region is in a reset state. */
    gc_assert((alloc_region->first_page == 0)
              && (alloc_region->last_page == -1)
              && (alloc_region->free_pointer == alloc_region->end_addr));
    ret = thread_mutex_lock(&free_pages_lock);
    gc_assert(ret == 0);
    first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0);
    last_page=gc_find_freeish_pages(&first_page, nbytes, page_type_flag);

    /* Set up the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_bytes_used(first_page)
        + page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = page_address(last_page+1);

    /* Set up the pages. */

    /* The first page may have already been in use. */
    /* If so, just assert that it's consistent, otherwise, set it up. */
    if (page_bytes_used(first_page)) {
        gc_assert(page_table[first_page].allocated == page_type_flag);
        gc_assert(page_table[first_page].gen == gc_alloc_generation);
        gc_assert(page_table[first_page].large_object == 0);
    } else {
        page_table[first_page].allocated = page_type_flag;
        page_table[first_page].gen = gc_alloc_generation;
        page_table[first_page].large_object = 0;
        set_page_scan_start_offset(first_page, 0);
    }
    page_table[first_page].allocated |= OPEN_REGION_PAGE_FLAG;

    for (i = first_page+1; i <= last_page; i++) {
        page_table[i].allocated = page_type_flag;
        page_table[i].gen = gc_alloc_generation;
        page_table[i].large_object = 0;
        /* This may not be necessary for unboxed regions (think it was
         * broken before!) */
        set_page_scan_start_offset(i,
            void_diff(page_address(i), alloc_region->start_addr));
        page_table[i].allocated |= OPEN_REGION_PAGE_FLAG;
    }
    /* Bump up last_free_page. */
    if (last_page+1 > last_free_page) {
        last_free_page = last_page+1;
        /* do we only want to call this on special occasions? like for
         * boxed_region? */
        set_alloc_pointer((lispobj)page_address(last_free_page));
    }
    ret = thread_mutex_unlock(&free_pages_lock);
    gc_assert(ret == 0);

#ifdef READ_PROTECT_FREE_PAGES
    os_protect(page_address(first_page),
               npage_bytes(1+last_page-first_page),
               OS_VM_PROT_ALL);
#endif

    /* If the first page was only partial, don't check whether it's
     * zeroed (it won't be) and don't zero it (since the parts that
     * we're interested in are guaranteed to be zeroed).
     */
    if (page_bytes_used(first_page)) {
        first_page++;
    }

    zero_dirty_pages(first_page, last_page);

    /* we can do this after releasing free_pages_lock */
    if (gencgc_zero_check) {
        word_t *p;
        for (p = (word_t *)alloc_region->start_addr;
             p < (word_t *)alloc_region->end_addr; p++) {
            if (*p != 0) {
                lose("The new region is not zero at %p (start=%p, end=%p).\n",
                     p, alloc_region->start_addr, alloc_region->end_addr);
            }
        }
    }
}
/* If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then it is only recorded if the first page of the
 * current region is <= new_areas_ignore_page. This helps avoid
 * unnecessary recording when doing a full scavenge pass.
 *
 * The new_object structure holds the page, byte offset, and size of
 * new regions of objects. Each new area is placed in the array of
 * these structures pointed to by new_areas. new_areas_index holds the
 * offset into new_areas.
 *
 * If new_areas overflows NUM_NEW_AREAS then it stops adding them. The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation. */
#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static page_index_t new_areas_ignore_page;
struct new_area {
    page_index_t page;
    size_t offset;
    size_t size;
};
static struct new_area (*new_areas)[];
static size_t new_areas_index;
size_t max_new_areas;
/* Add a new area to new_areas. */
static void
add_new_area(page_index_t first_page, size_t offset, size_t size)
{
    size_t new_area_start, c;
    ssize_t i;

    /* Ignore if full. */
    if (new_areas_index >= NUM_NEW_AREAS)
        return;

    switch (record_new_objects) {
    case 0:
        return;
    case 1:
        if (first_page > new_areas_ignore_page)
            return;
        break;
    case 2:
        break;
    default:
        gc_abort();
    }

    new_area_start = npage_bytes(first_page) + offset;

    /* Search backwards for a prior area that this follows from. If
       found this will save adding a new area. */
    for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) {
        size_t area_end =
            npage_bytes((*new_areas)[i].page)
            + (*new_areas)[i].offset
            + (*new_areas)[i].size;
        /*FSHOW((stderr,
               "/add_new_area S1 %d %d %d %d\n",
               i, c, new_area_start, area_end));*/
        if (new_area_start == area_end) {
            /*FSHOW((stderr,
                   "/adding to [%d] %d %d %d with %d %d %d:\n",
                   i,
                   (*new_areas)[i].page,
                   (*new_areas)[i].offset,
                   (*new_areas)[i].size,
                   first_page,
                   offset,
                   size);*/
            (*new_areas)[i].size += size;
            return;
        }
    }

    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
    /*FSHOW((stderr,
           "/new_area %d page %d offset %d size %d\n",
           new_areas_index, first_page, offset, size));*/
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
        max_new_areas = new_areas_index;
}
/* Update the tables for the alloc_region. The region may be added to
 * the new_areas.
 *
 * When done the alloc_region is set up so that the next quick alloc
 * will fail safely and thus a new region will be allocated. Further
 * it is safe to try to re-update the page table of this reset
 * alloc_region. */
void
gc_alloc_update_page_tables(int page_type_flag, struct alloc_region *alloc_region)
{
    boolean more;
    page_index_t first_page;
    page_index_t next_page;
    os_vm_size_t bytes_used;
    os_vm_size_t region_size;
    os_vm_size_t byte_cnt;
    page_bytes_t orig_first_page_bytes_used;
    int ret;

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if ((first_page == 0) && (alloc_region->last_page == -1))
        return;

    next_page = first_page+1;

    ret = thread_mutex_lock(&free_pages_lock);
    gc_assert(ret == 0);
    if (alloc_region->free_pointer != alloc_region->start_addr) {
        /* some bytes were allocated in the region */
        orig_first_page_bytes_used = page_bytes_used(first_page);

        gc_assert(alloc_region->start_addr ==
                  (page_address(first_page) + page_bytes_used(first_page)));

        /* All the pages used need to be updated */

        /* Update the first page. */

        /* If the page was free then set up the gen, and
         * scan_start_offset. */
        if (page_bytes_used(first_page) == 0)
            gc_assert(page_starts_contiguous_block_p(first_page));
        page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);

        gc_assert(page_table[first_page].allocated & page_type_flag);
        gc_assert(page_table[first_page].gen == gc_alloc_generation);
        gc_assert(page_table[first_page].large_object == 0);

        byte_cnt = 0;

        /* Calculate the number of bytes used in this page. This is not
         * always the number of new bytes, unless it was free. */
        more = 0;
        if ((bytes_used = void_diff(alloc_region->free_pointer,
                                    page_address(first_page)))
            >GENCGC_CARD_BYTES) {
            bytes_used = GENCGC_CARD_BYTES;
            more = 1;
        }
        set_page_bytes_used(first_page, bytes_used);
        byte_cnt += bytes_used;

        /* All the rest of the pages should be free. We need to set
         * their scan_start_offset pointer to the start of the
         * region, and set the bytes_used. */
        while (more) {
            page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
            gc_assert(page_table[next_page].allocated & page_type_flag);
            gc_assert(page_bytes_used(next_page) == 0);
            gc_assert(page_table[next_page].gen == gc_alloc_generation);
            gc_assert(page_table[next_page].large_object == 0);
            gc_assert(page_scan_start_offset(next_page) ==
                      void_diff(page_address(next_page),
                                alloc_region->start_addr));

            /* Calculate the number of bytes used in this page. */
            more = 0;
            if ((bytes_used = void_diff(alloc_region->free_pointer,
                                        page_address(next_page)))>GENCGC_CARD_BYTES) {
                bytes_used = GENCGC_CARD_BYTES;
                more = 1;
            }
            set_page_bytes_used(next_page, bytes_used);
            byte_cnt += bytes_used;

            next_page++;
        }

        region_size = void_diff(alloc_region->free_pointer,
                                alloc_region->start_addr);
        bytes_allocated += region_size;
        generations[gc_alloc_generation].bytes_allocated += region_size;

        gc_assert((byte_cnt- orig_first_page_bytes_used) == region_size);

        /* Set the generations alloc restart page to the last page of
         * the region. */
        set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0, next_page-1);

        /* Add the region to the new_areas if requested. */
        if (BOXED_PAGE_FLAG & page_type_flag)
            add_new_area(first_page,orig_first_page_bytes_used, region_size);

        /*
        FSHOW((stderr,
               "/gc_alloc_update_page_tables update %d bytes to gen %d\n",
               region_size,
               gc_alloc_generation));
        */
    } else {
        /* There are no bytes allocated. Unallocate the first_page if
         * there are 0 bytes_used. */
        page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
        if (page_bytes_used(first_page) == 0)
            page_table[first_page].allocated = FREE_PAGE_FLAG;
    }

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
        gc_assert(page_bytes_used(next_page) == 0);
        page_table[next_page].allocated = FREE_PAGE_FLAG;
        next_page++;
    }
    ret = thread_mutex_unlock(&free_pages_lock);
    gc_assert(ret == 0);

    /* alloc_region is per-thread, we're ok to do this unlocked */
    gc_set_region_empty(alloc_region);
}
/* Allocate a possibly large object. */
void *
gc_alloc_large(sword_t nbytes, int page_type_flag, struct alloc_region *alloc_region)
{
    boolean more;
    page_index_t first_page, next_page, last_page;
    page_bytes_t orig_first_page_bytes_used;
    os_vm_size_t byte_cnt;
    os_vm_size_t bytes_used;
    int ret;

    ret = thread_mutex_lock(&free_pages_lock);
    gc_assert(ret == 0);

    first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 1);
    if (first_page <= alloc_region->last_page) {
        first_page = alloc_region->last_page+1;
    }

    last_page=gc_find_freeish_pages(&first_page,nbytes, page_type_flag);

    gc_assert(first_page > alloc_region->last_page);

    set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 1, last_page);

    /* Set up the pages. */
    orig_first_page_bytes_used = page_bytes_used(first_page);

    /* Large objects don't share pages with other objects. */
    gc_assert(orig_first_page_bytes_used == 0);

    page_table[first_page].allocated = page_type_flag;
    page_table[first_page].gen = gc_alloc_generation;
    page_table[first_page].large_object = 1;
    set_page_scan_start_offset(first_page, 0);

    byte_cnt = 0;

    /* Calc. the number of bytes used in this page. This is not
     * always the number of new bytes, unless it was free. */
    more = 0;
    if ((bytes_used = nbytes+orig_first_page_bytes_used) > GENCGC_CARD_BYTES) {
        bytes_used = GENCGC_CARD_BYTES;
        more = 1;
    }
    set_page_bytes_used(first_page, bytes_used);
    byte_cnt += bytes_used;

    next_page = first_page+1;

    /* All the rest of the pages should be free. We need to set their
     * scan_start_offset pointer to the start of the region, and set
     * the bytes_used. */
    while (more) {
        gc_assert(page_free_p(next_page));
        gc_assert(page_bytes_used(next_page) == 0);
        page_table[next_page].allocated = page_type_flag;
        page_table[next_page].gen = gc_alloc_generation;
        page_table[next_page].large_object = 1;

        set_page_scan_start_offset(next_page,
            npage_bytes(next_page-first_page) - orig_first_page_bytes_used);

        /* Calculate the number of bytes used in this page. */
        more = 0;
        bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt;
        if (bytes_used > GENCGC_CARD_BYTES) {
            bytes_used = GENCGC_CARD_BYTES;
            more = 1;
        }
        set_page_bytes_used(next_page, bytes_used);
        page_table[next_page].write_protected=0;
        page_table[next_page].dont_move=0;
        byte_cnt += bytes_used;
        next_page++;
    }

    gc_assert((byte_cnt-orig_first_page_bytes_used) == (size_t)nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (BOXED_PAGE_FLAG & page_type_flag)
        add_new_area(first_page,orig_first_page_bytes_used,nbytes);

    /* Bump up last_free_page */
    if (last_page+1 > last_free_page) {
        last_free_page = last_page+1;
        set_alloc_pointer((lispobj)(page_address(last_free_page)));
    }
    ret = thread_mutex_unlock(&free_pages_lock);
    gc_assert(ret == 0);

#ifdef READ_PROTECT_FREE_PAGES
    os_protect(page_address(first_page),
               npage_bytes(1+last_page-first_page),
               OS_VM_PROT_ALL);
#endif

    zero_dirty_pages(first_page, last_page);

    return page_address(first_page);
}
static page_index_t gencgc_alloc_start_page = -1;

void
gc_heap_exhausted_error_or_lose (sword_t available, sword_t requested)
{
    struct thread *thread = arch_os_get_current_thread();
    /* Write basic information before doing anything else: if we don't
     * call to lisp this is a must, and even if we do there is always
     * the danger that we bounce back here before the error has been
     * handled, or indeed even printed.
     */
    report_heap_exhaustion(available, requested, thread);
    if (gc_active_p || (available == 0)) {
        /* If we are in GC, or totally out of memory there is no way
         * to sanely transfer control to the lisp-side of things.
         */
        lose("Heap exhausted, game over.");
    }
    else {
        /* FIXME: assert free_pages_lock held */
        (void)thread_mutex_unlock(&free_pages_lock);
#if !(defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD))
        gc_assert(get_pseudo_atomic_atomic(thread));
        clear_pseudo_atomic_atomic(thread);
        if (get_pseudo_atomic_interrupted(thread))
            do_pending_interrupt();
#endif
        /* Another issue is that signalling HEAP-EXHAUSTED error leads
         * to running user code at arbitrary places, even in a
         * WITHOUT-INTERRUPTS which may lead to a deadlock without
         * running out of the heap. So at this point all bets are
         * off. */
        if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL)
            corruption_warning_and_maybe_lose
                ("Signalling HEAP-EXHAUSTED in a WITHOUT-INTERRUPTS.");
        /* available and requested should be double word aligned, thus
           they can be passed as fixnums and shifted later. */
        funcall2(StaticSymbolFunction(HEAP_EXHAUSTED_ERROR), available, requested);
        lose("HEAP-EXHAUSTED-ERROR fell through");
    }
}
page_index_t
gc_find_freeish_pages(page_index_t *restart_page_ptr, sword_t bytes,
                      int page_type_flag)
{
    page_index_t most_bytes_found_from = 0, most_bytes_found_to = 0;
    page_index_t first_page, last_page, restart_page = *restart_page_ptr;
    os_vm_size_t nbytes = bytes;
    os_vm_size_t nbytes_goal = nbytes;
    os_vm_size_t bytes_found = 0;
    os_vm_size_t most_bytes_found = 0;
    boolean small_object = nbytes < GENCGC_CARD_BYTES;
    /* FIXME: assert(free_pages_lock is held); */

    if (nbytes_goal < gencgc_alloc_granularity)
        nbytes_goal = gencgc_alloc_granularity;

    /* Toggled by gc_and_save for heap compaction, normally -1. */
    if (gencgc_alloc_start_page != -1) {
        restart_page = gencgc_alloc_start_page;
    }

    /* FIXME: This is on bytes instead of nbytes pending cleanup of
     * long from the interface. */
    gc_assert(bytes>=0);
    /* Search for a page with at least nbytes of space. We prefer
     * not to split small objects on multiple pages, to reduce the
     * number of contiguous allocation regions spanning multiple
     * pages: this helps avoid excessive conservatism.
     *
     * For other objects, we guarantee that they start on their own
     * page boundary.
     */
    first_page = restart_page;
    while (first_page < page_table_pages) {
        bytes_found = 0;
        if (page_free_p(first_page)) {
            gc_assert(0 == page_bytes_used(first_page));
            bytes_found = GENCGC_CARD_BYTES;
        } else if (small_object &&
                   (page_table[first_page].allocated == page_type_flag) &&
                   (page_table[first_page].large_object == 0) &&
                   (page_table[first_page].gen == gc_alloc_generation) &&
                   (page_table[first_page].write_protected == 0) &&
                   (page_table[first_page].dont_move == 0)) {
            bytes_found = GENCGC_CARD_BYTES - page_bytes_used(first_page);
            if (bytes_found < nbytes) {
                if (bytes_found > most_bytes_found)
                    most_bytes_found = bytes_found;
                first_page++;
                continue;
            }
        } else {
            first_page++;
            continue;
        }

        gc_assert(page_table[first_page].write_protected == 0);
        for (last_page = first_page+1;
             ((last_page < page_table_pages) &&
              page_free_p(last_page) &&
              (bytes_found < nbytes_goal));
             last_page++) {
            bytes_found += GENCGC_CARD_BYTES;
            gc_assert(0 == page_bytes_used(last_page));
            gc_assert(0 == page_table[last_page].write_protected);
        }

        if (bytes_found > most_bytes_found) {
            most_bytes_found = bytes_found;
            most_bytes_found_from = first_page;
            most_bytes_found_to = last_page;
        }
        if (bytes_found >= nbytes_goal)
            break;

        first_page = last_page;
    }

    bytes_found = most_bytes_found;
    restart_page = first_page + 1;

    /* Check for a failure */
    if (bytes_found < nbytes) {
        gc_assert(restart_page >= page_table_pages);
        gc_heap_exhausted_error_or_lose(most_bytes_found, nbytes);
    }

    gc_assert(most_bytes_found_to);
    *restart_page_ptr = most_bytes_found_from;
    return most_bytes_found_to-1;
}
/* Allocate bytes. All the rest of the special-purpose allocation
 * functions will eventually call this */

void *
gc_alloc_with_region(sword_t nbytes,int page_type_flag, struct alloc_region *my_region,
                     int quick_p)
{
    void *new_free_pointer;

    if (nbytes>=LARGE_OBJECT_SIZE)
        return gc_alloc_large(nbytes, page_type_flag, my_region);

    /* Check whether there is room in the current alloc region. */
    new_free_pointer = my_region->free_pointer + nbytes;

    /* fprintf(stderr, "alloc %d bytes from %p to %p\n", nbytes,
       my_region->free_pointer, new_free_pointer); */

    if (new_free_pointer <= my_region->end_addr) {
        /* If so then allocate from the current alloc region. */
        void *new_obj = my_region->free_pointer;
        my_region->free_pointer = new_free_pointer;

        /* Unless a `quick' alloc was requested, check whether the
           alloc region is almost empty. */
        if (!quick_p &&
            void_diff(my_region->end_addr,my_region->free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(page_type_flag, my_region);
            /* Set up a new region. */
            gc_alloc_new_region(32 /*bytes*/, page_type_flag, my_region);
        }

        return((void *)new_obj);
    }

    /* Else not enough free space in the current region: retry with a
     * new region. */

    gc_alloc_update_page_tables(page_type_flag, my_region);
    gc_alloc_new_region(nbytes, page_type_flag, my_region);
    return gc_alloc_with_region(nbytes, page_type_flag, my_region,0);
}
/* Copy a large object. If the object is in a large object region then
 * it is simply promoted, else it is copied. If it's large enough then
 * it's copied to a large object region.
 *
 * Bignums and vectors may have shrunk. If the object is not copied
 * the space needs to be reclaimed, and the page_tables corrected. */
static lispobj
general_copy_large_object(lispobj object, word_t nwords, boolean boxedp)
{
    lispobj *new;
    page_index_t first_page;

    CHECK_COPY_PRECONDITIONS(object, nwords);

    if ((nwords > 1024*1024) && gencgc_verbose) {
        FSHOW((stderr, "/general_copy_large_object: %d bytes\n",
               nwords*N_WORD_BYTES));
    }

    /* Check whether it's a large object. */
    first_page = find_page_index((void *)object);
    gc_assert(first_page >= 0);

    if (page_table[first_page].large_object) {
        /* Promote the object. Note: Unboxed objects may have been
         * allocated to a BOXED region so it may be necessary to
         * change the region to UNBOXED. */
        os_vm_size_t remaining_bytes;
        os_vm_size_t bytes_freed;
        page_index_t next_page;
        page_bytes_t old_bytes_used;

        /* FIXME: This comment is somewhat stale.
         *
         * Note: Any page write-protection must be removed, else a
         * later scavenge_newspace may incorrectly not scavenge these
         * pages. This would not be necessary if they are added to the
         * new areas, but let's do it for them all (they'll probably
         * be written anyway?). */

        gc_assert(page_starts_contiguous_block_p(first_page));
        next_page = first_page;
        remaining_bytes = nwords*N_WORD_BYTES;

        while (remaining_bytes > GENCGC_CARD_BYTES) {
            gc_assert(page_table[next_page].gen == from_space);
            gc_assert(page_table[next_page].large_object);
            gc_assert(page_scan_start_offset(next_page) ==
                      npage_bytes(next_page-first_page));
            gc_assert(page_bytes_used(next_page) == GENCGC_CARD_BYTES);
            /* Should have been unprotected by unprotect_oldspace()
             * for boxed objects, and after promotion unboxed ones
             * should not be on protected pages at all. */
            gc_assert(!page_table[next_page].write_protected);

            if (boxedp)
                gc_assert(page_boxed_p(next_page));
            else {
                gc_assert(page_allocated_no_region_p(next_page));
                page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
            }
            page_table[next_page].gen = new_space;

            remaining_bytes -= GENCGC_CARD_BYTES;
            next_page++;
        }

        /* Now only one page remains, but the object may have shrunk so
         * there may be more unused pages which will be freed. */

        /* Object may have shrunk but shouldn't have grown - check. */
        gc_assert(page_bytes_used(next_page) >= remaining_bytes);

        page_table[next_page].gen = new_space;

        if (boxedp)
            gc_assert(page_boxed_p(next_page));
        else
            page_table[next_page].allocated = UNBOXED_PAGE_FLAG;

        /* Adjust the bytes_used. */
        old_bytes_used = page_bytes_used(next_page);
        set_page_bytes_used(next_page, remaining_bytes);

        bytes_freed = old_bytes_used - remaining_bytes;

        /* Free any remaining pages; needs care. */
        next_page++;
        while ((old_bytes_used == GENCGC_CARD_BYTES) &&
               (page_table[next_page].gen == from_space) &&
               /* FIXME: It is not obvious to me why this is necessary
                * as a loop condition: it seems to me that the
                * scan_start_offset test should be sufficient, but
                * experimentally that is not the case. --NS
                * 2011-11-28 */
               (boxedp ?
                page_boxed_p(next_page) :
                page_allocated_no_region_p(next_page)) &&
               page_table[next_page].large_object &&
               (page_scan_start_offset(next_page) ==
                npage_bytes(next_page - first_page))) {
            /* Checks out OK, free the page. Don't need to bother zeroing
             * pages as this should have been done before shrinking the
             * object. These pages shouldn't be write-protected, even if
             * boxed they should be zero filled. */
            gc_assert(page_table[next_page].write_protected == 0);

            old_bytes_used = page_bytes_used(next_page);
            page_table[next_page].allocated = FREE_PAGE_FLAG;
            set_page_bytes_used(next_page, 0);
            bytes_freed += old_bytes_used;
            next_page++;
        }

        if ((bytes_freed > 0) && gencgc_verbose) {
            FSHOW((stderr,
                   "/general_copy_large_object bytes_freed=%"OS_VM_SIZE_FMT"\n",
                   bytes_freed));
        }

        generations[from_space].bytes_allocated -= nwords*N_WORD_BYTES
            + bytes_freed;
        generations[new_space].bytes_allocated += nwords*N_WORD_BYTES;
        bytes_allocated -= bytes_freed;

        /* Add the region to the new_areas if requested. */
        if (boxedp)
            add_new_area(first_page,0,nwords*N_WORD_BYTES);

        return(object);

    } else {
        /* Allocate space. */
        new = gc_general_alloc(nwords*N_WORD_BYTES,
                               (boxedp ? BOXED_PAGE_FLAG : UNBOXED_PAGE_FLAG),
                               ALLOC_QUICK);

        /* Copy the object. */
        memcpy(new,native_pointer(object),nwords*N_WORD_BYTES);

        /* Return Lisp pointer of new object. */
        return make_lispobj(new, lowtag_of(object));
    }
}

lispobj
copy_large_object(lispobj object, sword_t nwords)
{
    return general_copy_large_object(object, nwords, 1);
}

lispobj
copy_large_unboxed_object(lispobj object, sword_t nwords)
{
    return general_copy_large_object(object, nwords, 0);
}

/* to copy unboxed objects */
lispobj
copy_unboxed_object(lispobj object, sword_t nwords)
{
    return gc_general_copy_object(object, nwords, UNBOXED_PAGE_FLAG);
}
/*
 * code and code-related objects
 */
/*
static lispobj trans_fun_header(lispobj object);
static lispobj trans_boxed(lispobj object);
*/

/* Scan an x86 compiled code object, looking for possible fixups that
 * have been missed after a move.
 *
 * Two types of fixups are needed:
 * 1. Absolute fixups to within the code object.
 * 2. Relative fixups to outside the code object.
 *
 * Currently only absolute fixups to the constant vector, or to the
 * code area are checked. */
1611 #ifdef LISP_FEATURE_X86
1612 void
1613 sniff_code_object(struct code *code, os_vm_size_t displacement)
1615 sword_t nheader_words, ncode_words, nwords;
1616 os_vm_address_t constants_start_addr = NULL, constants_end_addr, p;
1617 os_vm_address_t code_start_addr, code_end_addr;
1618 os_vm_address_t code_addr = (os_vm_address_t)code;
1619 int fixup_found = 0;
1621 if (!check_code_fixups)
1622 return;
1624 FSHOW((stderr, "/sniffing code: %p, %lu\n", code, displacement));
1626 ncode_words = code_instruction_words(code->code_size);
1627 nheader_words = code_header_words(*(lispobj *)code);
1628 nwords = ncode_words + nheader_words;
1630 constants_start_addr = code_addr + 5*N_WORD_BYTES;
1631 constants_end_addr = code_addr + nheader_words*N_WORD_BYTES;
1632 code_start_addr = code_addr + nheader_words*N_WORD_BYTES;
1633 code_end_addr = code_addr + nwords*N_WORD_BYTES;
1635 /* Work through the unboxed code. */
1636 for (p = code_start_addr; p < code_end_addr; p++) {
1637 void *data = *(void **)p;
1638 unsigned d1 = *((unsigned char *)p - 1);
1639 unsigned d2 = *((unsigned char *)p - 2);
1640 unsigned d3 = *((unsigned char *)p - 3);
1641 unsigned d4 = *((unsigned char *)p - 4);
1642 #if QSHOW
1643 unsigned d5 = *((unsigned char *)p - 5);
1644 unsigned d6 = *((unsigned char *)p - 6);
1645 #endif
1647 /* Check for code references. */
1648 /* Check for a 32 bit word that looks like an absolute
1649 reference to within the code adea of the code object. */
1650 if ((data >= (void*)(code_start_addr-displacement))
1651 && (data < (void*)(code_end_addr-displacement))) {
1652 /* function header */
1653 if ((d4 == 0x5e)
1654 && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) ==
1655 (unsigned)code)) {
1656 /* Skip the function header */
1657 p += 6*4 - 4 - 1;
1658 continue;
1660 /* the case of PUSH imm32 */
1661 if (d1 == 0x68) {
1662 fixup_found = 1;
1663 FSHOW((stderr,
1664 "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1665 p, d6, d5, d4, d3, d2, d1, data));
1666 FSHOW((stderr, "/PUSH $0x%.8x\n", data));
1668 /* the case of MOV [reg-8],imm32 */
1669 if ((d3 == 0xc7)
1670 && (d2==0x40 || d2==0x41 || d2==0x42 || d2==0x43
1671 || d2==0x45 || d2==0x46 || d2==0x47)
1672 && (d1 == 0xf8)) {
1673 fixup_found = 1;
1674 FSHOW((stderr,
1675 "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1676 p, d6, d5, d4, d3, d2, d1, data));
1677 FSHOW((stderr, "/MOV [reg-8],$0x%.8x\n", data));
1679 /* the case of LEA reg,[disp32] */
1680 if ((d2 == 0x8d) && ((d1 & 0xc7) == 5)) {
1681 fixup_found = 1;
1682 FSHOW((stderr,
1683 "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1684 p, d6, d5, d4, d3, d2, d1, data));
1685 FSHOW((stderr,"/LEA reg,[$0x%.8x]\n", data));
1689 /* Check for constant references. */
1690 /* Check for a 32 bit word that looks like an absolute
1691 reference to within the constant vector. Constant references
1692 will be aligned. */
1693 if ((data >= (void*)(constants_start_addr-displacement))
1694 && (data < (void*)(constants_end_addr-displacement))
1695 && (((unsigned)data & 0x3) == 0)) {
1696 /* Mov eax,m32 */
1697 if (d1 == 0xa1) {
1698 fixup_found = 1;
1699 FSHOW((stderr,
1700 "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1701 p, d6, d5, d4, d3, d2, d1, data));
1702 FSHOW((stderr,"/MOV eax,0x%.8x\n", data));
1705 /* the case of MOV m32,EAX */
1706 if (d1 == 0xa3) {
1707 fixup_found = 1;
1708 FSHOW((stderr,
1709 "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1710 p, d6, d5, d4, d3, d2, d1, data));
1711 FSHOW((stderr, "/MOV 0x%.8x,eax\n", data));
1714 /* the case of CMP m32,imm32 */
1715 if ((d1 == 0x3d) && (d2 == 0x81)) {
1716 fixup_found = 1;
1717 FSHOW((stderr,
1718 "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1719 p, d6, d5, d4, d3, d2, d1, data));
1720 /* XX Check this */
1721 FSHOW((stderr, "/CMP 0x%.8x,immed32\n", data));
1724 /* Check for a mod=00, r/m=101 byte. */
1725 if ((d1 & 0xc7) == 5) {
1726 /* Cmp m32,reg */
1727 if (d2 == 0x39) {
1728 fixup_found = 1;
1729 FSHOW((stderr,
1730 "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1731 p, d6, d5, d4, d3, d2, d1, data));
1732 FSHOW((stderr,"/CMP 0x%.8x,reg\n", data));
1734 /* the case of CMP reg32,m32 */
1735 if (d2 == 0x3b) {
1736 fixup_found = 1;
1737 FSHOW((stderr,
1738 "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1739 p, d6, d5, d4, d3, d2, d1, data));
1740 FSHOW((stderr, "/CMP reg32,0x%.8x\n", data));
1742 /* the case of MOV m32,reg32 */
1743 if (d2 == 0x89) {
1744 fixup_found = 1;
1745 FSHOW((stderr,
1746 "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1747 p, d6, d5, d4, d3, d2, d1, data));
1748 FSHOW((stderr, "/MOV 0x%.8x,reg32\n", data));
1750 /* the case of MOV reg32,m32 */
1751 if (d2 == 0x8b) {
1752 fixup_found = 1;
1753 FSHOW((stderr,
1754 "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1755 p, d6, d5, d4, d3, d2, d1, data));
1756 FSHOW((stderr, "/MOV reg32,0x%.8x\n", data));
1758 /* the case of LEA reg32,m32 */
1759 if (d2 == 0x8d) {
1760 fixup_found = 1;
1761 FSHOW((stderr,
1762 "abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1763 p, d6, d5, d4, d3, d2, d1, data));
1764 FSHOW((stderr, "/LEA reg32,0x%.8x\n", data));
1770 /* If anything was found, print some information on the code
1771 * object. */
1772 if (fixup_found) {
1773 FSHOW((stderr,
1774 "/compiled code object at %x: header words = %d, code words = %d\n",
1775 code, nheader_words, ncode_words));
1776 FSHOW((stderr,
1777 "/const start = %x, end = %x\n",
1778 constants_start_addr, constants_end_addr));
1779 FSHOW((stderr,
1780 "/code start = %x, end = %x\n",
1781 code_start_addr, code_end_addr));
1784 #endif
1786 #ifdef LISP_FEATURE_X86
1787 void
1788 gencgc_apply_code_fixups(struct code *old_code, struct code *new_code)
1790 sword_t nheader_words, ncode_words, nwords;
1791 os_vm_address_t __attribute__((unused)) constants_start_addr, constants_end_addr;
1792 os_vm_address_t __attribute__((unused)) code_start_addr, code_end_addr;
1793 os_vm_address_t code_addr = (os_vm_address_t)new_code;
1794 os_vm_address_t old_addr = (os_vm_address_t)old_code;
1795 os_vm_size_t displacement = code_addr - old_addr;
1796 lispobj fixups = NIL;
1797 struct vector *fixups_vector;
1799 ncode_words = code_instruction_words(new_code->code_size);
1800 nheader_words = code_header_words(*(lispobj *)new_code);
1801 nwords = ncode_words + nheader_words;
1802 /* FSHOW((stderr,
1803 "/compiled code object at %x: header words = %d, code words = %d\n",
1804 new_code, nheader_words, ncode_words)); */
1805 constants_start_addr = code_addr + 5*N_WORD_BYTES;
1806 constants_end_addr = code_addr + nheader_words*N_WORD_BYTES;
1807 code_start_addr = code_addr + nheader_words*N_WORD_BYTES;
1808 code_end_addr = code_addr + nwords*N_WORD_BYTES;
1810 FSHOW((stderr,
1811 "/const start = %x, end = %x\n",
1812 constants_start_addr,constants_end_addr));
1813 FSHOW((stderr,
1814 "/code start = %x; end = %x\n",
1815 code_start_addr,code_end_addr));
1818 fixups = new_code->fixups;
1819 /* It will be a Lisp vector if valid, or 0 if there are no fixups */
1820 if (fixups == 0 || !is_lisp_pointer(fixups)) {
1821 /* Check for possible errors. */
1822 if (check_code_fixups)
1823 sniff_code_object(new_code, displacement);
1825 return;
1828 fixups_vector = (struct vector *)native_pointer(fixups);
1830 /* Could be pointing to a forwarding pointer. */
1831 /* This is extremely unlikely, because the only reference to the fixups
1832 is usually from the code itself; so scavenging the vector won't occur
1833 until after the code object is known to be live. As we're just now
1834 enlivening the code, the fixups shouldn't have been forwarded.
1835 Maybe the vector is on the special binding stack though ... */
1836 if (is_lisp_pointer(fixups) &&
1837 (find_page_index((void*)fixups_vector) != -1) &&
1838 forwarding_pointer_p((lispobj*)fixups_vector)) {
1839 /* If so, then follow it. */
1840 /*SHOW("following pointer to a forwarding pointer");*/
1841 fixups_vector = (struct vector *)
1842 native_pointer(forwarding_pointer_value((lispobj*)fixups_vector));
1845 /*SHOW("got fixups");*/
1847 if (widetag_of(fixups_vector->header) == SIMPLE_ARRAY_WORD_WIDETAG) {
1848 /* Got the fixups for the code block. Now work through the vector,
1849 and apply a fixup at each address. */
1850 sword_t length = fixnum_value(fixups_vector->length);
1851 sword_t i;
1852 for (i = 0; i < length; i++) {
1853 long offset = fixups_vector->data[i];
1854 /* Now check the current value of offset. */
1855 os_vm_address_t old_value = *(os_vm_address_t *)(code_start_addr + offset);
1857 /* If it's within the old_code object then it must be an
1858 * absolute fixup (relative ones are not saved) */
1859 if ((old_value >= old_addr)
1860 && (old_value < (old_addr + nwords*N_WORD_BYTES)))
1861 /* So add the displacement. */
1862 *(os_vm_address_t *)(code_start_addr + offset) =
1863 old_value + displacement;
1864 else
1865 /* It is outside the old code object so it must be a
1866 * relative fixup (absolute fixups are not saved). So
1867 * subtract the displacement. */
1868 *(os_vm_address_t *)(code_start_addr + offset) =
1869 old_value - displacement;
1871 } else {
1872 /* This used to just print a note to stderr, but a bogus fixup seems to
1873 * indicate real heap corruption, so a hard failure is in order. */
1874 lose("fixup vector %p has a bad widetag: %d\n",
1875 fixups_vector, widetag_of(fixups_vector->header));
1878 /* Check for possible errors. */
1879 if (check_code_fixups) {
1880 sniff_code_object(new_code,displacement);
1883 #endif
1885 static lispobj
1886 trans_boxed_large(lispobj object)
1888 gc_assert(is_lisp_pointer(object));
1889 return copy_large_object(object,
1890 (HeaderValue(*native_pointer(object)) | 1) + 1);
1893 /*
1894 * weak pointers
1895 */
1897 /* XX This is a hack adapted from cgc.c. These don't work too
1898 * efficiently with the gencgc as a list of the weak pointers is
1899 * maintained within the objects which causes writes to the pages. A
1900 * limited attempt is made to avoid unnecessary writes, but this needs
1901 * a re-think. */
1902 /* FIXME: now that we have non-Lisp hashtables in the GC, it might make sense
1903 * to stop chaining weak pointers through a slot in the object, as a remedy to
1904 * the above concern. It would also shorten the object by 2 words. */
1905 static sword_t
1906 scav_weak_pointer(lispobj *where, lispobj object)
1908 /* Since we overwrite the 'next' field, we have to make
1909 * sure not to do so for pointers already in the list.
1910 * Instead of searching the list of weak_pointers each
1911 * time, we ensure that next is always NULL when the weak
1912 * pointer isn't in the list, and not NULL otherwise.
1913 * Since we can't use NULL to denote end of list, we
1914 * use a pointer back to the same weak_pointer.
1916 struct weak_pointer * wp = (struct weak_pointer*)where;
1918 if (NULL == wp->next && weak_pointer_breakable_p(wp)) {
1919 wp->next = weak_pointers;
1920 weak_pointers = wp;
1921 if (NULL == wp->next)
1922 wp->next = wp;
1925 /* Do not let GC scavenge the value slot of the weak pointer.
1926 * (That is why it is a weak pointer.) */
1928 return WEAK_POINTER_NWORDS;
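/* For illustration only - the real consumer is scan_weak_pointers() in
 * gc-common.c, whose details may differ - the chain built above is
 * walked after scavenging, breaking pointers whose referents did not
 * survive. Note the self-link sentinel that ends the list: */
#if 0
static void scan_weak_pointers_sketch(void)
{
    struct weak_pointer *wp = weak_pointers, *next;
    for ( ; wp; wp = next) {
        next = (wp->next == wp) ? NULL : wp->next; /* self-link = end */
        wp->next = NULL;               /* reset for the next GC cycle */
        if (weak_pointer_breakable_p(wp)) {
            wp->value = NIL;           /* referent died: break the ref */
            wp->broken = T; /* assumes the 'broken' slot and the T
                             * constant as used in gc-common.c */
        }
    }
    weak_pointers = NULL;
}
#endif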
1932 lispobj *
1933 search_read_only_space(void *pointer)
1935 lispobj *start = (lispobj *) READ_ONLY_SPACE_START;
1936 lispobj *end = (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0);
1937 if ((pointer < (void *)start) || (pointer >= (void *)end))
1938 return NULL;
1939 return gc_search_space(start, pointer);
1942 lispobj *
1943 search_static_space(void *pointer)
1945 lispobj *start = (lispobj *)STATIC_SPACE_START;
1946 lispobj *end = (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0);
1947 if ((pointer < (void *)start) || (pointer >= (void *)end))
1948 return NULL;
1949 return gc_search_space(start, pointer);
1952 /* a faster version for searching the dynamic space. This will work even
1953 * if the object is in a current allocation region. */
1954 lispobj *
1955 search_dynamic_space(void *pointer)
1957 page_index_t page_index = find_page_index(pointer);
1958 lispobj *start;
1960 /* The address may be invalid, so do some checks. */
1961 if ((page_index == -1) || page_free_p(page_index))
1962 return NULL;
1963 start = (lispobj *)page_scan_start(page_index);
1964 return gc_search_space(start, pointer);
1967 // Return the starting address of the object containing 'addr'
1968 // if and only if the object is one which would be evacuated from 'from_space'
1969 // were it allowed to be either discarded as garbage or moved.
1970 // 'addr_page_index' is the page containing 'addr' and must not be -1.
1971 // Return 0 if there is no such object - that is, if addr is past the
1972 // end of the used bytes, or its pages are not in 'from_space' etc.
1973 static lispobj*
1974 conservative_root_p(void *addr, page_index_t addr_page_index)
1976 #ifdef GENCGC_IS_PRECISE
1977 /* If we're in precise gencgc (non-x86oid as of this writing) then
1978 * we are only called on valid object pointers in the first place,
1979 * so we just have to do a bounds-check against the heap, a
1980 * generation check, and the already-pinned check. */
1981 if ((page_table[addr_page_index].gen != from_space)
1982 || (page_table[addr_page_index].dont_move != 0))
1983 return 0;
1984 return (lispobj*)1;
1985 #else
1986 /* quick check 1: Address is quite likely to have been invalid. */
1987 if (page_free_p(addr_page_index)
1988 || (page_bytes_used(addr_page_index) == 0)
1989 || (page_table[addr_page_index].gen != from_space))
1990 return 0;
1991 gc_assert(!(page_table[addr_page_index].allocated&OPEN_REGION_PAGE_FLAG));
1993 /* quick check 2: Check the offset within the page. */
1996 if (((uword_t)addr & (GENCGC_CARD_BYTES - 1)) > page_bytes_used(addr_page_index))
1997 return 0;
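/* (Example, assuming GENCGC_CARD_BYTES is a power of two such as
 * 0x8000: for addr = 0x10006f00 on a page with only 0x2000 bytes
 * used, addr & 0x7fff = 0x6f00 > 0x2000, so the "pointer" aims past
 * the used part of its page and is rejected.) */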
1999 /* Filter out anything which can't be a pointer to a Lisp object
2000 * (or, as a special case which also requires dont_move, a return
2001 * address referring to something in a CodeObject). This is
2002 * expensive but important, since it vastly reduces the
2003 * probability that random garbage will be bogusly interpreted as
2004 * a pointer which prevents a page from moving. */
2005 lispobj* object_start = search_dynamic_space(addr);
2006 if (!object_start) return 0;
2008 /* If the containing object is a code object and 'addr' points
2009 * anywhere beyond the boxed words,
2010 * presume it to be a valid unboxed return address. */
2011 if (instruction_ptr_p(addr, object_start))
2012 return object_start;
2014 /* Large object pages only contain ONE object, and it will never
2015 * be a CONS. However, arrays and bignums can be allocated larger
2016 * than necessary and then shrunk to fit, leaving what look like
2017 * (0 . 0) CONSes at the end. These appear valid to
2018 * properly_tagged_descriptor_p(), so pick them off here. */
2019 if (((lowtag_of((lispobj)addr) == LIST_POINTER_LOWTAG) &&
2020 page_table[addr_page_index].large_object)
2021 || !properly_tagged_descriptor_p(addr, object_start))
2022 return 0;
2024 return object_start;
2025 #endif
2028 /* Adjust large bignum and vector objects. This will adjust the
2029 * allocated region if the size has shrunk, and move unboxed objects
2030 * into unboxed pages. The pages are not promoted here, and the
2031 * promoted region is not added to the new_regions; this is really
2032 * only designed to be called from preserve_pointer(). Nothing should fail
2033 * if this is missed; it just may delay the moving of objects to unboxed
2034 * pages, and the freeing of pages. */
2035 static void
2036 maybe_adjust_large_object(lispobj *where)
2038 page_index_t first_page;
2039 page_index_t next_page;
2040 sword_t nwords;
2042 uword_t remaining_bytes;
2043 uword_t bytes_freed;
2044 uword_t old_bytes_used;
2046 int boxed;
2048 /* Check whether it's a vector or bignum object. */
2049 lispobj widetag = widetag_of(where[0]);
2050 if (widetag == SIMPLE_VECTOR_WIDETAG)
2051 boxed = BOXED_PAGE_FLAG;
2052 else if (specialized_vector_widetag_p(widetag) || widetag == BIGNUM_WIDETAG)
2053 boxed = UNBOXED_PAGE_FLAG;
2054 else
2055 return;
2057 /* Find its current size. */
2058 nwords = sizetab[widetag](where);
2060 first_page = find_page_index((void *)where);
2061 gc_assert(first_page >= 0);
2063 /* Note: Any page write-protection must be removed, else a later
2064 * scavenge_newspace may incorrectly not scavenge these pages.
2065 * This would not be necessary if they are added to the new areas,
2066 * but let's do it for them all (they'll probably be written
2067 * anyway?). */
2069 gc_assert(page_starts_contiguous_block_p(first_page));
2071 next_page = first_page;
2072 remaining_bytes = nwords*N_WORD_BYTES;
2073 while (remaining_bytes > GENCGC_CARD_BYTES) {
2074 gc_assert(page_table[next_page].gen == from_space);
2075 gc_assert(page_allocated_no_region_p(next_page));
2076 gc_assert(page_table[next_page].large_object);
2077 gc_assert(page_scan_start_offset(next_page) ==
2078 npage_bytes(next_page-first_page));
2079 gc_assert(page_bytes_used(next_page) == GENCGC_CARD_BYTES);
2081 page_table[next_page].allocated = boxed;
2083 /* Shouldn't be write-protected at this stage. Essential that the
2084 * pages aren't. */
2085 gc_assert(!page_table[next_page].write_protected);
2086 remaining_bytes -= GENCGC_CARD_BYTES;
2087 next_page++;
2090 /* Now only one page remains, but the object may have shrunk so
2091 * there may be more unused pages which will be freed. */
2093 /* Object may have shrunk but shouldn't have grown - check. */
2094 gc_assert(page_bytes_used(next_page) >= remaining_bytes);
2096 page_table[next_page].allocated = boxed;
2097 gc_assert(page_table[next_page].allocated ==
2098 page_table[first_page].allocated);
2100 /* Adjust the bytes_used. */
2101 old_bytes_used = page_bytes_used(next_page);
2102 set_page_bytes_used(next_page, remaining_bytes);
2104 bytes_freed = old_bytes_used - remaining_bytes;
2106 /* Free any remaining pages; needs care. */
2107 next_page++;
2108 while ((old_bytes_used == GENCGC_CARD_BYTES) &&
2109 (page_table[next_page].gen == from_space) &&
2110 page_allocated_no_region_p(next_page) &&
2111 page_table[next_page].large_object &&
2112 (page_scan_start_offset(next_page) ==
2113 npage_bytes(next_page - first_page))) {
2114 /* It checks out OK, free the page. We don't need to bother zeroing
2115 * pages as this should have been done before shrinking the
2116 * object. These pages shouldn't be write protected as they
2117 * should be zero filled. */
2118 gc_assert(page_table[next_page].write_protected == 0);
2120 old_bytes_used = page_bytes_used(next_page);
2121 page_table[next_page].allocated = FREE_PAGE_FLAG;
2122 set_page_bytes_used(next_page, 0);
2123 bytes_freed += old_bytes_used;
2124 next_page++;
2127 if ((bytes_freed > 0) && gencgc_verbose) {
2128 FSHOW((stderr,
2129 "/maybe_adjust_large_object() freed %d\n",
2130 bytes_freed));
2133 generations[from_space].bytes_allocated -= bytes_freed;
2134 bytes_allocated -= bytes_freed;
2136 return;
2139 #if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
2140 # define hopscotch_init()
2141 # define hopscotch_reset(a)
2142 # define scavenge_pinned_ranges()
2143 # define wipe_nonpinned_words()
2144 # define hopscotch_create(a,b,c,d,e)
2145 # define hopscotch_log_stats(a,b)
2146 /* After scavenging of the roots is done, we go back to the pinned objects
2147 * and look within them for pointers. While heap_scavenge() could certainly
2148 * do this, it would potentially lead to extra work, since we can't know
2149 * whether any given object has been examined at least once, because there is
2150 * no telltale forwarding-pointer. The easiest thing to do is defer all
2151 * pinned objects to a subsequent pass, as is done here.
2153 #else
2154 static void
2155 scavenge_pinned_ranges()
2157 int i;
2158 lispobj key;
2159 for_each_hopscotch_key(i, key, pinned_objects) {
2160 lispobj* obj = native_pointer(key);
2161 lispobj header = *obj;
2162 // Never invoke scavenger on a simple-fun, just code components.
2163 if (is_cons_half(header))
2164 scavenge(obj, 2);
2165 else if (widetag_of(header) != SIMPLE_FUN_WIDETAG)
2166 scavtab[widetag_of(header)](obj, header);
2170 /* Zero out the byte ranges on small object pages marked dont_move,
2171 * carefully skipping over objects in the pin hashtable.
2172 * TODO: by recording an additional bit per page indicating whether
2173 * there is more than one pinned object on it, we could avoid qsort()
2174 * except in the case where there is more than one. */
2175 static void
2176 wipe_nonpinned_words()
2178 void gc_heapsort_uwords(uword_t*, int);
2179 // Loop over the keys in pinned_objects and pack them densely into
2180 // the same array - pinned_objects.keys[] - but skip any simple-funs.
2181 // Admittedly this is abstraction breakage.
2182 int limit = hopscotch_max_key_index(pinned_objects);
2183 int n_pins = 0, i;
2184 for (i = 0; i <= limit; ++i) {
2185 lispobj key = pinned_objects.keys[i];
2186 if (key) {
2187 lispobj* obj = native_pointer(key);
2188 // No need to check for is_cons_half() - it will be false
2189 // on a simple-fun header, and that's the correct answer.
2190 if (widetag_of(*obj) != SIMPLE_FUN_WIDETAG)
2191 pinned_objects.keys[n_pins++] = (uword_t)obj;
2194 // Store a sentinel at the end. Even if n_pins = table capacity (unlikely),
2195 // it is safe to write one more word, because the hops[] array immediately
2196 // follows the keys[] array in memory. At worst, 2 elements of hops[]
2197 // are clobbered, which is irrelevant since the table has already been
2198 // rendered unusable by stealing its key array for a different purpose.
2199 pinned_objects.keys[n_pins] = 0;
2200 // Order by ascending address, stopping short of the sentinel.
2201 gc_heapsort_uwords(pinned_objects.keys, n_pins);
2202 #if 0
2203 printf("Sorted pin list:\n");
2204 for (i = 0; i < n_pins; ++i) {
2205 lispobj* obj = (lispobj*)pinned_objects.keys[i];
2206 if (!is_cons_half(*obj))
2207 printf("%p: %5d words\n", obj, (int)sizetab[widetag_of(*obj)](obj));
2208 else printf("%p: CONS\n", obj);
2210 #endif
2211 // Each entry in the pinned objects demarcates two ranges to be cleared:
2212 // - the range preceding it back to either the page start, or prior object.
2213 // - the range after it, up to the lesser of page bytes used or next object.
2214 uword_t preceding_object = 0;
2215 uword_t this_page_end = 0;
2216 #define page_base_address(x) (x&~(GENCGC_CARD_BYTES-1))
2217 for (i = 0; i < n_pins; ++i) {
2218 // Handle the preceding range. If this object is on the same page as
2219 // its predecessor, then intervening bytes were already zeroed.
2220 // If not, then start a new page and do some bookkeeping.
2221 lispobj* obj = (lispobj*)pinned_objects.keys[i];
2222 uword_t this_page_base = page_base_address((uword_t)obj);
2223 /* printf("i=%d obj=%p base=%p\n", i, obj, (void*)this_page_base); */
2224 if (this_page_base > page_base_address(preceding_object)) {
2225 bzero((void*)this_page_base,
2226 void_diff((void*)obj, (void*)this_page_base));
2227 // Move the page to newspace
2228 page_index_t page = find_page_index(obj);
2229 int used = page_bytes_used(page);
2230 this_page_end = this_page_base + used;
2231 /* printf(" Clearing %p .. %p (limit=%p)\n",
2232 (void*)this_page_base, obj, (void*)this_page_end); */
2233 generations[new_space].bytes_allocated += used;
2234 generations[page_table[page].gen].bytes_allocated -= used;
2235 page_table[page].gen = new_space;
2236 page_table[page].has_pins = 0;
2238 // Handle the following range.
2239 lispobj word = *obj;
2240 size_t nwords = is_cons_half(word) ? 2 : sizetab[widetag_of(word)](obj);
2241 uword_t range_start = (uword_t)(obj + nwords);
2242 uword_t range_end = this_page_end;
2243 // There is always an i+1'th key due to the sentinel value.
2244 if (page_base_address(pinned_objects.keys[i+1]) == this_page_base)
2245 range_end = pinned_objects.keys[i+1];
2246 /* printf(" Clearing %p .. %p\n", (void*)range_start, (void*)range_end); */
2247 bzero((void*)range_start,
2248 void_diff((void*)range_end, (void*)range_start));
2249 preceding_object = (uword_t)obj;
2253 /* Add 'object' to the hashtable, and if the object is a code component,
2254 * then also add all of the embedded simple-funs.
2255 * The rationale for the extra work on code components is that without it,
2256 * every test of pinned_p() on an object would have to check if the pointer
2257 * is to a simple-fun - entailing an extra read of the header - and mapping
2258 * to its code component if so. Since more calls to pinned_p occur than to
2259 * pin_object, the extra burden should be on this function.
2260 * Experimentation bears out that this is the better technique.
2261 * Also, we wouldn't often expect code components in the collected generation
2262 * so the extra work here is quite minimal, even if it can generally add to
2263 * the number of keys in the hashtable.
2265 static void
2266 pin_object(lispobj object)
2268 if (!hopscotch_containsp(&pinned_objects, object)) {
2269 hopscotch_insert(&pinned_objects, object, 1);
2270 struct code* maybe_code = (struct code*)native_pointer(object);
2271 if (widetag_of(maybe_code->header) == CODE_HEADER_WIDETAG) {
2272 for_each_simple_fun(i, fun, maybe_code, 0, {
2273 hopscotch_insert(&pinned_objects,
2274 make_lispobj(fun, FUN_POINTER_LOWTAG),
2280 #endif
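/* For illustration (a hypothetical helper; the real pinned_p test
 * lives elsewhere): because pin_object() also inserts each embedded
 * simple-fun, a pin membership test stays a single table probe, with
 * no header read and no mapping back to the code component: */
#if 0
static inline boolean pinned_p_sketch(lispobj obj)
{
    return hopscotch_containsp(&pinned_objects, obj);
}
#endif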
2282 /* Take a possible pointer to a Lisp object and mark its page in the
2283 * page_table so that it will not be relocated during a GC.
2285 * This involves locating the page it points to, then backing up to
2286 * the start of its region, then marking all pages dont_move from there
2287 * up to the first page that's not full or has a different generation
2289 * It is assumed that all the page static flags have been cleared at
2290 * the start of a GC.
2292 * It is also assumed that the current gc_alloc() region has been
2293 * flushed and the tables updated. */
2295 // TODO: there's probably a way to be a little more efficient here.
2296 // As things are, we start by finding the object that encloses 'addr',
2297 // then we see if 'addr' was a "valid" Lisp pointer to that object
2298 // - meaning we expect the correct lowtag on the pointer - except
2299 // that for code objects we don't require a correct lowtag
2300 // and we allow a pointer to anywhere in the object.
2302 // It should be possible to avoid calling search_dynamic_space
2303 // more of the time. First, check if the page pointed to might hold code.
2304 // If it does, then we continue regardless of the pointer's lowtag
2305 // (because of the special allowance). If the page definitely does *not*
2306 // hold code, then we require up front that the lowtag make sense,
2307 // by doing the same checks that are in properly_tagged_descriptor_p.
2309 // Problem: when code is allocated from a per-thread region,
2310 // does it ensure that the occupied pages are flagged as having code?
2312 static void
2313 preserve_pointer(void *addr)
2315 #ifdef LISP_FEATURE_IMMOBILE_SPACE
2316 /* Immobile space MUST be lower than dynamic space,
2317 or else this test needs to be revised */
2318 if (addr < (void*)IMMOBILE_SPACE_END) {
2319 extern void immobile_space_preserve_pointer(void*);
2320 immobile_space_preserve_pointer(addr);
2321 return;
2323 #endif
2324 page_index_t addr_page_index = find_page_index(addr);
2325 lispobj *object_start;
2327 if (addr_page_index == -1
2328 || (object_start = conservative_root_p(addr, addr_page_index)) == 0)
2329 return;
2331 /* (Now that we know that addr_page_index is in range, it's
2332 * safe to index into page_table[] with it.) */
2333 unsigned int region_allocation = page_table[addr_page_index].allocated;
2335 /* Find the beginning of the region. Note that there may be
2336 * objects in the region preceding the one that we were passed a
2337 * pointer to: if this is the case, we will write-protect all the
2338 * previous objects' pages too. */
2340 #if 0
2341 /* I think this'd work just as well, but without the assertions.
2342 * -dan 2004.01.01 */
2343 page_index_t first_page = find_page_index(page_scan_start(addr_page_index))
2344 #else
2345 page_index_t first_page = addr_page_index;
2346 while (!page_starts_contiguous_block_p(first_page)) {
2347 --first_page;
2348 /* Do some checks. */
2349 gc_assert(page_bytes_used(first_page) == GENCGC_CARD_BYTES);
2350 gc_assert(page_table[first_page].gen == from_space);
2351 gc_assert(page_table[first_page].allocated == region_allocation);
2353 #endif
2355 /* Adjust any large objects before promotion as they won't be
2356 * copied after promotion. */
2357 if (page_table[first_page].large_object) {
2358 maybe_adjust_large_object(page_address(first_page));
2359 /* It may have moved to unboxed pages. */
2360 region_allocation = page_table[first_page].allocated;
2363 /* Now work forward until the end of this contiguous area is found,
2364 * marking all pages as dont_move. */
2365 page_index_t i;
2366 for (i = first_page; ;i++) {
2367 gc_assert(page_table[i].allocated == region_allocation);
2369 /* Mark the page static. */
2370 page_table[i].dont_move = 1;
2372 /* It is essential that the pages are not write protected as
2373 * they may have pointers into the old-space which need
2374 * scavenging. They shouldn't be write protected at this
2375 * stage. */
2376 gc_assert(!page_table[i].write_protected);
2378 /* Check whether this is the last page in this contiguous block.. */
2379 if (page_ends_contiguous_block_p(i, from_space))
2380 break;
2383 #if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
2384 /* Do not do this for multi-page objects. Those pages do not need
2385 * object wipeout anyway.
2387 if (do_wipe_p && i == first_page) { // single-page object
2388 lispobj word = *object_start;
2389 int lowtag = is_cons_half(word) ?
2390 LIST_POINTER_LOWTAG : lowtag_for_widetag[widetag_of(word)>>2];
2391 pin_object(make_lispobj(object_start, lowtag));
2392 page_table[i].has_pins = 1;
2394 #endif
2396 /* Check that the page is now static. */
2397 gc_assert(page_table[addr_page_index].dont_move != 0);
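/* Illustrative caller (a sketch; the real root scans live in
 * garbage_collect_generation(), which also walks saved context
 * registers via preserve_context_registers() below): every word of a
 * conservative root range, e.g. a control stack, is tried as a
 * potential pointer: */
#if 0
static void preserve_stack_range_sketch(lispobj *lo, lispobj *hi)
{
    lispobj *sp;
    for (sp = lo; sp < hi; sp++)
        preserve_pointer((void*)*sp);
}
#endif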
2400 /* If the given page is not write-protected, then scan it for pointers
2401 * to younger generations or the top temp. generation, if no
2402 * suspicious pointers are found then the page is write-protected.
2404 * Care is taken to check for pointers to the current gc_alloc()
2405 * region if it is a younger generation or the temp. generation. This
2406 * frees the caller from doing a gc_alloc_update_page_tables(). Actually
2407 * the gc_alloc_generation does not need to be checked as this is only
2408 * called from scavenge_generation() when the gc_alloc generation is
2409 * younger, so it just checks if there is a pointer to the current
2410 * region.
2412 * We return 1 if the page was write-protected, else 0. */
2413 static int
2414 update_page_write_prot(page_index_t page)
2416 generation_index_t gen = page_table[page].gen;
2417 sword_t j;
2418 int wp_it = 1;
2419 void **page_addr = (void **)page_address(page);
2420 sword_t num_words = page_bytes_used(page) / N_WORD_BYTES;
2422 /* Shouldn't be a free page. */
2423 gc_assert(!page_free_p(page));
2424 gc_assert(page_bytes_used(page) != 0);
2426 /* Skip if it's already write-protected, pinned, or unboxed */
2427 if (page_table[page].write_protected
2428 /* FIXME: What's the reason for not write-protecting pinned pages? */
2429 || page_table[page].dont_move
2430 || page_unboxed_p(page))
2431 return (0);
2433 /* Scan the page for pointers to younger generations or the
2434 * top temp. generation. */
2436 /* This is conservative: any word satisfying is_lisp_pointer() is
2437 * assumed to be a pointer. To do otherwise would require a family
2438 * of scavenge-like functions. */
2439 for (j = 0; j < num_words; j++) {
2440 void *ptr = *(page_addr+j);
2441 page_index_t index;
2442 lispobj __attribute__((unused)) header;
2444 if (!is_lisp_pointer((lispobj)ptr))
2445 continue;
2446 /* Check that it's in the dynamic space */
2447 if ((index = find_page_index(ptr)) != -1) {
2448 if (/* Does it point to a younger or the temp. generation? */
2449 (!page_free_p(index)
2450 && (page_bytes_used(index) != 0)
2451 && ((page_table[index].gen < gen)
2452 || (page_table[index].gen == SCRATCH_GENERATION)))
2454 /* Or does it point within a current gc_alloc() region? */
2455 || ((boxed_region.start_addr <= ptr)
2456 && (ptr <= boxed_region.free_pointer))
2457 || ((unboxed_region.start_addr <= ptr)
2458 && (ptr <= unboxed_region.free_pointer))) {
2459 wp_it = 0;
2460 break;
2463 #ifdef LISP_FEATURE_IMMOBILE_SPACE
2464 else if ((index = find_immobile_page_index(ptr)) >= 0 &&
2465 other_immediate_lowtag_p(header = *native_pointer((lispobj)ptr))) {
2466 // This is *possibly* a pointer to an object in immobile space,
2467 // given that above two conditions were satisfied.
2468 // But unlike in the dynamic space case, we need to read a byte
2469 // from the object to determine its generation, which requires care.
2470 // Consider an unboxed word that looks like a pointer to a word that
2471 // looks like fun-header-widetag. We can't naively back up to the
2472 // underlying code object since the alleged header might not be one.
2473 int obj_gen = gen; // Make comparison fail if we fall through
2474 if (lowtag_of((lispobj)ptr) != FUN_POINTER_LOWTAG) {
2475 obj_gen = __immobile_obj_generation(native_pointer((lispobj)ptr));
2476 } else if (widetag_of(header) == SIMPLE_FUN_WIDETAG) {
2477 lispobj* code = fun_code_header((lispobj)ptr - FUN_POINTER_LOWTAG);
2478 // This is a heuristic, since we're not actually looking for
2479 // an object boundary. Precise scanning of 'page' would obviate
2480 // the guard conditions here.
2481 if ((lispobj)code >= IMMOBILE_VARYOBJ_SUBSPACE_START
2482 && widetag_of(*code) == CODE_HEADER_WIDETAG)
2483 obj_gen = __immobile_obj_generation(code);
2485 // A bogus generation number implies a not-really-pointer,
2486 // but it won't cause misbehavior.
2487 if (obj_gen < gen || obj_gen == SCRATCH_GENERATION) {
2488 wp_it = 0;
2489 break;
2492 #endif
2495 if (wp_it == 1) {
2496 /* Write-protect the page. */
2497 /*FSHOW((stderr, "/write-protecting page %d gen %d\n", page, gen));*/
2499 os_protect((void *)page_addr,
2500 GENCGC_CARD_BYTES,
2501 OS_VM_PROT_READ|OS_VM_PROT_EXECUTE);
2503 /* Note the page as protected in the page tables. */
2504 page_table[page].write_protected = 1;
2507 return (wp_it);
2510 /* Is this page holding a normal (non-hashtable) large-object
2511 * simple-vector? */
2512 static inline boolean large_simple_vector_p(page_index_t page) {
2513 if (!page_table[page].large_object)
2514 return 0;
2515 lispobj object = *(lispobj *)page_address(page);
2516 return widetag_of(object) == SIMPLE_VECTOR_WIDETAG &&
2517 (HeaderValue(object) & 0xFF) == subtype_VectorNormal;
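/* (The low byte of the header data encodes the vector subtype:
 * subtype_VectorNormal marks a plain vector, while vectors backing
 * hash tables carry a different subtype and need weakness-aware
 * treatment, so they are excluded here.) */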
2521 /* Scavenge all generations from FROM to TO, inclusive, except for
2522 * new_space which needs special handling, as new objects may be
2523 * added which are not checked here - use scavenge_newspace_generation().
2525 * Write-protected pages should not have any pointers to the
2526 * from_space so do not need scavenging; thus write-protected pages are
2527 * not always scavenged. There is some code to check that these pages
2528 * are not written; but to check fully the write-protected pages need
2529 * to be scavenged by disabling the code to skip them.
2531 * Under the current scheme when a generation is GCed the younger
2532 * generations will be empty. So, when a generation is being GCed it
2533 * is only necessary to scavenge the older generations for pointers
2534 * not the younger. So a page that does not have pointers to younger
2535 * generations does not need to be scavenged.
2537 * The write-protection can be used to note pages that don't have
2538 * pointers to younger pages. But pages can be written without having
2539 * pointers to younger generations. After the pages are scavenged here
2540 * they can be scanned for pointers to younger generations and if
2541 * there are none the page can be write-protected.
2543 * One complication is when the newspace is the top temp. generation.
2545 * Enabling SC_GEN_CK scavenges the write-protected pages and checks
2546 * that none were written, which they shouldn't be as they should have
2547 * no pointers to younger generations. This breaks down for weak
2548 * pointers as the objects contain a link to the next and are written
2549 * if a weak pointer is scavenged. Still it's a useful check. */
2550 static void
2551 scavenge_generations(generation_index_t from, generation_index_t to)
2553 page_index_t i;
2554 page_index_t num_wp = 0;
2556 #define SC_GEN_CK 0
2557 #if SC_GEN_CK
2558 /* Clear the write_protected_cleared flags on all pages. */
2559 for (i = 0; i < page_table_pages; i++)
2560 page_table[i].write_protected_cleared = 0;
2561 #endif
2563 for (i = 0; i < last_free_page; i++) {
2564 generation_index_t generation = page_table[i].gen;
2565 if (page_boxed_p(i)
2566 && (page_bytes_used(i) != 0)
2567 && (generation != new_space)
2568 && (generation >= from)
2569 && (generation <= to)) {
2570 page_index_t last_page,j;
2571 int write_protected=1;
2573 /* This should be the start of a region */
2574 gc_assert(page_starts_contiguous_block_p(i));
2576 if (large_simple_vector_p(i)) {
2577 /* Scavenge only the unprotected pages of a
2578 * large-object vector, other large objects could be
2579 * handled as well, but vectors are easier to deal
2580 * with and are more likely to grow to very large
2581 * sizes where avoiding scavenging the whole thing is
2582 * worthwhile */
2583 if (!page_table[i].write_protected) {
2584 scavenge((lispobj*)page_address(i) + 2,
2585 GENCGC_CARD_BYTES / N_WORD_BYTES - 2);
2586 update_page_write_prot(i);
2588 for (last_page = i + 1; ; last_page++) {
2589 lispobj* start = page_address(last_page);
2590 write_protected = page_table[last_page].write_protected;
2591 if (page_ends_contiguous_block_p(last_page, generation)) {
2592 if (!write_protected) {
2593 scavenge(start, page_bytes_used(last_page) / N_WORD_BYTES);
2594 update_page_write_prot(last_page);
2596 break;
2598 if (!write_protected) {
2599 scavenge(start, GENCGC_CARD_BYTES / N_WORD_BYTES);
2600 update_page_write_prot(last_page);
2603 } else {
2604 /* Now work forward until the end of the region */
2605 for (last_page = i; ; last_page++) {
2606 write_protected =
2607 write_protected && page_table[last_page].write_protected;
2608 if (page_ends_contiguous_block_p(last_page, generation))
2609 break;
2611 if (!write_protected) {
2612 heap_scavenge(page_address(i),
2613 (lispobj*)((char*)page_address(last_page)
2614 + page_bytes_used(last_page)));
2616 /* Now scan the pages and write protect those that
2617 * don't have pointers to younger generations. */
2618 if (enable_page_protection) {
2619 for (j = i; j <= last_page; j++) {
2620 num_wp += update_page_write_prot(j);
2623 if ((gencgc_verbose > 1) && (num_wp != 0)) {
2624 FSHOW((stderr,
2625 "/write protected %d pages within generation %d\n",
2626 num_wp, generation));
2630 i = last_page;
2634 #if SC_GEN_CK
2635 /* Check that none of the write_protected pages in this generation
2636 * have been written to. */
2637 for (i = 0; i < page_table_pages; i++) {
2638 if (!page_free_p(i)
2639 && (page_bytes_used(i) != 0)
2640 && (page_table[i].gen == generation)
2641 && (page_table[i].write_protected_cleared != 0)) {
2642 FSHOW((stderr, "/scavenge_generation() %d\n", generation));
2643 FSHOW((stderr,
2644 "/page bytes_used=%d scan_start_offset=%lu dont_move=%d\n",
2645 page_bytes_used(i),
2646 scan_start_offset(page_table[i]),
2647 page_table[i].dont_move));
2648 lose("write to protected page %d in scavenge_generation()\n", i);
2651 #endif
2655 /* Scavenge a newspace generation. As it is scavenged new objects may
2656 * be allocated to it; these will also need to be scavenged. This
2657 * repeats until there are no more objects unscavenged in the
2658 * newspace generation.
2660 * To help improve the efficiency, areas written are recorded by
2661 * gc_alloc() and only these scavenged. Sometimes a little more will be
2662 * scavenged, but this causes no harm. An easy check is done that the
2663 * scavenged bytes equal the number allocated in the previous
2664 * scavenge.
2666 * Write-protected pages are not scanned except if they are marked
2667 * dont_move in which case they may have been promoted and still have
2668 * pointers to the from space.
2670 * Write-protected pages could potentially be written by alloc; however,
2671 * to avoid having to handle re-scavenging of write-protected pages,
2672 * gc_alloc() does not write to write-protected pages.
2674 * New areas of objects allocated are recorded alternately in the two
2675 * new_areas arrays below. */
2676 static struct new_area new_areas_1[NUM_NEW_AREAS];
2677 static struct new_area new_areas_2[NUM_NEW_AREAS];
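/* A simplified sketch of the recording step (the real add_new_area()
 * earlier in this file also merges adjacent areas and honors
 * record_new_objects and new_areas_ignore_page): */
#if 0
static void add_new_area_sketch(page_index_t page, size_t offset, size_t size)
{
    if (new_areas_index < NUM_NEW_AREAS) {
        (*new_areas)[new_areas_index].page = page;
        (*new_areas)[new_areas_index].offset = offset;
        (*new_areas)[new_areas_index].size = size;
    }
    /* Always count: an index beyond NUM_NEW_AREAS signals overflow,
     * forcing the full rescan in scavenge_newspace_generation(). */
    new_areas_index++;
}
#endif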
2679 #ifdef LISP_FEATURE_IMMOBILE_SPACE
2680 extern unsigned int immobile_scav_queue_count;
2681 extern void
2682 gc_init_immobile(),
2683 update_immobile_nursery_bits(),
2684 scavenge_immobile_roots(generation_index_t,generation_index_t),
2685 scavenge_immobile_newspace(),
2686 sweep_immobile_space(int raise),
2687 write_protect_immobile_space();
2688 #else
2689 #define immobile_scav_queue_count 0
2690 #endif
2692 /* Do one full scan of the new space generation. This is not enough to
2693 * complete the job as new objects may be added to the generation in
2694 * the process which are not scavenged. */
2695 static void
2696 scavenge_newspace_generation_one_scan(generation_index_t generation)
2698 page_index_t i;
2700 FSHOW((stderr,
2701 "/starting one full scan of newspace generation %d\n",
2702 generation));
2703 for (i = 0; i < last_free_page; i++) {
2704 /* Note that this skips over open regions when it encounters them. */
2705 if (page_boxed_p(i)
2706 && (page_bytes_used(i) != 0)
2707 && (page_table[i].gen == generation)
2708 && ((page_table[i].write_protected == 0)
2709 /* (This may be redundant as write_protected is now
2710 * cleared before promotion.) */
2711 || (page_table[i].dont_move == 1))) {
2712 page_index_t last_page;
2713 int all_wp=1;
2715 /* The scavenge will start at the scan_start_offset of
2716 * page i.
2718 * We need to find the full extent of this contiguous
2719 * block in case objects span pages.
2721 * Now work forward until the end of this contiguous area
2722 * is found. A small area is preferred as there is a
2723 * better chance of its pages being write-protected. */
2724 for (last_page = i; ;last_page++) {
2725 /* If all pages are write-protected and movable,
2726 * then no need to scavenge */
2727 all_wp=all_wp && page_table[last_page].write_protected &&
2728 !page_table[last_page].dont_move;
2730 /* Check whether this is the last page in this
2731 * contiguous block */
2732 if (page_ends_contiguous_block_p(last_page, generation))
2733 break;
2736 /* Do a limited check for write-protected pages. */
2737 if (!all_wp) {
2738 new_areas_ignore_page = last_page;
2739 heap_scavenge(page_scan_start(i),
2740 (lispobj*)((char*)page_address(last_page)
2741 + page_bytes_used(last_page)));
2743 i = last_page;
2746 FSHOW((stderr,
2747 "/done with one full scan of newspace generation %d\n",
2748 generation));
2751 /* Do a complete scavenge of the newspace generation. */
2752 static void
2753 scavenge_newspace_generation(generation_index_t generation)
2755 size_t i;
2757 /* the new_areas array currently being written to by gc_alloc() */
2758 struct new_area (*current_new_areas)[] = &new_areas_1;
2759 size_t current_new_areas_index;
2761 /* the new_areas created by the previous scavenge cycle */
2762 struct new_area (*previous_new_areas)[] = NULL;
2763 size_t previous_new_areas_index;
2765 /* Flush the current regions updating the tables. */
2766 gc_alloc_update_all_page_tables(0);
2768 /* Turn on the recording of new areas by gc_alloc(). */
2769 new_areas = current_new_areas;
2770 new_areas_index = 0;
2772 /* Don't need to record new areas that get scavenged anyway during
2773 * scavenge_newspace_generation_one_scan. */
2774 record_new_objects = 1;
2776 /* Start with a full scavenge. */
2777 scavenge_newspace_generation_one_scan(generation);
2779 /* Record all new areas now. */
2780 record_new_objects = 2;
2782 /* Give a chance to weak hash tables to make other objects live.
2783 * FIXME: The algorithm implemented here for weak hash table gcing
2784 * is O(W^2+N) as Bruno Haible warns in
2785 * http://www.haible.de/bruno/papers/cs/weak/WeakDatastructures-writeup.html
2786 * see "Implementation 2". */
2787 scav_weak_hash_tables();
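/* (Why O(W^2+N): scavenging one weak table can enliven an object that
 * is a key in another weak table, newly entitling that table's values
 * to live, so in the worst case the W tables must be revisited once
 * per pass for up to W passes, on top of the O(N) heap scavenging.) */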
2789 /* Flush the current regions updating the tables. */
2790 gc_alloc_update_all_page_tables(0);
2792 /* Grab new_areas_index. */
2793 current_new_areas_index = new_areas_index;
2795 /*FSHOW((stderr,
2796 "The first scan is finished; current_new_areas_index=%d.\n",
2797 current_new_areas_index));*/
2799 while (current_new_areas_index > 0 || immobile_scav_queue_count) {
2800 /* Move the current to the previous new areas */
2801 previous_new_areas = current_new_areas;
2802 previous_new_areas_index = current_new_areas_index;
2804 /* Scavenge all the areas in previous new areas. Any new areas
2805 * allocated are saved in current_new_areas. */
2807 /* Allocate an array for current_new_areas; alternating between
2808 * new_areas_1 and 2 */
2809 if (previous_new_areas == &new_areas_1)
2810 current_new_areas = &new_areas_2;
2811 else
2812 current_new_areas = &new_areas_1;
2814 /* Set up for gc_alloc(). */
2815 new_areas = current_new_areas;
2816 new_areas_index = 0;
2818 #ifdef LISP_FEATURE_IMMOBILE_SPACE
2819 scavenge_immobile_newspace();
2820 #endif
2821 /* Check whether previous_new_areas had overflowed. */
2822 if (previous_new_areas_index >= NUM_NEW_AREAS) {
2824 /* New areas of objects allocated have been lost, so we need to do a
2825 * full scan to be sure! If this becomes a problem try
2826 * increasing NUM_NEW_AREAS. */
2827 if (gencgc_verbose) {
2828 SHOW("new_areas overflow, doing full scavenge");
2831 /* Don't need to record new areas that get scavenged
2832 * anyway during scavenge_newspace_generation_one_scan. */
2833 record_new_objects = 1;
2835 scavenge_newspace_generation_one_scan(generation);
2837 /* Record all new areas now. */
2838 record_new_objects = 2;
2840 scav_weak_hash_tables();
2842 /* Flush the current regions updating the tables. */
2843 gc_alloc_update_all_page_tables(0);
2845 } else {
2847 /* Work through previous_new_areas. */
2848 for (i = 0; i < previous_new_areas_index; i++) {
2849 page_index_t page = (*previous_new_areas)[i].page;
2850 size_t offset = (*previous_new_areas)[i].offset;
2851 size_t size = (*previous_new_areas)[i].size;
2852 gc_assert(size % N_WORD_BYTES == 0);
2853 lispobj *start = (lispobj*)((char*)page_address(page) + offset);
2854 heap_scavenge(start, (lispobj*)((char*)start + size));
2857 scav_weak_hash_tables();
2859 /* Flush the current regions updating the tables. */
2860 gc_alloc_update_all_page_tables(0);
2863 current_new_areas_index = new_areas_index;
2865 /*FSHOW((stderr,
2866 "The re-scan has finished; current_new_areas_index=%d.\n",
2867 current_new_areas_index));*/
2870 /* Turn off recording of areas allocated by gc_alloc(). */
2871 record_new_objects = 0;
2873 #if SC_NS_GEN_CK
2875 page_index_t i;
2876 /* Check that none of the write_protected pages in this generation
2877 * have been written to. */
2878 for (i = 0; i < page_table_pages; i++) {
2879 if (!page_free_p(i)
2880 && (page_bytes_used(i) != 0)
2881 && (page_table[i].gen == generation)
2882 && (page_table[i].write_protected_cleared != 0)
2883 && (page_table[i].dont_move == 0)) {
2884 lose("write protected page %d written to in scavenge_newspace_generation\ngeneration=%d dont_move=%d\n",
2885 i, generation, page_table[i].dont_move);
2889 #endif
2892 /* Un-write-protect all the pages in from_space. This is done at the
2893 * start of a GC, else there may be many page faults while scavenging
2894 * the newspace (I've seen it drive the system time to 99%). These pages
2895 * would need to be unprotected anyway before unmapping in
2896 * free_oldspace; not sure what effect this has on paging.. */
2897 static void
2898 unprotect_oldspace(void)
2900 page_index_t i;
2901 void *region_addr = 0;
2902 void *page_addr = 0;
2903 uword_t region_bytes = 0;
2905 for (i = 0; i < last_free_page; i++) {
2906 if (!page_free_p(i)
2907 && (page_bytes_used(i) != 0)
2908 && (page_table[i].gen == from_space)) {
2910 /* Remove any write-protection. We should be able to rely
2911 * on the write-protect flag to avoid redundant calls. */
2912 if (page_table[i].write_protected) {
2913 page_table[i].write_protected = 0;
2914 page_addr = page_address(i);
2915 if (!region_addr) {
2916 /* First region. */
2917 region_addr = page_addr;
2918 region_bytes = GENCGC_CARD_BYTES;
2919 } else if (region_addr + region_bytes == page_addr) {
2920 /* Region continues. */
2921 region_bytes += GENCGC_CARD_BYTES;
2922 } else {
2923 /* Unprotect previous region. */
2924 os_protect(region_addr, region_bytes, OS_VM_PROT_ALL);
2925 /* First page in new region. */
2926 region_addr = page_addr;
2927 region_bytes = GENCGC_CARD_BYTES;
2932 if (region_addr) {
2933 /* Unprotect last region. */
2934 os_protect(region_addr, region_bytes, OS_VM_PROT_ALL);
2938 /* Work through all the pages and free any in from_space. This
2939 * assumes that all objects have been copied or promoted to an older
2940 * generation. Bytes_allocated and the generation bytes_allocated
2941 * counter are updated. The number of bytes freed is returned. */
2942 static uword_t
2943 free_oldspace(void)
2945 uword_t bytes_freed = 0;
2946 page_index_t first_page, last_page;
2948 first_page = 0;
2950 do {
2951 /* Find a first page for the next region of pages. */
2952 while ((first_page < last_free_page)
2953 && (page_free_p(first_page)
2954 || (page_bytes_used(first_page) == 0)
2955 || (page_table[first_page].gen != from_space)))
2956 first_page++;
2958 if (first_page >= last_free_page)
2959 break;
2961 /* Find the last page of this region. */
2962 last_page = first_page;
2964 do {
2965 /* Free the page. */
2966 bytes_freed += page_bytes_used(last_page);
2967 generations[page_table[last_page].gen].bytes_allocated -=
2968 page_bytes_used(last_page);
2969 page_table[last_page].allocated = FREE_PAGE_FLAG;
2970 set_page_bytes_used(last_page, 0);
2971 /* Should already be unprotected by unprotect_oldspace(). */
2972 gc_assert(!page_table[last_page].write_protected);
2973 last_page++;
2975 while ((last_page < last_free_page)
2976 && !page_free_p(last_page)
2977 && (page_bytes_used(last_page) != 0)
2978 && (page_table[last_page].gen == from_space));
2980 #ifdef READ_PROTECT_FREE_PAGES
2981 os_protect(page_address(first_page),
2982 npage_bytes(last_page-first_page),
2983 OS_VM_PROT_NONE);
2984 #endif
2985 first_page = last_page;
2986 } while (first_page < last_free_page);
2988 bytes_allocated -= bytes_freed;
2989 return bytes_freed;
2992 #if 0
2993 /* Print some information about a pointer at the given address. */
2994 static void
2995 print_ptr(lispobj *addr)
2997 /* If addr is in the dynamic space then print out the page information. */
2998 page_index_t pi1 = find_page_index((void*)addr);
3000 if (pi1 != -1)
3001 fprintf(stderr," %p: page %d alloc %d gen %d bytes_used %d offset %lu dont_move %d\n",
3002 addr,
3003 pi1,
3004 page_table[pi1].allocated,
3005 page_table[pi1].gen,
3006 page_bytes_used(pi1),
3007 scan_start_offset(page_table[pi1]),
3008 page_table[pi1].dont_move);
3009 fprintf(stderr," %x %x %x %x (%x) %x %x %x %x\n",
3010 *(addr-4),
3011 *(addr-3),
3012 *(addr-2),
3013 *(addr-1),
3014 *(addr-0),
3015 *(addr+1),
3016 *(addr+2),
3017 *(addr+3),
3018 *(addr+4));
3020 #endif
3022 static int
3023 is_in_stack_space(lispobj ptr)
3025 /* For space verification: Pointers can be valid if they point
3026 * to a thread stack space. This would be faster if the thread
3027 * structures had page-table entries as if they were part of
3028 * the heap space. */
3029 struct thread *th;
3030 for_each_thread(th) {
3031 if ((th->control_stack_start <= (lispobj *)ptr) &&
3032 (th->control_stack_end >= (lispobj *)ptr)) {
3033 return 1;
3036 return 0;
3039 // NOTE: This function can produce false failure indications,
3040 // usually related to dynamic space pointing to the stack of a
3041 // dead thread, but there may be other reasons as well.
3042 static void
3043 verify_range(lispobj *start, size_t words)
3045 extern int valid_lisp_pointer_p(lispobj);
3046 int is_in_readonly_space =
3047 (READ_ONLY_SPACE_START <= (uword_t)start &&
3048 (uword_t)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
3049 #ifdef LISP_FEATURE_IMMOBILE_SPACE
3050 int is_in_immobile_space =
3051 (IMMOBILE_SPACE_START <= (uword_t)start &&
3052 (uword_t)start < SymbolValue(IMMOBILE_SPACE_FREE_POINTER,0));
3053 #endif
3055 lispobj *end = start + words;
3056 size_t count;
3057 for ( ; start < end ; start += count) {
3058 count = 1;
3059 lispobj thing = *start;
3060 lispobj __attribute__((unused)) pointee;
3062 if (is_lisp_pointer(thing)) {
3063 page_index_t page_index = find_page_index((void*)thing);
3064 sword_t to_readonly_space =
3065 (READ_ONLY_SPACE_START <= thing &&
3066 thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
3067 sword_t to_static_space =
3068 (STATIC_SPACE_START <= thing &&
3069 thing < SymbolValue(STATIC_SPACE_FREE_POINTER,0));
3070 #ifdef LISP_FEATURE_IMMOBILE_SPACE
3071 sword_t to_immobile_space =
3072 (IMMOBILE_SPACE_START <= thing &&
3073 thing < SymbolValue(IMMOBILE_FIXEDOBJ_FREE_POINTER,0)) ||
3074 (IMMOBILE_VARYOBJ_SUBSPACE_START <= thing &&
3075 thing < SymbolValue(IMMOBILE_SPACE_FREE_POINTER,0));
3076 #endif
3078 /* Does it point to the dynamic space? */
3079 if (page_index != -1) {
3080 /* If it's within the dynamic space it should point to a used page. */
3081 if (page_free_p(page_index))
3082 lose ("Ptr %p @ %p sees free page.\n", thing, start);
3083 if ((thing & (GENCGC_CARD_BYTES-1)) >= page_bytes_used(page_index))
3084 lose ("Ptr %p @ %p sees unallocated space.\n", thing, start);
3085 /* Check that it doesn't point to a forwarding pointer! */
3086 if (*native_pointer(thing) == 0x01) {
3087 lose("Ptr %p @ %p sees forwarding ptr.\n", thing, start);
3089 /* Check that it's not in the RO space, as it would then be a
3090 * pointer from the RO to the dynamic space. */
3091 if (is_in_readonly_space) {
3092 lose("ptr to dynamic space %p from RO space %x\n",
3093 thing, start);
3095 #ifdef LISP_FEATURE_IMMOBILE_SPACE
3096 // verify all immobile space -> dynamic space pointers
3097 if (is_in_immobile_space && !valid_lisp_pointer_p(thing)) {
3098 lose("Ptr %p @ %p sees junk.\n", thing, start);
3100 #endif
3101 /* Does it point to a plausible object? This check slows
3102 * it down a lot (so it's commented out).
3104 * "a lot" is serious: it ate 50 minutes cpu time on
3105 * my duron 950 before I came back from lunch and
3106 * killed it.
3108 * FIXME: Add a variable to enable this
3109 * dynamically. */
3110 /*
3111 if (!valid_lisp_pointer_p(thing)) {
3112 lose("ptr %p to invalid object %p\n", thing, start);
3113 }
3114 */
3115 #ifdef LISP_FEATURE_IMMOBILE_SPACE
3116 } else if (to_immobile_space) {
3117 // the object pointed to must not have been discarded as garbage
3118 if (!other_immediate_lowtag_p(*native_pointer(thing))
3119 || immobile_filler_p(native_pointer(thing)))
3120 lose("Ptr %p @ %p sees trashed object.\n", (void*)thing, start);
3121 // verify all pointers to immobile space
3122 if (!valid_lisp_pointer_p(thing))
3123 lose("Ptr %p @ %p sees junk.\n", thing, start);
3124 #endif
3125 } else {
3126 extern char __attribute__((unused)) funcallable_instance_tramp;
3127 /* Verify that it points to another valid space. */
3128 if (!to_readonly_space && !to_static_space
3129 && !is_in_stack_space(thing)) {
3130 lose("Ptr %p @ %p sees junk.\n", thing, start);
3133 continue;
3135 int widetag = widetag_of(thing);
3136 if (is_lisp_immediate(thing) || widetag == NO_TLS_VALUE_MARKER_WIDETAG) {
3137 /* skip immediates */
3138 } else if (!(other_immediate_lowtag_p(widetag)
3139 && lowtag_for_widetag[widetag>>2])) {
3140 lose("Unhandled widetag %p at %p\n", widetag, start);
3141 } else if (unboxed_obj_widetag_p(widetag)) {
3142 count = sizetab[widetag](start);
3143 } else switch(widetag) {
3144 /* boxed or partially boxed objects */
3145 // FIXME: x86-64 can have partially unboxed FINs. The raw words
3146 // are at the moment valid fixnums by blind luck.
3147 case INSTANCE_WIDETAG:
3148 if (instance_layout(start)) {
3149 sword_t nslots = instance_length(thing) | 1;
3150 instance_scan(verify_range, start+1, nslots,
3151 ((struct layout*)
3152 native_pointer(instance_layout(start)))->bitmap);
3153 count = 1 + nslots;
3155 break;
3156 case CODE_HEADER_WIDETAG:
3158 struct code *code = (struct code *) start;
3159 sword_t nheader_words = code_header_words(code->header);
3160 /* Scavenge the boxed section of the code data block */
3161 verify_range(start + 1, nheader_words - 1);
3163 /* Scavenge the boxed section of each function
3164 * object in the code data block. */
3165 for_each_simple_fun(i, fheaderp, code, 1, {
3166 verify_range(SIMPLE_FUN_SCAV_START(fheaderp),
3167 SIMPLE_FUN_SCAV_NWORDS(fheaderp)); });
3168 count = nheader_words + code_instruction_words(code->code_size);
3169 break;
3171 #ifdef LISP_FEATURE_IMMOBILE_CODE
3172 case FDEFN_WIDETAG:
3173 verify_range(start + 1, 2);
3174 pointee = fdefn_raw_referent((struct fdefn*)start);
3175 verify_range(&pointee, 1);
3176 count = CEILING(sizeof (struct fdefn)/sizeof(lispobj), 2);
3177 break;
3178 #endif
3182 static uword_t verify_space(lispobj start, lispobj end) {
3183 verify_range((lispobj*)start, (end-start)>>WORD_SHIFT);
3184 return 0;
3187 static void verify_dynamic_space();
3189 static void
3190 verify_gc(void)
3192 #ifdef LISP_FEATURE_IMMOBILE_SPACE
3193 # ifdef __linux__
3194 // Try this verification if marknsweep was compiled with extra debugging.
3195 // But weak symbols don't work on macOS.
3196 extern void __attribute__((weak)) check_varyobj_pages();
3197 if (&check_varyobj_pages) check_varyobj_pages();
3198 # endif
3199 verify_space(IMMOBILE_SPACE_START,
3200 SymbolValue(IMMOBILE_FIXEDOBJ_FREE_POINTER,0));
3201 verify_space(IMMOBILE_VARYOBJ_SUBSPACE_START,
3202 SymbolValue(IMMOBILE_SPACE_FREE_POINTER,0));
3203 #endif
3204 struct thread *th;
3205 for_each_thread(th) {
3206 verify_space((lispobj)th->binding_stack_start,
3207 (lispobj)get_binding_stack_pointer(th));
3209 verify_space(READ_ONLY_SPACE_START,
3210 SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
3211 verify_space(STATIC_SPACE_START,
3212 SymbolValue(STATIC_SPACE_FREE_POINTER,0));
3213 verify_dynamic_space();
3216 /* Call 'proc' with pairs of addresses demarcating ranges in the
3217 * specified generation.
3218 * Stop if any invocation returns non-zero, and return that value */
3219 uword_t
3220 walk_generation(uword_t (*proc)(lispobj*,lispobj*),
3221 generation_index_t generation)
3223 page_index_t i;
3224 int genmask = generation >= 0 ? 1 << generation : ~0;
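/* (E.g. generation == 2 gives genmask 1<<2 = 4 and visits only gen 2;
 * generation == -1 gives ~0 and visits every generation, which is how
 * verify_dynamic_space() below walks the whole dynamic space.) */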
3226 for (i = 0; i < last_free_page; i++) {
3227 if (!page_free_p(i)
3228 && (page_bytes_used(i) != 0)
3229 && ((1 << page_table[i].gen) & genmask)) {
3230 page_index_t last_page;
3232 /* This should be the start of a contiguous block */
3233 gc_assert(page_starts_contiguous_block_p(i));
3235 /* Need to find the full extent of this contiguous block in case
3236 objects span pages. */
3238 /* Now work forward until the end of this contiguous area is
3239 found. */
3240 for (last_page = i; ;last_page++)
3241 /* Check whether this is the last page in this contiguous
3242 * block. */
3243 if (page_ends_contiguous_block_p(last_page, page_table[i].gen))
3244 break;
3246 uword_t result =
3247 proc(page_address(i),
3248 (lispobj*)(page_bytes_used(last_page)
3249 + (char*)page_address(last_page)));
3250 if (result) return result;
3252 i = last_page;
3255 return 0;
3257 static void verify_generation(generation_index_t generation)
3259 walk_generation((uword_t(*)(lispobj*,lispobj*))verify_space, generation);
3262 /* Check that all the free space is zero filled. */
3263 static void
verify_zero_fill(void)
{
3266 page_index_t page;
3268 for (page = 0; page < last_free_page; page++) {
3269 if (page_free_p(page)) {
3270 /* The whole page should be zero filled. */
3271 sword_t *start_addr = (sword_t *)page_address(page);
3272 sword_t i;
3273 for (i = 0; i < (sword_t)GENCGC_CARD_BYTES/N_WORD_BYTES; i++) {
3274 if (start_addr[i] != 0) {
                    lose("free page not zero at %p\n", start_addr + i);
                }
            }
3278 } else {
3279 sword_t free_bytes = GENCGC_CARD_BYTES - page_bytes_used(page);
3280 if (free_bytes > 0) {
3281 sword_t *start_addr = (sword_t *)((uword_t)page_address(page)
3282 + page_bytes_used(page));
3283 sword_t size = free_bytes / N_WORD_BYTES;
3284 sword_t i;
3285 for (i = 0; i < size; i++) {
3286 if (start_addr[i] != 0) {
                        lose("free region not zero at %p\n", start_addr + i);
                    }
                }
            }
        }
    }
}
3295 /* External entry point for verify_zero_fill */
3296 void
gencgc_verify_zero_fill(void)
{
3299 /* Flush the alloc regions updating the tables. */
3300 gc_alloc_update_all_page_tables(1);
3301 SHOW("verifying zero fill");
    verify_zero_fill();
}
3305 static void
verify_dynamic_space(void)
{
3308 verify_generation(-1);
3309 if (gencgc_enable_verify_zero_fill)
        verify_zero_fill();
}
3313 /* Write-protect all the dynamic boxed pages in the given generation. */
3314 static void
write_protect_generation_pages(generation_index_t generation)
{
3317 page_index_t start;
3319 gc_assert(generation < SCRATCH_GENERATION);
3321 for (start = 0; start < last_free_page; start++) {
3322 if (protect_page_p(start, generation)) {
3323 void *page_start;
3324 page_index_t last;
3326 /* Note the page as protected in the page tables. */
3327 page_table[start].write_protected = 1;
3329 for (last = start + 1; last < last_free_page; last++) {
3330 if (!protect_page_p(last, generation))
3331 break;
                page_table[last].write_protected = 1;
            }
3335 page_start = (void *)page_address(start);
3337 os_protect(page_start,
3338 npage_bytes(last - start),
3339 OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
            start = last;
        }
    }
3345 if (gencgc_verbose > 1) {
3346 FSHOW((stderr,
3347 "/write protected %d of %d pages in generation %d\n",
3348 count_write_protect_generation_pages(generation),
3349 count_generation_pages(generation),
               generation));
    }
}
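/* A later write to any page protected above raises SIGSEGV (or SIGBUS);
 * gencgc_handle_wp_violation() below then unprotects the page and marks
 * it written, so subsequent GCs rescan only pages that were actually
 * mutated. */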
3354 #if defined(LISP_FEATURE_SB_THREAD) && (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
3355 static void
preserve_context_registers (void (*proc)(os_context_register_t), os_context_t *c)
{
    void **ptr;
    /* On Darwin the signal context isn't a contiguous block of memory,
     * so just calling preserve_pointer() on its contents won't be
     * sufficient.
     */
3362 #if defined(LISP_FEATURE_DARWIN)||defined(LISP_FEATURE_WIN32)
3363 #if defined LISP_FEATURE_X86
3364 proc(*os_context_register_addr(c,reg_EAX));
3365 proc(*os_context_register_addr(c,reg_ECX));
3366 proc(*os_context_register_addr(c,reg_EDX));
3367 proc(*os_context_register_addr(c,reg_EBX));
3368 proc(*os_context_register_addr(c,reg_ESI));
3369 proc(*os_context_register_addr(c,reg_EDI));
3370 proc(*os_context_pc_addr(c));
3371 #elif defined LISP_FEATURE_X86_64
3372 proc(*os_context_register_addr(c,reg_RAX));
3373 proc(*os_context_register_addr(c,reg_RCX));
3374 proc(*os_context_register_addr(c,reg_RDX));
3375 proc(*os_context_register_addr(c,reg_RBX));
3376 proc(*os_context_register_addr(c,reg_RSI));
3377 proc(*os_context_register_addr(c,reg_RDI));
3378 proc(*os_context_register_addr(c,reg_R8));
3379 proc(*os_context_register_addr(c,reg_R9));
3380 proc(*os_context_register_addr(c,reg_R10));
3381 proc(*os_context_register_addr(c,reg_R11));
3382 proc(*os_context_register_addr(c,reg_R12));
3383 proc(*os_context_register_addr(c,reg_R13));
3384 proc(*os_context_register_addr(c,reg_R14));
3385 proc(*os_context_register_addr(c,reg_R15));
3386 proc(*os_context_pc_addr(c));
3387 #else
3388 #error "preserve_context_registers needs to be tweaked for non-x86 Darwin"
3389 #endif
3390 #endif
3391 #if !defined(LISP_FEATURE_WIN32)
3392 for(ptr = ((void **)(c+1))-1; ptr>=(void **)c; ptr--) {
        proc((os_context_register_t)*ptr);
    }
#endif
}
3397 #endif
3399 static void
move_pinned_pages_to_newspace()
{
3402 page_index_t i;
3404 /* scavenge() will evacuate all oldspace pages, but no newspace
3405 * pages. Pinned pages are precisely those pages which must not
3406 * be evacuated, so move them to newspace directly. */
3408 for (i = 0; i < last_free_page; i++) {
3409 if (page_table[i].dont_move &&
3410 /* dont_move is cleared lazily, so validate the space as well. */
3411 page_table[i].gen == from_space) {
3412 if (do_wipe_p && page_table[i].has_pins) {
3413 // do not move to newspace after all, this will be word-wiped
                continue;
            }
3416 page_table[i].gen = new_space;
3417 /* And since we're moving the pages wholesale, also adjust
3418 * the generation allocation counters. */
3419 int used = page_bytes_used(i);
3420 generations[new_space].bytes_allocated += used;
            generations[from_space].bytes_allocated -= used;
        }
    }
}
3426 /* Garbage collect a generation. If raise is 0 then the remains of the
3427 * generation are not raised to the next generation. */
3428 static void
garbage_collect_generation(generation_index_t generation, int raise)
{
3431 page_index_t i;
3432 struct thread *th;
3434 gc_assert(generation <= HIGHEST_NORMAL_GENERATION);
3436 /* The oldest generation can't be raised. */
3437 gc_assert((generation != HIGHEST_NORMAL_GENERATION) || (raise == 0));
3439 /* Check if weak hash tables were processed in the previous GC. */
3440 gc_assert(weak_hash_tables == NULL);
3442 /* Initialize the weak pointer list. */
3443 weak_pointers = NULL;
3445 /* When a generation is not being raised it is transported to a
3446 * temporary generation (NUM_GENERATIONS), and lowered when
3447 * done. Set up this new generation. There should be no pages
3448 * allocated to it yet. */
3449 if (!raise) {
        gc_assert(generations[SCRATCH_GENERATION].bytes_allocated == 0);
    }
3453 /* Set the global src and dest. generations */
3454 from_space = generation;
3455 if (raise)
3456 new_space = generation+1;
3457 else
3458 new_space = SCRATCH_GENERATION;
3460 /* Change to a new space for allocation, resetting the alloc_start_page */
3461 gc_alloc_generation = new_space;
3462 generations[new_space].alloc_start_page = 0;
3463 generations[new_space].alloc_unboxed_start_page = 0;
3464 generations[new_space].alloc_large_start_page = 0;
3465 generations[new_space].alloc_large_unboxed_start_page = 0;
3467 hopscotch_reset(&pinned_objects);
3468 /* Before any pointers are preserved, the dont_move flags on the
3469 * pages need to be cleared. */
3470 /* FIXME: consider moving this bitmap into its own range of words,
3471 * out of the page table. Then we can just bzero() it.
3472 * This will also obviate the extra test at the comment
     * "dont_move is cleared lazily" in move_pinned_pages_to_newspace().
     */
3475 for (i = 0; i < last_free_page; i++)
3476 if(page_table[i].gen==from_space) {
            page_table[i].dont_move = 0;
        }
3480 /* Un-write-protect the old-space pages. This is essential for the
3481 * promoted pages as they may contain pointers into the old-space
3482 * which need to be scavenged. It also helps avoid unnecessary page
3483 * faults as forwarding pointers are written into them. They need to
3484 * be un-protected anyway before unmapping later. */
3485 unprotect_oldspace();
3487 /* Scavenge the stacks' conservative roots. */
3489 /* there are potentially two stacks for each thread: the main
3490 * stack, which may contain Lisp pointers, and the alternate stack.
3491 * We don't ever run Lisp code on the altstack, but it may
3492 * host a sigcontext with lisp objects in it */
3494 /* what we need to do: (1) find the stack pointer for the main
3495 * stack; scavenge it (2) find the interrupt context on the
3496 * alternate stack that might contain lisp values, and scavenge
3497 * that */
3499 /* we assume that none of the preceding applies to the thread that
3500 * initiates GC. If you ever call GC from inside an altstack
3501 * handler, you will lose. */
3503 #if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
3504 /* And if we're saving a core, there's no point in being conservative. */
3505 if (conservative_stack) {
3506 for_each_thread(th) {
3507 void **ptr;
3508 void **esp=(void **)-1;
3509 if (th->state == STATE_DEAD)
3510 continue;
3511 # if defined(LISP_FEATURE_SB_SAFEPOINT)
3512 /* Conservative collect_garbage is always invoked with a
3513 * foreign C call or an interrupt handler on top of every
3514 * existing thread, so the stored SP in each thread
3515 * structure is valid, no matter which thread we are looking
3516 * at. For threads that were running Lisp code, the pitstop
3517 * and edge functions maintain this value within the
3518 * interrupt or exception handler. */
3519 esp = os_get_csp(th);
3520 assert_on_stack(th, esp);
3522 /* In addition to pointers on the stack, also preserve the
3523 * return PC, the only value from the context that we need
3524 * in addition to the SP. The return PC gets saved by the
3525 * foreign call wrapper, and removed from the control stack
3526 * into a register. */
3527 preserve_pointer(th->pc_around_foreign_call);
3529 /* And on platforms with interrupts: scavenge ctx registers. */
3531 /* Disabled on Windows, because it does not have an explicit
3532 * stack of `interrupt_contexts'. The reported CSP has been
3533 * chosen so that the current context on the stack is
3534 * covered by the stack scan. See also set_csp_from_context(). */
3535 # ifndef LISP_FEATURE_WIN32
3536 if (th != arch_os_get_current_thread()) {
3537 long k = fixnum_value(
3538 SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
3539 while (k > 0)
3540 preserve_context_registers((void(*)(os_context_register_t))preserve_pointer,
                                           th->interrupt_contexts[--k]);
        }
3543 # endif
3544 # elif defined(LISP_FEATURE_SB_THREAD)
3545 sword_t i,free;
3546 if(th==arch_os_get_current_thread()) {
3547 /* Somebody is going to burn in hell for this, but casting
3548 * it in two steps shuts gcc up about strict aliasing. */
3549 esp = (void **)((void *)&raise);
3550 } else {
3551 void **esp1;
3552 free=fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
3553 for(i=free-1;i>=0;i--) {
3554 os_context_t *c=th->interrupt_contexts[i];
3555 esp1 = (void **) *os_context_register_addr(c,reg_SP);
3556 if (esp1>=(void **)th->control_stack_start &&
3557 esp1<(void **)th->control_stack_end) {
3558 if(esp1<esp) esp=esp1;
                    preserve_context_registers((void(*)(os_context_register_t))preserve_pointer,
                                               c);
                }
            }
        }
3564 # else
3565 esp = (void **)((void *)&raise);
3566 # endif
3567 if (!esp || esp == (void*) -1)
3568 lose("garbage_collect: no SP known for thread %x (OS %x)",
3569 th, th->os_thread);
3570 for (ptr = ((void **)th->control_stack_end)-1; ptr >= esp; ptr--) {
            preserve_pointer(*ptr);
        }
    }
}
3575 #else
3576 /* Non-x86oid systems don't have "conservative roots" as such, but
3577 * the same mechanism is used for objects pinned for use by alien
3578 * code. */
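    /* E.g. (a sketch of the Lisp-side contract, not code from this file):
     * SB-SYS:WITH-PINNED-OBJECTS pushes each object onto the thread-local
     * PINNED_OBJECTS list, so the loop below walks a proper list of the
     * form (obj1 obj2 ...) and preserves each car for the duration of
     * the pinning dynamic extent. */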
3579 for_each_thread(th) {
3580 lispobj pin_list = SymbolTlValue(PINNED_OBJECTS,th);
3581 while (pin_list != NIL) {
3582 struct cons *list_entry =
3583 (struct cons *)native_pointer(pin_list);
3584 preserve_pointer((void*)list_entry->car);
            pin_list = list_entry->cdr;
        }
    }
3588 #endif
3590 #if QSHOW
3591 if (gencgc_verbose > 1) {
3592 sword_t num_dont_move_pages = count_dont_move_pages();
3593 fprintf(stderr,
3594 "/non-movable pages due to conservative pointers = %ld (%lu bytes)\n",
3595 num_dont_move_pages,
                npage_bytes(num_dont_move_pages));
    }
3598 #endif
3600 /* Now that all of the pinned (dont_move) pages are known, and
3601 * before we start to scavenge (and thus relocate) objects,
3602 * relocate the pinned pages to newspace, so that the scavenger
3603 * will not attempt to relocate their contents. */
3604 move_pinned_pages_to_newspace();
3606 /* Scavenge all the rest of the roots. */
#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
    /*
     * If not x86, we need to scavenge the interrupt context(s) and the
     * control stack.
     */
    {
3614 struct thread *th;
3615 for_each_thread(th) {
3616 scavenge_interrupt_contexts(th);
            scavenge_control_stack(th);
        }
3620 # ifdef LISP_FEATURE_SB_SAFEPOINT
3621 /* In this case, scrub all stacks right here from the GCing thread
3622 * instead of doing what the comment below says. Suboptimal, but
3623 * easier. */
3624 for_each_thread(th)
3625 scrub_thread_control_stack(th);
3626 # else
3627 /* Scrub the unscavenged control stack space, so that we can't run
3628 * into any stale pointers in a later GC (this is done by the
3629 * stop-for-gc handler in the other threads). */
3630 scrub_control_stack();
# endif
    }
3633 #endif
3635 /* Scavenge the Lisp functions of the interrupt handlers, taking
3636 * care to avoid SIG_DFL and SIG_IGN. */
3637 for (i = 0; i < NSIG; i++) {
3638 union interrupt_handler handler = interrupt_handlers[i];
3639 if (!ARE_SAME_HANDLER(handler.c, SIG_IGN) &&
3640 !ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
            scavenge((lispobj *)(interrupt_handlers + i), 1);
        }
    }
    /* Scavenge the binding stacks. */
    {
3646 struct thread *th;
3647 for_each_thread(th) {
3648 sword_t len= (lispobj *)get_binding_stack_pointer(th) -
3649 th->binding_stack_start;
3650 scavenge((lispobj *) th->binding_stack_start,len);
3651 #ifdef LISP_FEATURE_SB_THREAD
3652 /* do the tls as well */
3653 len=(SymbolValue(FREE_TLS_INDEX,0) >> WORD_SHIFT) -
3654 (sizeof (struct thread))/(sizeof (lispobj));
3655 scavenge((lispobj *) (th+1),len);
#endif
        }
    }
3660 /* Scavenge static space. */
3661 if (gencgc_verbose > 1) {
3662 FSHOW((stderr,
3663 "/scavenge static space: %d bytes\n",
               SymbolValue(STATIC_SPACE_FREE_POINTER,0) - STATIC_SPACE_START));
    }
3666 heap_scavenge((lispobj*)STATIC_SPACE_START,
3667 (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER,0));
3669 /* All generations but the generation being GCed need to be
3670 * scavenged. The new_space generation needs special handling as
3671 * objects may be moved in - it is handled separately below. */
3672 #ifdef LISP_FEATURE_IMMOBILE_SPACE
3673 scavenge_immobile_roots(generation+1, SCRATCH_GENERATION);
3674 #endif
3675 scavenge_generations(generation+1, PSEUDO_STATIC_GENERATION);
3677 scavenge_pinned_ranges();
3679 /* Finally scavenge the new_space generation. Keep going until no
3680 * more objects are moved into the new generation */
3681 scavenge_newspace_generation(new_space);
3683 /* FIXME: I tried reenabling this check when debugging unrelated
3684 * GC weirdness ca. sbcl-0.6.12.45, and it failed immediately.
3685 * Since the current GC code seems to work well, I'm guessing that
3686 * this debugging code is just stale, but I haven't tried to
3687 * figure it out. It should be figured out and then either made to
3688 * work or just deleted. */
3690 #define RESCAN_CHECK 0
3691 #if RESCAN_CHECK
3692 /* As a check re-scavenge the newspace once; no new objects should
3693 * be found. */
    {
        os_vm_size_t old_bytes_allocated = bytes_allocated;
        os_vm_size_t delta;

        /* Start with a full scavenge. */
        scavenge_newspace_generation_one_scan(new_space);

        /* Flush the current regions, updating the tables. */
        gc_alloc_update_all_page_tables(1);

        /* How much did the rescan allocate? Anything nonzero is a bug. */
        delta = bytes_allocated - old_bytes_allocated;

        if (delta != 0) {
            lose("Rescan of new_space allocated %lu more bytes.\n",
                 (unsigned long)delta);
        }
    }
3711 #endif
3713 scan_weak_hash_tables();
3714 scan_weak_pointers();
3715 wipe_nonpinned_words();
3716 #ifdef LISP_FEATURE_IMMOBILE_SPACE
3717 // Do this last, because until wipe_nonpinned_words() happens,
3718 // not all page table entries have the 'gen' value updated,
3719 // which we need to correctly find all old->young pointers.
3720 sweep_immobile_space(raise);
3721 #endif
3723 /* Flush the current regions, updating the tables. */
3724 gc_alloc_update_all_page_tables(0);
3725 hopscotch_log_stats(&pinned_objects, "pins");
3727 /* Free the pages in oldspace, but not those marked dont_move. */
3728 free_oldspace();
3730 /* If the GC is not raising the age then lower the generation back
3731 * to its normal generation number */
3732 if (!raise) {
3733 for (i = 0; i < last_free_page; i++)
3734 if ((page_bytes_used(i) != 0)
3735 && (page_table[i].gen == SCRATCH_GENERATION))
3736 page_table[i].gen = generation;
3737 gc_assert(generations[generation].bytes_allocated == 0);
3738 generations[generation].bytes_allocated =
3739 generations[SCRATCH_GENERATION].bytes_allocated;
        generations[SCRATCH_GENERATION].bytes_allocated = 0;
    }
3743 /* Reset the alloc_start_page for generation. */
3744 generations[generation].alloc_start_page = 0;
3745 generations[generation].alloc_unboxed_start_page = 0;
3746 generations[generation].alloc_large_start_page = 0;
3747 generations[generation].alloc_large_unboxed_start_page = 0;
3749 if (generation >= verify_gens) {
3750 if (gencgc_verbose) {
            SHOW("verifying");
        }
        verify_gc();
    }
3756 /* Set the new gc trigger for the GCed generation. */
3757 generations[generation].gc_trigger =
3758 generations[generation].bytes_allocated
3759 + generations[generation].bytes_consed_between_gc;
3761 if (raise)
3762 generations[generation].num_gc = 0;
3763 else
        ++generations[generation].num_gc;
}
3768 /* Update last_free_page, then SymbolValue(ALLOCATION_POINTER). */
3769 sword_t
update_dynamic_space_free_pointer(void)
{
3772 page_index_t last_page = -1, i;
3774 for (i = 0; i < last_free_page; i++)
3775 if (!page_free_p(i) && (page_bytes_used(i) != 0))
3776 last_page = i;
3778 last_free_page = last_page+1;
3780 set_alloc_pointer((lispobj)(page_address(last_free_page)));
    return 0; /* dummy value: return something ... */
}
3784 static void
remap_page_range (page_index_t from, page_index_t to)
{
3787 /* There's a mysterious Solaris/x86 problem with using mmap
3788 * tricks for memory zeroing. See sbcl-devel thread
     * "Re: patch: standalone executable redux".
     */
3791 #if defined(LISP_FEATURE_SUNOS)
3792 zero_and_mark_pages(from, to);
3793 #else
3794 const page_index_t
3795 release_granularity = gencgc_release_granularity/GENCGC_CARD_BYTES,
3796 release_mask = release_granularity-1,
3797 end = to+1,
3798 aligned_from = (from+release_mask)&~release_mask,
3799 aligned_end = (end&~release_mask);
3801 if (aligned_from < aligned_end) {
3802 zero_pages_with_mmap(aligned_from, aligned_end-1);
3803 if (aligned_from != from)
3804 zero_and_mark_pages(from, aligned_from-1);
3805 if (aligned_end != end)
3806 zero_and_mark_pages(aligned_end, end-1);
3807 } else {
        zero_and_mark_pages(from, to);
    }
#endif
}
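/* Worked example (hypothetical numbers, assuming 4 KiB cards and a
 * 32 KiB release granularity, i.e. release_granularity/GENCGC_CARD_BYTES
 * == 8): remap_page_range(5, 21) computes aligned_from = 8 and
 * aligned_end = 16, so pages 8..15 go back to the OS via
 * zero_pages_with_mmap() while the unaligned fringes 5..7 and 16..21
 * are zeroed by hand. */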
3813 static void
remap_free_pages (page_index_t from, page_index_t to, int forcibly)
{
3816 page_index_t first_page, last_page;
3818 if (forcibly)
3819 return remap_page_range(from, to);
3821 for (first_page = from; first_page <= to; first_page++) {
3822 if (!page_free_p(first_page) || !page_need_to_zero(first_page))
3823 continue;
3825 last_page = first_page + 1;
3826 while (page_free_p(last_page) &&
3827 (last_page <= to) &&
3828 (page_need_to_zero(last_page)))
3829 last_page++;
3831 remap_page_range(first_page, last_page-1);
        first_page = last_page;
    }
}
3837 generation_index_t small_generation_limit = 1;
3839 /* GC all generations newer than last_gen, raising the objects in each
3840 * to the next older generation - we finish when all generations below
3841 * last_gen are empty. Then if last_gen is due for a GC, or if
3842 * last_gen==NUM_GENERATIONS (the scratch generation? eh?) we GC that
3843 * too. The valid range for last_gen is: 0,1,...,NUM_GENERATIONS.
3845 * We stop collecting at gencgc_oldest_gen_to_gc, even if this is less than
3846 * last_gen (oh, and note that by default it is NUM_GENERATIONS-1) */
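/* Typical usage (a sketch; actual call sites live elsewhere in the
 * runtime and in Lisp's SUB-GC): a nursery collection passes 0, while
 * the full collection performed by gc_and_save() below passes
 * HIGHEST_NORMAL_GENERATION+1. */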
3847 void
collect_garbage(generation_index_t last_gen)
{
3850 generation_index_t gen = 0, i;
3851 int raise, more = 0;
3852 int gen_to_wp;
3853 /* The largest value of last_free_page seen since the time
3854 * remap_free_pages was called. */
3855 static page_index_t high_water_mark = 0;
3857 FSHOW((stderr, "/entering collect_garbage(%d)\n", last_gen));
3858 log_generation_stats(gc_logfile, "=== GC Start ===");
3860 gc_active_p = 1;
3862 if (last_gen > HIGHEST_NORMAL_GENERATION+1) {
3863 FSHOW((stderr,
3864 "/collect_garbage: last_gen = %d, doing a level 0 GC\n",
3865 last_gen));
        last_gen = 0;
    }
3869 /* Flush the alloc regions updating the tables. */
3870 gc_alloc_update_all_page_tables(1);
3872 /* Verify the new objects created by Lisp code. */
3873 if (pre_verify_gen_0) {
3874 FSHOW((stderr, "pre-checking generation 0\n"));
        verify_generation(0);
    }
3878 if (gencgc_verbose > 1)
3879 print_generation_stats();
3881 #ifdef LISP_FEATURE_IMMOBILE_SPACE
3882 /* Immobile space generation bits are lazily updated for gen0
3883 (not touched on every object allocation) so do it now */
3884 update_immobile_nursery_bits();
3885 #endif
3887 do {
3888 /* Collect the generation. */
3890 if (more || (gen >= gencgc_oldest_gen_to_gc)) {
3891 /* Never raise the oldest generation. Never raise the extra generation
3892 * collected due to more-flag. */
3893 raise = 0;
3894 more = 0;
3895 } else {
3896 raise =
3897 (gen < last_gen)
3898 || (generations[gen].num_gc >= generations[gen].number_of_gcs_before_promotion);
3899 /* If we would not normally raise this one, but we're
3900 * running low on space in comparison to the object-sizes
3901 * we've been seeing, raise it and collect the next one
3902 * too. */
3903 if (!raise && gen == last_gen) {
3904 more = (2*large_allocation) >= (dynamic_space_size - bytes_allocated);
                raise = more;
            }
        }
3909 if (gencgc_verbose > 1) {
3910 FSHOW((stderr,
3911 "starting GC of generation %d with raise=%d alloc=%d trig=%d GCs=%d\n",
3912 gen,
3913 raise,
3914 generations[gen].bytes_allocated,
3915 generations[gen].gc_trigger,
                   generations[gen].num_gc));
        }
3919 /* If an older generation is being filled, then update its
3920 * memory age. */
3921 if (raise == 1) {
3922 generations[gen+1].cum_sum_bytes_allocated +=
                generations[gen+1].bytes_allocated;
        }
3926 garbage_collect_generation(gen, raise);
3928 /* Reset the memory age cum_sum. */
3929 generations[gen].cum_sum_bytes_allocated = 0;
3931 if (gencgc_verbose > 1) {
3932 FSHOW((stderr, "GC of generation %d finished:\n", gen));
            print_generation_stats();
        }
3936 gen++;
3937 } while ((gen <= gencgc_oldest_gen_to_gc)
3938 && ((gen < last_gen)
3939 || more
3940 || (raise
3941 && (generations[gen].bytes_allocated
3942 > generations[gen].gc_trigger)
3943 && (generation_average_age(gen)
3944 > generations[gen].minimum_age_before_gc))));
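    /* In words: keep collecting as long as (a) we haven't reached the
     * requested last_gen, or (b) the previous pass promoted into a
     * generation that is itself over its trigger and old enough on
     * average to be worth collecting, or (c) the low-space heuristic
     * above asked for one extra collection via 'more'. */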
3946 /* Now if gen-1 was raised all generations before gen are empty.
3947 * If it wasn't raised then all generations before gen-1 are empty.
3949 * Now objects within this gen's pages cannot point to younger
3950 * generations unless they are written to. This can be exploited
3951 * by write-protecting the pages of gen; then when younger
3952 * generations are GCed only the pages which have been written
3953 * need scanning. */
3954 if (raise)
3955 gen_to_wp = gen;
3956 else
3957 gen_to_wp = gen - 1;
3959 /* There's not much point in WPing pages in generation 0 as it is
3960 * never scavenged (except promoted pages). */
3961 if ((gen_to_wp > 0) && enable_page_protection) {
3962 /* Check that they are all empty. */
3963 for (i = 0; i < gen_to_wp; i++) {
3964 if (generations[i].bytes_allocated)
3965 lose("trying to write-protect gen. %d when gen. %d nonempty\n",
                     gen_to_wp, i);
        }
        write_protect_generation_pages(gen_to_wp);
    }
3970 #ifdef LISP_FEATURE_IMMOBILE_SPACE
3971 write_protect_immobile_space();
3972 #endif
3974 /* Set gc_alloc() back to generation 0. The current regions should
3975 * be flushed after the above GCs. */
3976 gc_assert((boxed_region.free_pointer - boxed_region.start_addr) == 0);
3977 gc_alloc_generation = 0;
3979 /* Save the high-water mark before updating last_free_page */
3980 if (last_free_page > high_water_mark)
3981 high_water_mark = last_free_page;
3983 update_dynamic_space_free_pointer();
3985 /* Update auto_gc_trigger. Make sure we trigger the next GC before
3986 * running out of heap! */
3987 if (bytes_consed_between_gcs <= (dynamic_space_size - bytes_allocated))
3988 auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
3989 else
3990 auto_gc_trigger = bytes_allocated + (dynamic_space_size - bytes_allocated)/2;
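    /* Worked example (hypothetical numbers): with a 512 MB dynamic space,
     * 500 MB already allocated, and bytes_consed_between_gcs = 50 MB, the
     * 12 MB of headroom is too small, so instead of 550 MB the trigger is
     * set to 500 MB + 12/2 MB = 506 MB, guaranteeing the next GC fires
     * before the heap is exhausted. */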
3992 if(gencgc_verbose)
3993 fprintf(stderr,"Next gc when %"OS_VM_SIZE_FMT" bytes have been consed\n",
3994 auto_gc_trigger);
3996 /* If we did a big GC (arbitrarily defined as gen > 1), release memory
3997 * back to the OS.
3999 if (gen > small_generation_limit) {
4000 if (last_free_page > high_water_mark)
4001 high_water_mark = last_free_page;
4002 remap_free_pages(0, high_water_mark, 0);
        high_water_mark = 0;
    }
4006 gc_active_p = 0;
4007 large_allocation = 0;
4009 log_generation_stats(gc_logfile, "=== GC End ===");
    SHOW("returning from collect_garbage");
}
4013 void
gc_init(void)
{
4016 page_index_t i;
4018 #if defined(LISP_FEATURE_SB_SAFEPOINT)
4019 alloc_gc_page();
4020 #endif
4022 /* Compute the number of pages needed for the dynamic space.
4023 * Dynamic space size should be aligned on page size. */
4024 page_table_pages = dynamic_space_size/GENCGC_CARD_BYTES;
4025 gc_assert(dynamic_space_size == npage_bytes(page_table_pages));
4027 /* Default nursery size to 5% of the total dynamic space size,
4028 * min 1Mb. */
4029 bytes_consed_between_gcs = dynamic_space_size/(os_vm_size_t)20;
4030 if (bytes_consed_between_gcs < (1024*1024))
4031 bytes_consed_between_gcs = 1024*1024;
4033 /* The page_table must be allocated using "calloc" to initialize
4034 * the page structures correctly. There used to be a separate
4035 * initialization loop (now commented out; see below) but that was
4036 * unnecessary and did hurt startup time. */
4037 page_table = calloc(page_table_pages, sizeof(struct page));
4038 gc_assert(page_table);
4039 #ifdef LISP_FEATURE_IMMOBILE_SPACE
4040 gc_init_immobile();
4041 #endif
4043 hopscotch_init();
4044 hopscotch_create(&pinned_objects, HOPSCOTCH_HASH_FUN_DEFAULT, 0 /* hashset */,
4045 32 /* logical bin count */, 0 /* default range */);
4047 scavtab[WEAK_POINTER_WIDETAG] = scav_weak_pointer;
4048 transother[SIMPLE_ARRAY_WIDETAG] = trans_boxed_large;
4050 /* The page structures are initialized implicitly when page_table
4051 * is allocated with "calloc" above. Formerly we had the following
4052 * explicit initialization here (comments converted to C99 style
4053 * for readability as C's block comments don't nest):
4055 * // Initialize each page structure.
4056 * for (i = 0; i < page_table_pages; i++) {
4057 * // Initialize all pages as free.
4058 * page_table[i].allocated = FREE_PAGE_FLAG;
4059 * page_table[i].bytes_used = 0;
4061 * // Pages are not write-protected at startup.
4062 * page_table[i].write_protected = 0;
4065 * Without this loop the image starts up much faster when dynamic
4066 * space is large -- which it is on 64-bit platforms already by
4067 * default -- and when "calloc" for large arrays is implemented
4068 * using copy-on-write of a page of zeroes -- which it is at least
 * on Linux. In this case the pages backing the page_table itself are
 * not mapped and cleared until the corresponding part of
4071 * dynamic space is used. For example, this saves clearing 16 MB of
4072 * memory at startup if the page size is 4 KB and the size of
4073 * dynamic space is 4 GB.
4074 * FREE_PAGE_FLAG must be 0 for this to work correctly which is
4075 * asserted below: */
    {
        /* Compile time assertion: If triggered, declares an array
         * of dimension -1 forcing a syntax error. The intent of the
         * assignment is to avoid an "unused variable" warning. */
        char assert_free_page_flag_0[(FREE_PAGE_FLAG) ? -1 : 1];
        assert_free_page_flag_0[0] = assert_free_page_flag_0[0];
    }
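    /* (A sketch of the modern equivalent, assuming a C11 compiler:
     *
     *     _Static_assert(FREE_PAGE_FLAG == 0,
     *                    "calloc'd page_table requires FREE_PAGE_FLAG == 0");
     *
     * The array trick above has the advantage of working with pre-C11
     * compilers.) */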
4084 bytes_allocated = 0;
4086 /* Initialize the generations. */
4087 for (i = 0; i < NUM_GENERATIONS; i++) {
4088 generations[i].alloc_start_page = 0;
4089 generations[i].alloc_unboxed_start_page = 0;
4090 generations[i].alloc_large_start_page = 0;
4091 generations[i].alloc_large_unboxed_start_page = 0;
4092 generations[i].bytes_allocated = 0;
4093 generations[i].gc_trigger = 2000000;
4094 generations[i].num_gc = 0;
4095 generations[i].cum_sum_bytes_allocated = 0;
4096 /* the tune-able parameters */
4097 generations[i].bytes_consed_between_gc
4098 = bytes_consed_between_gcs/(os_vm_size_t)HIGHEST_NORMAL_GENERATION;
4099 generations[i].number_of_gcs_before_promotion = 1;
        generations[i].minimum_age_before_gc = 0.75;
    }
4103 /* Initialize gc_alloc. */
4104 gc_alloc_generation = 0;
4105 gc_set_region_empty(&boxed_region);
4106 gc_set_region_empty(&unboxed_region);
    last_free_page = 0;
}
4111 /* Pick up the dynamic space from after a core load.
 * The ALLOCATION_POINTER points to the end of the dynamic space.
 */
4116 static void
gencgc_pickup_dynamic(void)
{
4119 page_index_t page = 0;
4120 void *alloc_ptr = (void *)get_alloc_pointer();
4121 lispobj *prev=(lispobj *)page_address(page);
4122 generation_index_t gen = PSEUDO_STATIC_GENERATION;
4124 bytes_allocated = 0;
4126 do {
4127 lispobj *first,*ptr= (lispobj *)page_address(page);
4129 if (!gencgc_partial_pickup || !page_free_p(page)) {
4130 /* It is possible, though rare, for the saved page table
4131 * to contain free pages below alloc_ptr. */
4132 page_table[page].gen = gen;
4133 set_page_bytes_used(page, GENCGC_CARD_BYTES);
4134 page_table[page].large_object = 0;
4135 page_table[page].write_protected = 0;
4136 page_table[page].write_protected_cleared = 0;
4137 page_table[page].dont_move = 0;
4138 set_page_need_to_zero(page, 1);
            bytes_allocated += GENCGC_CARD_BYTES;
        }
4143 if (!gencgc_partial_pickup) {
4144 page_table[page].allocated = BOXED_PAGE_FLAG;
4145 first = gc_search_space3(ptr, prev, (ptr+2));
4146 if(ptr == first)
4147 prev=ptr;
4148 set_page_scan_start_offset(page,
                                       page_address(page) - (void *)prev);
        }
4151 page++;
4152 } while (page_address(page) < alloc_ptr);
4154 last_free_page = page;
4156 generations[gen].bytes_allocated = bytes_allocated;
4158 gc_alloc_update_all_page_tables(1);
    write_protect_generation_pages(gen);
}
4162 void
gc_initialize_pointers(void)
{
    gencgc_pickup_dynamic();
}
4169 /* alloc(..) is the external interface for memory allocation. It
4170 * allocates to generation 0. It is not called from within the garbage
4171 * collector as it is only external uses that need the check for heap
4172 * size (GC trigger) and to disable the interrupts (interrupts are
4173 * always disabled during a GC).
4175 * The vops that call alloc(..) assume that the returned space is zero-filled.
4176 * (E.g. the most significant word of a 2-word bignum in MOVE-FROM-UNSIGNED.)
4178 * The check for a GC trigger is only performed when the current
4179 * region is full, so in most cases it's not needed. */
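/* The fast path in general_alloc_internal() below is a plain pointer
 * bump. E.g. (hypothetical numbers): with free_pointer at 0x1000 and
 * end_addr at 0x2000, a 32-byte request returns 0x1000 and advances
 * free_pointer to 0x1020; only a request that would overrun end_addr
 * takes the slow path through gc_alloc_with_region(). */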
4181 static inline lispobj *
4182 general_alloc_internal(sword_t nbytes, int page_type_flag, struct alloc_region *region,
                       struct thread *thread)
{
4185 #ifndef LISP_FEATURE_WIN32
4186 lispobj alloc_signal;
4187 #endif
4188 void *new_obj;
4189 void *new_free_pointer;
4190 os_vm_size_t trigger_bytes = 0;
4192 gc_assert(nbytes > 0);
4194 /* Check for alignment allocation problems. */
4195 gc_assert((((uword_t)region->free_pointer & LOWTAG_MASK) == 0)
4196 && ((nbytes & LOWTAG_MASK) == 0));
4198 #if !(defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD))
4199 /* Must be inside a PA section. */
4200 gc_assert(get_pseudo_atomic_atomic(thread));
4201 #endif
4203 if ((os_vm_size_t) nbytes > large_allocation)
4204 large_allocation = nbytes;
4206 /* maybe we can do this quickly ... */
4207 new_free_pointer = region->free_pointer + nbytes;
4208 if (new_free_pointer <= region->end_addr) {
4209 new_obj = (void*)(region->free_pointer);
4210 region->free_pointer = new_free_pointer;
        return(new_obj); /* yup */
    }
4214 /* We don't want to count nbytes against auto_gc_trigger unless we
4215 * have to: it speeds up the tenuring of objects and slows down
4216 * allocation. However, unless we do so when allocating _very_
4217 * large objects we are in danger of exhausting the heap without
     * running sufficient GCs.
     */
4220 if ((os_vm_size_t) nbytes >= bytes_consed_between_gcs)
4221 trigger_bytes = nbytes;
4223 /* we have to go the long way around, it seems. Check whether we
     * should GC in the near future
     */
4226 if (auto_gc_trigger && (bytes_allocated+trigger_bytes > auto_gc_trigger)) {
4227 /* Don't flood the system with interrupts if the need to gc is
4228 * already noted. This can happen for example when SUB-GC
4229 * allocates or after a gc triggered in a WITHOUT-GCING. */
4230 if (SymbolValue(GC_PENDING,thread) == NIL) {
4231 /* set things up so that GC happens when we finish the PA
4232 * section */
4233 SetSymbolValue(GC_PENDING,T,thread);
4234 if (SymbolValue(GC_INHIBIT,thread) == NIL) {
4235 #ifdef LISP_FEATURE_SB_SAFEPOINT
4236 thread_register_gc_trigger();
4237 #else
4238 set_pseudo_atomic_interrupted(thread);
4239 #ifdef GENCGC_IS_PRECISE
            /* PPC calls alloc() from a trap handler; look up the most
             * recent context if it's from a trap. */
            {
4243 os_context_t *context =
4244 thread->interrupt_data->allocation_trap_context;
4245 maybe_save_gc_mask_and_block_deferrables
                    (context ? os_context_sigmask_addr(context) : NULL);
            }
4248 #else
4249 maybe_save_gc_mask_and_block_deferrables(NULL);
4250 #endif
#endif
            }
        }
    }
4255 new_obj = gc_alloc_with_region(nbytes, page_type_flag, region, 0);
4257 #ifndef LISP_FEATURE_WIN32
    /* Allocation profiling support for sb-sprof; not supported on
     * Windows yet. ALLOC_SIGNAL, when bound to a fixnum, counts down by
     * one on each allocation; on reaching zero we raise SIGPROF and
     * disarm the countdown by resetting the symbol to T. */
4259 alloc_signal = SymbolValue(ALLOC_SIGNAL,thread);
4260 if ((alloc_signal & FIXNUM_TAG_MASK) == 0) {
4261 if ((sword_t) alloc_signal <= 0) {
4262 SetSymbolValue(ALLOC_SIGNAL, T, thread);
4263 raise(SIGPROF);
4264 } else {
4265 SetSymbolValue(ALLOC_SIGNAL,
4266 alloc_signal - (1 << N_FIXNUM_TAG_BITS),
                           thread);
        }
    }
4270 #endif
    return (new_obj);
}
4275 lispobj *
general_alloc(sword_t nbytes, int page_type_flag)
{
    struct thread *thread = arch_os_get_current_thread();
    /* Select correct region, and call general_alloc_internal with it.
     * For anything other than boxed allocation we must lock first,
     * since the region is shared. */
4282 if (BOXED_PAGE_FLAG & page_type_flag) {
4283 #ifdef LISP_FEATURE_SB_THREAD
4284 struct alloc_region *region = (thread ? &(thread->alloc_region) : &boxed_region);
4285 #else
4286 struct alloc_region *region = &boxed_region;
4287 #endif
4288 return general_alloc_internal(nbytes, page_type_flag, region, thread);
4289 } else if (UNBOXED_PAGE_FLAG == page_type_flag) {
4290 lispobj * obj;
4291 int result;
4292 result = thread_mutex_lock(&allocation_lock);
4293 gc_assert(!result);
4294 obj = general_alloc_internal(nbytes, page_type_flag, &unboxed_region, thread);
4295 result = thread_mutex_unlock(&allocation_lock);
4296 gc_assert(!result);
4297 return obj;
4298 } else {
        lose("bad page type flag: %d", page_type_flag);
    }
}
4303 lispobj AMD64_SYSV_ABI *
alloc(sword_t nbytes)
{
4306 #ifdef LISP_FEATURE_SB_SAFEPOINT_STRICTLY
4307 struct thread *self = arch_os_get_current_thread();
4308 int was_pseudo_atomic = get_pseudo_atomic_atomic(self);
4309 if (!was_pseudo_atomic)
4310 set_pseudo_atomic_atomic(self);
4311 #else
4312 gc_assert(get_pseudo_atomic_atomic(arch_os_get_current_thread()));
4313 #endif
4315 lispobj *result = general_alloc(nbytes, BOXED_PAGE_FLAG);
4317 #ifdef LISP_FEATURE_SB_SAFEPOINT_STRICTLY
4318 if (!was_pseudo_atomic)
4319 clear_pseudo_atomic_atomic(self);
4320 #endif
    return result;
}

/*
 * shared support for the OS-dependent signal handlers which
 * catch GENCGC-related write-protect violations
 */
4329 void unhandled_sigmemoryfault(void* addr);
4331 /* Depending on which OS we're running under, different signals might
4332 * be raised for a violation of write protection in the heap. This
4333 * function factors out the common generational GC magic which needs
4334 * to invoked in this case, and should be called from whatever signal
4335 * handler is appropriate for the OS we're running under.
4337 * Return true if this signal is a normal generational GC thing that
4338 * we were able to handle, or false if it was abnormal and control
4339 * should fall through to the general SIGSEGV/SIGBUS/whatever logic.
4341 * We have two control flags for this: one causes us to ignore faults
4342 * on unprotected pages completely, and the second complains to stderr
 * but allows us to continue without losing.
 */
4345 extern boolean ignore_memoryfaults_on_unprotected_pages;
4346 boolean ignore_memoryfaults_on_unprotected_pages = 0;
4348 extern boolean continue_after_memoryfault_on_unprotected_pages;
4349 boolean continue_after_memoryfault_on_unprotected_pages = 0;
int
gencgc_handle_wp_violation(void* fault_addr)
{
4354 page_index_t page_index = find_page_index(fault_addr);
4356 #if QSHOW_SIGNALS
4357 FSHOW((stderr,
4358 "heap WP violation? fault_addr=%p, page_index=%"PAGE_INDEX_FMT"\n",
4359 fault_addr, page_index));
4360 #endif
4362 /* Check whether the fault is within the dynamic space. */
4363 if (page_index == (-1)) {
4364 #ifdef LISP_FEATURE_IMMOBILE_SPACE
4365 extern int immobile_space_handle_wp_violation(void*);
4366 if (immobile_space_handle_wp_violation(fault_addr))
4367 return 1;
4368 #endif
4370 /* It can be helpful to be able to put a breakpoint on this
4371 * case to help diagnose low-level problems. */
4372 unhandled_sigmemoryfault(fault_addr);
4374 /* not within the dynamic space -- not our responsibility */
4375 return 0;
4377 } else {
4378 int ret;
4379 ret = thread_mutex_lock(&free_pages_lock);
4380 gc_assert(ret == 0);
4381 if (page_table[page_index].write_protected) {
4382 /* Unprotect the page. */
4383 os_protect(page_address(page_index), GENCGC_CARD_BYTES, OS_VM_PROT_ALL);
4384 page_table[page_index].write_protected_cleared = 1;
4385 page_table[page_index].write_protected = 0;
4386 } else if (!ignore_memoryfaults_on_unprotected_pages) {
4387 /* The only acceptable reason for this signal on a heap
4388 * access is that GENCGC write-protected the page.
4389 * However, if two CPUs hit a wp page near-simultaneously,
4390 * we had better not have the second one lose here if it
             * does this test after the first one has already set wp=0
             */
4393 if(page_table[page_index].write_protected_cleared != 1) {
4394 void lisp_backtrace(int frames);
4395 lisp_backtrace(10);
4396 fprintf(stderr,
4397 "Fault @ %p, page %"PAGE_INDEX_FMT" not marked as write-protected:\n"
4398 " boxed_region.first_page: %"PAGE_INDEX_FMT","
4399 " boxed_region.last_page %"PAGE_INDEX_FMT"\n"
4400 " page.scan_start_offset: %"OS_VM_SIZE_FMT"\n"
4401 " page.bytes_used: %u\n"
4402 " page.allocated: %d\n"
4403 " page.write_protected: %d\n"
4404 " page.write_protected_cleared: %d\n"
4405 " page.generation: %d\n",
4406 fault_addr,
4407 page_index,
4408 boxed_region.first_page,
4409 boxed_region.last_page,
4410 page_scan_start_offset(page_index),
4411 page_bytes_used(page_index),
4412 page_table[page_index].allocated,
4413 page_table[page_index].write_protected,
4414 page_table[page_index].write_protected_cleared,
4415 page_table[page_index].gen);
4416 if (!continue_after_memoryfault_on_unprotected_pages)
                    lose("Feh.\n");
            }
        }
4420 ret = thread_mutex_unlock(&free_pages_lock);
4421 gc_assert(ret == 0);
4422 /* Don't worry, we can handle it. */
        return 1;
    }
}
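/* A minimal sketch of how an OS signal handler is expected to call the
 * function above (hypothetical handler; the real ones live in the
 * OS-specific files, e.g. linux-os.c):
 *
 *   static void sigsegv_handler(int sig, siginfo_t *info, void *ctx) {
 *       if (gencgc_handle_wp_violation(info->si_addr))
 *           return;   // just the write barrier firing; resume the mutator
 *       // ... otherwise fall through to the generic SIGSEGV logic ...
 *   }
 */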
4426 /* This is to be called when we catch a SIGSEGV/SIGBUS, determine that
4427 * it's not just a case of the program hitting the write barrier, and
4428 * are about to let Lisp deal with it. It's basically just a
4429 * convenient place to set a gdb breakpoint. */
4430 void
unhandled_sigmemoryfault(void *addr)
{}
4434 static void
update_thread_page_tables(struct thread *th)
{
4437 gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->alloc_region);
4438 #if defined(LISP_FEATURE_SB_SAFEPOINT_STRICTLY) && !defined(LISP_FEATURE_WIN32)
4439 gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->sprof_alloc_region);
#endif
}
4443 /* GC is single-threaded and all memory allocations during a
4444 collection happen in the GC thread, so it is sufficient to update
   all the page tables once at the beginning of a collection and
4446 update only page tables of the GC thread during the collection. */
void gc_alloc_update_all_page_tables(int for_all_threads)
{
4449 /* Flush the alloc regions updating the tables. */
4450 struct thread *th;
4451 if (for_all_threads) {
4452 for_each_thread(th) {
            update_thread_page_tables(th);
        }
    }
4456 else {
4457 th = arch_os_get_current_thread();
4458 if (th) {
            update_thread_page_tables(th);
        }
    }
4462 gc_alloc_update_page_tables(UNBOXED_PAGE_FLAG, &unboxed_region);
    gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &boxed_region);
}
4466 void
gc_set_region_empty(struct alloc_region *region)
{
4469 region->first_page = 0;
4470 region->last_page = -1;
4471 region->start_addr = page_address(0);
4472 region->free_pointer = page_address(0);
    region->end_addr = page_address(0);
}
4476 static void
zero_all_free_pages()
{
4479 page_index_t i;
4481 for (i = 0; i < last_free_page; i++) {
4482 if (page_free_p(i)) {
4483 #ifdef READ_PROTECT_FREE_PAGES
4484 os_protect(page_address(i),
4485 GENCGC_CARD_BYTES,
4486 OS_VM_PROT_ALL);
4487 #endif
            zero_pages(i, i);
        }
    }
}
4493 /* Things to do before doing a final GC before saving a core (without
4494 * purify).
4496 * + Pages in large_object pages aren't moved by the GC, so we need to
4497 * unset that flag from all pages.
4498 * + The pseudo-static generation isn't normally collected, but it seems
4499 * reasonable to collect it at least when saving a core. So move the
 * pages to a normal generation.
 */
4502 static void
prepare_for_final_gc ()
{
4505 page_index_t i;
4507 #ifdef LISP_FEATURE_IMMOBILE_SPACE
4508 extern void prepare_immobile_space_for_final_gc();
4509 prepare_immobile_space_for_final_gc ();
4510 #endif
4511 do_wipe_p = 0;
4512 for (i = 0; i < last_free_page; i++) {
4513 page_table[i].large_object = 0;
4514 if (page_table[i].gen == PSEUDO_STATIC_GENERATION) {
4515 int used = page_bytes_used(i);
4516 page_table[i].gen = HIGHEST_NORMAL_GENERATION;
4517 generations[PSEUDO_STATIC_GENERATION].bytes_allocated -= used;
            generations[HIGHEST_NORMAL_GENERATION].bytes_allocated += used;
        }
    }
}
4524 /* Do a non-conservative GC, and then save a core with the initial
4525 * function being set to the value of the static symbol
4526 * SB!VM:RESTART-LISP-FUNCTION */
4527 void
4528 gc_and_save(char *filename, boolean prepend_runtime,
4529 boolean save_runtime_options, boolean compressed,
            int compression_level, int application_type)
{
4532 FILE *file;
4533 void *runtime_bytes = NULL;
4534 size_t runtime_size;
4536 file = prepare_to_save(filename, prepend_runtime, &runtime_bytes,
4537 &runtime_size);
4538 if (file == NULL)
4539 return;
4541 conservative_stack = 0;
4543 /* The filename might come from Lisp, and be moved by the now
4544 * non-conservative GC. */
4545 filename = strdup(filename);
4547 /* Collect twice: once into relatively high memory, and then back
4548 * into low memory. This compacts the retained data into the lower
     * pages, minimizing the size of the core file.
     */
4551 prepare_for_final_gc();
4552 gencgc_alloc_start_page = last_free_page;
4553 collect_garbage(HIGHEST_NORMAL_GENERATION+1);
4555 prepare_for_final_gc();
4556 gencgc_alloc_start_page = -1;
4557 collect_garbage(HIGHEST_NORMAL_GENERATION+1);
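    /* (gencgc_alloc_start_page = last_free_page above forced the first
     * collection to copy survivors into pages past the old high-water
     * mark; resetting it to -1 lets this second collection allocate from
     * the bottom of the heap again, compacting everything downward.) */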
4559 if (prepend_runtime)
4560 save_runtime_to_filehandle(file, runtime_bytes, runtime_size,
4561 application_type);
4563 /* The dumper doesn't know that pages need to be zeroed before use. */
4564 zero_all_free_pages();
4565 save_to_filehandle(file, filename, SymbolValue(RESTART_LISP_FUNCTION,0),
4566 prepend_runtime, save_runtime_options,
4567 compressed ? compression_level : COMPRESSION_LEVEL_NONE);
4568 /* Oops. Save still managed to fail. Since we've mangled the stack
4569 * beyond hope, there's not much we can do.
4570 * (beyond FUNCALLing RESTART_LISP_FUNCTION, but I suspect that's
4571 * going to be rather unsatisfactory too... */
    lose("Attempt to save core after non-conservative GC failed.\n");
}