/*
 * GENerational Conservative Garbage Collector for SBCL
 */

/*
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 */

/*
 * For a review of garbage collection techniques (e.g. generational
 * GC) and terminology (e.g. "scavenging") see Paul R. Wilson,
 * "Uniprocessor Garbage Collection Techniques". As of 20000618, this
 * had been accepted for _ACM Computing Surveys_ and was available
 * as a PostScript preprint through
 *   <http://www.cs.utexas.edu/users/oops/papers.html>
 * as
 *   <ftp://ftp.cs.utexas.edu/pub/garbage/bigsurv.ps>.
 */
#if defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD)
#include "pthreads_win32.h"
#endif

#include "interrupt.h"
#include "gc-internal.h"
#include "pseudo-atomic.h"
#include "genesis/vector.h"
#include "genesis/weak-pointer.h"
#include "genesis/fdefn.h"
#include "genesis/simple-fun.h"
#include "genesis/hash-table.h"
#include "genesis/instance.h"
#include "genesis/layout.h"
#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
#include "genesis/cons.h"
#endif

/* forward declarations */
page_index_t
gc_find_freeish_pages(page_index_t *restart_page_ptr, sword_t nbytes,
                      int page_type_flag);
/*
 * GC parameters
 */

/* As usually configured, generations 0-5 are normal collected generations,
   6 is pseudo-static (the objects in which are never moved nor reclaimed),
   and 7 is scratch space used when collecting a generation without promotion,
   wherein it is moved to generation 7 and back again.
 */
enum {
    SCRATCH_GENERATION = PSEUDO_STATIC_GENERATION+1,
    NUM_GENERATIONS
};

/* Should we use page protection to help avoid the scavenging of pages
 * that don't have pointers to younger generations? */
boolean enable_page_protection = 1;

/* the minimum size (in bytes) for a large object*/
/* NB this logic is unfortunately copied in 'compiler/x86-64/macros.lisp' */
#if (GENCGC_ALLOC_GRANULARITY >= PAGE_BYTES) && (GENCGC_ALLOC_GRANULARITY >= GENCGC_CARD_BYTES)
os_vm_size_t large_object_size = 4 * GENCGC_ALLOC_GRANULARITY;
#elif (GENCGC_CARD_BYTES >= PAGE_BYTES) && (GENCGC_CARD_BYTES >= GENCGC_ALLOC_GRANULARITY)
os_vm_size_t large_object_size = 4 * GENCGC_CARD_BYTES;
#else
os_vm_size_t large_object_size = 4 * PAGE_BYTES;
#endif

/* Largest allocation seen since last GC. */
os_vm_size_t large_allocation = 0;
/* the verbosity level. All non-error messages are disabled at level 0;
 * and only a few rare messages are printed at level 1. */
#if QSHOW
boolean gencgc_verbose = 1;
#else
boolean gencgc_verbose = 0;
#endif
/* FIXME: At some point enable the various error-checking things below
 * and see what they say. */

/* We hunt for pointers to old-space, when GCing generations >= verify_gen.
 * Set verify_gens to HIGHEST_NORMAL_GENERATION + 1 to disable this kind of
 * check. */
generation_index_t verify_gens = HIGHEST_NORMAL_GENERATION + 1;

/* Should we do a pre-scan verify of generation 0 before it's GCed? */
boolean pre_verify_gen_0 = 0;

/* Should we check for bad pointers after gc_free_heap is called
 * from Lisp PURIFY? */
boolean verify_after_free_heap = 0;

/* Should we print a note when code objects are found in the dynamic space
 * during a heap verify? */
boolean verify_dynamic_code_check = 0;

#ifdef LISP_FEATURE_X86
/* Should we check code objects for fixup errors after they are transported? */
boolean check_code_fixups = 0;
#endif

/* Should we check that newly allocated regions are zero filled? */
boolean gencgc_zero_check = 0;

/* Should we check that the free space is zero filled? */
boolean gencgc_enable_verify_zero_fill = 0;

/* Should we check that free pages are zero filled during gc_free_heap
 * called after Lisp PURIFY? */
boolean gencgc_zero_check_during_free_heap = 0;

/* When loading a core, don't do a full scan of the memory for the
 * memory region boundaries. (Set to true by coreparse.c if the core
 * contained a pagetable entry).
 */
boolean gencgc_partial_pickup = 0;

/* If defined, free pages are read-protected to ensure that nothing
 * accesses them.
 */

/* #define READ_PROTECT_FREE_PAGES */
/*
 * GC structures and variables
 */

/* the total bytes allocated. These are seen by Lisp DYNAMIC-USAGE. */
os_vm_size_t bytes_allocated = 0;
os_vm_size_t auto_gc_trigger = 0;

/* the source and destination generations. These are set before a GC starts
 * scavenging. */
generation_index_t from_space;
generation_index_t new_space;

/* Set to 1 when in GC */
boolean gc_active_p = 0;

/* should the GC be conservative on stack. If false (only right before
 * saving a core), don't scan the stack / mark pages dont_move. */
static boolean conservative_stack = 1;

/* An array of page structures is allocated on gc initialization.
 * This helps to quickly map between an address and its page structure.
 * page_table_pages is set from the size of the dynamic space. */
page_index_t page_table_pages;
struct page *page_table;

in_use_marker_t *page_table_dontmove_dwords;
size_t page_table_dontmove_dwords_size_in_bytes;

/* In GC cards that have conservative pointers to them, should we wipe out
 * dwords in there that are not used, so that they do not act as false
 * roots to other things in the heap from then on? This is a new feature
 * but in testing it is both reliable and causes no noticeable slowdown. */

/* a value that we use to wipe out unused words in GC cards that
 * live alongside conservatively pointed-to words. */
const lispobj wipe_with = 0;
static inline boolean
page_allocated_p(page_index_t page) {
    return (page_table[page].allocated != FREE_PAGE_FLAG);
}

static inline boolean
page_no_region_p(page_index_t page) {
    return !(page_table[page].allocated & OPEN_REGION_PAGE_FLAG);
}

static inline boolean
page_allocated_no_region_p(page_index_t page) {
    return ((page_table[page].allocated & (UNBOXED_PAGE_FLAG | BOXED_PAGE_FLAG))
            && page_no_region_p(page));
}

static inline boolean
page_free_p(page_index_t page) {
    return (page_table[page].allocated == FREE_PAGE_FLAG);
}

static inline boolean
page_boxed_p(page_index_t page) {
    return (page_table[page].allocated & BOXED_PAGE_FLAG);
}

static inline boolean
page_boxed_no_region_p(page_index_t page) {
    return page_boxed_p(page) && page_no_region_p(page);
}

static inline boolean
page_unboxed_p(page_index_t page) {
    /* Both flags set == boxed code page */
    return ((page_table[page].allocated & UNBOXED_PAGE_FLAG)
            && !page_boxed_p(page));
}
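
/* Illustrative sketch (not part of the original source): as the comment
 * in page_unboxed_p() notes, a page with both flags set is a boxed code
 * page, so a hypothetical predicate for that case is just the
 * conjunction of the two flag tests: */
static inline boolean
example_page_code_p(page_index_t page) {
    return (page_table[page].allocated & UNBOXED_PAGE_FLAG)
        && (page_table[page].allocated & BOXED_PAGE_FLAG);
}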
static inline boolean
protect_page_p(page_index_t page, generation_index_t generation) {
    return (page_boxed_no_region_p(page)
            && (page_table[page].bytes_used != 0)
            && !page_table[page].dont_move
            && (page_table[page].gen == generation));
}

/* To map addresses to page structures the address of the first page
 * is needed. */
void *heap_base = NULL;

/* Calculate the start address for the given page number. */
inline void *
page_address(page_index_t page_num)
{
    return (heap_base + (page_num * GENCGC_CARD_BYTES));
}

/* Calculate the address where the allocation region associated with
 * the page starts. */
static inline void *
page_scan_start(page_index_t page_index)
{
    return page_address(page_index)-page_table[page_index].scan_start_offset;
}
/* True if the page starts a contiguous block. */
static inline boolean
page_starts_contiguous_block_p(page_index_t page_index)
{
    return page_table[page_index].scan_start_offset == 0;
}

/* True if the page is the last page in a contiguous block. */
static inline boolean
page_ends_contiguous_block_p(page_index_t page_index, generation_index_t gen)
{
    return (/* page doesn't fill block */
            (page_table[page_index].bytes_used < GENCGC_CARD_BYTES)
            /* page is last allocated page */
            || ((page_index + 1) >= last_free_page)
            /* next page is free */
            || page_free_p(page_index + 1)
            /* next page contains no data */
            || (page_table[page_index + 1].bytes_used == 0)
            /* next page is in different generation */
            || (page_table[page_index + 1].gen != gen)
            /* next page starts its own contiguous block */
            || (page_starts_contiguous_block_p(page_index + 1)));
}
/* Find the page index within the page_table for the given
 * address. Return -1 on failure. */
page_index_t
find_page_index(void *addr)
{
    if (addr >= heap_base) {
        page_index_t index = ((pointer_sized_uint_t)addr -
                              (pointer_sized_uint_t)heap_base) / GENCGC_CARD_BYTES;
        if (index < page_table_pages)
            return (index);
    }
    return (-1);
}
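
/* Usage sketch (illustrative only, not used by the collector):
 * find_page_index() inverts page_address(), so an arbitrary heap
 * address can be mapped back to its page, and page_scan_start() then
 * locates the allocation region holding it: */
static inline void *
example_region_of(void *addr)
{
    page_index_t index = find_page_index(addr);
    /* -1 means ADDR lies outside the dynamic space. */
    return (index == -1) ? NULL : page_scan_start(index);
}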
static os_vm_size_t
npage_bytes(page_index_t npages)
{
    gc_assert(npages>=0);
    return ((os_vm_size_t)npages)*GENCGC_CARD_BYTES;
}

/* Check that X is a higher address than Y and return offset from Y to
 * X. */
static inline os_vm_size_t
void_diff(void *x, void *y)
{
    gc_assert(x >= y);
    return (pointer_sized_uint_t)x - (pointer_sized_uint_t)y;
}
/* a structure to hold the state of a generation
 *
 * CAUTION: If you modify this, make sure to touch up the alien
 * definition in src/code/gc.lisp accordingly. ...or better yet,
 * deal with the FIXME there... */
struct generation {

    /* the first page that gc_alloc() checks on its next call */
    page_index_t alloc_start_page;

    /* the first page that gc_alloc_unboxed() checks on its next call */
    page_index_t alloc_unboxed_start_page;

    /* the first page that gc_alloc_large (boxed) considers on its next
     * call. (Although it always allocates after the boxed_region.) */
    page_index_t alloc_large_start_page;

    /* the first page that gc_alloc_large (unboxed) considers on its
     * next call. (Although it always allocates after the
     * current_unboxed_region.) */
    page_index_t alloc_large_unboxed_start_page;

    /* the bytes allocated to this generation */
    os_vm_size_t bytes_allocated;

    /* the number of bytes at which to trigger a GC */
    os_vm_size_t gc_trigger;

    /* to calculate a new level for gc_trigger */
    os_vm_size_t bytes_consed_between_gc;

    /* the number of GCs since the last raise */
    int num_gc;

    /* the number of GCs to run on the generations before raising objects to the
     * next generation */
    int number_of_gcs_before_promotion;

    /* the cumulative sum of the bytes allocated to this generation. It is
     * cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation. Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC. */
    os_vm_size_t cum_sum_bytes_allocated;

    /* a minimum average memory age before a GC will occur helps
     * prevent a GC when a large number of new live objects have been
     * added, in which case a GC could be a waste of time */
    double minimum_age_before_gc;
};
/* an array of generation structures. There needs to be one more
 * generation structure than actual generations as the oldest
 * generation is temporarily raised then lowered. */
struct generation generations[NUM_GENERATIONS];

/* the oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... HIGHEST_NORMAL_GENERATION
 *
 * The default of HIGHEST_NORMAL_GENERATION enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided. */
generation_index_t gencgc_oldest_gen_to_gc = HIGHEST_NORMAL_GENERATION;

/* META: Is nobody aside from me bothered by this especially misleading
 * use of the word "last"? It could mean either "ultimate" or "prior",
 * but in fact means neither. It is the *FIRST* page that should be grabbed
 * for more space, so it is min free page, or 1+ the max used page. */
/* The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the Lisp code. */
page_index_t last_free_page;
#ifdef LISP_FEATURE_SB_THREAD
/* This lock is to prevent multiple threads from simultaneously
 * allocating new regions which overlap each other. Note that the
 * majority of GC is single-threaded, but alloc() may be called from
 * >1 thread at a time and must be thread-safe. This lock must be
 * seized before all accesses to generations[] or to parts of
 * page_table[] that other threads may want to see */
static pthread_mutex_t free_pages_lock = PTHREAD_MUTEX_INITIALIZER;

/* This lock is used to protect non-thread-local allocation. */
static pthread_mutex_t allocation_lock = PTHREAD_MUTEX_INITIALIZER;
#endif

extern os_vm_size_t gencgc_release_granularity;
os_vm_size_t gencgc_release_granularity = GENCGC_RELEASE_GRANULARITY;

extern os_vm_size_t gencgc_alloc_granularity;
os_vm_size_t gencgc_alloc_granularity = GENCGC_ALLOC_GRANULARITY;
/*
 * miscellaneous heap functions
 */

/* Count the number of pages which are write-protected within the
 * given generation. */
static page_index_t
count_write_protect_generation_pages(generation_index_t generation)
{
    page_index_t i, count = 0;

    for (i = 0; i < last_free_page; i++)
        if (page_allocated_p(i)
            && (page_table[i].gen == generation)
            && (page_table[i].write_protected == 1))
            count++;
    return count;
}
/* Count the number of pages within the given generation. */
static page_index_t
count_generation_pages(generation_index_t generation)
{
    page_index_t i;
    page_index_t count = 0;

    for (i = 0; i < last_free_page; i++)
        if (page_allocated_p(i)
            && (page_table[i].gen == generation))
            count++;
    return count;
}

static page_index_t
count_dont_move_pages(void)
{
    page_index_t i;
    page_index_t count = 0;
    for (i = 0; i < last_free_page; i++) {
        if (page_allocated_p(i)
            && (page_table[i].dont_move != 0)) {
            count++;
        }
    }
    return count;
}
/* Work through the pages and add up the number of bytes used for the
 * given generation. */
static os_vm_size_t
count_generation_bytes_allocated (generation_index_t gen)
{
    page_index_t i;
    os_vm_size_t result = 0;
    for (i = 0; i < last_free_page; i++) {
        if (page_allocated_p(i)
            && (page_table[i].gen == gen))
            result += page_table[i].bytes_used;
    }
    return result;
}
/* Return the average age of the memory in a generation. */
extern double
generation_average_age(generation_index_t gen)
{
    if (generations[gen].bytes_allocated == 0)
        return 0.0;

    return
        ((double)generations[gen].cum_sum_bytes_allocated)
        / ((double)generations[gen].bytes_allocated);
}
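
/* Worked example (hypothetical numbers): a generation holding
 * bytes_allocated = 10 MB that has accumulated cum_sum_bytes_allocated
 * = 30 MB from younger-generation GCs has an average age of
 * 30/10 = 3.0, which is then compared against minimum_age_before_gc. */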
extern void
write_generation_stats(FILE *file)
{
    generation_index_t i;

#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
#define FPU_STATE_SIZE 27
    int fpu_state[FPU_STATE_SIZE];
#elif defined(LISP_FEATURE_PPC)
#define FPU_STATE_SIZE 32
    long long fpu_state[FPU_STATE_SIZE];
#elif defined(LISP_FEATURE_SPARC)
    /*
     * 32 (single-precision) FP registers, and the FP state register.
     * But Sparc V9 has 32 double-precision registers (equivalent to 64
     * single-precision, but can't be accessed), so we leave enough room
     * for that.
     */
#define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
    long long fpu_state[FPU_STATE_SIZE];
#elif defined(LISP_FEATURE_ARM)
#define FPU_STATE_SIZE 8
    long long fpu_state[FPU_STATE_SIZE];
#endif

    /* This code uses the FP instructions which may be set up for Lisp
     * so they need to be saved and reset for C. */
    fpu_save(fpu_state);
    /* Print the heap stats. */
    fprintf(file,
            " Gen StaPg UbSta LaSta LUbSt Boxed Unboxed LB LUB !move Alloc Waste Trig WP GCs Mem-age\n");

    for (i = 0; i < SCRATCH_GENERATION; i++) {
        page_index_t j;
        page_index_t boxed_cnt = 0;
        page_index_t unboxed_cnt = 0;
        page_index_t large_boxed_cnt = 0;
        page_index_t large_unboxed_cnt = 0;
        page_index_t pinned_cnt=0;

        for (j = 0; j < last_free_page; j++)
            if (page_table[j].gen == i) {

                /* Count the number of boxed pages within the given
                 * generation. */
                if (page_boxed_p(j)) {
                    if (page_table[j].large_object)
                        large_boxed_cnt++;
                    else
                        boxed_cnt++;
                }
                if(page_table[j].dont_move) pinned_cnt++;
                /* Count the number of unboxed pages within the given
                 * generation. */
                if (page_unboxed_p(j)) {
                    if (page_table[j].large_object)
                        large_unboxed_cnt++;
                    else
                        unboxed_cnt++;
                }
            }

        gc_assert(generations[i].bytes_allocated
                  == count_generation_bytes_allocated(i));
        fprintf(file,
                " %1d: %5ld %5ld %5ld %5ld",
                i,
                generations[i].alloc_start_page,
                generations[i].alloc_unboxed_start_page,
                generations[i].alloc_large_start_page,
                generations[i].alloc_large_unboxed_start_page);
        fprintf(file,
                " %5"PAGE_INDEX_FMT" %5"PAGE_INDEX_FMT" %5"PAGE_INDEX_FMT
                " %5"PAGE_INDEX_FMT" %5"PAGE_INDEX_FMT,
                boxed_cnt, unboxed_cnt, large_boxed_cnt,
                large_unboxed_cnt, pinned_cnt);
        fprintf(file,
                " %8"OS_VM_SIZE_FMT" %5"OS_VM_SIZE_FMT" %8"OS_VM_SIZE_FMT
                " %4"PAGE_INDEX_FMT" %3d %7.4f\n",
                generations[i].bytes_allocated,
                (npage_bytes(count_generation_pages(i)) - generations[i].bytes_allocated),
                generations[i].gc_trigger,
                count_write_protect_generation_pages(i),
                generations[i].num_gc,
                generation_average_age(i));
    }
    fprintf(file," Total bytes allocated    = %"OS_VM_SIZE_FMT"\n", bytes_allocated);
    fprintf(file," Dynamic-space-size bytes = %"OS_VM_SIZE_FMT"\n", dynamic_space_size);

    fpu_restore(fpu_state);
}
extern void
write_heap_exhaustion_report(FILE *file, long available, long requested,
                             struct thread *thread)
{
    fprintf(file,
            "Heap exhausted during %s: %ld bytes available, %ld requested.\n",
            gc_active_p ? "garbage collection" : "allocation",
            available,
            requested);
    write_generation_stats(file);
    fprintf(file, "GC control variables:\n");
    fprintf(file, "   *GC-INHIBIT* = %s\n   *GC-PENDING* = %s\n",
            SymbolValue(GC_INHIBIT,thread)==NIL ? "false" : "true",
            (SymbolValue(GC_PENDING, thread) == T) ?
            "true" : ((SymbolValue(GC_PENDING, thread) == NIL) ?
                      "false" : "in progress"));
#ifdef LISP_FEATURE_SB_THREAD
    fprintf(file, "   *STOP-FOR-GC-PENDING* = %s\n",
            SymbolValue(STOP_FOR_GC_PENDING,thread)==NIL ? "false" : "true");
#endif
}
extern void
print_generation_stats(void)
{
    write_generation_stats(stderr);
}

extern char* gc_logfile;
char * gc_logfile = NULL;

extern void
log_generation_stats(char *logfile, char *header)
{
    if (logfile) {
        FILE * log = fopen(logfile, "a");
        if (log) {
            fprintf(log, "%s\n", header);
            write_generation_stats(log);
            fclose(log);
        } else {
            fprintf(stderr, "Could not open gc logfile: %s\n", logfile);
            fflush(stderr);
        }
    }
}
extern void
report_heap_exhaustion(long available, long requested, struct thread *th)
{
    if (gc_logfile) {
        FILE * log = fopen(gc_logfile, "a");
        if (log) {
            write_heap_exhaustion_report(log, available, requested, th);
            fclose(log);
        } else {
            fprintf(stderr, "Could not open gc logfile: %s\n", gc_logfile);
            fflush(stderr);
        }
    }
    /* Always to stderr as well. */
    write_heap_exhaustion_report(stderr, available, requested, th);
}

#if defined(LISP_FEATURE_X86)
void fast_bzero(void*, size_t); /* in <arch>-assem.S */
#endif
/* Zero the pages from START to END (inclusive), but use mmap/munmap instead
 * of zeroing it ourselves, i.e. in practice give the memory back to the
 * OS. Generally done after a large GC.
 */
void zero_pages_with_mmap(page_index_t start, page_index_t end) {
    page_index_t i;
    void *addr = page_address(start), *new_addr;
    os_vm_size_t length = npage_bytes(1+end-start);

    if (start > end)
      return;

    gc_assert(length >= gencgc_release_granularity);
    gc_assert((length % gencgc_release_granularity) == 0);

    os_invalidate(addr, length);
    new_addr = os_validate(addr, length);
    if (new_addr == NULL || new_addr != addr) {
        lose("remap_free_pages: page moved, 0x%08x ==> 0x%08x",
             start, new_addr);
    }

    for (i = start; i <= end; i++) {
        page_table[i].need_to_zero = 0;
    }
}
/* Zero the pages from START to END (inclusive). Generally done just after
 * a new region has been allocated.
 */
static void
zero_pages(page_index_t start, page_index_t end) {
    if (start > end)
      return;

#if defined(LISP_FEATURE_X86)
    fast_bzero(page_address(start), npage_bytes(1+end-start));
#else
    bzero(page_address(start), npage_bytes(1+end-start));
#endif
}

static void
zero_and_mark_pages(page_index_t start, page_index_t end) {
    page_index_t i;

    zero_pages(start, end);
    for (i = start; i <= end; i++)
        page_table[i].need_to_zero = 0;
}
/* Zero the pages from START to END (inclusive), except for those
 * pages that are known to be already zeroed. Mark all pages in the
 * ranges as non-zeroed.
 */
static void
zero_dirty_pages(page_index_t start, page_index_t end) {
    page_index_t i, j;

    for (i = start; i <= end; i++) {
        if (!page_table[i].need_to_zero) continue;
        for (j = i+1; (j <= end) && (page_table[j].need_to_zero); j++);
        zero_pages(i, j-1);
        i = j;
    }

    for (i = start; i <= end; i++) {
        page_table[i].need_to_zero = 1;
    }
}
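
/* Example (illustrative): if pages 5..7 are marked need_to_zero but
 * page 8 is not, the inner scan above stops with j == 8 and a single
 * zero_pages(5, 7) call covers the whole dirty run, rather than one
 * call per page. */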
/*
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties
 * e.g. boxed/unboxed, generation, ages; there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page. Many
 * features of memory use are noted on a page-wise basis, e.g. the
 * generation; so if a region starts within an existing allocated page
 * it must be consistent with this page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region. It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list which jumps all over the place to clean up the list. It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g. to detect pointers to the oldspace.
 * Thus it's important that the allocation regions have the correct
 * properties set when allocated, and not just set when closed. The
 * region allocation routines return regions with the specified
 * properties, and grab all the pages, setting their properties
 * appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer. The actual space used by the region is not reflected
 * in the page tables until it is closed. It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used, returning unused
 * space. Further it may be noted in the new regions, which is
 * necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region; the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects and so
 * don't need scavenging. Further they can't contain pointers to
 * younger generations so WP is not needed. By allocating pages to
 * unboxed objects the whole page never needs scavenging or
 * write-protecting. */
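
/* A minimal sketch of the inline allocation style described above:
 * bump the free pointer and compare against the region's end address.
 * This is illustrative only; the collector's real fast path is
 * gc_alloc_with_region() further below. */
static inline void *
example_bump_alloc(struct alloc_region *region, sword_t nbytes)
{
    void *new_free_pointer = region->free_pointer + nbytes;
    if (new_free_pointer <= region->end_addr) {
        void *new_obj = region->free_pointer;
        region->free_pointer = new_free_pointer;
        return new_obj;  /* fast path: no lock, no page table update */
    }
    return NULL;         /* region full: close it and open a new one */
}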
/* We are only using two regions at present. Both are for the current
 * newspace generation. */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

/* The generation currently being allocated to. */
static generation_index_t gc_alloc_generation;

static inline page_index_t
generation_alloc_start_page(generation_index_t generation, int page_type_flag, int large)
{
    if (large) {
        if (UNBOXED_PAGE_FLAG == page_type_flag) {
            return generations[generation].alloc_large_unboxed_start_page;
        } else if (BOXED_PAGE_FLAG & page_type_flag) {
            /* Both code and data. */
            return generations[generation].alloc_large_start_page;
        } else {
            lose("bad page type flag: %d", page_type_flag);
        }
    } else {
        if (UNBOXED_PAGE_FLAG == page_type_flag) {
            return generations[generation].alloc_unboxed_start_page;
        } else if (BOXED_PAGE_FLAG & page_type_flag) {
            /* Both code and data. */
            return generations[generation].alloc_start_page;
        } else {
            lose("bad page_type_flag: %d", page_type_flag);
        }
    }
}
static inline void
set_generation_alloc_start_page(generation_index_t generation, int page_type_flag, int large,
                                page_index_t page)
{
    if (large) {
        if (UNBOXED_PAGE_FLAG == page_type_flag) {
            generations[generation].alloc_large_unboxed_start_page = page;
        } else if (BOXED_PAGE_FLAG & page_type_flag) {
            /* Both code and data. */
            generations[generation].alloc_large_start_page = page;
        } else {
            lose("bad page type flag: %d", page_type_flag);
        }
    } else {
        if (UNBOXED_PAGE_FLAG == page_type_flag) {
            generations[generation].alloc_unboxed_start_page = page;
        } else if (BOXED_PAGE_FLAG & page_type_flag) {
            /* Both code and data. */
            generations[generation].alloc_start_page = page;
        } else {
            lose("bad page type flag: %d", page_type_flag);
        }
    }
}
const int n_dwords_in_card = GENCGC_CARD_BYTES / N_WORD_BYTES / 2;
in_use_marker_t *
dontmove_dwords(page_index_t page)
{
    if (page_table[page].has_dontmove_dwords)
        return &page_table_dontmove_dwords[page * n_dwords_in_card];
    return NULL;
}
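
/* Worked example (hypothetical card size): with 4096-byte cards and
 * 8-byte words, n_dwords_in_card is 4096/8/2 = 256, so each page's
 * slice of page_table_dontmove_dwords holds one marker per 16-byte
 * double-word of its card. */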
/* Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page. So
 * may pick up from the previous region if there is enough space. This
 * keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables(), and will thus be in an empty state.
 *
 * To assist the scavenging functions write-protected pages are not
 * used. Free pages should not be write-protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found to within the current
 * region and the page generation must be set so that pointers to the
 * from space can be recognized. Therefore the generation of pages in
 * the region are set to gc_alloc_generation. To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void
gc_alloc_new_region(sword_t nbytes, int page_type_flag, struct alloc_region *alloc_region)
{
    page_index_t first_page;
    page_index_t last_page;
    os_vm_size_t bytes_found;
    page_index_t i;
    int ret;

    FSHOW((stderr,
           "/alloc_new_region for %d bytes from gen %d\n",
           nbytes, gc_alloc_generation));

    /* Check that the region is in a reset state. */
    gc_assert((alloc_region->first_page == 0)
              && (alloc_region->last_page == -1)
              && (alloc_region->free_pointer == alloc_region->end_addr));
    ret = thread_mutex_lock(&free_pages_lock);
    gc_assert(ret == 0);
    first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0);
    last_page=gc_find_freeish_pages(&first_page, nbytes, page_type_flag);
    bytes_found=(GENCGC_CARD_BYTES - page_table[first_page].bytes_used)
            + npage_bytes(last_page-first_page);

    /* Set up the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
        + page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;

    /* Set up the pages. */

    /* The first page may have already been in use. */
    if (page_table[first_page].bytes_used == 0) {
        page_table[first_page].allocated = page_type_flag;
        page_table[first_page].gen = gc_alloc_generation;
        page_table[first_page].large_object = 0;
        page_table[first_page].scan_start_offset = 0;
        // wiping should have free()ed and :=NULL
        gc_assert(dontmove_dwords(first_page) == NULL);
    } else {
        gc_assert(page_table[first_page].allocated == page_type_flag);
        page_table[first_page].allocated |= OPEN_REGION_PAGE_FLAG;

        gc_assert(page_table[first_page].gen == gc_alloc_generation);
        gc_assert(page_table[first_page].large_object == 0);
    }

    for (i = first_page+1; i <= last_page; i++) {
        page_table[i].allocated = page_type_flag;
        page_table[i].gen = gc_alloc_generation;
        page_table[i].large_object = 0;
        /* This may not be necessary for unboxed regions (think it was
         * broken before!) */
        page_table[i].scan_start_offset =
            void_diff(page_address(i),alloc_region->start_addr);
        page_table[i].allocated |= OPEN_REGION_PAGE_FLAG;
    }
    /* Bump up last_free_page. */
    if (last_page+1 > last_free_page) {
        last_free_page = last_page+1;
        /* do we only want to call this on special occasions? like for
         * boxed_region? */
        set_alloc_pointer((lispobj)page_address(last_free_page));
    }
    ret = thread_mutex_unlock(&free_pages_lock);
    gc_assert(ret == 0);

#ifdef READ_PROTECT_FREE_PAGES
    os_protect(page_address(first_page),
               npage_bytes(1+last_page-first_page),
               OS_VM_PROT_ALL);
#endif

    /* If the first page was only partial, don't check whether it's
     * zeroed (it won't be) and don't zero it (since the parts that
     * we're interested in are guaranteed to be zeroed).
     */
    if (page_table[first_page].bytes_used) {
        first_page++;
    }

    zero_dirty_pages(first_page, last_page);

    /* we can do this after releasing free_pages_lock */
    if (gencgc_zero_check) {
        word_t *p;
        for (p = (word_t *)alloc_region->start_addr;
             p < (word_t *)alloc_region->end_addr; p++) {
            if (*p != 0) {
                lose("The new region is not zero at %p (start=%p, end=%p).\n",
                     p, alloc_region->start_addr, alloc_region->end_addr);
            }
        }
    }
}
/* If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then it is only recorded if the first page of the
 * current region is <= new_areas_ignore_page. This helps avoid
 * unnecessary recording when doing a full scavenge pass.
 *
 * The new_object structure holds the page, byte offset, and size of
 * new regions of objects. Each new area is placed in the array of
 * these structures pointed to by new_areas. new_areas_index holds the
 * offset into new_areas.
 *
 * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation. */
#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static page_index_t new_areas_ignore_page;
struct new_area {
    page_index_t page;
    size_t offset;
    size_t size;
};
static struct new_area (*new_areas)[];
static size_t new_areas_index;
size_t max_new_areas;
/* Add a new area to new_areas. */
static void
add_new_area(page_index_t first_page, size_t offset, size_t size)
{
    size_t new_area_start, c;
    ssize_t i;

    /* Ignore if full. */
    if (new_areas_index >= NUM_NEW_AREAS)
        return;

    switch (record_new_objects) {
    case 0:
        return;
    case 1:
        if (first_page > new_areas_ignore_page)
            return;
        break;
    case 2:
        break;
    default:
        gc_abort();
    }

    new_area_start = npage_bytes(first_page) + offset;

    /* Search backwards for a prior area that this follows from. If
       found this will save adding a new area. */
    for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) {
        size_t area_end =
            npage_bytes((*new_areas)[i].page)
            + (*new_areas)[i].offset
            + (*new_areas)[i].size;
        /*FSHOW((stderr,
               "/add_new_area S1 %d %d %d %d\n",
               i, c, new_area_start, area_end));*/
        if (new_area_start == area_end) {
            /*FSHOW((stderr,
                   "/adding to [%d] %d %d %d with %d %d %d:\n",
                   i,
                   (*new_areas)[i].page,
                   (*new_areas)[i].offset,
                   (*new_areas)[i].size,
                   first_page,
                   offset,
                   size);*/
            (*new_areas)[i].size += size;
            return;
        }
    }

    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
    /*FSHOW((stderr,
           "/new_area %d page %d offset %d size %d\n",
           new_areas_index, first_page, offset, size));*/
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
        max_new_areas = new_areas_index;
}
/* Update the tables for the alloc_region. The region may be added to
 * the new_areas.
 *
 * When done the alloc_region is set up so that the next quick alloc
 * will fail safely and thus a new region will be allocated. Further
 * it is safe to try to re-update the page table of this reset
 * alloc_region. */
void
gc_alloc_update_page_tables(int page_type_flag, struct alloc_region *alloc_region)
{
    boolean more;
    page_index_t first_page;
    page_index_t next_page;
    os_vm_size_t bytes_used;
    os_vm_size_t region_size;
    os_vm_size_t byte_cnt;
    page_bytes_t orig_first_page_bytes_used;
    int ret;

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if ((first_page == 0) && (alloc_region->last_page == -1))
        return;

    next_page = first_page+1;

    ret = thread_mutex_lock(&free_pages_lock);
    gc_assert(ret == 0);
    if (alloc_region->free_pointer != alloc_region->start_addr) {
        /* some bytes were allocated in the region */
        orig_first_page_bytes_used = page_table[first_page].bytes_used;

        gc_assert(alloc_region->start_addr ==
                  (page_address(first_page)
                   + page_table[first_page].bytes_used));

        /* All the pages used need to be updated */

        /* Update the first page. */

        /* If the page was free then set up the gen, and
         * scan_start_offset. */
        if (page_table[first_page].bytes_used == 0)
            gc_assert(page_starts_contiguous_block_p(first_page));
        page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);

        gc_assert(page_table[first_page].allocated & page_type_flag);
        gc_assert(page_table[first_page].gen == gc_alloc_generation);
        gc_assert(page_table[first_page].large_object == 0);

        byte_cnt = 0;

        /* Calculate the number of bytes used in this page. This is not
         * always the number of new bytes, unless it was free. */
        more = 0;
        if ((bytes_used = void_diff(alloc_region->free_pointer,
                                    page_address(first_page)))
            >GENCGC_CARD_BYTES) {
            bytes_used = GENCGC_CARD_BYTES;
            more = 1;
        }
        page_table[first_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        /* All the rest of the pages should be free. We need to set
         * their scan_start_offset pointer to the start of the
         * region, and set the bytes_used. */
        while (more) {
            page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
            gc_assert(page_table[next_page].allocated & page_type_flag);
            gc_assert(page_table[next_page].bytes_used == 0);
            gc_assert(page_table[next_page].gen == gc_alloc_generation);
            gc_assert(page_table[next_page].large_object == 0);

            gc_assert(page_table[next_page].scan_start_offset ==
                      void_diff(page_address(next_page),
                                alloc_region->start_addr));

            /* Calculate the number of bytes used in this page. */
            more = 0;
            if ((bytes_used = void_diff(alloc_region->free_pointer,
                                        page_address(next_page)))>GENCGC_CARD_BYTES) {
                bytes_used = GENCGC_CARD_BYTES;
                more = 1;
            }
            page_table[next_page].bytes_used = bytes_used;
            byte_cnt += bytes_used;

            next_page++;
        }

        region_size = void_diff(alloc_region->free_pointer,
                                alloc_region->start_addr);
        bytes_allocated += region_size;
        generations[gc_alloc_generation].bytes_allocated += region_size;

        gc_assert((byte_cnt - orig_first_page_bytes_used) == region_size);

        /* Set the generations alloc restart page to the last page of
         * the region. */
        set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0, next_page-1);

        /* Add the region to the new_areas if requested. */
        if (BOXED_PAGE_FLAG & page_type_flag)
            add_new_area(first_page,orig_first_page_bytes_used, region_size);

        FSHOW((stderr,
               "/gc_alloc_update_page_tables update %d bytes to gen %d\n",
               region_size,
               gc_alloc_generation));
    } else {
        /* There are no bytes allocated. Unallocate the first_page if
         * there are 0 bytes_used. */
        page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
        if (page_table[first_page].bytes_used == 0)
            page_table[first_page].allocated = FREE_PAGE_FLAG;
    }

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
        gc_assert(page_table[next_page].bytes_used == 0);
        page_table[next_page].allocated = FREE_PAGE_FLAG;
        next_page++;
    }
    ret = thread_mutex_unlock(&free_pages_lock);
    gc_assert(ret == 0);

    /* alloc_region is per-thread, we're ok to do this unlocked */
    gc_set_region_empty(alloc_region);
}
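
/* Lifecycle sketch (illustrative): a region is opened, bump-allocated
 * from, and then closed; closing writes the true bytes_used back into
 * the page table and resets the region so that the next inline
 * allocation fails safely into the slow path:
 *
 *     gc_alloc_new_region(32, BOXED_PAGE_FLAG, &boxed_region);
 *     ... inline allocations bump boxed_region.free_pointer ...
 *     gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &boxed_region);
 */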
/* Allocate a possibly large object. */
void *
gc_alloc_large(sword_t nbytes, int page_type_flag, struct alloc_region *alloc_region)
{
    boolean more;
    page_index_t first_page, next_page, last_page;
    page_bytes_t orig_first_page_bytes_used;
    os_vm_size_t byte_cnt;
    os_vm_size_t bytes_used;
    int ret;

    ret = thread_mutex_lock(&free_pages_lock);
    gc_assert(ret == 0);

    first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 1);
    if (first_page <= alloc_region->last_page) {
        first_page = alloc_region->last_page+1;
    }

    last_page=gc_find_freeish_pages(&first_page,nbytes, page_type_flag);

    gc_assert(first_page > alloc_region->last_page);

    set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 1, last_page);

    /* Set up the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /* If the first page was free then set up the gen, and
     * scan_start_offset. */
    if (page_table[first_page].bytes_used == 0) {
        page_table[first_page].allocated = page_type_flag;
        page_table[first_page].gen = gc_alloc_generation;
        page_table[first_page].scan_start_offset = 0;
        page_table[first_page].large_object = 1;
    }

    gc_assert(page_table[first_page].allocated == page_type_flag);
    gc_assert(page_table[first_page].gen == gc_alloc_generation);
    gc_assert(page_table[first_page].large_object == 1);

    byte_cnt = 0;

    /* Calc. the number of bytes used in this page. This is not
     * always the number of new bytes, unless it was free. */
    more = 0;
    if ((bytes_used = nbytes+orig_first_page_bytes_used) > GENCGC_CARD_BYTES) {
        bytes_used = GENCGC_CARD_BYTES;
        more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page+1;

    /* All the rest of the pages should be free. We need to set their
     * scan_start_offset pointer to the start of the region, and set
     * the bytes_used. */
    while (more) {
        gc_assert(page_free_p(next_page));
        gc_assert(page_table[next_page].bytes_used == 0);
        page_table[next_page].allocated = page_type_flag;
        page_table[next_page].gen = gc_alloc_generation;
        page_table[next_page].large_object = 1;

        page_table[next_page].scan_start_offset =
            npage_bytes(next_page-first_page) - orig_first_page_bytes_used;

        /* Calculate the number of bytes used in this page. */
        more = 0;
        bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt;
        if (bytes_used > GENCGC_CARD_BYTES) {
            bytes_used = GENCGC_CARD_BYTES;
            more = 1;
        }
        page_table[next_page].bytes_used = bytes_used;
        page_table[next_page].write_protected=0;
        page_table[next_page].dont_move=0;
        byte_cnt += bytes_used;
        next_page++;
    }

    gc_assert((byte_cnt-orig_first_page_bytes_used) == (size_t)nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (BOXED_PAGE_FLAG & page_type_flag)
        add_new_area(first_page,orig_first_page_bytes_used,nbytes);

    /* Bump up last_free_page */
    if (last_page+1 > last_free_page) {
        last_free_page = last_page+1;
        set_alloc_pointer((lispobj)(page_address(last_free_page)));
    }
    ret = thread_mutex_unlock(&free_pages_lock);
    gc_assert(ret == 0);

#ifdef READ_PROTECT_FREE_PAGES
    os_protect(page_address(first_page),
               npage_bytes(1+last_page-first_page),
               OS_VM_PROT_ALL);
#endif

    zero_dirty_pages(first_page, last_page);

    return page_address(first_page);
}
static page_index_t gencgc_alloc_start_page = -1;

void
gc_heap_exhausted_error_or_lose (sword_t available, sword_t requested)
{
    struct thread *thread = arch_os_get_current_thread();
    /* Write basic information before doing anything else: if we don't
     * call to lisp this is a must, and even if we do there is always
     * the danger that we bounce back here before the error has been
     * handled, or indeed even printed.
     */
    report_heap_exhaustion(available, requested, thread);
    if (gc_active_p || (available == 0)) {
        /* If we are in GC, or totally out of memory there is no way
         * to sanely transfer control to the lisp-side of things.
         */
        lose("Heap exhausted, game over.");
    }
    else {
        /* FIXME: assert free_pages_lock held */
        (void)thread_mutex_unlock(&free_pages_lock);
#if !(defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD))
        gc_assert(get_pseudo_atomic_atomic(thread));
        clear_pseudo_atomic_atomic(thread);
        if (get_pseudo_atomic_interrupted(thread))
            do_pending_interrupt();
#endif
        /* Another issue is that signalling HEAP-EXHAUSTED error leads
         * to running user code at arbitrary places, even in a
         * WITHOUT-INTERRUPTS which may lead to a deadlock without
         * running out of the heap. So at this point all bets are
         * off. */
        if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL)
            corruption_warning_and_maybe_lose
                ("Signalling HEAP-EXHAUSTED in a WITHOUT-INTERRUPTS.");
        /* available and requested should be double word aligned, thus
           they can be passed as fixnums and shifted later. */
        funcall2(StaticSymbolFunction(HEAP_EXHAUSTED_ERROR), available, requested);
        lose("HEAP-EXHAUSTED-ERROR fell through");
    }
}
page_index_t
gc_find_freeish_pages(page_index_t *restart_page_ptr, sword_t bytes,
                      int page_type_flag)
{
    page_index_t most_bytes_found_from = 0, most_bytes_found_to = 0;
    page_index_t first_page, last_page, restart_page = *restart_page_ptr;
    os_vm_size_t nbytes = bytes;
    os_vm_size_t nbytes_goal = nbytes;
    os_vm_size_t bytes_found = 0;
    os_vm_size_t most_bytes_found = 0;
    boolean small_object = nbytes < GENCGC_CARD_BYTES;
    /* FIXME: assert(free_pages_lock is held); */

    if (nbytes_goal < gencgc_alloc_granularity)
        nbytes_goal = gencgc_alloc_granularity;

    /* Toggled by gc_and_save for heap compaction, normally -1. */
    if (gencgc_alloc_start_page != -1) {
        restart_page = gencgc_alloc_start_page;
    }

    /* FIXME: This is on bytes instead of nbytes pending cleanup of
     * long from the interface. */
    gc_assert(bytes>=0);
    /* Search for a page with at least nbytes of space. We prefer
     * not to split small objects on multiple pages, to reduce the
     * number of contiguous allocation regions spanning multiple
     * pages: this helps avoid excessive conservativism.
     *
     * For other objects, we guarantee that they start on their own
     * page boundary.
     */
    first_page = restart_page;
    while (first_page < page_table_pages) {
        bytes_found = 0;
        if (page_free_p(first_page)) {
            gc_assert(0 == page_table[first_page].bytes_used);
            bytes_found = GENCGC_CARD_BYTES;
        } else if (small_object &&
                   (page_table[first_page].allocated == page_type_flag) &&
                   (page_table[first_page].large_object == 0) &&
                   (page_table[first_page].gen == gc_alloc_generation) &&
                   (page_table[first_page].write_protected == 0) &&
                   (page_table[first_page].dont_move == 0)) {
            bytes_found = GENCGC_CARD_BYTES - page_table[first_page].bytes_used;
            if (bytes_found < nbytes) {
                if (bytes_found > most_bytes_found)
                    most_bytes_found = bytes_found;
                first_page++;
                continue;
            }
        } else {
            first_page++;
            continue;
        }

        gc_assert(page_table[first_page].write_protected == 0);
        for (last_page = first_page+1;
             ((last_page < page_table_pages) &&
              page_free_p(last_page) &&
              (bytes_found < nbytes_goal));
             last_page++) {
            bytes_found += GENCGC_CARD_BYTES;
            gc_assert(0 == page_table[last_page].bytes_used);
            gc_assert(0 == page_table[last_page].write_protected);
        }

        if (bytes_found > most_bytes_found) {
            most_bytes_found = bytes_found;
            most_bytes_found_from = first_page;
            most_bytes_found_to = last_page;
        }
        if (bytes_found >= nbytes_goal)
            break;

        first_page = last_page;
    }

    bytes_found = most_bytes_found;
    restart_page = first_page + 1;

    /* Check for a failure */
    if (bytes_found < nbytes) {
        gc_assert(restart_page >= page_table_pages);
        gc_heap_exhausted_error_or_lose(most_bytes_found, nbytes);
    }

    gc_assert(most_bytes_found_to);
    *restart_page_ptr = most_bytes_found_from;
    return most_bytes_found_to-1;
}
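
/* Usage sketch (illustrative): callers hand a starting hint in through
 * restart_page_ptr; on success the first page of the chosen run is
 * stored back through the pointer and the last page of the run is
 * returned, as in gc_alloc_new_region() and gc_alloc_large() above. */
static inline page_index_t
example_find_run(page_index_t hint, sword_t nbytes, int page_type_flag)
{
    page_index_t first_page = hint;
    page_index_t last_page = gc_find_freeish_pages(&first_page, nbytes,
                                                   page_type_flag);
    gc_assert(first_page <= last_page);
    return last_page;
}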
/* Allocate bytes. All the rest of the special-purpose allocation
 * functions will eventually call this. */

void *
gc_alloc_with_region(sword_t nbytes,int page_type_flag, struct alloc_region *my_region,
                     int quick_p)
{
    void *new_free_pointer;

    if ((size_t)nbytes>=large_object_size)
        return gc_alloc_large(nbytes, page_type_flag, my_region);

    /* Check whether there is room in the current alloc region. */
    new_free_pointer = my_region->free_pointer + nbytes;

    /* fprintf(stderr, "alloc %d bytes from %p to %p\n", nbytes,
       my_region->free_pointer, new_free_pointer); */

    if (new_free_pointer <= my_region->end_addr) {
        /* If so then allocate from the current alloc region. */
        void *new_obj = my_region->free_pointer;
        my_region->free_pointer = new_free_pointer;

        /* Unless a `quick' alloc was requested, check whether the
           alloc region is almost empty. */
        if (!quick_p &&
            void_diff(my_region->end_addr,my_region->free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(page_type_flag, my_region);
            /* Set up a new region. */
            gc_alloc_new_region(32 /*bytes*/, page_type_flag, my_region);
        }

        return((void *)new_obj);
    }

    /* Else not enough free space in the current region: retry with a
     * new region. */

    gc_alloc_update_page_tables(page_type_flag, my_region);
    gc_alloc_new_region(nbytes, page_type_flag, my_region);
    return gc_alloc_with_region(nbytes, page_type_flag, my_region,0);
}
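
/* Illustrative call (a sketch, not an entry point the runtime defines):
 * allocating a small boxed object through the generic allocator from
 * the shared boxed region, with quick_p = 0 so that a nearly-full
 * region gets closed and replaced: */
static inline void *
example_alloc_boxed(sword_t nbytes)
{
    return gc_alloc_with_region(nbytes, BOXED_PAGE_FLAG, &boxed_region, 0);
}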
/* Copy a large object. If the object is in a large object region then
 * it is simply promoted, else it is copied. If it's large enough then
 * it's copied to a large object region.
 *
 * Bignums and vectors may have shrunk. If the object is not copied
 * the space needs to be reclaimed, and the page_tables corrected. */
static lispobj
general_copy_large_object(lispobj object, word_t nwords, boolean boxedp)
{
    int tag;
    lispobj *new;
    page_index_t first_page;

    gc_assert(is_lisp_pointer(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    if ((nwords > 1024*1024) && gencgc_verbose) {
        FSHOW((stderr, "/general_copy_large_object: %d bytes\n",
               nwords*N_WORD_BYTES));
    }

    /* Check whether it's a large object. */
    first_page = find_page_index((void *)object);
    gc_assert(first_page >= 0);

    if (page_table[first_page].large_object) {
        /* Promote the object. Note: Unboxed objects may have been
         * allocated to a BOXED region so it may be necessary to
         * change the region to UNBOXED. */
        os_vm_size_t remaining_bytes;
        os_vm_size_t bytes_freed;
        page_index_t next_page;
        page_bytes_t old_bytes_used;

        /* FIXME: This comment is somewhat stale.
         *
         * Note: Any page write-protection must be removed, else a
         * later scavenge_newspace may incorrectly not scavenge these
         * pages. This would not be necessary if they are added to the
         * new areas, but let's do it for them all (they'll probably
         * be written anyway?). */

        gc_assert(page_starts_contiguous_block_p(first_page));
        next_page = first_page;
        remaining_bytes = nwords*N_WORD_BYTES;

        while (remaining_bytes > GENCGC_CARD_BYTES) {
            gc_assert(page_table[next_page].gen == from_space);
            gc_assert(page_table[next_page].large_object);
            gc_assert(page_table[next_page].scan_start_offset ==
                      npage_bytes(next_page-first_page));
            gc_assert(page_table[next_page].bytes_used == GENCGC_CARD_BYTES);
            /* Should have been unprotected by unprotect_oldspace()
             * for boxed objects, and after promotion unboxed ones
             * should not be on protected pages at all. */
            gc_assert(!page_table[next_page].write_protected);

            if (boxedp)
                gc_assert(page_boxed_p(next_page));
            else {
                gc_assert(page_allocated_no_region_p(next_page));
                page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
            }
            page_table[next_page].gen = new_space;

            remaining_bytes -= GENCGC_CARD_BYTES;
            next_page++;
        }

        /* Now only one page remains, but the object may have shrunk so
         * there may be more unused pages which will be freed. */

        /* Object may have shrunk but shouldn't have grown - check. */
        gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

        page_table[next_page].gen = new_space;

        if (boxedp)
            gc_assert(page_boxed_p(next_page));
        else
            page_table[next_page].allocated = UNBOXED_PAGE_FLAG;

        /* Adjust the bytes_used. */
        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].bytes_used = remaining_bytes;

        bytes_freed = old_bytes_used - remaining_bytes;

        /* Free any remaining pages; needs care. */
        next_page++;
        while ((old_bytes_used == GENCGC_CARD_BYTES) &&
               (page_table[next_page].gen == from_space) &&
               /* FIXME: It is not obvious to me why this is necessary
                * as a loop condition: it seems to me that the
                * scan_start_offset test should be sufficient, but
                * experimentally that is not the case. --NS */
               (boxedp ?
                page_boxed_p(next_page) :
                page_allocated_no_region_p(next_page)) &&
               page_table[next_page].large_object &&
               (page_table[next_page].scan_start_offset ==
                npage_bytes(next_page - first_page))) {
            /* Checks out OK, free the page. Don't need to bother zeroing
             * pages as this should have been done before shrinking the
             * object. These pages shouldn't be write-protected, even if
             * boxed they should be zero filled. */
            gc_assert(page_table[next_page].write_protected == 0);

            old_bytes_used = page_table[next_page].bytes_used;
            page_table[next_page].allocated = FREE_PAGE_FLAG;
            page_table[next_page].bytes_used = 0;
            bytes_freed += old_bytes_used;
            next_page++;
        }

        if ((bytes_freed > 0) && gencgc_verbose) {
            FSHOW((stderr,
                   "/general_copy_large_object bytes_freed=%"OS_VM_SIZE_FMT"\n",
                   bytes_freed));
        }

        generations[from_space].bytes_allocated -= nwords*N_WORD_BYTES
            + bytes_freed;
        generations[new_space].bytes_allocated += nwords*N_WORD_BYTES;
        bytes_allocated -= bytes_freed;

        /* Add the region to the new_areas if requested. */
        if (boxedp)
            add_new_area(first_page,0,nwords*N_WORD_BYTES);

        return(object);

    } else {
        /* Get tag of object. */
        tag = lowtag_of(object);

        /* Allocate space. */
        new = gc_general_alloc(nwords*N_WORD_BYTES,
                               (boxedp ? BOXED_PAGE_FLAG : UNBOXED_PAGE_FLAG),
                               ALLOC_QUICK);

        /* Copy the object. */
        memcpy(new,native_pointer(object),nwords*N_WORD_BYTES);

        /* Return Lisp pointer of new object. */
        return ((lispobj) new) | tag;
    }
}

lispobj
copy_large_object(lispobj object, sword_t nwords)
{
    return general_copy_large_object(object, nwords, 1);
}

lispobj
copy_large_unboxed_object(lispobj object, sword_t nwords)
{
    return general_copy_large_object(object, nwords, 0);
}

/* to copy unboxed objects */
lispobj
copy_unboxed_object(lispobj object, sword_t nwords)
{
    return gc_general_copy_object(object, nwords, UNBOXED_PAGE_FLAG);
}
/*
 * code and code-related objects
 */

static lispobj trans_fun_header(lispobj object);
static lispobj trans_boxed(lispobj object);

/* Scan an x86 compiled code object, looking for possible fixups that
 * have been missed after a move.
 *
 * Two types of fixups are needed:
 * 1. Absolute fixups to within the code object.
 * 2. Relative fixups to outside the code object.
 *
 * Currently only absolute fixups to the constant vector, or to the
 * code area are checked. */
#ifdef LISP_FEATURE_X86
void
sniff_code_object(struct code *code, os_vm_size_t displacement)
{
    sword_t nheader_words, ncode_words, nwords;
    os_vm_address_t constants_start_addr = NULL, constants_end_addr, p;
    os_vm_address_t code_start_addr, code_end_addr;
    os_vm_address_t code_addr = (os_vm_address_t)code;
    int fixup_found = 0;

    if (!check_code_fixups)
        return;

    FSHOW((stderr, "/sniffing code: %p, %lu\n", code, displacement));

    ncode_words = fixnum_word_value(code->code_size);
    nheader_words = HeaderValue(*(lispobj *)code);
    nwords = ncode_words + nheader_words;

    constants_start_addr = code_addr + 5*N_WORD_BYTES;
    constants_end_addr = code_addr + nheader_words*N_WORD_BYTES;
    code_start_addr = code_addr + nheader_words*N_WORD_BYTES;
    code_end_addr = code_addr + nwords*N_WORD_BYTES;

    /* Work through the unboxed code. */
    for (p = code_start_addr; p < code_end_addr; p++) {
        void *data = *(void **)p;
        unsigned d1 = *((unsigned char *)p - 1);
        unsigned d2 = *((unsigned char *)p - 2);
        unsigned d3 = *((unsigned char *)p - 3);
        unsigned d4 = *((unsigned char *)p - 4);
#if QSHOW
        unsigned d5 = *((unsigned char *)p - 5);
        unsigned d6 = *((unsigned char *)p - 6);
#endif

        /* Check for code references. */
        /* Check for a 32 bit word that looks like an absolute
           reference to within the code area of the code object. */
        if ((data >= (void*)(code_start_addr-displacement))
            && (data < (void*)(code_end_addr-displacement))) {
            /* function header */
            if ((d4 == 0x5e)
                && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) ==
                    (unsigned)code)) {
                /* Skip the function header */
                p += 6*4 - 4 - 1;
                continue;
            }
            /* the case of PUSH imm32 */
            if (d1 == 0x68) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/PUSH $0x%.8x\n", data));
            }
            /* the case of MOV [reg-8],imm32 */
            if ((d3 == 0xc7)
                && (d2==0x40 || d2==0x41 || d2==0x42 || d2==0x43
                    || d2==0x45 || d2==0x46 || d2==0x47)
                && (d1 == 0xf8)) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/MOV [reg-8],$0x%.8x\n", data));
            }
            /* the case of LEA reg,[disp32] */
            if ((d2 == 0x8d) && ((d1 & 0xc7) == 5)) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr,"/LEA reg,[$0x%.8x]\n", data));
            }
        }

        /* Check for constant references. */
        /* Check for a 32 bit word that looks like an absolute
           reference to within the constant vector. Constant references
           will be aligned. */
        if ((data >= (void*)(constants_start_addr-displacement))
            && (data < (void*)(constants_end_addr-displacement))
            && (((unsigned)data & 0x3) == 0)) {
            /* the case of MOV eax,m32 */
            if (d1 == 0xa1) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr,"/MOV eax,0x%.8x\n", data));
            }

            /* the case of MOV m32,EAX */
            if (d1 == 0xa3) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/MOV 0x%.8x,eax\n", data));
            }

            /* the case of CMP m32,imm32 */
            if ((d1 == 0x3d) && (d2 == 0x81)) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/CMP 0x%.8x,immed32\n", data));
            }

            /* Check for a mod=00, r/m=101 byte. */
            if ((d1 & 0xc7) == 5) {
                /* the case of CMP m32,reg */
                if (d2 == 0x39) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr,"/CMP 0x%.8x,reg\n", data));
                }
                /* the case of CMP reg32,m32 */
                if (d2 == 0x3b) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/CMP reg32,0x%.8x\n", data));
                }
                /* the case of MOV m32,reg32 */
                if (d2 == 0x89) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/MOV 0x%.8x,reg32\n", data));
                }
                /* the case of MOV reg32,m32 */
                if (d2 == 0x8b) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/MOV reg32,0x%.8x\n", data));
                }
                /* the case of LEA reg32,m32 */
                if (d2 == 0x8d) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/LEA reg32,0x%.8x\n", data));
                }
            }
        }
    }

    /* If anything was found, print some information on the code
     * object. */
    if (fixup_found) {
        FSHOW((stderr,
               "/compiled code object at %x: header words = %d, code words = %d\n",
               code, nheader_words, ncode_words));
        FSHOW((stderr,
               "/const start = %x, end = %x\n",
               constants_start_addr, constants_end_addr));
        FSHOW((stderr,
               "/code start = %x, end = %x\n",
               code_start_addr, code_end_addr));
    }
}
#endif
#ifdef LISP_FEATURE_X86
void
gencgc_apply_code_fixups(struct code *old_code, struct code *new_code)
{
    sword_t nheader_words, ncode_words, nwords;
    os_vm_address_t constants_start_addr, constants_end_addr;
    os_vm_address_t code_start_addr, code_end_addr;
    os_vm_address_t code_addr = (os_vm_address_t)new_code;
    os_vm_address_t old_addr = (os_vm_address_t)old_code;
    os_vm_size_t displacement = code_addr - old_addr;
    lispobj fixups = NIL;
    struct vector *fixups_vector;

    ncode_words = fixnum_word_value(new_code->code_size);
    nheader_words = HeaderValue(*(lispobj *)new_code);
    nwords = ncode_words + nheader_words;
    /* FSHOW((stderr,
             "/compiled code object at %x: header words = %d, code words = %d\n",
             new_code, nheader_words, ncode_words)); */
    constants_start_addr = code_addr + 5*N_WORD_BYTES;
    constants_end_addr = code_addr + nheader_words*N_WORD_BYTES;
    code_start_addr = code_addr + nheader_words*N_WORD_BYTES;
    code_end_addr = code_addr + nwords*N_WORD_BYTES;
    /*
    FSHOW((stderr,
           "/const start = %x, end = %x\n",
           constants_start_addr,constants_end_addr));
    FSHOW((stderr,
           "/code start = %x; end = %x\n",
           code_start_addr,code_end_addr));
    */

    /* The first constant should be a pointer to the fixups for this
       code object. Check. */
    fixups = new_code->constants[0];

    /* It will be 0 or the unbound-marker if there are no fixups (as
     * will be the case if the code object has been purified, for
     * example) and will be an other pointer if it is valid. */
    if ((fixups == 0) || (fixups == UNBOUND_MARKER_WIDETAG) ||
        !is_lisp_pointer(fixups)) {
        /* Check for possible errors. */
        if (check_code_fixups)
            sniff_code_object(new_code, displacement);

        return;
    }

    fixups_vector = (struct vector *)native_pointer(fixups);

    /* Could be pointing to a forwarding pointer. */
    /* FIXME is this always in from_space? if so, could replace this code with
     * forwarding_pointer_p/forwarding_pointer_value */
    if (is_lisp_pointer(fixups) &&
        (find_page_index((void*)fixups_vector) != -1) &&
        (fixups_vector->header == 0x01)) {
        /* If so, then follow it. */
        /*SHOW("following pointer to a forwarding pointer");*/
        fixups_vector =
            (struct vector *)native_pointer((lispobj)fixups_vector->length);
    }

    /*SHOW("got fixups");*/

    if (widetag_of(fixups_vector->header) == SIMPLE_ARRAY_WORD_WIDETAG) {
        /* Got the fixups for the code block. Now work through the vector,
           and apply a fixup at each address. */
        sword_t length = fixnum_value(fixups_vector->length);
        sword_t i;
        for (i = 0; i < length; i++) {
            long offset = fixups_vector->data[i];
            /* Now check the current value of offset. */
            os_vm_address_t old_value = *(os_vm_address_t *)(code_start_addr + offset);

            /* If it's within the old_code object then it must be an
             * absolute fixup (relative ones are not saved) */
            if ((old_value >= old_addr)
                && (old_value < (old_addr + nwords*N_WORD_BYTES)))
                /* So add the displacement. */
                *(os_vm_address_t *)(code_start_addr + offset) =
                    old_value + displacement;
            else
                /* It is outside the old code object so it must be a
                 * relative fixup (absolute fixups are not saved). So
                 * subtract the displacement. */
                *(os_vm_address_t *)(code_start_addr + offset) =
                    old_value - displacement;
        }
    } else {
        /* This used to just print a note to stderr, but a bogus fixup seems to
         * indicate real heap corruption, so a hard failure is in order. */
        lose("fixup vector %p has a bad widetag: %d\n",
             fixups_vector, widetag_of(fixups_vector->header));
    }

    /* Check for possible errors. */
    if (check_code_fixups) {
        sniff_code_object(new_code,displacement);
    }
}
#endif
static lispobj
trans_boxed_large(lispobj object)
{
    lispobj header;
    uword_t length;

    gc_assert(is_lisp_pointer(object));

    header = *((lispobj *) native_pointer(object));
    length = HeaderValue(header) + 1;
    length = CEILING(length, 2);

    return copy_large_object(object, length);
}
/* Doesn't seem to be used, delete it after the grace period. */
#if 0
static lispobj
trans_unboxed_large(lispobj object)
{
    lispobj header;
    uword_t length;

    gc_assert(is_lisp_pointer(object));

    header = *((lispobj *) native_pointer(object));
    length = HeaderValue(header) + 1;
    length = CEILING(length, 2);

    return copy_large_unboxed_object(object, length);
}
#endif
/*
 * weak pointers
 */

/* XX This is a hack adapted from cgc.c. These don't work too
 * efficiently with the gencgc as a list of the weak pointers is
 * maintained within the objects, which causes writes to the pages. A
 * limited attempt is made to avoid unnecessary writes, but this needs
 * a re-think. */
#define WEAK_POINTER_NWORDS \
    CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)

static sword_t
scav_weak_pointer(lispobj *where, lispobj object)
{
    /* Since we overwrite the 'next' field, we have to make
     * sure not to do so for pointers already in the list.
     * Instead of searching the list of weak_pointers each
     * time, we ensure that next is always NULL when the weak
     * pointer isn't in the list, and not NULL otherwise.
     * Since we can't use NULL to denote end of list, we
     * use a pointer back to the same weak_pointer.
     */
    struct weak_pointer * wp = (struct weak_pointer*)where;

    if (NULL == wp->next) {
        wp->next = weak_pointers;
        weak_pointers = wp;
        if (NULL == wp->next)
            wp->next = wp;
    }

    /* Do not let GC scavenge the value slot of the weak pointer.
     * (That is why it is a weak pointer.) */

    return WEAK_POINTER_NWORDS;
}
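
/* A sketch of how the list built above can be walked after scavenging,
 * relying on the sentinel convention (wp->next == wp marks the last
 * entry). The walker below is hypothetical and for illustration only;
 * the real post-GC pass additionally breaks weak pointers whose
 * targets did not survive. */
#if 0
static void
walk_weak_pointer_list(void (*visit)(struct weak_pointer *))
{
    struct weak_pointer *wp = weak_pointers;
    while (wp != NULL) {
        struct weak_pointer *next = wp->next;
        visit(wp);
        wp->next = NULL;                  /* restore "not in list" state */
        wp = (next == wp) ? NULL : next;  /* self-pointer ends the list */
    }
}
#endif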
lispobj *
search_read_only_space(void *pointer)
{
    lispobj *start = (lispobj *) READ_ONLY_SPACE_START;
    lispobj *end = (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0);
    if ((pointer < (void *)start) || (pointer >= (void *)end))
        return NULL;
    return (gc_search_space(start,
                            (((lispobj *)pointer)+2)-start,
                            (lispobj *) pointer));
}
lispobj *
search_static_space(void *pointer)
{
    lispobj *start = (lispobj *)STATIC_SPACE_START;
    lispobj *end = (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0);
    if ((pointer < (void *)start) || (pointer >= (void *)end))
        return NULL;
    return (gc_search_space(start,
                            (((lispobj *)pointer)+2)-start,
                            (lispobj *) pointer));
}
/* a faster version for searching the dynamic space. This will work even
 * if the object is in a current allocation region. */
lispobj *
search_dynamic_space(void *pointer)
{
    page_index_t page_index = find_page_index(pointer);
    lispobj *start;

    /* The address may be invalid, so do some checks. */
    if ((page_index == -1) || page_free_p(page_index))
        return NULL;
    start = (lispobj *)page_scan_start(page_index);
    return (gc_search_space(start,
                            (((lispobj *)pointer)+2)-start,
                            (lispobj *)pointer));
}
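
/* Usage sketch: resolving an arbitrary address to the start of its
 * containing object by trying each space in turn. The combined helper
 * is hypothetical; only the three search functions above are real. */
#if 0
static lispobj *
search_all_spaces(void *pointer)
{
    lispobj *start;
    if ((start = search_dynamic_space(pointer)) != NULL)
        return start;
    if ((start = search_read_only_space(pointer)) != NULL)
        return start;
    return search_static_space(pointer);
}
#endif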
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)

/* Is there any possibility that pointer is a valid Lisp object
 * reference, and/or something else (e.g. subroutine call return
 * address) which should prevent us from moving the referred-to thing?
 * This is called from preserve_pointer(). */
static int
possibly_valid_dynamic_space_pointer_s(lispobj *pointer,
                                       page_index_t addr_page_index,
                                       lispobj **store_here)
{
    lispobj *start_addr;

    /* Find the object start address. */
    start_addr = search_dynamic_space(pointer);

    if (start_addr == NULL) {
        return 0;
    }
    if (store_here) {
        *store_here = start_addr;
    }

    /* If the containing object is a code object, presume that the
     * pointer is valid, simply because it could be an unboxed return
     * address. */
    if (widetag_of(*start_addr) == CODE_HEADER_WIDETAG)
        return 1;

    /* Large object pages only contain ONE object, and it will never
     * be a CONS. However, arrays and bignums can be allocated larger
     * than necessary and then shrunk to fit, leaving what look like
     * (0 . 0) CONSes at the end. These appear valid to
     * looks_like_valid_lisp_pointer_p(), so pick them off here. */
    if (page_table[addr_page_index].large_object &&
        (lowtag_of((lispobj)pointer) == LIST_POINTER_LOWTAG))
        return 0;

    return looks_like_valid_lisp_pointer_p((lispobj)pointer, start_addr);
}

#endif // defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
static int
valid_conservative_root_p(void *addr, page_index_t addr_page_index,
                          lispobj **begin_ptr)
{
#ifdef GENCGC_IS_PRECISE
    /* If we're in precise gencgc (non-x86oid as of this writing) then
     * we are only called on valid object pointers in the first place,
     * so we just have to do a bounds-check against the heap, a
     * generation check, and the already-pinned check. */
    if ((addr_page_index == -1)
        || (page_table[addr_page_index].gen != from_space)
        || (page_table[addr_page_index].dont_move != 0))
        return 0;
#else
    /* quick check 1: Address is quite likely to have been invalid. */
    if ((addr_page_index == -1)
        || page_free_p(addr_page_index)
        || (page_table[addr_page_index].bytes_used == 0)
        || (page_table[addr_page_index].gen != from_space))
        return 0;
    gc_assert(!(page_table[addr_page_index].allocated&OPEN_REGION_PAGE_FLAG));

    /* quick check 2: Check the offset within the page. */
    if (((uword_t)addr & (GENCGC_CARD_BYTES - 1)) >
        page_table[addr_page_index].bytes_used)
        return 0;

    /* Filter out anything which can't be a pointer to a Lisp object
     * (or, as a special case which also requires dont_move, a return
     * address referring to something in a CodeObject). This is
     * expensive but important, since it vastly reduces the
     * probability that random garbage will be bogusly interpreted as
     * a pointer which prevents a page from moving. */
    if (!possibly_valid_dynamic_space_pointer_s(addr, addr_page_index,
                                                begin_ptr))
        return 0;
#endif

    return 1;
}
boolean
in_dontmove_dwordindex_p(page_index_t page_index, int dword_in_page)
{
    in_use_marker_t *marker;
    marker = dontmove_dwords(page_index);
    if (marker)
        return marker[dword_in_page];
    return 0;
}
boolean
in_dontmove_nativeptr_p(page_index_t page_index, lispobj *native_ptr)
{
    if (dontmove_dwords(page_index)) {
        lispobj *begin = page_address(page_index);
        int dword_in_page = (native_ptr - begin) / 2;
        return in_dontmove_dwordindex_p(page_index, dword_in_page);
    } else {
        return 0;
    }
}
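
/* Worked example of the pointer-to-dword-index mapping used above:
 * each dword covers two words, so a pointer N words past the start of
 * its page lands in dword N/2. With (for example) 32768-byte cards and
 * 8-byte words, a card holds 32768/8/2 = 2048 dwords, which is the
 * GENCGC_CARD_BYTES / N_WORD_BYTES / 2 loop bound used below. Sketch
 * only; not called anywhere. */
#if 0
static void
dword_index_example(page_index_t page)
{
    lispobj *begin = page_address(page);
    lispobj *ptr = begin + 48;        /* 48 words into the page... */
    int dword = (ptr - begin) / 2;    /* ...is dword index 24 */
    gc_assert(in_dontmove_dwordindex_p(page, dword) ==
              in_dontmove_nativeptr_p(page, ptr));
}
#endif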
/* Adjust large bignum and vector objects. This will adjust the
 * allocated region if the size has shrunk, and move unboxed objects
 * into unboxed pages. The pages are not promoted here, and the
 * promoted region is not added to the new_regions; this is really
 * only designed to be called from preserve_pointer(). Shouldn't fail
 * if this is missed, just may delay the moving of objects to unboxed
 * pages, and the freeing of pages. */
static void
maybe_adjust_large_object(lispobj *where)
{
    page_index_t first_page;
    page_index_t next_page;
    sword_t nwords;

    uword_t remaining_bytes;
    uword_t bytes_freed;
    uword_t old_bytes_used;

    int boxed;

    /* Check whether it's a vector or bignum object. */
    switch (widetag_of(where[0])) {
    case SIMPLE_VECTOR_WIDETAG:
        boxed = BOXED_PAGE_FLAG;
        break;
    case BIGNUM_WIDETAG:
    case SIMPLE_BASE_STRING_WIDETAG:
#ifdef SIMPLE_CHARACTER_STRING_WIDETAG
    case SIMPLE_CHARACTER_STRING_WIDETAG:
#endif
    case SIMPLE_BIT_VECTOR_WIDETAG:
    case SIMPLE_ARRAY_NIL_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_FIXNUM_WIDETAG
    case SIMPLE_ARRAY_UNSIGNED_FIXNUM_WIDETAG:
#endif
    case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
    case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
    case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_FIXNUM_WIDETAG
    case SIMPLE_ARRAY_FIXNUM_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
#endif
    case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
    case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
#ifdef SIMPLE_ARRAY_LONG_FLOAT_WIDETAG
    case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
    case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
    case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
    case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
#endif
        boxed = UNBOXED_PAGE_FLAG;
        break;
    default:
        return;
    }

    /* Find its current size. */
    nwords = (sizetab[widetag_of(where[0])])(where);

    first_page = find_page_index((void *)where);
    gc_assert(first_page >= 0);

    /* Note: Any page write-protection must be removed, else a later
     * scavenge_newspace may incorrectly not scavenge these pages.
     * This would not be necessary if they are added to the new areas,
     * but let's do it for them all (they'll probably be written
     * anyway?). */

    gc_assert(page_starts_contiguous_block_p(first_page));

    next_page = first_page;
    remaining_bytes = nwords*N_WORD_BYTES;
    while (remaining_bytes > GENCGC_CARD_BYTES) {
        gc_assert(page_table[next_page].gen == from_space);
        gc_assert(page_allocated_no_region_p(next_page));
        gc_assert(page_table[next_page].large_object);
        gc_assert(page_table[next_page].scan_start_offset ==
                  npage_bytes(next_page-first_page));
        gc_assert(page_table[next_page].bytes_used == GENCGC_CARD_BYTES);

        page_table[next_page].allocated = boxed;

        /* Shouldn't be write-protected at this stage. Essential that the
         * pages aren't. */
        gc_assert(!page_table[next_page].write_protected);
        remaining_bytes -= GENCGC_CARD_BYTES;
        next_page++;
    }

    /* Now only one page remains, but the object may have shrunk so
     * there may be more unused pages which will be freed. */

    /* Object may have shrunk but shouldn't have grown - check. */
    gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

    page_table[next_page].allocated = boxed;
    gc_assert(page_table[next_page].allocated ==
              page_table[first_page].allocated);

    /* Adjust the bytes_used. */
    old_bytes_used = page_table[next_page].bytes_used;
    page_table[next_page].bytes_used = remaining_bytes;

    bytes_freed = old_bytes_used - remaining_bytes;

    /* Free any remaining pages; needs care. */
    next_page++;
    while ((old_bytes_used == GENCGC_CARD_BYTES) &&
           (page_table[next_page].gen == from_space) &&
           page_allocated_no_region_p(next_page) &&
           page_table[next_page].large_object &&
           (page_table[next_page].scan_start_offset ==
            npage_bytes(next_page - first_page))) {
        /* It checks out OK, free the page. We don't need to bother
         * zeroing pages as this should have been done before shrinking
         * the object. These pages shouldn't be write-protected as they
         * should be zero filled. */
        gc_assert(page_table[next_page].write_protected == 0);

        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].allocated = FREE_PAGE_FLAG;
        page_table[next_page].bytes_used = 0;
        bytes_freed += old_bytes_used;
        next_page++;
    }

    if ((bytes_freed > 0) && gencgc_verbose) {
        FSHOW((stderr,
               "/maybe_adjust_large_object() freed %d\n",
               bytes_freed));
    }

    generations[from_space].bytes_allocated -= bytes_freed;
    bytes_allocated -= bytes_freed;
}
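
/* The scan_start_offset invariant that the assertions above rely on,
 * in isolated form: every page of a multi-page object records its byte
 * distance back to the object's first page. Hypothetical checker, for
 * illustration only. */
#if 0
static void
check_large_object_offsets(page_index_t first, page_index_t last)
{
    page_index_t p;
    for (p = first; p <= last; p++)
        gc_assert(page_table[p].scan_start_offset == npage_bytes(p - first));
}
#endif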
/* Why is this restricted to protected objects only?
 * Because the rest of the page has been scavenged already,
 * and since that leaves forwarding pointers in the unprotected
 * areas you cannot scavenge it again until those are gone. */
static void
scavenge_pages_with_conservative_pointers_to_them_protected_objects_only()
{
    page_index_t i;
    for (i = 0; i < last_free_page; i++) {
        if (!dontmove_dwords(i)) {
            continue;
        }
        lispobj *begin = page_address(i);
        unsigned int dword;

        lispobj *scavme_begin = NULL;
        for (dword = 0; dword < GENCGC_CARD_BYTES / N_WORD_BYTES / 2; dword++) {
            if (in_dontmove_dwordindex_p(i, dword)) {
                /* A marked dword: start or extend the run to scavenge. */
                if (!scavme_begin) {
                    scavme_begin = begin + dword * 2;
                }
            } else {
                // contiguous area stopped
                if (scavme_begin) {
                    scavenge(scavme_begin, (begin + dword * 2) - scavme_begin);
                }
                scavme_begin = NULL;
            }
        }
        if (scavme_begin) {
            scavenge(scavme_begin, (begin + dword * 2) - scavme_begin);
        }
    }
}
int verbosefixes = 0;
void
do_the_wipe()
{
    page_index_t i;
    lispobj *begin;
    int words_wiped = 0;
    int lisp_pointers_wiped = 0;
    int pages_considered = 0;
    int n_pages_cannot_wipe = 0;

    for (i = 0; i < last_free_page; i++) {
        if (!page_table[i].dont_move) {
            continue;
        }
        pages_considered++;
        if (!dontmove_dwords(i)) {
            n_pages_cannot_wipe++;
            continue;
        }
        begin = page_address(i);
        unsigned int dword;
        for (dword = 0; dword < GENCGC_CARD_BYTES / N_WORD_BYTES / 2; dword++) {
            if (!in_dontmove_dwordindex_p(i, dword)) {
                if (is_lisp_pointer(*(begin + dword * 2))) {
                    lisp_pointers_wiped++;
                }
                if (is_lisp_pointer(*(begin + dword * 2 + 1))) {
                    lisp_pointers_wiped++;
                }
                *(begin + dword * 2) = wipe_with;
                *(begin + dword * 2 + 1) = wipe_with;
                words_wiped += 2;
            }
        }
        page_table[i].has_dontmove_dwords = 0;

        // move the page to newspace
        generations[new_space].bytes_allocated += page_table[i].bytes_used;
        generations[page_table[i].gen].bytes_allocated -= page_table[i].bytes_used;
        page_table[i].gen = new_space;
    }
#ifndef LISP_FEATURE_WIN32
    madvise(page_table_dontmove_dwords,
            page_table_dontmove_dwords_size_in_bytes, MADV_DONTNEED);
#endif
    if ((verbosefixes >= 1 && lisp_pointers_wiped > 0) || verbosefixes >= 2) {
        fprintf(stderr,
                "gencgc: wiped %d words (%d lisp_pointers) in %d pages, cannot wipe %d pages\n",
                words_wiped, lisp_pointers_wiped, pages_considered,
                n_pages_cannot_wipe);
    }
}
void
set_page_consi_bit(page_index_t pageindex, lispobj *mark_which_pointer)
{
    struct page *page = &page_table[pageindex];

    if (!do_wipe_p)
        return;

    gc_assert(mark_which_pointer);
    if (!page->has_dontmove_dwords) {
        page->has_dontmove_dwords = 1;
        bzero(dontmove_dwords(pageindex),
              sizeof(in_use_marker_t) * n_dwords_in_card);
    }

    int size = (sizetab[widetag_of(mark_which_pointer[0])])(mark_which_pointer);
    if (size == 1 &&
        (fixnump(*mark_which_pointer) ||
         is_lisp_pointer(*mark_which_pointer) ||
         lowtag_of(*mark_which_pointer) == 9 ||
         lowtag_of(*mark_which_pointer) == 2)) {
        size = 2;
    }
    if (size % 2 != 0) {
        fprintf(stderr, "WIPE ERROR !dword, size %d, lowtag %d, word 0x%llx\n",
                size,
                lowtag_of(*mark_which_pointer),
                (long long)*mark_which_pointer);
    }
    gc_assert(size % 2 == 0);
    lispobj *begin = page_address(pageindex);
    int begin_dword = (mark_which_pointer - begin) / 2;
    int dword;
    in_use_marker_t *marker = dontmove_dwords(pageindex);
    for (dword = begin_dword; dword < begin_dword + size / 2; dword++) {
        marker[dword] = 1;
    }
}
/* Take a possible pointer to a Lisp object and mark its page in the
 * page_table so that it will not be relocated during a GC.
 *
 * This involves locating the page it points to, then backing up to
 * the start of its region, then marking all pages dont_move from there
 * up to the first page that's not full or has a different generation.
 *
 * It is assumed that all the page static flags have been cleared at
 * the start of a GC.
 *
 * It is also assumed that the current gc_alloc() region has been
 * flushed and the tables updated. */

static void
preserve_pointer(void *addr)
{
    page_index_t addr_page_index = find_page_index(addr);
    page_index_t first_page;
    page_index_t i;
    unsigned int region_allocation;
    lispobj *begin_ptr = NULL;

    if (!valid_conservative_root_p(addr, addr_page_index, &begin_ptr))
        return;

    /* (Now that we know that addr_page_index is in range, it's
     * safe to index into page_table[] with it.) */
    region_allocation = page_table[addr_page_index].allocated;

    /* Find the beginning of the region. Note that there may be
     * objects in the region preceding the one that we were passed a
     * pointer to: if this is the case, we will write-protect all the
     * previous objects' pages too. */

#if 0
    /* I think this'd work just as well, but without the assertions.
     * -dan 2004.01.01 */
    first_page = find_page_index(page_scan_start(addr_page_index))
#else
    first_page = addr_page_index;
    while (!page_starts_contiguous_block_p(first_page)) {
        --first_page;
        /* Do some checks. */
        gc_assert(page_table[first_page].bytes_used == GENCGC_CARD_BYTES);
        gc_assert(page_table[first_page].gen == from_space);
        gc_assert(page_table[first_page].allocated == region_allocation);
    }
#endif

    /* Adjust any large objects before promotion as they won't be
     * copied after promotion. */
    if (page_table[first_page].large_object) {
        maybe_adjust_large_object(page_address(first_page));
        /* It may have moved to unboxed pages. */
        region_allocation = page_table[first_page].allocated;
    }

    /* Now work forward until the end of this contiguous area is found,
     * marking all pages as dont_move. */
    for (i = first_page; ;i++) {
        gc_assert(page_table[i].allocated == region_allocation);

        /* Mark the page static. */
        page_table[i].dont_move = 1;

        /* It is essential that the pages are not write-protected as
         * they may have pointers into the old-space which need
         * scavenging. They shouldn't be write-protected at this
         * stage. */
        gc_assert(!page_table[i].write_protected);

        /* Check whether this is the last page in this contiguous block. */
        if (page_ends_contiguous_block_p(i, from_space))
            break;
    }

#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    /* Do not do this for multi-page objects. Those pages do not need
     * object wipeout anyway. */
    if (i == first_page) {
        /* We need the pointer to the beginning of the object.
         * We might have gotten it above, but maybe not, so make sure. */
        if (begin_ptr == NULL) {
            possibly_valid_dynamic_space_pointer_s(addr, first_page,
                                                   &begin_ptr);
        }
        set_page_consi_bit(first_page, begin_ptr);
    }
#endif

    /* Check that the page is now static. */
    gc_assert(page_table[addr_page_index].dont_move != 0);
}
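
/* Usage sketch: how a caller conservatively pins everything that a raw
 * word range might reference, in the same style as the control-stack
 * scan in garbage_collect_generation() below. The helper itself is
 * hypothetical. */
#if 0
static void
preserve_range(void **from, void **to)      /* half-open: [from, to) */
{
    void **ptr;
    for (ptr = from; ptr < to; ptr++)
        preserve_pointer(*ptr);             /* no-op for non-roots */
}
#endif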
/* If the given page is not write-protected, then scan it for pointers
 * to younger generations or the top temp. generation; if no
 * suspicious pointers are found then the page is write-protected.
 *
 * Care is taken to check for pointers to the current gc_alloc()
 * region if it is a younger generation or the temp. generation. This
 * frees the caller from doing a gc_alloc_update_page_tables(). Actually
 * the gc_alloc_generation does not need to be checked as this is only
 * called from scavenge_generation() when the gc_alloc generation is
 * younger, so it just checks if there is a pointer to the current
 * region.
 *
 * We return 1 if the page was write-protected, else 0. */
static int
update_page_write_prot(page_index_t page)
{
    generation_index_t gen = page_table[page].gen;
    sword_t j;
    int wp_it = 1;
    void **page_addr = (void **)page_address(page);
    sword_t num_words = page_table[page].bytes_used / N_WORD_BYTES;

    /* Shouldn't be a free page. */
    gc_assert(page_allocated_p(page));
    gc_assert(page_table[page].bytes_used != 0);

    /* Skip if it's already write-protected, pinned, or unboxed */
    if (page_table[page].write_protected
        /* FIXME: What's the reason for not write-protecting pinned pages? */
        || page_table[page].dont_move
        || page_unboxed_p(page))
        return (0);

    /* Scan the page for pointers to younger generations or the
     * top temp. generation. */

    /* This is conservative: any word satisfying is_lisp_pointer() is
     * assumed to be a pointer, even though it might be machine code
     * or part of an unboxed array. */
    for (j = 0; j < num_words; j++) {
        void *ptr = *(page_addr+j);
        page_index_t index;

        /* Check that it's in the dynamic space */
        if (is_lisp_pointer((lispobj)ptr) && (index = find_page_index(ptr)) != -1)
            if (/* Does it point to a younger or the temp. generation? */
                (page_allocated_p(index)
                 && (page_table[index].bytes_used != 0)
                 && ((page_table[index].gen < gen)
                     || (page_table[index].gen == SCRATCH_GENERATION)))

                /* Or does it point within a current gc_alloc() region? */
                || ((boxed_region.start_addr <= ptr)
                    && (ptr <= boxed_region.free_pointer))
                || ((unboxed_region.start_addr <= ptr)
                    && (ptr <= unboxed_region.free_pointer))) {
                wp_it = 0;
                break;
            }
    }

    if (wp_it == 1) {
        /* Write-protect the page. */
        /*FSHOW((stderr, "/write-protecting page %d gen %d\n", page, gen));*/

        os_protect((void *)page_addr,
                   GENCGC_CARD_BYTES,
                   OS_VM_PROT_READ|OS_VM_PROT_EXECUTE);

        /* Note the page as protected in the page tables. */
        page_table[page].write_protected = 1;
    }

    return (wp_it);
}
/* Scavenge all generations from FROM to TO, inclusive, except for
 * new_space, which needs special handling, as new objects may be
 * added which are not checked here - use scavenge_newspace_generation().
 *
 * Write-protected pages should not have any pointers to the
 * from_space and so do not need scavenging; thus write-protected pages
 * are not always scavenged. There is some code to check that these
 * pages are not written; but to check fully, the write-protected pages
 * need to be scavenged by disabling the code to skip them.
 *
 * Under the current scheme when a generation is GCed the younger
 * generations will be empty. So, when a generation is being GCed it
 * is only necessary to scavenge the older generations for pointers,
 * not the younger. So a page that does not have pointers to younger
 * generations does not need to be scavenged.
 *
 * The write-protection can be used to note pages that don't have
 * pointers to younger pages. But pages can be written without having
 * pointers to younger generations. After the pages are scavenged here
 * they can be scanned for pointers to younger generations, and if
 * there are none the page can be write-protected.
 *
 * One complication is when the newspace is the top temp. generation.
 *
 * Enabling SC_GEN_CK scavenges the write-protected pages and checks
 * that none were written, which they shouldn't be as they should have
 * no pointers to younger generations. This breaks down for weak
 * pointers as the objects contain a link to the next and are written
 * if a weak pointer is scavenged. Still it's a useful check. */
static void
scavenge_generations(generation_index_t from, generation_index_t to)
{
    page_index_t i;
    page_index_t num_wp = 0;

#define SC_GEN_CK 0
#if SC_GEN_CK
    /* Clear the write_protected_cleared flags on all pages. */
    for (i = 0; i < page_table_pages; i++)
        page_table[i].write_protected_cleared = 0;
#endif

    for (i = 0; i < last_free_page; i++) {
        generation_index_t generation = page_table[i].gen;
        if (page_boxed_p(i)
            && (page_table[i].bytes_used != 0)
            && (generation != new_space)
            && (generation >= from)
            && (generation <= to)) {
            page_index_t last_page,j;
            int write_protected=1;

            /* This should be the start of a region */
            gc_assert(page_starts_contiguous_block_p(i));

            /* Now work forward until the end of the region */
            for (last_page = i; ; last_page++) {
                write_protected =
                    write_protected && page_table[last_page].write_protected;
                if (page_ends_contiguous_block_p(last_page, generation))
                    break;
            }
            if (!write_protected) {
                scavenge(page_address(i),
                         ((uword_t)(page_table[last_page].bytes_used
                                    + npage_bytes(last_page-i)))
                         /N_WORD_BYTES);

                /* Now scan the pages and write protect those that
                 * don't have pointers to younger generations. */
                if (enable_page_protection) {
                    for (j = i; j <= last_page; j++) {
                        num_wp += update_page_write_prot(j);
                    }
                }
                if ((gencgc_verbose > 1) && (num_wp != 0)) {
                    FSHOW((stderr,
                           "/write protected %d pages within generation %d\n",
                           num_wp, generation));
                }
            }
            i = last_page;
        }
    }

#if SC_GEN_CK
    /* Check that none of the write_protected pages in this generation
     * have been written to. */
    for (i = 0; i < page_table_pages; i++) {
        if (page_allocated_p(i)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)
            && (page_table[i].write_protected_cleared != 0)) {
            FSHOW((stderr, "/scavenge_generation() %d\n", generation));
            FSHOW((stderr,
                   "/page bytes_used=%d scan_start_offset=%lu dont_move=%d\n",
                   page_table[i].bytes_used,
                   page_table[i].scan_start_offset,
                   page_table[i].dont_move));
            lose("write to protected page %d in scavenge_generation()\n", i);
        }
    }
#endif
}
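
/* The contiguous-block walk used above, in isolation: starting from a
 * page that begins a block, find the block's last page. A sketch that
 * mirrors the inner loop of scavenge_generations(); not used. */
#if 0
static page_index_t
contiguous_block_end(page_index_t first, generation_index_t gen)
{
    page_index_t last;
    for (last = first; !page_ends_contiguous_block_p(last, gen); last++)
        ;
    return last;
}
#endif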
/* Scavenge a newspace generation. As it is scavenged, new objects may
 * be allocated to it; these will also need to be scavenged. This
 * repeats until there are no more objects unscavenged in the
 * newspace generation.
 *
 * To help improve the efficiency, areas written are recorded by
 * gc_alloc() and only these scavenged. Sometimes a little more will be
 * scavenged, but this causes no harm. An easy check is done that the
 * scavenged bytes equal the number allocated in the previous
 * scavenge.
 *
 * Write-protected pages are not scanned except if they are marked
 * dont_move, in which case they may have been promoted and still have
 * pointers to the from space.
 *
 * Write-protected pages could potentially be written by alloc; however,
 * to avoid having to handle re-scavenging of write-protected pages,
 * gc_alloc() does not write to write-protected pages.
 *
 * New areas of objects allocated are recorded alternately in the two
 * new_areas arrays below. */
static struct new_area new_areas_1[NUM_NEW_AREAS];
static struct new_area new_areas_2[NUM_NEW_AREAS];
2758 scavenge_newspace_generation_one_scan(generation_index_t generation
)
2763 "/starting one full scan of newspace generation %d\n",
2765 for (i
= 0; i
< last_free_page
; i
++) {
2766 /* Note that this skips over open regions when it encounters them. */
2768 && (page_table
[i
].bytes_used
!= 0)
2769 && (page_table
[i
].gen
== generation
)
2770 && ((page_table
[i
].write_protected
== 0)
2771 /* (This may be redundant as write_protected is now
2772 * cleared before promotion.) */
2773 || (page_table
[i
].dont_move
== 1))) {
2774 page_index_t last_page
;
2777 /* The scavenge will start at the scan_start_offset of
2780 * We need to find the full extent of this contiguous
2781 * block in case objects span pages.
2783 * Now work forward until the end of this contiguous area
2784 * is found. A small area is preferred as there is a
2785 * better chance of its pages being write-protected. */
2786 for (last_page
= i
; ;last_page
++) {
2787 /* If all pages are write-protected and movable,
2788 * then no need to scavenge */
2789 all_wp
=all_wp
&& page_table
[last_page
].write_protected
&&
2790 !page_table
[last_page
].dont_move
;
2792 /* Check whether this is the last page in this
2793 * contiguous block */
2794 if (page_ends_contiguous_block_p(last_page
, generation
))
2798 /* Do a limited check for write-protected pages. */
2800 sword_t nwords
= (((uword_t
)
2801 (page_table
[last_page
].bytes_used
2802 + npage_bytes(last_page
-i
)
2803 + page_table
[i
].scan_start_offset
))
2805 new_areas_ignore_page
= last_page
;
2807 scavenge(page_scan_start(i
), nwords
);
2814 "/done with one full scan of newspace generation %d\n",
/* Do a complete scavenge of the newspace generation. */
static void
scavenge_newspace_generation(generation_index_t generation)
{
    size_t i;

    /* the new_areas array currently being written to by gc_alloc() */
    struct new_area (*current_new_areas)[] = &new_areas_1;
    size_t current_new_areas_index;

    /* the new_areas created by the previous scavenge cycle */
    struct new_area (*previous_new_areas)[] = NULL;
    size_t previous_new_areas_index;

    /* Flush the current regions updating the tables. */
    gc_alloc_update_all_page_tables();

    /* Turn on the recording of new areas by gc_alloc(). */
    new_areas = current_new_areas;
    new_areas_index = 0;

    /* Don't need to record new areas that get scavenged anyway during
     * scavenge_newspace_generation_one_scan. */
    record_new_objects = 1;

    /* Start with a full scavenge. */
    scavenge_newspace_generation_one_scan(generation);

    /* Record all new areas now. */
    record_new_objects = 2;

    /* Give a chance to weak hash tables to make other objects live.
     * FIXME: The algorithm implemented here for weak hash table gcing
     * is O(W^2+N) as Bruno Haible warns in
     * http://www.haible.de/bruno/papers/cs/weak/WeakDatastructures-writeup.html
     * see "Implementation 2". */
    scav_weak_hash_tables();

    /* Flush the current regions updating the tables. */
    gc_alloc_update_all_page_tables();

    /* Grab new_areas_index. */
    current_new_areas_index = new_areas_index;

    /*FSHOW((stderr,
             "The first scan is finished; current_new_areas_index=%d.\n",
             current_new_areas_index));*/

    while (current_new_areas_index > 0) {
        /* Move the current to the previous new areas */
        previous_new_areas = current_new_areas;
        previous_new_areas_index = current_new_areas_index;

        /* Scavenge all the areas in previous new areas. Any new areas
         * allocated are saved in current_new_areas. */

        /* Allocate an array for current_new_areas; alternating between
         * new_areas_1 and 2 */
        if (previous_new_areas == &new_areas_1)
            current_new_areas = &new_areas_2;
        else
            current_new_areas = &new_areas_1;

        /* Set up for gc_alloc(). */
        new_areas = current_new_areas;
        new_areas_index = 0;

        /* Check whether previous_new_areas had overflowed. */
        if (previous_new_areas_index >= NUM_NEW_AREAS) {

            /* New areas of objects allocated have been lost so need to do a
             * full scan to be sure! If this becomes a problem try
             * increasing NUM_NEW_AREAS. */
            if (gencgc_verbose) {
                SHOW("new_areas overflow, doing full scavenge");
            }

            /* Don't need to record new areas that get scavenged
             * anyway during scavenge_newspace_generation_one_scan. */
            record_new_objects = 1;

            scavenge_newspace_generation_one_scan(generation);

            /* Record all new areas now. */
            record_new_objects = 2;

            scav_weak_hash_tables();

            /* Flush the current regions updating the tables. */
            gc_alloc_update_all_page_tables();

        } else {

            /* Work through previous_new_areas. */
            for (i = 0; i < previous_new_areas_index; i++) {
                page_index_t page = (*previous_new_areas)[i].page;
                size_t offset = (*previous_new_areas)[i].offset;
                size_t size = (*previous_new_areas)[i].size / N_WORD_BYTES;
                gc_assert((*previous_new_areas)[i].size % N_WORD_BYTES == 0);
                scavenge(page_address(page)+offset, size);
            }

            scav_weak_hash_tables();

            /* Flush the current regions updating the tables. */
            gc_alloc_update_all_page_tables();
        }

        current_new_areas_index = new_areas_index;

        /*FSHOW((stderr,
                 "The re-scan has finished; current_new_areas_index=%d.\n",
                 current_new_areas_index));*/
    }

    /* Turn off recording of areas allocated by gc_alloc(). */
    record_new_objects = 0;

#if SC_NS_GEN_CK
    {
        page_index_t i;
        /* Check that none of the write_protected pages in this generation
         * have been written to. */
        for (i = 0; i < page_table_pages; i++) {
            if (page_allocated_p(i)
                && (page_table[i].bytes_used != 0)
                && (page_table[i].gen == generation)
                && (page_table[i].write_protected_cleared != 0)
                && (page_table[i].dont_move == 0)) {
                lose("write protected page %d written to in scavenge_newspace_generation\ngeneration=%d dont_move=%d\n",
                     i, generation, page_table[i].dont_move);
            }
        }
    }
#endif
}
/* Un-write-protect all the pages in from_space. This is done at the
 * start of a GC, else there may be many page faults while scavenging
 * the newspace (I've seen it drive the system time to 99%). These pages
 * would need to be unprotected anyway before unmapping in
 * free_oldspace; not sure what effect this has on paging. */
static void
unprotect_oldspace(void)
{
    page_index_t i;
    void *region_addr = 0;
    void *page_addr = 0;
    uword_t region_bytes = 0;

    for (i = 0; i < last_free_page; i++) {
        if (page_allocated_p(i)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == from_space)) {

            /* Remove any write-protection. We should be able to rely
             * on the write-protect flag to avoid redundant calls. */
            if (page_table[i].write_protected) {
                page_table[i].write_protected = 0;
                page_addr = page_address(i);
                if (!region_addr) {
                    /* First region. */
                    region_addr = page_addr;
                    region_bytes = GENCGC_CARD_BYTES;
                } else if (region_addr + region_bytes == page_addr) {
                    /* Region continues. */
                    region_bytes += GENCGC_CARD_BYTES;
                } else {
                    /* Unprotect previous region. */
                    os_protect(region_addr, region_bytes, OS_VM_PROT_ALL);
                    /* First page in new region. */
                    region_addr = page_addr;
                    region_bytes = GENCGC_CARD_BYTES;
                }
            }
        }
    }
    if (region_addr) {
        /* Unprotect last region. */
        os_protect(region_addr, region_bytes, OS_VM_PROT_ALL);
    }
}
/* Work through all the pages and free any in from_space. This
 * assumes that all objects have been copied or promoted to an older
 * generation. Bytes_allocated and the generation bytes_allocated
 * counter are updated. The number of bytes freed is returned. */
static uword_t
free_oldspace(void)
{
    uword_t bytes_freed = 0;
    page_index_t first_page, last_page;

    first_page = 0;

    do {
        /* Find a first page for the next region of pages. */
        while ((first_page < last_free_page)
               && (page_free_p(first_page)
                   || (page_table[first_page].bytes_used == 0)
                   || (page_table[first_page].gen != from_space)))
            first_page++;

        if (first_page >= last_free_page)
            break;

        /* Find the last page of this region. */
        last_page = first_page;

        do {
            /* Free the page. */
            bytes_freed += page_table[last_page].bytes_used;
            generations[page_table[last_page].gen].bytes_allocated -=
                page_table[last_page].bytes_used;
            page_table[last_page].allocated = FREE_PAGE_FLAG;
            page_table[last_page].bytes_used = 0;
            /* Should already be unprotected by unprotect_oldspace(). */
            gc_assert(!page_table[last_page].write_protected);
            last_page++;
        }
        while ((last_page < last_free_page)
               && page_allocated_p(last_page)
               && (page_table[last_page].bytes_used != 0)
               && (page_table[last_page].gen == from_space));

#ifdef READ_PROTECT_FREE_PAGES
        os_protect(page_address(first_page),
                   npage_bytes(last_page-first_page),
                   OS_VM_PROT_NONE);
#endif
        first_page = last_page;
    } while (first_page < last_free_page);

    bytes_allocated -= bytes_freed;
    return bytes_freed;
}
/* Print some information about a pointer at the given address. */
static void
print_ptr(lispobj *addr)
{
    /* If addr is in the dynamic space then print the page information. */
    page_index_t pi1 = find_page_index((void*)addr);

    if (pi1 != -1)
        fprintf(stderr,
                " %p: page %d alloc %d gen %d bytes_used %d offset %lu dont_move %d\n",
                addr,
                pi1,
                page_table[pi1].allocated,
                page_table[pi1].gen,
                page_table[pi1].bytes_used,
                page_table[pi1].scan_start_offset,
                page_table[pi1].dont_move);
    fprintf(stderr, " %x %x %x %x (%x) %x %x %x %x\n",
            *(addr-4),
            *(addr-3),
            *(addr-2),
            *(addr-1),
            *(addr-0),
            *(addr+1),
            *(addr+2),
            *(addr+3),
            *(addr+4));
}
static int
is_in_stack_space(lispobj ptr)
{
    /* For space verification: Pointers can be valid if they point
     * to a thread stack space. This would be faster if the thread
     * structures had page-table entries as if they were part of
     * the heap space. */
    struct thread *th;
    for_each_thread(th) {
        if ((th->control_stack_start <= (lispobj *)ptr) &&
            (th->control_stack_end >= (lispobj *)ptr)) {
            return 1;
        }
    }
    return 0;
}
static void
verify_space(lispobj *start, size_t words)
{
    int is_in_dynamic_space = (find_page_index((void*)start) != -1);
    int is_in_readonly_space =
        (READ_ONLY_SPACE_START <= (uword_t)start &&
         (uword_t)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));

    while (words > 0) {
        size_t count = 1;
        lispobj thing = *(lispobj*)start;

        if (is_lisp_pointer(thing)) {
            page_index_t page_index = find_page_index((void*)thing);
            sword_t to_readonly_space =
                (READ_ONLY_SPACE_START <= thing &&
                 thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
            sword_t to_static_space =
                (STATIC_SPACE_START <= thing &&
                 thing < SymbolValue(STATIC_SPACE_FREE_POINTER,0));

            /* Does it point to the dynamic space? */
            if (page_index != -1) {
                /* If it's within the dynamic space it should point to a used
                 * page. XX Could check the offset too. */
                if (page_allocated_p(page_index)
                    && (page_table[page_index].bytes_used == 0))
                    lose ("Ptr %p @ %p sees free page.\n", thing, start);
                /* Check that it doesn't point to a forwarding pointer! */
                if (*((lispobj *)native_pointer(thing)) == 0x01) {
                    lose("Ptr %p @ %p sees forwarding ptr.\n", thing, start);
                }
                /* Check that it's not in the RO space as it would then be a
                 * pointer from the RO to the dynamic space. */
                if (is_in_readonly_space) {
                    lose("ptr to dynamic space %p from RO space %x\n",
                         thing, start);
                }
                /* Does it point to a plausible object? This check slows
                 * it down a lot (so it's commented out).
                 *
                 * "a lot" is serious: it ate 50 minutes cpu time on
                 * my duron 950 before I came back from lunch and
                 * killed it.
                 *
                 * FIXME: Add a variable to enable this
                 * dynamically. */
                /*
                if (!possibly_valid_dynamic_space_pointer_s((lispobj *)thing, page_index, NULL)) {
                    lose("ptr %p to invalid object %p\n", thing, start);
                }
                */
            } else {
                extern void funcallable_instance_tramp;
                /* Verify that it points to another valid space. */
                if (!to_readonly_space && !to_static_space
                    && (thing != (lispobj)&funcallable_instance_tramp)
                    && !is_in_stack_space(thing)) {
                    lose("Ptr %p @ %p sees junk.\n", thing, start);
                }
            }
        } else {
            if (!(fixnump(thing))) {
                /* skip fixnums */
                switch(widetag_of(*start)) {

                    /* boxed objects */
                case SIMPLE_VECTOR_WIDETAG:
                case RATIO_WIDETAG:
                case COMPLEX_WIDETAG:
                case SIMPLE_ARRAY_WIDETAG:
                case COMPLEX_BASE_STRING_WIDETAG:
#ifdef COMPLEX_CHARACTER_STRING_WIDETAG
                case COMPLEX_CHARACTER_STRING_WIDETAG:
#endif
                case COMPLEX_VECTOR_NIL_WIDETAG:
                case COMPLEX_BIT_VECTOR_WIDETAG:
                case COMPLEX_VECTOR_WIDETAG:
                case COMPLEX_ARRAY_WIDETAG:
                case CLOSURE_HEADER_WIDETAG:
                case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
                case VALUE_CELL_HEADER_WIDETAG:
                case SYMBOL_HEADER_WIDETAG:
                case CHARACTER_WIDETAG:
#if N_WORD_BITS == 64
                case SINGLE_FLOAT_WIDETAG:
#endif
                case UNBOUND_MARKER_WIDETAG:
                case FDEFN_WIDETAG:
                    count = 1;
                    break;

                case INSTANCE_HEADER_WIDETAG:
                    {
                        sword_t ntotal = instance_length(thing);
                        lispobj layout = instance_layout(start);
                        if (!layout) {
                            count = 1;
                            break;
                        }
#ifdef LISP_FEATURE_INTERLEAVED_RAW_SLOTS
                        instance_scan_interleaved(verify_space,
                                                  start, ntotal,
                                                  native_pointer(layout));
#else
                        lispobj nuntagged;
                        nuntagged = ((struct layout *)
                                     native_pointer(layout))->n_untagged_slots;
                        verify_space(start + 1,
                                     ntotal - fixnum_value(nuntagged));
#endif
                        count = ntotal + 1;
                        break;
                    }
                case CODE_HEADER_WIDETAG:
                    {
                        lispobj object = *start;
                        struct code *code;
                        sword_t nheader_words, ncode_words, nwords;
                        lispobj fheaderl;
                        struct simple_fun *fheaderp;

                        code = (struct code *) start;

                        /* Check that it's not in the dynamic space.
                         * FIXME: Isn't it supposed to be OK for code
                         * objects to be in the dynamic space these days? */
                        /* It is for byte compiled code, but there's
                         * no byte compilation in SBCL anymore. */
                        if (is_in_dynamic_space
                            /* Only when enabled */
                            && verify_dynamic_code_check) {
                            FSHOW((stderr,
                                   "/code object at %p in the dynamic space\n",
                                   start));
                        }

                        ncode_words = fixnum_word_value(code->code_size);
                        nheader_words = HeaderValue(object);
                        nwords = ncode_words + nheader_words;
                        nwords = CEILING(nwords, 2);
                        /* Scavenge the boxed section of the code data block */
                        verify_space(start + 1, nheader_words - 1);

                        /* Scavenge the boxed section of each function
                         * object in the code data block. */
                        fheaderl = code->entry_points;
                        while (fheaderl != NIL) {
                            fheaderp =
                                (struct simple_fun *) native_pointer(fheaderl);
                            gc_assert(widetag_of(fheaderp->header) ==
                                      SIMPLE_FUN_HEADER_WIDETAG);
                            verify_space(SIMPLE_FUN_SCAV_START(fheaderp),
                                         SIMPLE_FUN_SCAV_NWORDS(fheaderp));
                            fheaderl = fheaderp->next;
                        }
                        count = nwords;
                        break;
                    }

                    /* unboxed objects */
                case BIGNUM_WIDETAG:
#if N_WORD_BITS != 64
                case SINGLE_FLOAT_WIDETAG:
#endif
                case DOUBLE_FLOAT_WIDETAG:
#ifdef COMPLEX_LONG_FLOAT_WIDETAG
                case LONG_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_SINGLE_FLOAT_WIDETAG
                case COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_DOUBLE_FLOAT_WIDETAG
                case COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_LONG_FLOAT_WIDETAG
                case COMPLEX_LONG_FLOAT_WIDETAG:
#endif
#ifdef SIMD_PACK_WIDETAG
                case SIMD_PACK_WIDETAG:
#endif
                case SIMPLE_BASE_STRING_WIDETAG:
#ifdef SIMPLE_CHARACTER_STRING_WIDETAG
                case SIMPLE_CHARACTER_STRING_WIDETAG:
#endif
                case SIMPLE_BIT_VECTOR_WIDETAG:
                case SIMPLE_ARRAY_NIL_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_FIXNUM_WIDETAG
                case SIMPLE_ARRAY_UNSIGNED_FIXNUM_WIDETAG:
#endif
                case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
                case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
                case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_FIXNUM_WIDETAG
                case SIMPLE_ARRAY_FIXNUM_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
#endif
                case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
                case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
                case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
                case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
                case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
                case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
#endif
                case SAP_WIDETAG:
                case WEAK_POINTER_WIDETAG:
#ifdef NO_TLS_VALUE_MARKER_WIDETAG
                case NO_TLS_VALUE_MARKER_WIDETAG:
#endif
                    count = (sizetab[widetag_of(*start)])(start);
                    break;

                default:
                    lose("Unhandled widetag %p at %p\n",
                         widetag_of(*start), start);
                }
            }
        }
        start += count;
        words -= count;
    }
}
static void
verify_gc(void)
{
    /* FIXME: It would be nice to make names consistent so that
     * foo_size meant size *in* *bytes* instead of size in some
     * arbitrary units. (Yes, this caused a bug, how did you guess?:-)
     * Some counts of lispobjs are called foo_count; it might be good
     * to grep for all foo_size and rename the appropriate ones to
     * foo_count. */
    sword_t read_only_space_size =
        (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0)
        - (lispobj*)READ_ONLY_SPACE_START;
    sword_t static_space_size =
        (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER,0)
        - (lispobj*)STATIC_SPACE_START;
    struct thread *th;
    for_each_thread(th) {
        sword_t binding_stack_size =
            (lispobj*)get_binding_stack_pointer(th)
            - (lispobj*)th->binding_stack_start;
        verify_space(th->binding_stack_start, binding_stack_size);
    }
    verify_space((lispobj*)READ_ONLY_SPACE_START, read_only_space_size);
    verify_space((lispobj*)STATIC_SPACE_START, static_space_size);
}
static void
verify_generation(generation_index_t generation)
{
    page_index_t i;

    for (i = 0; i < last_free_page; i++) {
        if (page_allocated_p(i)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)) {
            page_index_t last_page;

            /* This should be the start of a contiguous block */
            gc_assert(page_starts_contiguous_block_p(i));

            /* Need to find the full extent of this contiguous block in case
               objects span pages. */

            /* Now work forward until the end of this contiguous area is
               found. */
            for (last_page = i; ;last_page++)
                /* Check whether this is the last page in this contiguous
                 * block. */
                if (page_ends_contiguous_block_p(last_page, generation))
                    break;

            verify_space(page_address(i),
                         ((uword_t)
                          (page_table[last_page].bytes_used
                           + npage_bytes(last_page-i)))
                         / N_WORD_BYTES);
            i = last_page;
        }
    }
}
/* Check that all the free space is zero filled. */
static void
verify_zero_fill(void)
{
    page_index_t page;

    for (page = 0; page < last_free_page; page++) {
        if (page_free_p(page)) {
            /* The whole page should be zero filled. */
            sword_t *start_addr = (sword_t *)page_address(page);
            sword_t size = 1024;
            sword_t i;
            for (i = 0; i < size; i++) {
                if (start_addr[i] != 0) {
                    lose("free page not zero at %x\n", start_addr + i);
                }
            }
        } else {
            sword_t free_bytes = GENCGC_CARD_BYTES - page_table[page].bytes_used;
            if (free_bytes > 0) {
                sword_t *start_addr = (sword_t *)((uword_t)page_address(page)
                                                  + page_table[page].bytes_used);
                sword_t size = free_bytes / N_WORD_BYTES;
                sword_t i;
                for (i = 0; i < size; i++) {
                    if (start_addr[i] != 0) {
                        lose("free region not zero at %x\n", start_addr + i);
                    }
                }
            }
        }
    }
}
/* External entry point for verify_zero_fill */
void
gencgc_verify_zero_fill(void)
{
    /* Flush the alloc regions updating the tables. */
    gc_alloc_update_all_page_tables();
    SHOW("verifying zero fill");
    verify_zero_fill();
}
static void
verify_dynamic_space(void)
{
    generation_index_t i;

    for (i = 0; i <= HIGHEST_NORMAL_GENERATION; i++)
        verify_generation(i);

    if (gencgc_enable_verify_zero_fill)
        verify_zero_fill();
}
/* Write-protect all the dynamic boxed pages in the given generation. */
static void
write_protect_generation_pages(generation_index_t generation)
{
    page_index_t start;

    gc_assert(generation < SCRATCH_GENERATION);

    for (start = 0; start < last_free_page; start++) {
        if (protect_page_p(start, generation)) {
            void *page_start;
            page_index_t last;

            /* Note the page as protected in the page tables. */
            page_table[start].write_protected = 1;

            for (last = start + 1; last < last_free_page; last++) {
                if (!protect_page_p(last, generation))
                    break;
                page_table[last].write_protected = 1;
            }

            page_start = (void *)page_address(start);

            os_protect(page_start,
                       npage_bytes(last - start),
                       OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);

            start = last;
        }
    }

    if (gencgc_verbose > 1) {
        FSHOW((stderr,
               "/write protected %d of %d pages in generation %d\n",
               count_write_protect_generation_pages(generation),
               count_generation_pages(generation),
               generation));
    }
}
#if defined(LISP_FEATURE_SB_THREAD) && (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
static void
preserve_context_registers (os_context_t *c)
{
    void **ptr;
    /* On Darwin the signal context isn't a contiguous block of memory,
     * so just preserve_pointer()ing its contents won't be sufficient.
     */
#if defined(LISP_FEATURE_DARWIN)||defined(LISP_FEATURE_WIN32)
#if defined LISP_FEATURE_X86
    preserve_pointer((void*)*os_context_register_addr(c,reg_EAX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_ECX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_EDX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_EBX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_ESI));
    preserve_pointer((void*)*os_context_register_addr(c,reg_EDI));
    preserve_pointer((void*)*os_context_pc_addr(c));
#elif defined LISP_FEATURE_X86_64
    preserve_pointer((void*)*os_context_register_addr(c,reg_RAX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_RCX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_RDX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_RBX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_RSI));
    preserve_pointer((void*)*os_context_register_addr(c,reg_RDI));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R8));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R9));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R10));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R11));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R12));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R13));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R14));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R15));
    preserve_pointer((void*)*os_context_pc_addr(c));
#else
    #error "preserve_context_registers needs to be tweaked for non-x86 Darwin"
#endif
#endif
#if !defined(LISP_FEATURE_WIN32)
    for(ptr = ((void **)(c+1))-1; ptr>=(void **)c; ptr--) {
        preserve_pointer(*ptr);
    }
#endif
}
#endif
static void
move_pinned_pages_to_newspace()
{
    page_index_t i;

    /* scavenge() will evacuate all oldspace pages, but no newspace
     * pages. Pinned pages are precisely those pages which must not
     * be evacuated, so move them to newspace directly. */

    for (i = 0; i < last_free_page; i++) {
        if (page_table[i].dont_move &&
            /* dont_move is cleared lazily, so validate the space as well. */
            page_table[i].gen == from_space) {
            if (dontmove_dwords(i) && do_wipe_p) {
                // do not move to newspace after all, this will be word-wiped
                continue;
            }
            page_table[i].gen = new_space;
            /* And since we're moving the pages wholesale, also adjust
             * the generation allocation counters. */
            generations[new_space].bytes_allocated += page_table[i].bytes_used;
            generations[from_space].bytes_allocated -= page_table[i].bytes_used;
        }
    }
}
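
/* The accounting transfer above, factored out as a sketch: moving a
 * page between generations must debit the source generation and credit
 * the destination by the same bytes_used, keeping the global
 * bytes_allocated total unchanged. Hypothetical helper, for
 * illustration only. */
#if 0
static void
transfer_page_accounting(page_index_t i, generation_index_t to)
{
    generations[to].bytes_allocated += page_table[i].bytes_used;
    generations[page_table[i].gen].bytes_allocated -= page_table[i].bytes_used;
    page_table[i].gen = to;
}
#endif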
3583 /* Garbage collect a generation. If raise is 0 then the remains of the
3584 * generation are not raised to the next generation. */
3586 garbage_collect_generation(generation_index_t generation
, int raise
)
3588 uword_t bytes_freed
;
3590 uword_t static_space_size
;
3593 gc_assert(generation
<= HIGHEST_NORMAL_GENERATION
);
3595 /* The oldest generation can't be raised. */
3596 gc_assert((generation
!= HIGHEST_NORMAL_GENERATION
) || (raise
== 0));
3598 /* Check if weak hash tables were processed in the previous GC. */
3599 gc_assert(weak_hash_tables
== NULL
);
3601 /* Initialize the weak pointer list. */
3602 weak_pointers
= NULL
;
3604 /* When a generation is not being raised it is transported to a
3605 * temporary generation (NUM_GENERATIONS), and lowered when
3606 * done. Set up this new generation. There should be no pages
3607 * allocated to it yet. */
3609 gc_assert(generations
[SCRATCH_GENERATION
].bytes_allocated
== 0);
3612 /* Set the global src and dest. generations */
3613 from_space
= generation
;
3615 new_space
= generation
+1;
3617 new_space
= SCRATCH_GENERATION
;
3619 /* Change to a new space for allocation, resetting the alloc_start_page */
3620 gc_alloc_generation
= new_space
;
3621 generations
[new_space
].alloc_start_page
= 0;
3622 generations
[new_space
].alloc_unboxed_start_page
= 0;
3623 generations
[new_space
].alloc_large_start_page
= 0;
3624 generations
[new_space
].alloc_large_unboxed_start_page
= 0;
3626 /* Before any pointers are preserved, the dont_move flags on the
3627 * pages need to be cleared. */
3628 for (i
= 0; i
< last_free_page
; i
++)
3629 if(page_table
[i
].gen
==from_space
) {
3630 page_table
[i
].dont_move
= 0;
3631 gc_assert(dontmove_dwords(i
) == NULL
);
3634 /* Un-write-protect the old-space pages. This is essential for the
3635 * promoted pages as they may contain pointers into the old-space
3636 * which need to be scavenged. It also helps avoid unnecessary page
3637 * faults as forwarding pointers are written into them. They need to
3638 * be un-protected anyway before unmapping later. */
3639 unprotect_oldspace();
3641 /* Scavenge the stacks' conservative roots. */
3643 /* there are potentially two stacks for each thread: the main
3644 * stack, which may contain Lisp pointers, and the alternate stack.
3645 * We don't ever run Lisp code on the altstack, but it may
3646 * host a sigcontext with lisp objects in it */
3648 /* what we need to do: (1) find the stack pointer for the main
3649 * stack; scavenge it (2) find the interrupt context on the
3650 * alternate stack that might contain lisp values, and scavenge
3653 /* we assume that none of the preceding applies to the thread that
3654 * initiates GC. If you ever call GC from inside an altstack
3655 * handler, you will lose. */
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    /* And if we're saving a core, there's no point in being conservative. */
    if (conservative_stack) {
        for_each_thread(th) {
            void **ptr;
            void **esp=(void **)-1;
            if (th->state == STATE_DEAD)
                continue;
# if defined(LISP_FEATURE_SB_SAFEPOINT)
            /* Conservative collect_garbage is always invoked with a
             * foreign C call or an interrupt handler on top of every
             * existing thread, so the stored SP in each thread
             * structure is valid, no matter which thread we are looking
             * at. For threads that were running Lisp code, the pitstop
             * and edge functions maintain this value within the
             * interrupt or exception handler. */
            esp = os_get_csp(th);
            assert_on_stack(th, esp);

            /* In addition to pointers on the stack, also preserve the
             * return PC, the only value from the context that we need
             * in addition to the SP. The return PC gets saved by the
             * foreign call wrapper, and removed from the control stack
             * into a register. */
            preserve_pointer(th->pc_around_foreign_call);

            /* And on platforms with interrupts: scavenge ctx registers. */

            /* Disabled on Windows, because it does not have an explicit
             * stack of `interrupt_contexts'. The reported CSP has been
             * chosen so that the current context on the stack is
             * covered by the stack scan. See also set_csp_from_context(). */
#  ifndef LISP_FEATURE_WIN32
            if (th != arch_os_get_current_thread()) {
                long k = fixnum_value(
                    SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
                while (k > 0)
                    preserve_context_registers(th->interrupt_contexts[--k]);
            }
#  endif
# elif defined(LISP_FEATURE_SB_THREAD)
            sword_t i,free;
            if(th==arch_os_get_current_thread()) {
                /* Somebody is going to burn in hell for this, but casting
                 * it in two steps shuts gcc up about strict aliasing. */
                esp = (void **)((void *)&raise);
            } else {
                void **esp1;
                free=fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
                for(i=free-1;i>=0;i--) {
                    os_context_t *c=th->interrupt_contexts[i];
                    esp1 = (void **) *os_context_register_addr(c,reg_SP);
                    if (esp1>=(void **)th->control_stack_start &&
                        esp1<(void **)th->control_stack_end) {
                        if(esp1<esp) esp=esp1;
                        preserve_context_registers(c);
                    }
                }
            }
# else
            esp = (void **)((void *)&raise);
# endif
            if (!esp || esp == (void*) -1)
                lose("garbage_collect: no SP known for thread %x (OS %x)",
                     th, th->os_thread);
            for (ptr = ((void **)th->control_stack_end)-1; ptr >= esp; ptr--) {
                preserve_pointer(*ptr);
            }
        }
    }
#else
    /* Non-x86oid systems don't have "conservative roots" as such, but
     * the same mechanism is used for objects pinned for use by alien
     * code. */
    for_each_thread(th) {
        lispobj pin_list = SymbolTlValue(PINNED_OBJECTS,th);
        while (pin_list != NIL) {
            struct cons *list_entry =
                (struct cons *)native_pointer(pin_list);
            preserve_pointer((void*)list_entry->car);
            pin_list = list_entry->cdr;
        }
    }
#endif
    if (gencgc_verbose > 1) {
        sword_t num_dont_move_pages = count_dont_move_pages();
        fprintf(stderr,
                "/non-movable pages due to conservative pointers = %ld (%lu bytes)\n",
                num_dont_move_pages,
                npage_bytes(num_dont_move_pages));
    }
    /* Now that all of the pinned (dont_move) pages are known, and
     * before we start to scavenge (and thus relocate) objects,
     * relocate the pinned pages to newspace, so that the scavenger
     * will not attempt to relocate their contents. */
    move_pinned_pages_to_newspace();
    /* Scavenge all the rest of the roots. */

#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
    /*
     * If not x86, we need to scavenge the interrupt context(s) and the
     * control stack.
     */
    for_each_thread(th) {
        scavenge_interrupt_contexts(th);
        scavenge_control_stack(th);
    }

# ifdef LISP_FEATURE_SB_SAFEPOINT
    /* In this case, scrub all stacks right here from the GCing thread
     * instead of doing what the comment below says. Suboptimal, but
     * easier. */
    for_each_thread(th)
        scrub_thread_control_stack(th);
# else
    /* Scrub the unscavenged control stack space, so that we can't run
     * into any stale pointers in a later GC (this is done by the
     * stop-for-gc handler in the other threads). */
    scrub_control_stack();
# endif
#endif
    /* Scavenge the Lisp functions of the interrupt handlers, taking
     * care to avoid SIG_DFL and SIG_IGN. */
    for (i = 0; i < NSIG; i++) {
        union interrupt_handler handler = interrupt_handlers[i];
        if (!ARE_SAME_HANDLER(handler.c, SIG_IGN) &&
            !ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
            scavenge((lispobj *)(interrupt_handlers + i), 1);
        }
    }
    /* Scavenge the binding stacks. */
    for_each_thread(th) {
        sword_t len = (lispobj *)get_binding_stack_pointer(th) -
            th->binding_stack_start;
        scavenge((lispobj *) th->binding_stack_start,len);
#ifdef LISP_FEATURE_SB_THREAD
        /* do the tls as well */
        len=(SymbolValue(FREE_TLS_INDEX,0) >> WORD_SHIFT) -
            (sizeof (struct thread))/(sizeof (lispobj));
        scavenge((lispobj *) (th+1),len);
#endif
    }
    /* The original CMU CL code had scavenge-read-only-space code
     * controlled by the Lisp-level variable
     * *SCAVENGE-READ-ONLY-SPACE*. It was disabled by default, and it
     * wasn't documented under what circumstances it was useful or
     * safe to turn it on, so it's been turned off in SBCL. If you
     * want/need this functionality, and can test and document it,
     * please submit a patch. */
#if 0
    if (SymbolValue(SCAVENGE_READ_ONLY_SPACE) != NIL) {
        uword_t read_only_space_size =
            (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER) -
            (lispobj*)READ_ONLY_SPACE_START;
        FSHOW((stderr,
               "/scavenge read only space: %d bytes\n",
               read_only_space_size * sizeof(lispobj)));
        scavenge( (lispobj *) READ_ONLY_SPACE_START, read_only_space_size);
    }
#endif
    /* Scavenge static space. */
    static_space_size =
        (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0) -
        (lispobj *)STATIC_SPACE_START;
    if (gencgc_verbose > 1) {
        FSHOW((stderr,
               "/scavenge static space: %d bytes\n",
               static_space_size * sizeof(lispobj)));
    }
    scavenge( (lispobj *) STATIC_SPACE_START, static_space_size);
    /* All generations but the generation being GCed need to be
     * scavenged. The new_space generation needs special handling as
     * objects may be moved in - it is handled separately below. */
    scavenge_generations(generation+1, PSEUDO_STATIC_GENERATION);

    scavenge_pages_with_conservative_pointers_to_them_protected_objects_only();

    /* Finally scavenge the new_space generation. Keep going until no
     * more objects are moved into the new generation */
    scavenge_newspace_generation(new_space);
    /* FIXME: I tried reenabling this check when debugging unrelated
     * GC weirdness ca. sbcl-0.6.12.45, and it failed immediately.
     * Since the current GC code seems to work well, I'm guessing that
     * this debugging code is just stale, but I haven't tried to
     * figure it out. It should be figured out and then either made to
     * work or just deleted. */

#define RESCAN_CHECK 0
#if RESCAN_CHECK
    /* As a check re-scavenge the newspace once; no new objects should
     * be found. */
    {
        os_vm_size_t old_bytes_allocated = bytes_allocated;
        os_vm_size_t bytes_rescanned;

        /* Start with a full scavenge. */
        scavenge_newspace_generation_one_scan(new_space);

        /* Flush the current regions, updating the tables. */
        gc_alloc_update_all_page_tables();

        bytes_rescanned = bytes_allocated - old_bytes_allocated;

        if (bytes_rescanned != 0) {
            lose("Rescan of new_space allocated %d more bytes.\n",
                 bytes_rescanned);
        }
    }
#endif
    scan_weak_hash_tables();
    scan_weak_pointers();

    /* Flush the current regions, updating the tables. */
    gc_alloc_update_all_page_tables();
    /* Free the pages in oldspace, but not those marked dont_move. */
    bytes_freed = free_oldspace();

    /* If the GC is not raising the age then lower the generation back
     * to its normal generation number */
    if (!raise) {
        for (i = 0; i < last_free_page; i++)
            if ((page_table[i].bytes_used != 0)
                && (page_table[i].gen == SCRATCH_GENERATION))
                page_table[i].gen = generation;
        gc_assert(generations[generation].bytes_allocated == 0);
        generations[generation].bytes_allocated =
            generations[SCRATCH_GENERATION].bytes_allocated;
        generations[SCRATCH_GENERATION].bytes_allocated = 0;
    }
    /* Reset the alloc_start_page for generation. */
    generations[generation].alloc_start_page = 0;
    generations[generation].alloc_unboxed_start_page = 0;
    generations[generation].alloc_large_start_page = 0;
    generations[generation].alloc_large_unboxed_start_page = 0;
    if (generation >= verify_gens) {
        if (gencgc_verbose) {
            SHOW("verifying");
        }
        verify_gc();
        verify_dynamic_space();
    }
    /* Set the new gc trigger for the GCed generation. */
    generations[generation].gc_trigger =
        generations[generation].bytes_allocated
        + generations[generation].bytes_consed_between_gc;
    if (raise)
        generations[generation].num_gc = 0;
    else
        ++generations[generation].num_gc;
}
/* Update last_free_page, then SymbolValue(ALLOCATION_POINTER). */
sword_t
update_dynamic_space_free_pointer(void)
{
    page_index_t last_page = -1, i;

    for (i = 0; i < last_free_page; i++)
        if (page_allocated_p(i) && (page_table[i].bytes_used != 0))
            last_page = i;

    last_free_page = last_page+1;

    set_alloc_pointer((lispobj)(page_address(last_free_page)));
    return 0; /* dummy value: return something ... */
}
static void
remap_page_range (page_index_t from, page_index_t to)
{
    /* There's a mysterious Solaris/x86 problem with using mmap
     * tricks for memory zeroing. See sbcl-devel thread
     * "Re: patch: standalone executable redux".
     */
#if defined(LISP_FEATURE_SUNOS)
    zero_and_mark_pages(from, to);
#else
    const page_index_t
            release_granularity = gencgc_release_granularity/GENCGC_CARD_BYTES,
                   release_mask = release_granularity-1,
                            end = to+1,
                   aligned_from = (from+release_mask)&~release_mask,
                    aligned_end = (end&~release_mask);

    if (aligned_from < aligned_end) {
        zero_pages_with_mmap(aligned_from, aligned_end-1);
        if (aligned_from != from)
            zero_and_mark_pages(from, aligned_from-1);
        if (aligned_end != end)
            zero_and_mark_pages(aligned_end, end-1);
    } else {
        zero_and_mark_pages(from, to);
    }
#endif
}
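/* Worked example for the alignment arithmetic above (illustrative
 * figures, not from the original source): with
 * gencgc_release_granularity = 64 KiB and GENCGC_CARD_BYTES = 4 KiB,
 * release_granularity is 16 pages and release_mask is 15. A call
 * remap_page_range(5, 40) then computes aligned_from = 16 and
 * aligned_end = 32, so pages 16..31 go back to the OS via
 * zero_pages_with_mmap() while pages 5..15 and 32..40 are zeroed in
 * place. */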
static void
remap_free_pages (page_index_t from, page_index_t to, int forcibly)
{
    page_index_t first_page, last_page;

    if (forcibly)
        return remap_page_range(from, to);

    for (first_page = from; first_page <= to; first_page++) {
        if (page_allocated_p(first_page) ||
            (page_table[first_page].need_to_zero == 0))
            continue;

        last_page = first_page + 1;
        while (page_free_p(last_page) &&
               (last_page <= to) &&
               (page_table[last_page].need_to_zero == 1))
            last_page++;

        remap_page_range(first_page, last_page-1);

        first_page = last_page;
    }
}
generation_index_t small_generation_limit = 1;

/* GC all generations newer than last_gen, raising the objects in each
 * to the next older generation - we finish when all generations below
 * last_gen are empty. Then if last_gen is due for a GC, or if
 * last_gen==NUM_GENERATIONS (the scratch generation? eh?) we GC that
 * too. The valid range for last_gen is: 0,1,...,NUM_GENERATIONS.
 *
 * We stop collecting at gencgc_oldest_gen_to_gc, even if this is less than
 * last_gen (oh, and note that by default it is NUM_GENERATIONS-1) */
void
collect_garbage(generation_index_t last_gen)
{
    generation_index_t gen = 0, i;
    int gen_to_wp;
    int raise, more = 0;

    /* The largest value of last_free_page seen since the time
     * remap_free_pages was called. */
    static page_index_t high_water_mark = 0;

    FSHOW((stderr, "/entering collect_garbage(%d)\n", last_gen));
    log_generation_stats(gc_logfile, "=== GC Start ===");

    gc_active_p = 1;
    if (last_gen > HIGHEST_NORMAL_GENERATION+1) {
        FSHOW((stderr,
               "/collect_garbage: last_gen = %d, doing a level 0 GC\n",
               last_gen));
        last_gen = 0;
    }
    /* Flush the alloc regions updating the tables. */
    gc_alloc_update_all_page_tables();

    /* Verify the new objects created by Lisp code. */
    if (pre_verify_gen_0) {
        FSHOW((stderr, "pre-checking generation 0\n"));
        verify_generation(0);
    }

    if (gencgc_verbose > 1)
        print_generation_stats();
    do {
        /* Collect the generation. */

        if (more || (gen >= gencgc_oldest_gen_to_gc)) {
            /* Never raise the oldest generation. Never raise the extra generation
             * collected due to more-flag. */
            raise = 0;
            more = 0;
        } else {
            raise =
                (gen < last_gen)
                || (generations[gen].num_gc >= generations[gen].number_of_gcs_before_promotion);
            /* If we would not normally raise this one, but we're
             * running low on space in comparison to the object-sizes
             * we've been seeing, raise it and collect the next one
             * too. */
            if (!raise && gen == last_gen) {
                more = (2*large_allocation) >= (dynamic_space_size - bytes_allocated);
                raise = more;
            }
        }

        if (gencgc_verbose > 1) {
            FSHOW((stderr,
                   "starting GC of generation %d with raise=%d alloc=%d trig=%d GCs=%d\n",
                   gen,
                   raise,
                   generations[gen].bytes_allocated,
                   generations[gen].gc_trigger,
                   generations[gen].num_gc));
        }
        /* If an older generation is being filled, then update its
         * memory age. */
        if (raise == 1) {
            generations[gen+1].cum_sum_bytes_allocated +=
                generations[gen+1].bytes_allocated;
        }

        garbage_collect_generation(gen, raise);

        /* Reset the memory age cum_sum. */
        generations[gen].cum_sum_bytes_allocated = 0;

        if (gencgc_verbose > 1) {
            FSHOW((stderr, "GC of generation %d finished:\n", gen));
            print_generation_stats();
        }

        gen++;
    } while ((gen <= gencgc_oldest_gen_to_gc)
             && ((gen < last_gen)
                 || more
                 || (raise
                     && (generations[gen].bytes_allocated
                         > generations[gen].gc_trigger)
                     && (generation_average_age(gen)
                         > generations[gen].minimum_age_before_gc))));
    /* Now if gen-1 was raised all generations before gen are empty.
     * If it wasn't raised then all generations before gen-1 are empty.
     *
     * Now objects within this gen's pages cannot point to younger
     * generations unless they are written to. This can be exploited
     * by write-protecting the pages of gen; then when younger
     * generations are GCed only the pages which have been written
     * need scanning. */
    if (raise)
        gen_to_wp = gen;
    else
        gen_to_wp = gen - 1;
    /* There's not much point in WPing pages in generation 0 as it is
     * never scavenged (except promoted pages). */
    if ((gen_to_wp > 0) && enable_page_protection) {
        /* Check that they are all empty. */
        for (i = 0; i < gen_to_wp; i++) {
            if (generations[i].bytes_allocated)
                lose("trying to write-protect gen. %d when gen. %d nonempty\n",
                     gen_to_wp, i);
        }
        write_protect_generation_pages(gen_to_wp);
    }
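    /* Sketch of the write barrier this sets up (illustrative, not from
     * the original source): once a gen_to_wp page is read-only, a later
     * store of, say, a gen-0 pointer into an object on that page traps;
     * gencgc_handle_wp_violation() (defined later in this file)
     * unprotects the page and sets write_protected_cleared, so the next
     * GC of a younger generation rescans just the dirtied pages. */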
    /* Set gc_alloc() back to generation 0. The current regions should
     * be flushed after the above GCs. */
    gc_assert((boxed_region.free_pointer - boxed_region.start_addr) == 0);
    gc_alloc_generation = 0;

    /* Save the high-water mark before updating last_free_page */
    if (last_free_page > high_water_mark)
        high_water_mark = last_free_page;

    update_dynamic_space_free_pointer();
    /* Update auto_gc_trigger. Make sure we trigger the next GC before
     * running out of heap! */
    if (bytes_consed_between_gcs <= (dynamic_space_size - bytes_allocated))
        auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
    else
        auto_gc_trigger = bytes_allocated + (dynamic_space_size - bytes_allocated)/2;

    if(gencgc_verbose)
        fprintf(stderr,"Next gc when %"OS_VM_SIZE_FMT" bytes have been consed\n",
                auto_gc_trigger);
    /* If we did a big GC (arbitrarily defined as gen > 1), release memory
     * back to the OS.
     */
    if (gen > small_generation_limit) {
        if (last_free_page > high_water_mark)
            high_water_mark = last_free_page;
        remap_free_pages(0, high_water_mark, 0);
        high_water_mark = 0;
    }

    gc_active_p = 0;
    large_allocation = 0;

    log_generation_stats(gc_logfile, "=== GC End ===");
    SHOW("returning from collect_garbage");
}
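/* A minimal usage sketch (an approximation, not from the original
 * source): the Lisp-side SB-EXT:GC entry point, with interrupts
 * deferred and other threads stopped, ultimately reduces to calls like
 *
 *     collect_garbage(0);                        // nursery-only GC
 *     collect_garbage(gencgc_oldest_gen_to_gc);  // a ":full t" GC
 */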
/* This is called by Lisp PURIFY when it is finished. All live objects
 * will have been moved to the RO and Static heaps. The dynamic space
 * will need a full re-initialization. We don't bother having Lisp
 * PURIFY flush the current gc_alloc() region, as the page_tables are
 * re-initialized, and every page is zeroed to be sure. */
void
gc_free_heap(void)
{
    page_index_t page, last_page;

    if (gencgc_verbose > 1) {
        SHOW("entering gc_free_heap");
    }
    for (page = 0; page < page_table_pages; page++) {
        /* Skip free pages which should already be zero filled. */
        if (page_allocated_p(page)) {
            void *page_start;
            for (last_page = page;
                 (last_page < page_table_pages) && page_allocated_p(last_page);
                 last_page++) {
                /* Mark the page free. The other slots are assumed invalid
                 * when it is a FREE_PAGE_FLAG and bytes_used is 0 and it
                 * should not be write-protected -- except that the
                 * generation is used for the current region but it sets
                 * that to 0. */
                page_table[last_page].allocated = FREE_PAGE_FLAG;
                page_table[last_page].bytes_used = 0;
                page_table[last_page].write_protected = 0;
            }

#ifndef LISP_FEATURE_WIN32 /* Pages already zeroed on win32? Not sure
                            * about this change. */
            page_start = (void *)page_address(page);
            os_protect(page_start, npage_bytes(last_page-page), OS_VM_PROT_ALL);
            remap_free_pages(page, last_page-1, 1);
            page = last_page-1;
#endif
        } else if (gencgc_zero_check_during_free_heap) {
            /* Double-check that the page is zero filled. */
            sword_t *page_start;
            page_index_t i;
            gc_assert(page_free_p(page));
            gc_assert(page_table[page].bytes_used == 0);
            page_start = (sword_t *)page_address(page);
            for (i=0; i<(long)(GENCGC_CARD_BYTES/sizeof(sword_t)); i++) {
                if (page_start[i] != 0) {
                    lose("free region not zero at %x\n", page_start + i);
                }
            }
        }
    }

    bytes_allocated = 0;
    /* Initialize the generations. */
    for (page = 0; page < NUM_GENERATIONS; page++) {
        generations[page].alloc_start_page = 0;
        generations[page].alloc_unboxed_start_page = 0;
        generations[page].alloc_large_start_page = 0;
        generations[page].alloc_large_unboxed_start_page = 0;
        generations[page].bytes_allocated = 0;
        generations[page].gc_trigger = 2000000;
        generations[page].num_gc = 0;
        generations[page].cum_sum_bytes_allocated = 0;
    }

    if (gencgc_verbose > 1)
        print_generation_stats();
    /* Initialize gc_alloc(). */
    gc_alloc_generation = 0;

    gc_set_region_empty(&boxed_region);
    gc_set_region_empty(&unboxed_region);

    last_free_page = 0;
    set_alloc_pointer((lispobj)((char *)heap_base));

    if (verify_after_free_heap) {
        /* Check whether purify has left any bad pointers. */
        FSHOW((stderr, "checking after free_heap\n"));
        verify_gc();
    }
}
void
gc_init(void)
{
    generation_index_t i;

#if defined(LISP_FEATURE_SB_SAFEPOINT)
    alloc_gc_page();
#endif

    /* Compute the number of pages needed for the dynamic space.
     * Dynamic space size should be aligned on page size. */
    page_table_pages = dynamic_space_size/GENCGC_CARD_BYTES;
    gc_assert(dynamic_space_size == npage_bytes(page_table_pages));
    /* Default nursery size to 5% of the total dynamic space size,
     * min 1Mb. */
    bytes_consed_between_gcs = dynamic_space_size/(os_vm_size_t)20;
    if (bytes_consed_between_gcs < (1024*1024))
        bytes_consed_between_gcs = 1024*1024;
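    /* For example (illustrative figures): a 512 MB dynamic space yields
     * a 25.6 MB nursery, while an 8 MB dynamic space would compute
     * 0.4 MB and be clamped up to the 1 MB minimum. */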
    /* The page_table must be allocated using "calloc" to initialize
     * the page structures correctly. There used to be a separate
     * initialization loop (now commented out; see below) but that was
     * unnecessary and did hurt startup time. */
    page_table = calloc(page_table_pages, sizeof(struct page));
    gc_assert(page_table);

    size_t total_size = sizeof(in_use_marker_t) * n_dwords_in_card *
        page_table_pages;
    /* We use mmap directly here so that we can use a minimum of
       system calls per page during GC.
       All we need here now is a madvise(DONTNEED) at the end of GC. */
    page_table_dontmove_dwords = os_validate(NULL, total_size);
    /* We do not need to zero, in fact we shouldn't. Pages actually
       used are zeroed before use. */

    gc_assert(page_table_dontmove_dwords);
    page_table_dontmove_dwords_size_in_bytes = total_size;
    gc_init_tables();
    scavtab[WEAK_POINTER_WIDETAG] = scav_weak_pointer;
    transother[SIMPLE_ARRAY_WIDETAG] = trans_boxed_large;

    heap_base = (void*)DYNAMIC_SPACE_START;
    /* The page structures are initialized implicitly when page_table
     * is allocated with "calloc" above. Formerly we had the following
     * explicit initialization here (comments converted to C99 style
     * for readability as C's block comments don't nest):
     *
     * // Initialize each page structure.
     * for (i = 0; i < page_table_pages; i++) {
     *     // Initialize all pages as free.
     *     page_table[i].allocated = FREE_PAGE_FLAG;
     *     page_table[i].bytes_used = 0;
     *
     *     // Pages are not write-protected at startup.
     *     page_table[i].write_protected = 0;
     * }
     *
     * Without this loop the image starts up much faster when dynamic
     * space is large -- which it is on 64-bit platforms already by
     * default -- and when "calloc" for large arrays is implemented
     * using copy-on-write of a page of zeroes -- which it is at least
     * on Linux. In this case the pages that the page_table is stored
     * in are not mapped and cleared before the corresponding part of
     * dynamic space is used. For example, this saves clearing 16 MB of
     * memory at startup if the page size is 4 KB and the size of
     * dynamic space is 4 GB.
     * FREE_PAGE_FLAG must be 0 for this to work correctly which is
     * asserted below: */
    {
        /* Compile time assertion: If triggered, declares an array
         * of dimension -1 forcing a syntax error. The intent of the
         * assignment is to avoid an "unused variable" warning. */
        char assert_free_page_flag_0[(FREE_PAGE_FLAG) ? -1 : 1];
        assert_free_page_flag_0[0] = assert_free_page_flag_0[0];
    }
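    /* Under C11 the same check could be written directly with a static
     * assertion (an equivalent alternative, not what this file uses):
     *
     *     _Static_assert(FREE_PAGE_FLAG == 0,
     *                    "calloc()-as-initialization needs FREE_PAGE_FLAG == 0");
     */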
    bytes_allocated = 0;

    /* Initialize the generations.
     *
     * FIXME: very similar to code in gc_free_heap(), should be shared */
    for (i = 0; i < NUM_GENERATIONS; i++) {
        generations[i].alloc_start_page = 0;
        generations[i].alloc_unboxed_start_page = 0;
        generations[i].alloc_large_start_page = 0;
        generations[i].alloc_large_unboxed_start_page = 0;
        generations[i].bytes_allocated = 0;
        generations[i].gc_trigger = 2000000;
        generations[i].num_gc = 0;
        generations[i].cum_sum_bytes_allocated = 0;
        /* the tune-able parameters */
        generations[i].bytes_consed_between_gc
            = bytes_consed_between_gcs/(os_vm_size_t)HIGHEST_NORMAL_GENERATION;
        generations[i].number_of_gcs_before_promotion = 1;
        generations[i].minimum_age_before_gc = 0.75;
    }

    /* Initialize gc_alloc. */
    gc_alloc_generation = 0;
    gc_set_region_empty(&boxed_region);
    gc_set_region_empty(&unboxed_region);

    last_free_page = 0;
}
/* Pick up the dynamic space from after a core load.
 *
 * The ALLOCATION_POINTER points to the end of the dynamic space.
 */
static void
gencgc_pickup_dynamic(void)
{
    page_index_t page = 0;
    void *alloc_ptr = (void *)get_alloc_pointer();
    lispobj *prev=(lispobj *)page_address(page);
    generation_index_t gen = PSEUDO_STATIC_GENERATION;

    bytes_allocated = 0;
    do {
        lispobj *first,*ptr= (lispobj *)page_address(page);

        if (!gencgc_partial_pickup || page_allocated_p(page)) {
            /* It is possible, though rare, for the saved page table
             * to contain free pages below alloc_ptr. */
            page_table[page].gen = gen;
            page_table[page].bytes_used = GENCGC_CARD_BYTES;
            page_table[page].large_object = 0;
            page_table[page].write_protected = 0;
            page_table[page].write_protected_cleared = 0;
            page_table[page].dont_move = 0;
            page_table[page].need_to_zero = 1;

            bytes_allocated += GENCGC_CARD_BYTES;
        }

        if (!gencgc_partial_pickup) {
            page_table[page].allocated = BOXED_PAGE_FLAG;
            first=gc_search_space(prev,(ptr+2)-prev,ptr);
            if(ptr == first)
                prev=ptr;
            page_table[page].scan_start_offset =
                page_address(page) - (void *)prev;
        }
        page++;
    } while (page_address(page) < alloc_ptr);
    last_free_page = page;

    generations[gen].bytes_allocated = bytes_allocated;

    gc_alloc_update_all_page_tables();
    write_protect_generation_pages(gen);
}

void
gc_initialize_pointers(void)
{
    gencgc_pickup_dynamic();
}
/* alloc(..) is the external interface for memory allocation. It
 * allocates to generation 0. It is not called from within the garbage
 * collector as it is only external uses that need the check for heap
 * size (GC trigger) and to disable the interrupts (interrupts are
 * always disabled during a GC).
 *
 * The vops that call alloc(..) assume that the returned space is zero-filled.
 * (E.g. the most significant word of a 2-word bignum in MOVE-FROM-UNSIGNED.)
 *
 * The check for a GC trigger is only performed when the current
 * region is full, so in most cases it's not needed. */
static inline lispobj *
general_alloc_internal(sword_t nbytes, int page_type_flag, struct alloc_region *region,
                       struct thread *thread)
{
#ifndef LISP_FEATURE_WIN32
    lispobj alloc_signal;
#endif
    void *new_obj;
    void *new_free_pointer;
    os_vm_size_t trigger_bytes = 0;

    gc_assert(nbytes > 0);

    /* Check for alignment allocation problems. */
    gc_assert((((uword_t)region->free_pointer & LOWTAG_MASK) == 0)
              && ((nbytes & LOWTAG_MASK) == 0));

#if !(defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD))
    /* Must be inside a PA section. */
    gc_assert(get_pseudo_atomic_atomic(thread));
#endif

    if (nbytes > large_allocation)
        large_allocation = nbytes;

    /* maybe we can do this quickly ... */
    new_free_pointer = region->free_pointer + nbytes;
    if (new_free_pointer <= region->end_addr) {
        new_obj = (void*)(region->free_pointer);
        region->free_pointer = new_free_pointer;
        return(new_obj);        /* yup */
    }
    /* We don't want to count nbytes against auto_gc_trigger unless we
     * have to: it speeds up the tenuring of objects and slows down
     * allocation. However, unless we do so when allocating _very_
     * large objects we are in danger of exhausting the heap without
     * running sufficient GCs.
     */
    if (nbytes >= bytes_consed_between_gcs)
        trigger_bytes = nbytes;
    /* we have to go the long way around, it seems. Check whether we
     * should GC in the near future
     */
    if (auto_gc_trigger && (bytes_allocated+trigger_bytes > auto_gc_trigger)) {
        /* Don't flood the system with interrupts if the need to gc is
         * already noted. This can happen for example when SUB-GC
         * allocates or after a gc triggered in a WITHOUT-GCING. */
        if (SymbolValue(GC_PENDING,thread) == NIL) {
            /* set things up so that GC happens when we finish the PA
             * section */
            SetSymbolValue(GC_PENDING,T,thread);
            if (SymbolValue(GC_INHIBIT,thread) == NIL) {
#ifdef LISP_FEATURE_SB_SAFEPOINT
                thread_register_gc_trigger();
#else
                set_pseudo_atomic_interrupted(thread);
#ifdef GENCGC_IS_PRECISE
                /* PPC calls alloc() from a trap or from pa_alloc(),
                 * look up the most recent context if it's from a trap. */
                {
                    os_context_t *context =
                        thread->interrupt_data->allocation_trap_context;
                    maybe_save_gc_mask_and_block_deferrables
                        (context ? os_context_sigmask_addr(context) : NULL);
                }
#else
                maybe_save_gc_mask_and_block_deferrables(NULL);
#endif
#endif
            }
        }
    }
    new_obj = gc_alloc_with_region(nbytes, page_type_flag, region, 0);
#ifndef LISP_FEATURE_WIN32
    /* for sb-prof, and not supported on Windows yet */
    alloc_signal = SymbolValue(ALLOC_SIGNAL,thread);
    if ((alloc_signal & FIXNUM_TAG_MASK) == 0) {
        if ((sword_t) alloc_signal <= 0) {
            SetSymbolValue(ALLOC_SIGNAL, T, thread);
            raise(SIGPROF);
        } else {
            SetSymbolValue(ALLOC_SIGNAL,
                           alloc_signal - (1 << N_FIXNUM_TAG_BITS),
                           thread);
        }
    }
#endif

    return (new_obj);
}
lispobj *
general_alloc(sword_t nbytes, int page_type_flag)
{
    struct thread *thread = arch_os_get_current_thread();
    /* Select correct region, and call general_alloc_internal with it.
     * For other than boxed allocation we must lock first, since the
     * region is shared. */
    if (BOXED_PAGE_FLAG & page_type_flag) {
#ifdef LISP_FEATURE_SB_THREAD
        struct alloc_region *region = (thread ? &(thread->alloc_region) : &boxed_region);
#else
        struct alloc_region *region = &boxed_region;
#endif
        return general_alloc_internal(nbytes, page_type_flag, region, thread);
    } else if (UNBOXED_PAGE_FLAG == page_type_flag) {
        lispobj *obj;
        gc_assert(0 == thread_mutex_lock(&allocation_lock));
        obj = general_alloc_internal(nbytes, page_type_flag, &unboxed_region, thread);
        gc_assert(0 == thread_mutex_unlock(&allocation_lock));
        return obj;
    } else {
        lose("bad page type flag: %d", page_type_flag);
    }
}
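/* A minimal usage sketch (illustrative, not from the original source):
 * allocating a two-word cons cell from C inside a pseudo-atomic section
 * would look like
 *
 *     lispobj *cell = general_alloc(2*sizeof(lispobj), BOXED_PAGE_FLAG);
 *
 * and the returned words arrive zero-filled, as the calling vops assume. */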
lispobj AMD64_SYSV_ABI *
alloc(sword_t nbytes)
{
#ifdef LISP_FEATURE_SB_SAFEPOINT_STRICTLY
    struct thread *self = arch_os_get_current_thread();
    int was_pseudo_atomic = get_pseudo_atomic_atomic(self);
    if (!was_pseudo_atomic)
        set_pseudo_atomic_atomic(self);
#else
    gc_assert(get_pseudo_atomic_atomic(arch_os_get_current_thread()));
#endif

    lispobj *result = general_alloc(nbytes, BOXED_PAGE_FLAG);

#ifdef LISP_FEATURE_SB_SAFEPOINT_STRICTLY
    if (!was_pseudo_atomic)
        clear_pseudo_atomic_atomic(self);
#endif

    return result;
}
/*
 * shared support for the OS-dependent signal handlers which
 * catch GENCGC-related write-protect violations
 */
void unhandled_sigmemoryfault(void* addr);

/* Depending on which OS we're running under, different signals might
 * be raised for a violation of write protection in the heap. This
 * function factors out the common generational GC magic which needs
 * to be invoked in this case, and should be called from whatever signal
 * handler is appropriate for the OS we're running under.
 *
 * Return true if this signal is a normal generational GC thing that
 * we were able to handle, or false if it was abnormal and control
 * should fall through to the general SIGSEGV/SIGBUS/whatever logic.
 *
 * We have two control flags for this: one causes us to ignore faults
 * on unprotected pages completely, and the second complains to stderr
 * but allows us to continue without losing.
 */
extern boolean ignore_memoryfaults_on_unprotected_pages;
boolean ignore_memoryfaults_on_unprotected_pages = 0;

extern boolean continue_after_memoryfault_on_unprotected_pages;
boolean continue_after_memoryfault_on_unprotected_pages = 0;
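/* A sketch of the expected caller (hypothetical; the real handlers live
 * in the OS-specific runtime files):
 *
 *     static void memory_fault_handler(int sig, siginfo_t *info, void *ctx)
 *     {
 *         if (gencgc_handle_wp_violation(info->si_addr))
 *             return;   // write-barrier fault handled; the store retries
 *         // otherwise fall through to the generic SIGSEGV/SIGBUS logic
 *     }
 */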
int
gencgc_handle_wp_violation(void* fault_addr)
{
    int ret;
    page_index_t page_index = find_page_index(fault_addr);

    FSHOW((stderr,
           "heap WP violation? fault_addr=%p, page_index=%"PAGE_INDEX_FMT"\n",
           fault_addr, page_index));

    /* Check whether the fault is within the dynamic space. */
    if (page_index == (-1)) {

        /* It can be helpful to be able to put a breakpoint on this
         * case to help diagnose low-level problems. */
        unhandled_sigmemoryfault(fault_addr);

        /* not within the dynamic space -- not our responsibility */
        return 0;

    } else {
        ret = thread_mutex_lock(&free_pages_lock);
        gc_assert(ret == 0);
        if (page_table[page_index].write_protected) {
            /* Unprotect the page. */
            os_protect(page_address(page_index), GENCGC_CARD_BYTES, OS_VM_PROT_ALL);
            page_table[page_index].write_protected_cleared = 1;
            page_table[page_index].write_protected = 0;
        } else if (!ignore_memoryfaults_on_unprotected_pages) {
            /* The only acceptable reason for this signal on a heap
             * access is that GENCGC write-protected the page.
             * However, if two CPUs hit a wp page near-simultaneously,
             * we had better not have the second one lose here if it
             * does this test after the first one has already set wp=0
             */
            if(page_table[page_index].write_protected_cleared != 1) {
                void lisp_backtrace(int frames);
                lisp_backtrace(10);
4640 "Fault @ %p, page %"PAGE_INDEX_FMT
" not marked as write-protected:\n"
4641 " boxed_region.first_page: %"PAGE_INDEX_FMT
","
4642 " boxed_region.last_page %"PAGE_INDEX_FMT
"\n"
4643 " page.scan_start_offset: %"OS_VM_SIZE_FMT
"\n"
4644 " page.bytes_used: %"PAGE_BYTES_FMT
"\n"
4645 " page.allocated: %d\n"
4646 " page.write_protected: %d\n"
4647 " page.write_protected_cleared: %d\n"
4648 " page.generation: %d\n",
4651 boxed_region
.first_page
,
4652 boxed_region
.last_page
,
4653 page_table
[page_index
].scan_start_offset
,
4654 page_table
[page_index
].bytes_used
,
4655 page_table
[page_index
].allocated
,
4656 page_table
[page_index
].write_protected
,
4657 page_table
[page_index
].write_protected_cleared
,
4658 page_table
[page_index
].gen
);
4659 if (!continue_after_memoryfault_on_unprotected_pages
)
4663 ret
= thread_mutex_unlock(&free_pages_lock
);
4664 gc_assert(ret
== 0);
4665 /* Don't worry, we can handle it. */
/* This is to be called when we catch a SIGSEGV/SIGBUS, determine that
 * it's not just a case of the program hitting the write barrier, and
 * are about to let Lisp deal with it. It's basically just a
 * convenient place to set a gdb breakpoint. */
void
unhandled_sigmemoryfault(void *addr)
{}
void gc_alloc_update_all_page_tables(void)
{
    /* Flush the alloc regions updating the tables. */
    struct thread *th;
    for_each_thread(th) {
        gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->alloc_region);
#if defined(LISP_FEATURE_SB_SAFEPOINT_STRICTLY) && !defined(LISP_FEATURE_WIN32)
        gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->sprof_alloc_region);
#endif
    }
    gc_alloc_update_page_tables(UNBOXED_PAGE_FLAG, &unboxed_region);
    gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &boxed_region);
}
void
gc_set_region_empty(struct alloc_region *region)
{
    region->first_page = 0;
    region->last_page = -1;
    region->start_addr = page_address(0);
    region->free_pointer = page_address(0);
    region->end_addr = page_address(0);
}
static void
zero_all_free_pages()
{
    page_index_t i;

    for (i = 0; i < last_free_page; i++) {
        if (page_free_p(i)) {
#ifdef READ_PROTECT_FREE_PAGES
            os_protect(page_address(i),
                       GENCGC_CARD_BYTES,
                       OS_VM_PROT_ALL);
#endif
            zero_pages(i, i);
        }
    }
}
/* Things to do before doing a final GC before saving a core (without
 * purify).
 *
 * + Pages in large_object pages aren't moved by the GC, so we need to
 *   unset that flag from all pages.
 * + The pseudo-static generation isn't normally collected, but it seems
 *   reasonable to collect it at least when saving a core. So move the
 *   pages to a normal generation.
 */
static void
prepare_for_final_gc ()
{
    page_index_t i;

    for (i = 0; i < last_free_page; i++) {
        page_table[i].large_object = 0;
        if (page_table[i].gen == PSEUDO_STATIC_GENERATION) {
            int used = page_table[i].bytes_used;
            page_table[i].gen = HIGHEST_NORMAL_GENERATION;
            generations[PSEUDO_STATIC_GENERATION].bytes_allocated -= used;
            generations[HIGHEST_NORMAL_GENERATION].bytes_allocated += used;
        }
    }
}
/* Do a non-conservative GC, and then save a core with the initial
 * function being set to the value of the static symbol
 * SB!VM:RESTART-LISP-FUNCTION */
void
gc_and_save(char *filename, boolean prepend_runtime,
            boolean save_runtime_options, boolean compressed,
            int compression_level, int application_type)
{
    FILE *file;
    void *runtime_bytes = NULL;
    size_t runtime_size;

    file = prepare_to_save(filename, prepend_runtime, &runtime_bytes,
                           &runtime_size);
    if (file == NULL)
        return;

    conservative_stack = 0;

    /* The filename might come from Lisp, and be moved by the now
     * non-conservative GC. */
    filename = strdup(filename);

    /* Collect twice: once into relatively high memory, and then back
     * into low memory. This compacts the retained data into the lower
     * pages, minimizing the size of the core file.
     */
    prepare_for_final_gc();
    gencgc_alloc_start_page = last_free_page;
    collect_garbage(HIGHEST_NORMAL_GENERATION+1);

    prepare_for_final_gc();
    gencgc_alloc_start_page = -1;
    collect_garbage(HIGHEST_NORMAL_GENERATION+1);

    if (prepend_runtime)
        save_runtime_to_filehandle(file, runtime_bytes, runtime_size,
                                   application_type);

    /* The dumper doesn't know that pages need to be zeroed before use. */
    zero_all_free_pages();
    save_to_filehandle(file, filename, SymbolValue(RESTART_LISP_FUNCTION,0),
                       prepend_runtime, save_runtime_options,
                       compressed ? compression_level : COMPRESSION_LEVEL_NONE);
    /* Oops. Save still managed to fail. Since we've mangled the stack
     * beyond hope, there's not much we can do.
     * (beyond FUNCALLing RESTART_LISP_FUNCTION, but I suspect that's
     * going to be rather unsatisfactory too... */
    lose("Attempt to save core after non-conservative GC failed.\n");
}