/*
 * GENerational Conservative Garbage Collector for SBCL
 */

/*
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 */
/*
 * For a review of garbage collection techniques (e.g. generational
 * GC) and terminology (e.g. "scavenging") see Paul R. Wilson,
 * "Uniprocessor Garbage Collection Techniques". As of 20000618, this
 * had been accepted for _ACM Computing Surveys_ and was available
 * as a PostScript preprint through
 *   <http://www.cs.utexas.edu/users/oops/papers.html>
 * as
 *   <ftp://ftp.cs.utexas.edu/pub/garbage/bigsurv.ps>.
 */
#include "interrupt.h"
#include "gc-internal.h"
#include "genesis/vector.h"
#include "genesis/weak-pointer.h"
#include "genesis/fdefn.h"
#include "genesis/simple-fun.h"
#include "genesis/hash-table.h"
#include "genesis/instance.h"
#include "genesis/layout.h"

#if defined(LUTEX_WIDETAG)
#include "pthread-lutex.h"
#endif
/* forward declarations */
page_index_t gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes,
                                   int page_type_flag);
/* Generations 0-5 are normal collected generations, 6 is only used as
 * scratch space by the collector, and should never get collected.
 */
enum {
    HIGHEST_NORMAL_GENERATION = 5,
    PSEUDO_STATIC_GENERATION,
    SCRATCH_GENERATION,
    NUM_GENERATIONS
};
/* Should we use page protection to help avoid the scavenging of pages
 * that don't have pointers to younger generations? */
boolean enable_page_protection = 1;

/* the minimum size (in bytes) for a large object */
long large_object_size = 4 * PAGE_BYTES;
/* the verbosity level. All non-error messages are disabled at level 0
 * and only a few rare messages are printed at level 1. */
#ifdef QSHOW
boolean gencgc_verbose = 1;
#else
boolean gencgc_verbose = 0;
#endif
/* FIXME: At some point enable the various error-checking things below
 * and see what they say. */

/* We hunt for pointers to old-space, when GCing generations >= verify_gens.
 * Set verify_gens to HIGHEST_NORMAL_GENERATION + 1 to disable this kind of
 * check. */
generation_index_t verify_gens = HIGHEST_NORMAL_GENERATION + 1;
/* Should we do a pre-scan verify of generation 0 before it's GCed? */
boolean pre_verify_gen_0 = 0;

/* Should we check for bad pointers after gc_free_heap is called
 * from Lisp PURIFY? */
boolean verify_after_free_heap = 0;

/* Should we print a note when code objects are found in the dynamic space
 * during a heap verify? */
boolean verify_dynamic_code_check = 0;

/* Should we check code objects for fixup errors after they are transported? */
boolean check_code_fixups = 0;

/* Should we check that newly allocated regions are zero filled? */
boolean gencgc_zero_check = 0;

/* Should we check that the free space is zero filled? */
boolean gencgc_enable_verify_zero_fill = 0;

/* Should we check that free pages are zero filled during gc_free_heap
 * called after Lisp PURIFY? */
boolean gencgc_zero_check_during_free_heap = 0;
/* When loading a core, don't do a full scan of the memory for the
 * memory region boundaries. (Set to true by coreparse.c if the core
 * contained a pagetable entry).
 */
boolean gencgc_partial_pickup = 0;

/* If defined, free pages are read-protected to ensure that nothing
 * accesses them.
 */

/* #define READ_PROTECT_FREE_PAGES */
/*
 * GC structures and variables
 */

/* the total bytes allocated. These are seen by Lisp DYNAMIC-USAGE. */
unsigned long bytes_allocated = 0;
unsigned long auto_gc_trigger = 0;

/* the source and destination generations. These are set before a GC starts
 * scavenging. */
generation_index_t from_space;
generation_index_t new_space;
/* Set to 1 when in GC */
boolean gc_active_p = 0;

/* should the GC be conservative on stack. If false (only right before
 * saving a core), don't scan the stack / mark pages dont_move. */
static boolean conservative_stack = 1;
/* An array of page structures is allocated on gc initialization.
 * This helps to quickly map between an address and its page structure.
 * page_table_pages is set from the size of the dynamic space. */
page_index_t page_table_pages;
struct page *page_table;
static inline boolean
page_allocated_p(page_index_t page) {
    return (page_table[page].allocated != FREE_PAGE_FLAG);
}

static inline boolean
page_no_region_p(page_index_t page) {
    return !(page_table[page].allocated & OPEN_REGION_PAGE_FLAG);
}

static inline boolean
page_allocated_no_region_p(page_index_t page) {
    return ((page_table[page].allocated & (UNBOXED_PAGE_FLAG | BOXED_PAGE_FLAG))
            && page_no_region_p(page));
}

static inline boolean
page_free_p(page_index_t page) {
    return (page_table[page].allocated == FREE_PAGE_FLAG);
}

static inline boolean
page_boxed_p(page_index_t page) {
    return (page_table[page].allocated & BOXED_PAGE_FLAG);
}

static inline boolean
code_page_p(page_index_t page) {
    return (page_table[page].allocated & CODE_PAGE_FLAG);
}

static inline boolean
page_boxed_no_region_p(page_index_t page) {
    return page_boxed_p(page) && page_no_region_p(page);
}

static inline boolean
page_unboxed_p(page_index_t page) {
    /* Both flags set == boxed code page */
    return ((page_table[page].allocated & UNBOXED_PAGE_FLAG)
            && !page_boxed_p(page));
}

static inline boolean
protect_page_p(page_index_t page, generation_index_t generation) {
    return (page_boxed_no_region_p(page)
            && (page_table[page].bytes_used != 0)
            && !page_table[page].dont_move
            && (page_table[page].gen == generation));
}
/* To map addresses to page structures the address of the first page
 * is needed. */
static void *heap_base = NULL;

/* Calculate the start address for the given page number. */
inline void *
page_address(page_index_t page_num)
{
    return (heap_base + (page_num * PAGE_BYTES));
}

/* Calculate the address where the allocation region associated with
 * the page starts. */
static inline void *
page_region_start(page_index_t page_index)
{
    return page_address(page_index)-page_table[page_index].region_start_offset;
}

/* Find the page index within the page_table for the given
 * address. Return -1 on failure. */
inline page_index_t
find_page_index(void *addr)
{
    if (addr >= heap_base) {
        page_index_t index = ((pointer_sized_uint_t)addr -
                              (pointer_sized_uint_t)heap_base) / PAGE_BYTES;
        if (index < page_table_pages)
            return (index);
    }
    return (-1);
}
static size_t
npage_bytes(long npages)
{
    gc_assert(npages>=0);
    return ((unsigned long)npages)*PAGE_BYTES;
}

/* Check that X is a higher address than Y and return offset from Y to
 * X. */
static inline
size_t void_diff(void *x, void *y)
{
    gc_assert(x >= y);
    return (pointer_sized_uint_t)x - (pointer_sized_uint_t)y;
}
/* a structure to hold the state of a generation */
struct generation {

    /* the first page that gc_alloc() checks on its next call */
    page_index_t alloc_start_page;

    /* the first page that gc_alloc_unboxed() checks on its next call */
    page_index_t alloc_unboxed_start_page;

    /* the first page that gc_alloc_large (boxed) considers on its next
     * call. (Although it always allocates after the boxed_region.) */
    page_index_t alloc_large_start_page;

    /* the first page that gc_alloc_large (unboxed) considers on its
     * next call. (Although it always allocates after the
     * current_unboxed_region.) */
    page_index_t alloc_large_unboxed_start_page;

    /* the bytes allocated to this generation */
    unsigned long bytes_allocated;

    /* the number of bytes at which to trigger a GC */
    unsigned long gc_trigger;

    /* to calculate a new level for gc_trigger */
    unsigned long bytes_consed_between_gc;

    /* the number of GCs since the last raise */
    int num_gc;

    /* the average age after which a GC will raise objects to the
     * next generation */
    int trigger_age;

    /* the cumulative sum of the bytes allocated to this generation. It is
     * cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation. Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC. */
    unsigned long cum_sum_bytes_allocated;

    /* a minimum average memory age before a GC will occur helps
     * prevent a GC when a large number of new live objects have been
     * added, in which case a GC could be a waste of time */
    double min_av_mem_age;

    /* A linked list of lutex structures in this generation, used for
     * implementing lutex finalization. */
#ifdef LUTEX_WIDETAG
    struct lutex *lutexes;
#else
    void *lutexes;
#endif
};
/* an array of generation structures. There needs to be one more
 * generation structure than actual generations as the oldest
 * generation is temporarily raised then lowered. */
struct generation generations[NUM_GENERATIONS];

/* the oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... HIGHEST_NORMAL_GENERATION
 *
 * The default of HIGHEST_NORMAL_GENERATION enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided. */
generation_index_t gencgc_oldest_gen_to_gc = HIGHEST_NORMAL_GENERATION;
/* The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the Lisp code. */
page_index_t last_free_page;

#ifdef LISP_FEATURE_SB_THREAD
/* This lock is to prevent multiple threads from simultaneously
 * allocating new regions which overlap each other. Note that the
 * majority of GC is single-threaded, but alloc() may be called from
 * >1 thread at a time and must be thread-safe. This lock must be
 * seized before all accesses to generations[] or to parts of
 * page_table[] that other threads may want to see */
static pthread_mutex_t free_pages_lock = PTHREAD_MUTEX_INITIALIZER;
/* This lock is used to protect non-thread-local allocation. */
static pthread_mutex_t allocation_lock = PTHREAD_MUTEX_INITIALIZER;
#endif
/*
 * miscellaneous heap functions
 */

/* Count the number of pages which are write-protected within the
 * given generation. */
static long
count_write_protect_generation_pages(generation_index_t generation)
{
    page_index_t i;
    unsigned long count = 0;

    for (i = 0; i < last_free_page; i++)
        if (page_allocated_p(i)
            && (page_table[i].gen == generation)
            && (page_table[i].write_protected == 1))
            count++;
    return count;
}

/* Count the number of pages within the given generation. */
static long
count_generation_pages(generation_index_t generation)
{
    page_index_t i;
    long count = 0;

    for (i = 0; i < last_free_page; i++)
        if (page_allocated_p(i)
            && (page_table[i].gen == generation))
            count++;
    return count;
}

#ifdef QSHOW
static long
count_dont_move_pages(void)
{
    page_index_t i;
    long count = 0;
    for (i = 0; i < last_free_page; i++) {
        if (page_allocated_p(i)
            && (page_table[i].dont_move != 0)) {
            count++;
        }
    }
    return count;
}
#endif /* QSHOW */
/* Work through the pages and add up the number of bytes used for the
 * given generation. */
static unsigned long
count_generation_bytes_allocated (generation_index_t gen)
{
    page_index_t i;
    unsigned long result = 0;
    for (i = 0; i < last_free_page; i++) {
        if (page_allocated_p(i)
            && (page_table[i].gen == gen))
            result += page_table[i].bytes_used;
    }
    return result;
}

/* Return the average age of the memory in a generation. */
static double
gen_av_mem_age(generation_index_t gen)
{
    if (generations[gen].bytes_allocated == 0)
        return 0.0;

    return
        ((double)generations[gen].cum_sum_bytes_allocated)
        / ((double)generations[gen].bytes_allocated);
}
/* The verbose argument controls how much to print: 0 for normal
 * level of detail; 1 for debugging. */
static void
print_generation_stats(int verbose) /* FIXME: should take FILE argument */
{
    generation_index_t i, gens;

#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
#define FPU_STATE_SIZE 27
    int fpu_state[FPU_STATE_SIZE];
#elif defined(LISP_FEATURE_PPC)
#define FPU_STATE_SIZE 32
    long long fpu_state[FPU_STATE_SIZE];
#endif

    /* This code uses the FP instructions which may be set up for Lisp
     * so they need to be saved and reset for C. */
    fpu_save(fpu_state);

    /* highest generation to print */
    if (verbose)
        gens = SCRATCH_GENERATION;
    else
        gens = PSEUDO_STATIC_GENERATION;

    /* Print the heap stats. */
    fprintf(stderr,
            "   Gen StaPg UbSta LaSta LUbSt Boxed Unboxed LB   LUB  !move  Alloc  Waste   Trig    WP  GCs Mem-age\n");

    for (i = 0; i < gens; i++) {
        page_index_t j;
        long boxed_cnt = 0;
        long unboxed_cnt = 0;
        long large_boxed_cnt = 0;
        long large_unboxed_cnt = 0;
        long pinned_cnt = 0;

        for (j = 0; j < last_free_page; j++)
            if (page_table[j].gen == i) {

                /* Count the number of boxed pages within the given
                 * generation. */
                if (page_boxed_p(j)) {
                    if (page_table[j].large_object)
                        large_boxed_cnt++;
                    else
                        boxed_cnt++;
                }
                if(page_table[j].dont_move) pinned_cnt++;
                /* Count the number of unboxed pages within the given
                 * generation. */
                if (page_unboxed_p(j)) {
                    if (page_table[j].large_object)
                        large_unboxed_cnt++;
                    else
                        unboxed_cnt++;
                }
            }

        gc_assert(generations[i].bytes_allocated
                  == count_generation_bytes_allocated(i));
        fprintf(stderr,
                "   %1d: %5ld %5ld %5ld %5ld %5ld %5ld %5ld %5ld %5ld %8ld %5ld %8ld %4ld %3d %7.4f\n",
                i,
                generations[i].alloc_start_page,
                generations[i].alloc_unboxed_start_page,
                generations[i].alloc_large_start_page,
                generations[i].alloc_large_unboxed_start_page,
                boxed_cnt,
                unboxed_cnt,
                large_boxed_cnt,
                large_unboxed_cnt,
                pinned_cnt,
                generations[i].bytes_allocated,
                (npage_bytes(count_generation_pages(i))
                 - generations[i].bytes_allocated),
                generations[i].gc_trigger,
                count_write_protect_generation_pages(i),
                generations[i].num_gc,
                gen_av_mem_age(i));
    }
    fprintf(stderr,"   Total bytes allocated=%ld\n", bytes_allocated);

    fpu_restore(fpu_state);
}
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
void fast_bzero(void*, size_t); /* in <arch>-assem.S */
#endif

/* Zero the pages from START to END (inclusive), but use mmap/munmap instead
 * of zeroing it ourselves, i.e. in practice give the memory back to the
 * OS. Generally done after a large GC.
 */
void zero_pages_with_mmap(page_index_t start, page_index_t end) {
    page_index_t i;
    void *addr = page_address(start), *new_addr;
    size_t length = npage_bytes(1+end-start);

    if (start > end)
      return;

    os_invalidate(addr, length);
    new_addr = os_validate(addr, length);
    if (new_addr == NULL || new_addr != addr) {
        lose("remap_free_pages: page moved, 0x%08x ==> 0x%08x",
             start, new_addr);
    }

    for (i = start; i <= end; i++) {
        page_table[i].need_to_zero = 0;
    }
}
/* Zero the pages from START to END (inclusive). Generally done just after
 * a new region has been allocated.
 */
static void
zero_pages(page_index_t start, page_index_t end) {
    if (start > end)
      return;

#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    fast_bzero(page_address(start), npage_bytes(1+end-start));
#else
    bzero(page_address(start), npage_bytes(1+end-start));
#endif
}
/* Zero the pages from START to END (inclusive), except for those
 * pages that are known to be already zeroed. Mark all pages in the
 * ranges as non-zeroed.
 */
static void
zero_dirty_pages(page_index_t start, page_index_t end) {
    page_index_t i;

    for (i = start; i <= end; i++) {
        if (page_table[i].need_to_zero == 1) {
            zero_pages(start, end);
            break;
        }
    }

    for (i = start; i <= end; i++) {
        page_table[i].need_to_zero = 1;
    }
}
/*
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties
 * e.g. boxed/unboxed, generation, ages; there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page. Many
 * features of memory use are noted on a page wise basis, e.g. the
 * generation; so if a region starts within an existing allocated page
 * it must be consistent with this page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region. It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list which jumps all over the place to cleanup the list. It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g. to detect pointers to the oldspace.
 * Thus it's important that the allocation regions have the correct
 * properties set when allocated, and not just set when closed. The
 * region allocation routines return regions with the specified
 * properties, and grab all the pages, setting their properties
 * appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer. The actual space used by the region is not reflected
 * in the page tables until it is closed. It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used, returning unused
 * space. Further it may be noted in the new regions, which is
 * necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region, the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects and so
 * don't need scavenging. Further they can't contain pointers to
 * younger generations so WP is not needed. By allocating pages to
 * unboxed objects the whole page never needs scavenging or
 * write-protecting. */
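
/* A minimal illustrative sketch (not used by the collector; the helper
 * name is made up) of the inline allocation this design enables: a
 * pointer bump plus a bounds check against the region's end address.
 * gc_alloc_with_region() below is the real version, which additionally
 * closes nearly-full regions and opens a new region on a miss. */
static inline void *
example_region_quick_alloc(struct alloc_region *region, long nbytes)
{
    void *new_free_pointer = region->free_pointer + nbytes;
    if (new_free_pointer <= region->end_addr) {
        /* Hit: allocate by bumping the free pointer. No locking and
         * no page table update is needed on this path. */
        void *new_obj = region->free_pointer;
        region->free_pointer = new_free_pointer;
        return new_obj;
    }
    /* Miss: the caller must close this region (updating the page
     * tables) and open a new one. */
    return NULL;
}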
/* We are only using two regions at present. Both are for the current
 * newspace generation. */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

/* The generation currently being allocated to. */
static generation_index_t gc_alloc_generation;
static inline page_index_t
generation_alloc_start_page(generation_index_t generation, int page_type_flag, int large)
{
    if (large) {
        if (UNBOXED_PAGE_FLAG == page_type_flag) {
            return generations[generation].alloc_large_unboxed_start_page;
        } else if (BOXED_PAGE_FLAG & page_type_flag) {
            /* Both code and data. */
            return generations[generation].alloc_large_start_page;
        } else {
            lose("bad page type flag: %d", page_type_flag);
        }
    } else {
        if (UNBOXED_PAGE_FLAG == page_type_flag) {
            return generations[generation].alloc_unboxed_start_page;
        } else if (BOXED_PAGE_FLAG & page_type_flag) {
            /* Both code and data. */
            return generations[generation].alloc_start_page;
        } else {
            lose("bad page_type_flag: %d", page_type_flag);
        }
    }
}

static inline void
set_generation_alloc_start_page(generation_index_t generation, int page_type_flag, int large,
                                page_index_t page)
{
    if (large) {
        if (UNBOXED_PAGE_FLAG == page_type_flag) {
            generations[generation].alloc_large_unboxed_start_page = page;
        } else if (BOXED_PAGE_FLAG & page_type_flag) {
            /* Both code and data. */
            generations[generation].alloc_large_start_page = page;
        } else {
            lose("bad page type flag: %d", page_type_flag);
        }
    } else {
        if (UNBOXED_PAGE_FLAG == page_type_flag) {
            generations[generation].alloc_unboxed_start_page = page;
        } else if (BOXED_PAGE_FLAG & page_type_flag) {
            /* Both code and data. */
            generations[generation].alloc_start_page = page;
        } else {
            lose("bad page type flag: %d", page_type_flag);
        }
    }
}
/* Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page. So
 * may pick up from the previous region if there is enough space. This
 * keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables(), and will thus be in an empty state.
 *
 * To assist the scavenging functions write-protected pages are not
 * used. Free pages should not be write-protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found to within the current
 * region and the page generation must be set so that pointers to the
 * from space can be recognized. Therefore the generation of pages in
 * the region are set to gc_alloc_generation. To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void
gc_alloc_new_region(long nbytes, int page_type_flag, struct alloc_region *alloc_region)
{
    page_index_t first_page;
    page_index_t last_page;
    unsigned long bytes_found;
    page_index_t i;
    int ret;

    /*
    FSHOW((stderr,
           "/alloc_new_region for %d bytes from gen %d\n",
           nbytes, gc_alloc_generation));
    */

    /* Check that the region is in a reset state. */
    gc_assert((alloc_region->first_page == 0)
              && (alloc_region->last_page == -1)
              && (alloc_region->free_pointer == alloc_region->end_addr));
    ret = thread_mutex_lock(&free_pages_lock);
    gc_assert(ret == 0);
    first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0);
    last_page = gc_find_freeish_pages(&first_page, nbytes, page_type_flag);
    bytes_found = (PAGE_BYTES - page_table[first_page].bytes_used)
            + npage_bytes(last_page-first_page);

    /* Set up the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
        + page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;

    /* Set up the pages. */

    /* The first page may have already been in use. */
    if (page_table[first_page].bytes_used == 0) {
        page_table[first_page].allocated = page_type_flag;
        page_table[first_page].gen = gc_alloc_generation;
        page_table[first_page].large_object = 0;
        page_table[first_page].region_start_offset = 0;
    }

    gc_assert(page_table[first_page].allocated == page_type_flag);
    page_table[first_page].allocated |= OPEN_REGION_PAGE_FLAG;

    gc_assert(page_table[first_page].gen == gc_alloc_generation);
    gc_assert(page_table[first_page].large_object == 0);

    for (i = first_page+1; i <= last_page; i++) {
        page_table[i].allocated = page_type_flag;
        page_table[i].gen = gc_alloc_generation;
        page_table[i].large_object = 0;
        /* This may not be necessary for unboxed regions (think it was
         * broken before!) */
        page_table[i].region_start_offset =
            void_diff(page_address(i), alloc_region->start_addr);
        page_table[i].allocated |= OPEN_REGION_PAGE_FLAG;
    }
    /* Bump up last_free_page. */
    if (last_page+1 > last_free_page) {
        last_free_page = last_page+1;
        /* do we only want to call this on special occasions? like for
         * boxed_region? */
        set_alloc_pointer((lispobj)page_address(last_free_page));
    }
    ret = thread_mutex_unlock(&free_pages_lock);
    gc_assert(ret == 0);

#ifdef READ_PROTECT_FREE_PAGES
    os_protect(page_address(first_page),
               npage_bytes(1+last_page-first_page),
               OS_VM_PROT_ALL);
#endif

    /* If the first page was only partial, don't check whether it's
     * zeroed (it won't be) and don't zero it (since the parts that
     * we're interested in are guaranteed to be zeroed).
     */
    if (page_table[first_page].bytes_used) {
        first_page++;
    }

    zero_dirty_pages(first_page, last_page);

    /* we can do this after releasing free_pages_lock */
    if (gencgc_zero_check) {
        long *p;
        for (p = (long *)alloc_region->start_addr;
             p < (long *)alloc_region->end_addr; p++) {
            if (*p != 0) {
                /* KLUDGE: It would be nice to use %lx and explicit casts
                 * (long) in code like this, so that it is less likely to
                 * break randomly when running on a machine with different
                 * word sizes. -- WHN 19991129 */
                lose("The new region at %x is not zero (start=%p, end=%p).\n",
                     p, alloc_region->start_addr, alloc_region->end_addr);
            }
        }
    }
}
/* If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then it is only recorded if the first page of the
 * current region is <= new_areas_ignore_page. This helps avoid
 * unnecessary recording when doing a full scavenge pass.
 *
 * The new_area structure holds the page, byte offset, and size of
 * new regions of objects. Each new area is placed in the array of
 * these structures pointed to by new_areas. new_areas_index holds the
 * offset into new_areas.
 *
 * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation. */
#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static page_index_t new_areas_ignore_page;
struct new_area {
    page_index_t page;
    size_t offset;
    size_t size;
};
static struct new_area (*new_areas)[];
static long new_areas_index;
long max_new_areas;
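
/* A minimal illustrative sketch (the helper name is made up) of how a
 * consumer walks these records: rescan just the recorded
 * (page, offset, size) extents instead of whole generations. This is
 * roughly what the newspace scavenging code does with new_areas,
 * assuming a scavenging callback over a word range. */
static inline void
example_walk_new_areas(void (*scav)(lispobj *start, long nwords))
{
    long i;
    for (i = 0; i < new_areas_index && i < NUM_NEW_AREAS; i++) {
        page_index_t page = (*new_areas)[i].page;
        size_t offset = (*new_areas)[i].offset;
        size_t size = (*new_areas)[i].size;
        /* Areas cover whole words; convert the byte size to words. */
        gc_assert(size % N_WORD_BYTES == 0);
        scav((lispobj *)(page_address(page) + offset), size / N_WORD_BYTES);
    }
}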
/* Add a new area to new_areas. */
static void
add_new_area(page_index_t first_page, size_t offset, size_t size)
{
    unsigned long new_area_start,c;
    long i;

    /* Ignore if full. */
    if (new_areas_index >= NUM_NEW_AREAS)
        return;

    switch (record_new_objects) {
    case 0:
        return;
    case 1:
        if (first_page > new_areas_ignore_page)
            return;
        break;
    case 2:
        break;
    default:
        gc_abort();
    }

    new_area_start = npage_bytes(first_page) + offset;

    /* Search backwards for a prior area that this follows from. If
       found this will save adding a new area. */
    for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) {
        unsigned long area_end =
            npage_bytes((*new_areas)[i].page)
            + (*new_areas)[i].offset
            + (*new_areas)[i].size;
        /*FSHOW((stderr,
               "/add_new_area S1 %d %d %d %d\n",
               i, c, new_area_start, area_end));*/
        if (new_area_start == area_end) {
            /*FSHOW((stderr,
                   "/adding to [%d] %d %d %d with %d %d %d:\n",
                   i,
                   (*new_areas)[i].page,
                   (*new_areas)[i].offset,
                   (*new_areas)[i].size,
                   first_page,
                   offset,
                   size);*/
            (*new_areas)[i].size += size;
            return;
        }
    }

    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
    /*FSHOW((stderr,
           "/new_area %d page %d offset %d size %d\n",
           new_areas_index, first_page, offset, size));*/
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
        max_new_areas = new_areas_index;
}
/* Update the tables for the alloc_region. The region may be added to
 * the new_areas.
 *
 * When done the alloc_region is set up so that the next quick alloc
 * will fail safely and thus a new region will be allocated. Further
 * it is safe to try to re-update the page table of this reset
 * alloc_region. */
void
gc_alloc_update_page_tables(int page_type_flag, struct alloc_region *alloc_region)
{
    int more;
    page_index_t first_page;
    page_index_t next_page;
    unsigned long bytes_used;
    unsigned long orig_first_page_bytes_used;
    unsigned long region_size;
    unsigned long byte_cnt;
    int ret;

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if ((first_page == 0) && (alloc_region->last_page == -1))
        return;

    next_page = first_page+1;

    ret = thread_mutex_lock(&free_pages_lock);
    gc_assert(ret == 0);
    if (alloc_region->free_pointer != alloc_region->start_addr) {
        /* some bytes were allocated in the region */
        orig_first_page_bytes_used = page_table[first_page].bytes_used;

        gc_assert(alloc_region->start_addr ==
                  (page_address(first_page)
                   + page_table[first_page].bytes_used));

        /* All the pages used need to be updated */

        /* Update the first page. */

        /* If the page was free then set up the gen, and
         * region_start_offset. */
        if (page_table[first_page].bytes_used == 0)
            gc_assert(page_table[first_page].region_start_offset == 0);
        page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);

        gc_assert(page_table[first_page].allocated & page_type_flag);
        gc_assert(page_table[first_page].gen == gc_alloc_generation);
        gc_assert(page_table[first_page].large_object == 0);

        byte_cnt = 0;

        /* Calculate the number of bytes used in this page. This is not
         * always the number of new bytes, unless it was free. */
        more = 0;
        if ((bytes_used = void_diff(alloc_region->free_pointer,
                                    page_address(first_page)))
            >PAGE_BYTES) {
            bytes_used = PAGE_BYTES;
            more = 1;
        }
        page_table[first_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        /* All the rest of the pages should be free. We need to set
         * their region_start_offset pointer to the start of the
         * region, and set the bytes_used. */
        while (more) {
            page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
            gc_assert(page_table[next_page].allocated & page_type_flag);
            gc_assert(page_table[next_page].bytes_used == 0);
            gc_assert(page_table[next_page].gen == gc_alloc_generation);
            gc_assert(page_table[next_page].large_object == 0);

            gc_assert(page_table[next_page].region_start_offset ==
                      void_diff(page_address(next_page),
                                alloc_region->start_addr));

            /* Calculate the number of bytes used in this page. */
            more = 0;
            if ((bytes_used = void_diff(alloc_region->free_pointer,
                                        page_address(next_page)))>PAGE_BYTES) {
                bytes_used = PAGE_BYTES;
                more = 1;
            }
            page_table[next_page].bytes_used = bytes_used;
            byte_cnt += bytes_used;

            next_page++;
        }

        region_size = void_diff(alloc_region->free_pointer,
                                alloc_region->start_addr);
        bytes_allocated += region_size;
        generations[gc_alloc_generation].bytes_allocated += region_size;

        gc_assert((byte_cnt - orig_first_page_bytes_used) == region_size);

        /* Set the generations alloc restart page to the last page of
         * the region. */
        set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0, next_page-1);

        /* Add the region to the new_areas if requested. */
        if (BOXED_PAGE_FLAG & page_type_flag)
            add_new_area(first_page,orig_first_page_bytes_used, region_size);

        /*
        FSHOW((stderr,
               "/gc_alloc_update_page_tables update %d bytes to gen %d\n",
               region_size,
               gc_alloc_generation));
        */
    } else {
        /* There are no bytes allocated. Unallocate the first_page if
         * there are 0 bytes_used. */
        page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
        if (page_table[first_page].bytes_used == 0)
            page_table[first_page].allocated = FREE_PAGE_FLAG;
    }

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
        gc_assert(page_table[next_page].bytes_used == 0);
        page_table[next_page].allocated = FREE_PAGE_FLAG;
        next_page++;
    }
    ret = thread_mutex_unlock(&free_pages_lock);
    gc_assert(ret == 0);

    /* alloc_region is per-thread, we're ok to do this unlocked */
    gc_set_region_empty(alloc_region);
}
static inline void *gc_quick_alloc(long nbytes);
/* Allocate a possibly large object. */
void *
gc_alloc_large(long nbytes, int page_type_flag, struct alloc_region *alloc_region)
{
    page_index_t first_page;
    page_index_t last_page;
    int orig_first_page_bytes_used;
    long byte_cnt;
    int more;
    unsigned long bytes_used;
    page_index_t next_page;
    int ret;

    ret = thread_mutex_lock(&free_pages_lock);
    gc_assert(ret == 0);

    first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 1);
    if (first_page <= alloc_region->last_page) {
        first_page = alloc_region->last_page+1;
    }

    last_page=gc_find_freeish_pages(&first_page,nbytes, page_type_flag);

    gc_assert(first_page > alloc_region->last_page);

    set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 1, last_page);

    /* Set up the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /* If the first page was free then set up the gen, and
     * region_start_offset. */
    if (page_table[first_page].bytes_used == 0) {
        page_table[first_page].allocated = page_type_flag;
        page_table[first_page].gen = gc_alloc_generation;
        page_table[first_page].region_start_offset = 0;
        page_table[first_page].large_object = 1;
    }

    gc_assert(page_table[first_page].allocated == page_type_flag);
    gc_assert(page_table[first_page].gen == gc_alloc_generation);
    gc_assert(page_table[first_page].large_object == 1);

    byte_cnt = 0;

    /* Calc. the number of bytes used in this page. This is not
     * always the number of new bytes, unless it was free. */
    more = 0;
    if ((bytes_used = nbytes+orig_first_page_bytes_used) > PAGE_BYTES) {
        bytes_used = PAGE_BYTES;
        more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page+1;

    /* All the rest of the pages should be free. We need to set their
     * region_start_offset pointer to the start of the region, and set
     * the bytes_used. */
    while (more) {
        gc_assert(page_free_p(next_page));
        gc_assert(page_table[next_page].bytes_used == 0);
        page_table[next_page].allocated = page_type_flag;
        page_table[next_page].gen = gc_alloc_generation;
        page_table[next_page].large_object = 1;

        page_table[next_page].region_start_offset =
            npage_bytes(next_page-first_page) - orig_first_page_bytes_used;

        /* Calculate the number of bytes used in this page. */
        more = 0;
        bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt;
        if (bytes_used > PAGE_BYTES) {
            bytes_used = PAGE_BYTES;
            more = 1;
        }
        page_table[next_page].bytes_used = bytes_used;
        page_table[next_page].write_protected=0;
        page_table[next_page].dont_move=0;
        byte_cnt += bytes_used;
        next_page++;
    }

    gc_assert((byte_cnt-orig_first_page_bytes_used) == nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (BOXED_PAGE_FLAG & page_type_flag)
        add_new_area(first_page,orig_first_page_bytes_used,nbytes);

    /* Bump up last_free_page */
    if (last_page+1 > last_free_page) {
        last_free_page = last_page+1;
        set_alloc_pointer((lispobj)(page_address(last_free_page)));
    }
    ret = thread_mutex_unlock(&free_pages_lock);
    gc_assert(ret == 0);

#ifdef READ_PROTECT_FREE_PAGES
    os_protect(page_address(first_page),
               npage_bytes(1+last_page-first_page),
               OS_VM_PROT_ALL);
#endif

    zero_dirty_pages(first_page, last_page);

    return page_address(first_page);
}
static page_index_t gencgc_alloc_start_page = -1;

void
gc_heap_exhausted_error_or_lose (long available, long requested)
{
    /* Write basic information before doing anything else: if we don't
     * call to lisp this is a must, and even if we do there is always
     * the danger that we bounce back here before the error has been
     * handled, or indeed even printed.
     */
    fprintf(stderr, "Heap exhausted during %s: %ld bytes available, %ld requested.\n",
            gc_active_p ? "garbage collection" : "allocation",
            available, requested);
    if (gc_active_p || (available == 0)) {
        /* If we are in GC, or totally out of memory there is no way
         * to sanely transfer control to the lisp-side of things.
         */
        struct thread *thread = arch_os_get_current_thread();
        print_generation_stats(1);
        fprintf(stderr, "GC control variables:\n");
        fprintf(stderr, "          *GC-INHIBIT* = %s\n          *GC-PENDING* = %s\n",
                SymbolValue(GC_INHIBIT,thread)==NIL ? "false" : "true",
                SymbolValue(GC_PENDING,thread)==NIL ? "false" : "true");
#ifdef LISP_FEATURE_SB_THREAD
        fprintf(stderr, " *STOP-FOR-GC-PENDING* = %s\n",
                SymbolValue(STOP_FOR_GC_PENDING,thread)==NIL ? "false" : "true");
#endif
        lose("Heap exhausted, game over.");
    }
    else {
        /* FIXME: assert free_pages_lock held */
        (void)thread_mutex_unlock(&free_pages_lock);
        funcall2(StaticSymbolFunction(HEAP_EXHAUSTED_ERROR),
                 alloc_number(available), alloc_number(requested));
        lose("HEAP-EXHAUSTED-ERROR fell through");
    }
}
page_index_t
gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int page_type_flag)
{
    page_index_t first_page, last_page;
    page_index_t restart_page = *restart_page_ptr;
    long bytes_found = 0;
    long most_bytes_found = 0;
    /* FIXME: assert(free_pages_lock is held); */

    /* Toggled by gc_and_save for heap compaction, normally -1. */
    if (gencgc_alloc_start_page != -1) {
        restart_page = gencgc_alloc_start_page;
    }

    if (nbytes>=PAGE_BYTES) {
        /* Search for a contiguous free space of at least nbytes,
         * aligned on a page boundary. The page-alignment is strictly
         * speaking needed only for objects at least large_object_size
         * bytes in size. */
        do {
            first_page = restart_page;
            while ((first_page < page_table_pages) &&
                   page_allocated_p(first_page))
                first_page++;

            last_page = first_page;
            bytes_found = PAGE_BYTES;
            while ((bytes_found < nbytes) &&
                   (last_page < (page_table_pages-1)) &&
                   page_free_p(last_page+1)) {
                last_page++;
                bytes_found += PAGE_BYTES;
                gc_assert(0 == page_table[last_page].bytes_used);
                gc_assert(0 == page_table[last_page].write_protected);
            }
            if (bytes_found > most_bytes_found)
                most_bytes_found = bytes_found;
            restart_page = last_page + 1;
        } while ((restart_page < page_table_pages) && (bytes_found < nbytes));

    } else {
        /* Search for a page with at least nbytes of space. We prefer
         * not to split small objects on multiple pages, to reduce the
         * number of contiguous allocation regions spanning multiple
         * pages: this helps avoid excessive conservatism. */
        first_page = restart_page;
        while (first_page < page_table_pages) {
            if (page_free_p(first_page))
                {
                    gc_assert(0 == page_table[first_page].bytes_used);
                    bytes_found = PAGE_BYTES;
                    break;
                }
            else if ((page_table[first_page].allocated == page_type_flag) &&
                     (page_table[first_page].large_object == 0) &&
                     (page_table[first_page].gen == gc_alloc_generation) &&
                     (page_table[first_page].write_protected == 0) &&
                     (page_table[first_page].dont_move == 0))
                {
                    bytes_found = PAGE_BYTES
                        - page_table[first_page].bytes_used;
                    if (bytes_found > most_bytes_found)
                        most_bytes_found = bytes_found;
                    if (bytes_found >= nbytes)
                        break;
                }
            first_page++;
        }
        last_page = first_page;
        restart_page = first_page + 1;
    }

    /* Check for a failure */
    if (bytes_found < nbytes) {
        gc_assert(restart_page >= page_table_pages);
        gc_heap_exhausted_error_or_lose(most_bytes_found, nbytes);
    }

    gc_assert(page_table[first_page].write_protected == 0);

    *restart_page_ptr = first_page;
    return last_page;
}
/* Allocate bytes. All the rest of the special-purpose allocation
 * functions will eventually call this */
void *
gc_alloc_with_region(long nbytes, int page_type_flag, struct alloc_region *my_region,
                     int quick_p)
{
    void *new_free_pointer;

    if (nbytes>=large_object_size)
        return gc_alloc_large(nbytes, page_type_flag, my_region);

    /* Check whether there is room in the current alloc region. */
    new_free_pointer = my_region->free_pointer + nbytes;

    /* fprintf(stderr, "alloc %d bytes from %p to %p\n", nbytes,
       my_region->free_pointer, new_free_pointer); */

    if (new_free_pointer <= my_region->end_addr) {
        /* If so then allocate from the current alloc region. */
        void *new_obj = my_region->free_pointer;
        my_region->free_pointer = new_free_pointer;

        /* Unless a `quick' alloc was requested, check whether the
           alloc region is almost empty. */
        if (!quick_p &&
            void_diff(my_region->end_addr,my_region->free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(page_type_flag, my_region);
            /* Set up a new region. */
            gc_alloc_new_region(32 /*bytes*/, page_type_flag, my_region);
        }

        return((void *)new_obj);
    }

    /* Else not enough free space in the current region: retry with a
     * new region. */

    gc_alloc_update_page_tables(page_type_flag, my_region);
    gc_alloc_new_region(nbytes, page_type_flag, my_region);
    return gc_alloc_with_region(nbytes, page_type_flag, my_region,0);
}
/* these are only used during GC: all allocation from the mutator calls
 * alloc() -> gc_alloc_with_region() with the appropriate per-thread
 * region */

static inline void *
gc_quick_alloc(long nbytes)
{
    return gc_general_alloc(nbytes, BOXED_PAGE_FLAG, ALLOC_QUICK);
}

static inline void *
gc_quick_alloc_large(long nbytes)
{
    return gc_general_alloc(nbytes, BOXED_PAGE_FLAG, ALLOC_QUICK);
}

static inline void *
gc_alloc_unboxed(long nbytes)
{
    return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, 0);
}

static inline void *
gc_quick_alloc_unboxed(long nbytes)
{
    return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, ALLOC_QUICK);
}

static inline void *
gc_quick_alloc_large_unboxed(long nbytes)
{
    return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, ALLOC_QUICK);
}
/* Copy a large boxed object. If the object is in a large object
 * region then it is simply promoted, else it is copied. If it's large
 * enough then it's copied to a large object region.
 *
 * Vectors may have shrunk. If the object is not copied the space
 * needs to be reclaimed, and the page_tables corrected. */
lispobj
copy_large_object(lispobj object, long nwords)
{
    int tag;
    lispobj *new;
    page_index_t first_page;

    gc_assert(is_lisp_pointer(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);


    /* Check whether it's in a large object region. */
    first_page = find_page_index((void *)object);
    gc_assert(first_page >= 0);

    if (page_table[first_page].large_object) {

        /* Promote the object. */

        unsigned long remaining_bytes;
        page_index_t next_page;
        unsigned long bytes_freed;
        unsigned long old_bytes_used;

        /* Note: Any page write-protection must be removed, else a
         * later scavenge_newspace may incorrectly not scavenge these
         * pages. This would not be necessary if they are added to the
         * new areas, but let's do it for them all (they'll probably
         * be written anyway?). */

        gc_assert(page_table[first_page].region_start_offset == 0);

        next_page = first_page;
        remaining_bytes = nwords*N_WORD_BYTES;
        while (remaining_bytes > PAGE_BYTES) {
            gc_assert(page_table[next_page].gen == from_space);
            gc_assert(page_boxed_p(next_page));
            gc_assert(page_table[next_page].large_object);
            gc_assert(page_table[next_page].region_start_offset ==
                      npage_bytes(next_page-first_page));
            gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);

            page_table[next_page].gen = new_space;

            /* Remove any write-protection. We should be able to rely
             * on the write-protect flag to avoid redundant calls. */
            if (page_table[next_page].write_protected) {
                os_protect(page_address(next_page), PAGE_BYTES, OS_VM_PROT_ALL);
                page_table[next_page].write_protected = 0;
            }
            remaining_bytes -= PAGE_BYTES;
            next_page++;
        }

        /* Now only one page remains, but the object may have shrunk
         * so there may be more unused pages which will be freed. */

        /* The object may have shrunk but shouldn't have grown. */
        gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

        page_table[next_page].gen = new_space;
        gc_assert(page_boxed_p(next_page));

        /* Adjust the bytes_used. */
        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].bytes_used = remaining_bytes;

        bytes_freed = old_bytes_used - remaining_bytes;

        /* Free any remaining pages; needs care. */
        next_page++;
        while ((old_bytes_used == PAGE_BYTES) &&
               (page_table[next_page].gen == from_space) &&
               page_boxed_p(next_page) &&
               page_table[next_page].large_object &&
               (page_table[next_page].region_start_offset ==
                npage_bytes(next_page - first_page))) {
            /* Checks out OK, free the page. Don't need to bother zeroing
             * pages as this should have been done before shrinking the
             * object. These pages shouldn't be write-protected as they
             * should be zero filled. */
            gc_assert(page_table[next_page].write_protected == 0);

            old_bytes_used = page_table[next_page].bytes_used;
            page_table[next_page].allocated = FREE_PAGE_FLAG;
            page_table[next_page].bytes_used = 0;
            bytes_freed += old_bytes_used;
            next_page++;
        }

        generations[from_space].bytes_allocated -= N_WORD_BYTES*nwords
            + bytes_freed;
        generations[new_space].bytes_allocated += N_WORD_BYTES*nwords;
        bytes_allocated -= bytes_freed;

        /* Add the region to the new_areas if requested. */
        add_new_area(first_page,0,nwords*N_WORD_BYTES);

        return(object);
    } else {
        /* Get tag of object. */
        tag = lowtag_of(object);

        /* Allocate space. */
        new = gc_quick_alloc_large(nwords*N_WORD_BYTES);

        memcpy(new,native_pointer(object),nwords*N_WORD_BYTES);

        /* Return Lisp pointer of new object. */
        return ((lispobj) new) | tag;
    }
}
/* to copy unboxed objects */
lispobj
copy_unboxed_object(lispobj object, long nwords)
{
    long tag;
    lispobj *new;

    gc_assert(is_lisp_pointer(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    /* Get tag of object. */
    tag = lowtag_of(object);

    /* Allocate space. */
    new = gc_quick_alloc_unboxed(nwords*N_WORD_BYTES);

    memcpy(new,native_pointer(object),nwords*N_WORD_BYTES);

    /* Return Lisp pointer of new object. */
    return ((lispobj) new) | tag;
}
/* to copy large unboxed objects
 *
 * If the object is in a large object region then it is simply
 * promoted, else it is copied. If it's large enough then it's copied
 * to a large object region.
 *
 * Bignums and vectors may have shrunk. If the object is not copied
 * the space needs to be reclaimed, and the page_tables corrected.
 *
 * KLUDGE: There's a lot of cut-and-paste duplication between this
 * function and copy_large_object(..). -- WHN 20000619 */
lispobj
copy_large_unboxed_object(lispobj object, long nwords)
{
    int tag;
    lispobj *new;
    page_index_t first_page;

    gc_assert(is_lisp_pointer(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    if ((nwords > 1024*1024) && gencgc_verbose)
        FSHOW((stderr, "/copy_large_unboxed_object: %d bytes\n",
               nwords*N_WORD_BYTES));

    /* Check whether it's a large object. */
    first_page = find_page_index((void *)object);
    gc_assert(first_page >= 0);

    if (page_table[first_page].large_object) {
        /* Promote the object. Note: Unboxed objects may have been
         * allocated to a BOXED region so it may be necessary to
         * change the region to UNBOXED. */
        unsigned long remaining_bytes;
        page_index_t next_page;
        unsigned long bytes_freed;
        unsigned long old_bytes_used;

        gc_assert(page_table[first_page].region_start_offset == 0);

        next_page = first_page;
        remaining_bytes = nwords*N_WORD_BYTES;
        while (remaining_bytes > PAGE_BYTES) {
            gc_assert(page_table[next_page].gen == from_space);
            gc_assert(page_allocated_no_region_p(next_page));
            gc_assert(page_table[next_page].large_object);
            gc_assert(page_table[next_page].region_start_offset ==
                      npage_bytes(next_page-first_page));
            gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);

            page_table[next_page].gen = new_space;
            page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
            remaining_bytes -= PAGE_BYTES;
            next_page++;
        }

        /* Now only one page remains, but the object may have shrunk so
         * there may be more unused pages which will be freed. */

        /* Object may have shrunk but shouldn't have grown - check. */
        gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

        page_table[next_page].gen = new_space;
        page_table[next_page].allocated = UNBOXED_PAGE_FLAG;

        /* Adjust the bytes_used. */
        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].bytes_used = remaining_bytes;

        bytes_freed = old_bytes_used - remaining_bytes;

        /* Free any remaining pages; needs care. */
        next_page++;
        while ((old_bytes_used == PAGE_BYTES) &&
               (page_table[next_page].gen == from_space) &&
               page_allocated_no_region_p(next_page) &&
               page_table[next_page].large_object &&
               (page_table[next_page].region_start_offset ==
                npage_bytes(next_page - first_page))) {
            /* Checks out OK, free the page. Don't need to bother zeroing
             * pages as this should have been done before shrinking the
             * object. These pages shouldn't be write-protected, even if
             * boxed they should be zero filled. */
            gc_assert(page_table[next_page].write_protected == 0);

            old_bytes_used = page_table[next_page].bytes_used;
            page_table[next_page].allocated = FREE_PAGE_FLAG;
            page_table[next_page].bytes_used = 0;
            bytes_freed += old_bytes_used;
            next_page++;
        }

        if ((bytes_freed > 0) && gencgc_verbose)
            FSHOW((stderr,
                   "/copy_large_unboxed bytes_freed=%d\n",
                   bytes_freed));

        generations[from_space].bytes_allocated -=
            nwords*N_WORD_BYTES + bytes_freed;
        generations[new_space].bytes_allocated += nwords*N_WORD_BYTES;
        bytes_allocated -= bytes_freed;

        return(object);
    }
    else {
        /* Get tag of object. */
        tag = lowtag_of(object);

        /* Allocate space. */
        new = gc_quick_alloc_large_unboxed(nwords*N_WORD_BYTES);

        /* Copy the object. */
        memcpy(new,native_pointer(object),nwords*N_WORD_BYTES);

        /* Return Lisp pointer of new object. */
        return ((lispobj) new) | tag;
    }
}
/*
 * code and code-related objects
 */
/*
static lispobj trans_fun_header(lispobj object);
static lispobj trans_boxed(lispobj object);
*/
/* Scan an x86 compiled code object, looking for possible fixups that
 * have been missed after a move.
 *
 * Two types of fixups are needed:
 * 1. Absolute fixups to within the code object.
 * 2. Relative fixups to outside the code object.
 *
 * Currently only absolute fixups to the constant vector, or to the
 * code area are checked. */
void
sniff_code_object(struct code *code, unsigned long displacement)
{
#ifdef LISP_FEATURE_X86
    long nheader_words, ncode_words, nwords;
    void *p;
    void *constants_start_addr = NULL, *constants_end_addr;
    void *code_start_addr, *code_end_addr;
    int fixup_found = 0;

    if (!check_code_fixups)
        return;

    FSHOW((stderr, "/sniffing code: %p, %lu\n", code, displacement));

    ncode_words = fixnum_value(code->code_size);
    nheader_words = HeaderValue(*(lispobj *)code);
    nwords = ncode_words + nheader_words;

    constants_start_addr = (void *)code + 5*N_WORD_BYTES;
    constants_end_addr = (void *)code + nheader_words*N_WORD_BYTES;
    code_start_addr = (void *)code + nheader_words*N_WORD_BYTES;
    code_end_addr = (void *)code + nwords*N_WORD_BYTES;

    /* Work through the unboxed code. */
    for (p = code_start_addr; p < code_end_addr; p++) {
        void *data = *(void **)p;
        unsigned d1 = *((unsigned char *)p - 1);
        unsigned d2 = *((unsigned char *)p - 2);
        unsigned d3 = *((unsigned char *)p - 3);
        unsigned d4 = *((unsigned char *)p - 4);
#ifdef QSHOW
        unsigned d5 = *((unsigned char *)p - 5);
        unsigned d6 = *((unsigned char *)p - 6);
#endif

        /* Check for code references. */
        /* Check for a 32 bit word that looks like an absolute
           reference to within the code area of the code object. */
        if ((data >= (code_start_addr-displacement))
            && (data < (code_end_addr-displacement))) {
            /* function header */
            if ((d4 == 0x5e)
                && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) ==
                    (unsigned)code)) {
                /* Skip the function header */
                p += 6*4 - 4 - 1;
                continue;
            }
            /* the case of PUSH imm32 */
            if (d1 == 0x68) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/PUSH $0x%.8x\n", data));
            }
            /* the case of MOV [reg-8],imm32 */
            if ((d3 == 0xc7)
                && (d2==0x40 || d2==0x41 || d2==0x42 || d2==0x43
                    || d2==0x45 || d2==0x46 || d2==0x47)
                && (d1 == 0xf8)) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/MOV [reg-8],$0x%.8x\n", data));
            }
            /* the case of LEA reg,[disp32] */
            if ((d2 == 0x8d) && ((d1 & 0xc7) == 5)) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr,"/LEA reg,[$0x%.8x]\n", data));
            }
        }

        /* Check for constant references. */
        /* Check for a 32 bit word that looks like an absolute
           reference to within the constant vector. Constant references
           will be aligned. */
        if ((data >= (constants_start_addr-displacement))
            && (data < (constants_end_addr-displacement))
            && (((unsigned)data & 0x3) == 0)) {
            /*  Mov eax,m32 */
            if (d1 == 0xa1) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr,"/MOV eax,0x%.8x\n", data));
            }

            /*  the case of MOV m32,EAX */
            if (d1 == 0xa3) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/MOV 0x%.8x,eax\n", data));
            }

            /* the case of CMP m32,imm32 */
            if ((d1 == 0x3d) && (d2 == 0x81)) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                /* XX Check this */
                FSHOW((stderr, "/CMP 0x%.8x,immed32\n", data));
            }

            /* Check for a mod=00, r/m=101 byte. */
            if ((d1 & 0xc7) == 5) {
                /* Cmp m32,reg */
                if (d2 == 0x39) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr,"/CMP 0x%.8x,reg\n", data));
                }
                /* the case of CMP reg32,m32 */
                if (d2 == 0x3b) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/CMP reg32,0x%.8x\n", data));
                }
                /* the case of MOV m32,reg32 */
                if (d2 == 0x89) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/MOV 0x%.8x,reg32\n", data));
                }
                /* the case of MOV reg32,m32 */
                if (d2 == 0x8b) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/MOV reg32,0x%.8x\n", data));
                }
                /* the case of LEA reg32,m32 */
                if (d2 == 0x8d) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/LEA reg32,0x%.8x\n", data));
                }
            }
        }
    }

    /* If anything was found, print some information on the code
     * object. */
    if (fixup_found) {
        FSHOW((stderr,
               "/compiled code object at %x: header words = %d, code words = %d\n",
               code, nheader_words, ncode_words));
        FSHOW((stderr,
               "/const start = %x, end = %x\n",
               constants_start_addr, constants_end_addr));
        FSHOW((stderr,
               "/code start = %x, end = %x\n",
               code_start_addr, code_end_addr));
    }
#endif
}
void
gencgc_apply_code_fixups(struct code *old_code, struct code *new_code)
{
/* x86-64 uses pc-relative addressing instead of this kludge */
#ifndef LISP_FEATURE_X86_64
    long nheader_words, ncode_words, nwords;
    void *constants_start_addr, *constants_end_addr;
    void *code_start_addr, *code_end_addr;
    lispobj fixups = NIL;
    unsigned long displacement =
        (unsigned long)new_code - (unsigned long)old_code;
    struct vector *fixups_vector;

    ncode_words = fixnum_value(new_code->code_size);
    nheader_words = HeaderValue(*(lispobj *)new_code);
    nwords = ncode_words + nheader_words;
    /* FSHOW((stderr,
             "/compiled code object at %x: header words = %d, code words = %d\n",
             new_code, nheader_words, ncode_words)); */
    constants_start_addr = (void *)new_code + 5*N_WORD_BYTES;
    constants_end_addr = (void *)new_code + nheader_words*N_WORD_BYTES;
    code_start_addr = (void *)new_code + nheader_words*N_WORD_BYTES;
    code_end_addr = (void *)new_code + nwords*N_WORD_BYTES;
    /* FSHOW((stderr,
             "/const start = %x, end = %x\n",
             constants_start_addr, constants_end_addr));
       FSHOW((stderr,
             "/code start = %x; end = %x\n",
             code_start_addr, code_end_addr)); */

    /* The first constant should be a pointer to the fixups for this
       code object. Check. */
    fixups = new_code->constants[0];

    /* It will be 0 or the unbound-marker if there are no fixups (as
     * will be the case if the code object has been purified, for
     * example) and will be an other-pointer if it is valid. */
    if ((fixups == 0) || (fixups == UNBOUND_MARKER_WIDETAG) ||
        !is_lisp_pointer(fixups)) {
        /* Check for possible errors. */
        if (check_code_fixups)
            sniff_code_object(new_code, displacement);

        return;
    }

    fixups_vector = (struct vector *)native_pointer(fixups);

    /* Could be pointing to a forwarding pointer. */
    /* FIXME is this always in from_space?  if so, could replace this code with
     * forwarding_pointer_p/forwarding_pointer_value */
    if (is_lisp_pointer(fixups) &&
        (find_page_index((void*)fixups_vector) != -1) &&
        (fixups_vector->header == 0x01)) {
        /* If so, then follow it. */
        /*SHOW("following pointer to a forwarding pointer");*/
        fixups_vector =
            (struct vector *)native_pointer((lispobj)fixups_vector->length);
    }

    /*SHOW("got fixups");*/

    if (widetag_of(fixups_vector->header) == SIMPLE_ARRAY_WORD_WIDETAG) {
        /* Got the fixups for the code block. Now work through the vector,
           and apply a fixup at each address. */
        long length = fixnum_value(fixups_vector->length);
        long i;
        for (i = 0; i < length; i++) {
            unsigned long offset = fixups_vector->data[i];
            /* Now check the current value of offset. */
            unsigned long old_value =
                *(unsigned long *)((unsigned long)code_start_addr + offset);

            /* If it's within the old_code object then it must be an
             * absolute fixup (relative ones are not saved) */
            if ((old_value >= (unsigned long)old_code)
                && (old_value < ((unsigned long)old_code
                                 + nwords*N_WORD_BYTES)))
                /* So add the displacement. */
                *(unsigned long *)((unsigned long)code_start_addr + offset) =
                    old_value + displacement;
            else
                /* It is outside the old code object so it must be a
                 * relative fixup (absolute fixups are not saved). So
                 * subtract the displacement. */
                *(unsigned long *)((unsigned long)code_start_addr + offset) =
                    old_value - displacement;
        }
    } else {
        /* This used to just print a note to stderr, but a bogus fixup seems to
         * indicate real heap corruption, so a hard failure is in order. */
        lose("fixup vector %p has a bad widetag: %d\n",
             fixups_vector, widetag_of(fixups_vector->header));
    }

    /* Check for possible errors. */
    if (check_code_fixups) {
        sniff_code_object(new_code, displacement);
    }
#endif
}

static lispobj
trans_boxed_large(lispobj object)
{
    lispobj header;
    unsigned long length;

    gc_assert(is_lisp_pointer(object));

    header = *((lispobj *) native_pointer(object));
    length = HeaderValue(header) + 1;
    length = CEILING(length, 2);

    return copy_large_object(object, length);
}

/* Doesn't seem to be used, delete it after the grace period. */
static lispobj
trans_unboxed_large(lispobj object)
{
    lispobj header;
    unsigned long length;

    gc_assert(is_lisp_pointer(object));

    header = *((lispobj *) native_pointer(object));
    length = HeaderValue(header) + 1;
    length = CEILING(length, 2);

    return copy_large_unboxed_object(object, length);
}

/*
 * Lutexes. Using the normal finalization machinery for finalizing
 * lutexes is tricky, since the finalization depends on working lutexes.
 * So we track the lutexes in the GC and finalize them manually.
 */

#if defined(LUTEX_WIDETAG)

/*
 * Start tracking LUTEX in the GC, by adding it to the linked list of
 * lutexes in the nursery generation. The caller is responsible for
 * locking, and GCs must be inhibited until the registration is
 * complete.
 */
void
gencgc_register_lutex (struct lutex *lutex) {
    int index = find_page_index(lutex);
    generation_index_t gen;
    struct lutex *head;

    /* This lutex is in static space, so we don't need to worry about
     * collecting it.
     */
    if (index == -1)
        return;

    gen = page_table[index].gen;

    gc_assert(gen >= 0);
    gc_assert(gen < NUM_GENERATIONS);

    head = generations[gen].lutexes;

    lutex->gen = gen;
    lutex->next = head;
    lutex->prev = NULL;
    if (head)
        head->prev = lutex;
    generations[gen].lutexes = lutex;
}

/*
 * Stop tracking LUTEX in the GC by removing it from the appropriate
 * linked lists. This will only be called during GC, so no locking is
 * needed.
 */
void
gencgc_unregister_lutex (struct lutex *lutex) {
    if (lutex->prev) {
        lutex->prev->next = lutex->next;
    } else {
        generations[lutex->gen].lutexes = lutex->next;
    }

    if (lutex->next) {
        lutex->next->prev = lutex->prev;
    }

    lutex->next = NULL;
    lutex->prev = NULL;
    lutex->gen = -1;
}

/*
 * Mark all lutexes in generation GEN as not live.
 */
static void
unmark_lutexes (generation_index_t gen) {
    struct lutex *lutex = generations[gen].lutexes;

    while (lutex) {
        lutex->live = 0;
        lutex = lutex->next;
    }
}

/*
 * Finalize all lutexes in generation GEN that have not been marked live.
 */
static void
reap_lutexes (generation_index_t gen) {
    struct lutex *lutex = generations[gen].lutexes;

    while (lutex) {
        struct lutex *next = lutex->next;
        if (!lutex->live) {
            lutex_destroy((tagged_lutex_t) lutex);
            gencgc_unregister_lutex(lutex);
        }
        lutex = next;
    }
}

/*
 * Mark LUTEX as live.
 */
static void
mark_lutex (lispobj tagged_lutex) {
    struct lutex *lutex = (struct lutex *) native_pointer(tagged_lutex);

    lutex->live = 1;
}

/*
 * Move all lutexes in generation FROM to generation TO.
 */
static void
move_lutexes (generation_index_t from, generation_index_t to) {
    struct lutex *tail = generations[from].lutexes;

    /* Nothing to move */
    if (!tail)
        return;

    /* Change the generation of the lutexes in FROM. */
    while (tail->next) {
        tail->gen = to;
        tail = tail->next;
    }
    tail->gen = to;

    /* Link the last lutex in the FROM list to the start of the TO list */
    tail->next = generations[to].lutexes;

    /* And vice versa */
    if (generations[to].lutexes) {
        generations[to].lutexes->prev = tail;
    }

    /* And update the generations structures to match this */
    generations[to].lutexes = generations[from].lutexes;
    generations[from].lutexes = NULL;
}

static long
scav_lutex(lispobj *where, lispobj object)
{
    mark_lutex((lispobj) where);

    return CEILING(sizeof(struct lutex)/sizeof(lispobj), 2);
}

static lispobj
trans_lutex(lispobj object)
{
    struct lutex *lutex = (struct lutex *) native_pointer(object);
    lispobj copied;
    size_t words = CEILING(sizeof(struct lutex)/sizeof(lispobj), 2);
    gc_assert(is_lisp_pointer(object));
    copied = copy_object(object, words);

    /* Update the links, since the lutex moved in memory. */
    if (lutex->next) {
        lutex->next->prev = (struct lutex *) native_pointer(copied);
    }

    if (lutex->prev) {
        lutex->prev->next = (struct lutex *) native_pointer(copied);
    } else {
        generations[lutex->gen].lutexes =
            (struct lutex *) native_pointer(copied);
    }

    return copied;
}

static long
size_lutex(lispobj *where)
{
    return CEILING(sizeof(struct lutex)/sizeof(lispobj), 2);
}
#endif /* LUTEX_WIDETAG */
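
/* Illustrative sketch (not compiled): the registration and transport
 * code above maintains one doubly-linked lutex list per generation.
 * A consistency check over those lists could look like this;
 * 'check_lutex_lists' is hypothetical and not part of the collector. */
#if 0
static void
check_lutex_lists(void)
{
    generation_index_t gen;
    for (gen = 0; gen < NUM_GENERATIONS; gen++) {
        struct lutex *lutex = generations[gen].lutexes;
        struct lutex *prev = NULL;
        while (lutex) {
            gc_assert(lutex->gen == gen);
            gc_assert(lutex->prev == prev);
            prev = lutex;
            lutex = lutex->next;
        }
    }
}
#endif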

/*
 * weak pointers
 */

/* XX This is a hack adapted from cgc.c. These don't work too
 * efficiently with the gencgc as a list of the weak pointers is
 * maintained within the objects which causes writes to the pages. A
 * limited attempt is made to avoid unnecessary writes, but this needs
 * a re-think. */
#define WEAK_POINTER_NWORDS \
    CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)

static long
scav_weak_pointer(lispobj *where, lispobj object)
{
    /* Since we overwrite the 'next' field, we have to make
     * sure not to do so for pointers already in the list.
     * Instead of searching the list of weak_pointers each
     * time, we ensure that next is always NULL when the weak
     * pointer isn't in the list, and not NULL otherwise.
     * Since we can't use NULL to denote end of list, we
     * use a pointer back to the same weak_pointer.
     */
    struct weak_pointer * wp = (struct weak_pointer*)where;

    if (NULL == wp->next) {
        wp->next = weak_pointers;
        weak_pointers = wp;
        if (NULL == wp->next)
            wp->next = wp;
    }

    /* Do not let GC scavenge the value slot of the weak pointer.
     * (That is why it is a weak pointer.) */

    return WEAK_POINTER_NWORDS;
}
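
/* Illustrative sketch (not compiled): because a NULL next field means
 * "not on the list", the chain built above marks its end with a weak
 * pointer whose next field points to itself. A walk over the chain
 * therefore terminates as below; 'visit' is a hypothetical callback. */
#if 0
static void
walk_weak_pointers_sketch(void (*visit)(struct weak_pointer *))
{
    struct weak_pointer *wp = weak_pointers;
    while (wp != NULL) {
        struct weak_pointer *next = wp->next;
        visit(wp);
        if (next == wp)   /* the self-pointer marks the end of the list */
            break;
        wp = next;
    }
}
#endif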

lispobj *
search_read_only_space(void *pointer)
{
    lispobj *start = (lispobj *) READ_ONLY_SPACE_START;
    lispobj *end = (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0);
    if ((pointer < (void *)start) || (pointer >= (void *)end))
        return NULL;
    return (gc_search_space(start,
                            (((lispobj *)pointer)+2)-start,
                            (lispobj *) pointer));
}

lispobj *
search_static_space(void *pointer)
{
    lispobj *start = (lispobj *)STATIC_SPACE_START;
    lispobj *end = (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0);
    if ((pointer < (void *)start) || (pointer >= (void *)end))
        return NULL;
    return (gc_search_space(start,
                            (((lispobj *)pointer)+2)-start,
                            (lispobj *) pointer));
}

/* a faster version for searching the dynamic space. This will work even
 * if the object is in a current allocation region. */
lispobj *
search_dynamic_space(void *pointer)
{
    page_index_t page_index = find_page_index(pointer);
    lispobj *start;

    /* The address may be invalid, so do some checks. */
    if ((page_index == -1) || page_free_p(page_index))
        return NULL;
    start = (lispobj *)page_region_start(page_index);
    return (gc_search_space(start,
                            (((lispobj *)pointer)+2)-start,
                            (lispobj *)pointer));
}
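
/* Illustrative sketch (not compiled): the three searchers above share
 * one contract - map an arbitrary address to the start of the
 * enclosing object, or NULL if the address is not in that space. A
 * caller that doesn't care which space could chain them;
 * 'enclosing_object_sketch' is hypothetical. */
#if 0
static lispobj *
enclosing_object_sketch(void *pointer)
{
    lispobj *start;
    if ((start = search_dynamic_space(pointer)) != NULL)
        return start;
    if ((start = search_static_space(pointer)) != NULL)
        return start;
    return search_read_only_space(pointer);
}
#endif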

#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)

/* Helper for valid_lisp_pointer_p and
 * possibly_valid_dynamic_space_pointer.
 *
 * pointer is the pointer to validate, and start_addr is the address
 * of the enclosing object.
 */
static int
looks_like_valid_lisp_pointer_p(lispobj *pointer, lispobj *start_addr)
{
    if (!is_lisp_pointer((lispobj)pointer)) {
        return 0;
    }

    /* Check that the object pointed to is consistent with the pointer
     * low tag. */
    switch (lowtag_of((lispobj)pointer)) {
    case FUN_POINTER_LOWTAG:
        /* Start_addr should be the enclosing code object, or a closure
         * header. */
        switch (widetag_of(*start_addr)) {
        case CODE_HEADER_WIDETAG:
            /* This case is probably caught above. */
            break;
        case CLOSURE_HEADER_WIDETAG:
        case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
            if ((unsigned long)pointer !=
                ((unsigned long)start_addr+FUN_POINTER_LOWTAG)) {
                if (gencgc_verbose)
                    FSHOW((stderr,
                           "/Wf2: %x %x %x\n",
                           pointer, start_addr, *start_addr));
                return 0;
            }
            break;
        default:
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wf3: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
        break;
    case LIST_POINTER_LOWTAG:
        if ((unsigned long)pointer !=
            ((unsigned long)start_addr+LIST_POINTER_LOWTAG)) {
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wl1: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
        /* Is it a plausible cons? */
        if ((is_lisp_pointer(start_addr[0]) ||
             is_lisp_immediate(start_addr[0])) &&
            (is_lisp_pointer(start_addr[1]) ||
             is_lisp_immediate(start_addr[1])))
            break;
        else {
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wl2: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
    case INSTANCE_POINTER_LOWTAG:
        if ((unsigned long)pointer !=
            ((unsigned long)start_addr+INSTANCE_POINTER_LOWTAG)) {
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wi1: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
        if (widetag_of(start_addr[0]) != INSTANCE_HEADER_WIDETAG) {
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wi2: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
        break;
    case OTHER_POINTER_LOWTAG:
        if ((unsigned long)pointer !=
            ((unsigned long)start_addr+OTHER_POINTER_LOWTAG)) {
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wo1: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
        /* Is it plausible? Not a cons. XXX should check the headers. */
        if (is_lisp_pointer(start_addr[0]) || ((start_addr[0] & 3) == 0)) {
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wo2: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
        switch (widetag_of(start_addr[0])) {
        case UNBOUND_MARKER_WIDETAG:
        case NO_TLS_VALUE_MARKER_WIDETAG:
        case CHARACTER_WIDETAG:
#if N_WORD_BITS == 64
        case SINGLE_FLOAT_WIDETAG:
#endif
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wo3: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;

            /* only pointed to by function pointers? */
        case CLOSURE_HEADER_WIDETAG:
        case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wo4: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;

        case INSTANCE_HEADER_WIDETAG:
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wo5: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;

            /* the valid other immediate pointer objects */
        case SIMPLE_VECTOR_WIDETAG:
        case RATIO_WIDETAG:
        case COMPLEX_WIDETAG:
#ifdef COMPLEX_SINGLE_FLOAT_WIDETAG
        case COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_DOUBLE_FLOAT_WIDETAG
        case COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_LONG_FLOAT_WIDETAG
        case COMPLEX_LONG_FLOAT_WIDETAG:
#endif
        case SIMPLE_ARRAY_WIDETAG:
        case COMPLEX_BASE_STRING_WIDETAG:
#ifdef COMPLEX_CHARACTER_STRING_WIDETAG
        case COMPLEX_CHARACTER_STRING_WIDETAG:
#endif
        case COMPLEX_VECTOR_NIL_WIDETAG:
        case COMPLEX_BIT_VECTOR_WIDETAG:
        case COMPLEX_VECTOR_WIDETAG:
        case COMPLEX_ARRAY_WIDETAG:
        case VALUE_CELL_HEADER_WIDETAG:
        case SYMBOL_HEADER_WIDETAG:
        case FDEFN_WIDETAG:
        case CODE_HEADER_WIDETAG:
        case BIGNUM_WIDETAG:
#if N_WORD_BITS != 64
        case SINGLE_FLOAT_WIDETAG:
#endif
        case DOUBLE_FLOAT_WIDETAG:
#ifdef LONG_FLOAT_WIDETAG
        case LONG_FLOAT_WIDETAG:
#endif
        case SIMPLE_BASE_STRING_WIDETAG:
#ifdef SIMPLE_CHARACTER_STRING_WIDETAG
        case SIMPLE_CHARACTER_STRING_WIDETAG:
#endif
        case SIMPLE_BIT_VECTOR_WIDETAG:
        case SIMPLE_ARRAY_NIL_WIDETAG:
        case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
        case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
        case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG:
        case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
        case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
        case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG
        case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
#endif
        case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
        case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG
        case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
        case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
        case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
        case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
        case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG
        case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
        case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG
        case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
        case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
#endif
        case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
        case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
#ifdef SIMPLE_ARRAY_LONG_FLOAT_WIDETAG
        case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
        case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
        case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
        case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
#endif
        case SAP_WIDETAG:
        case WEAK_POINTER_WIDETAG:
#ifdef LUTEX_WIDETAG
        case LUTEX_WIDETAG:
#endif
            break;

        default:
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wo6: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
        break;
    default:
        if (gencgc_verbose)
            FSHOW((stderr,
                   "/W?: %x %x %x\n",
                   pointer, start_addr, *start_addr));
        return 0;
    }

    /* looks good */
    return 1;
}
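
/* Illustrative sketch (not compiled): the alignment test repeated for
 * each lowtag above, factored out. A tagged pointer is only accepted
 * if removing its low tag lands exactly on the object start;
 * 'pointer_hits_object_start' is hypothetical. */
#if 0
static int
pointer_hits_object_start(lispobj *pointer, lispobj *start_addr)
{
    return (unsigned long)pointer ==
        ((unsigned long)start_addr + lowtag_of((lispobj)pointer));
}
#endif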

/* Used by the debugger to validate possibly bogus pointers before
 * calling MAKE-LISP-OBJ on them.
 *
 * FIXME: We would like to make this perfect, because if the debugger
 * constructs a reference to a bogus lisp object, and it ends up in a
 * location scavenged by the GC all hell breaks loose.
 *
 * Whereas possibly_valid_dynamic_space_pointer has to be conservative
 * and return true for all valid pointers, this could actually be eager
 * and lie about a few pointers without bad results... but that should
 * be reflected in the name.
 */
int
valid_lisp_pointer_p(lispobj *pointer)
{
    lispobj *start;
    if (((start=search_dynamic_space(pointer))!=NULL) ||
        ((start=search_static_space(pointer))!=NULL) ||
        ((start=search_read_only_space(pointer))!=NULL))
        return looks_like_valid_lisp_pointer_p(pointer, start);
    else
        return 0;
}

/* Is there any possibility that pointer is a valid Lisp object
 * reference, and/or something else (e.g. subroutine call return
 * address) which should prevent us from moving the referred-to thing?
 * This is called from preserve_pointer(). */
static int
possibly_valid_dynamic_space_pointer(lispobj *pointer)
{
    lispobj *start_addr;

    /* Find the object start address. */
    if ((start_addr = search_dynamic_space(pointer)) == NULL) {
        return 0;
    }

    return looks_like_valid_lisp_pointer_p(pointer, start_addr);
}

/* Adjust large bignum and vector objects. This will adjust the
 * allocated region if the size has shrunk, and move unboxed objects
 * into unboxed pages. The pages are not promoted here, and the
 * promoted region is not added to the new_regions; this is really
 * only designed to be called from preserve_pointer(). Shouldn't fail
 * if this is missed, just may delay the moving of objects to unboxed
 * pages, and the freeing of pages. */
static void
maybe_adjust_large_object(lispobj *where)
{
    page_index_t first_page;
    page_index_t next_page;
    long nwords;

    unsigned long remaining_bytes;
    unsigned long bytes_freed;
    unsigned long old_bytes_used;

    int boxed;

    /* Check whether it's a vector or bignum object. */
    switch (widetag_of(where[0])) {
    case SIMPLE_VECTOR_WIDETAG:
        boxed = BOXED_PAGE_FLAG;
        break;
    case BIGNUM_WIDETAG:
    case SIMPLE_BASE_STRING_WIDETAG:
#ifdef SIMPLE_CHARACTER_STRING_WIDETAG
    case SIMPLE_CHARACTER_STRING_WIDETAG:
#endif
    case SIMPLE_BIT_VECTOR_WIDETAG:
    case SIMPLE_ARRAY_NIL_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG
    case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
#endif
    case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG
    case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
    case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
    case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
#endif
    case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
    case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
#ifdef SIMPLE_ARRAY_LONG_FLOAT_WIDETAG
    case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
    case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
    case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
    case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
#endif
        boxed = UNBOXED_PAGE_FLAG;
        break;
    default:
        return;
    }

    /* Find its current size. */
    nwords = (sizetab[widetag_of(where[0])])(where);

    first_page = find_page_index((void *)where);
    gc_assert(first_page >= 0);

    /* Note: Any page write-protection must be removed, else a later
     * scavenge_newspace may incorrectly not scavenge these pages.
     * This would not be necessary if they are added to the new areas,
     * but let's do it for them all (they'll probably be written
     * anyway?). */

    gc_assert(page_table[first_page].region_start_offset == 0);

    next_page = first_page;
    remaining_bytes = nwords*N_WORD_BYTES;
    while (remaining_bytes > PAGE_BYTES) {
        gc_assert(page_table[next_page].gen == from_space);
        gc_assert(page_allocated_no_region_p(next_page));
        gc_assert(page_table[next_page].large_object);
        gc_assert(page_table[next_page].region_start_offset ==
                  npage_bytes(next_page-first_page));
        gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);

        page_table[next_page].allocated = boxed;

        /* Shouldn't be write-protected at this stage. Essential that the
         * pages aren't. */
        gc_assert(!page_table[next_page].write_protected);
        remaining_bytes -= PAGE_BYTES;
        next_page++;
    }

    /* Now only one page remains, but the object may have shrunk so
     * there may be more unused pages which will be freed. */

    /* Object may have shrunk but shouldn't have grown - check. */
    gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

    page_table[next_page].allocated = boxed;
    gc_assert(page_table[next_page].allocated ==
              page_table[first_page].allocated);

    /* Adjust the bytes_used. */
    old_bytes_used = page_table[next_page].bytes_used;
    page_table[next_page].bytes_used = remaining_bytes;

    bytes_freed = old_bytes_used - remaining_bytes;

    /* Free any remaining pages; needs care. */
    next_page++;
    while ((old_bytes_used == PAGE_BYTES) &&
           (page_table[next_page].gen == from_space) &&
           page_allocated_no_region_p(next_page) &&
           page_table[next_page].large_object &&
           (page_table[next_page].region_start_offset ==
            npage_bytes(next_page - first_page))) {
        /* It checks out OK, free the page. We don't need to bother zeroing
         * pages as this should have been done before shrinking the
         * object. These pages shouldn't be write protected as they
         * should be zero filled. */
        gc_assert(page_table[next_page].write_protected == 0);

        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].allocated = FREE_PAGE_FLAG;
        page_table[next_page].bytes_used = 0;
        bytes_freed += old_bytes_used;
        next_page++;
    }

    if ((bytes_freed > 0) && gencgc_verbose) {
        FSHOW((stderr,
               "/maybe_adjust_large_object() freed %d\n",
               bytes_freed));
    }

    generations[from_space].bytes_allocated -= bytes_freed;
    bytes_allocated -= bytes_freed;
}
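
/* Illustrative sketch (not compiled): the page walk above in its
 * simplest form - a large object of nwords words fills whole pages up
 * to a final, possibly partial, page. 'large_object_last_page_sketch'
 * is hypothetical. */
#if 0
static page_index_t
large_object_last_page_sketch(page_index_t first_page, long nwords)
{
    unsigned long remaining_bytes = nwords*N_WORD_BYTES;
    page_index_t page = first_page;
    while (remaining_bytes > PAGE_BYTES) {
        remaining_bytes -= PAGE_BYTES;
        page++;
    }
    return page;   /* the page holding the object's final bytes */
}
#endif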

/* Take a possible pointer to a Lisp object and mark its page in the
 * page_table so that it will not be relocated during a GC.
 *
 * This involves locating the page it points to, then backing up to
 * the start of its region, then marking all pages dont_move from there
 * up to the first page that's not full or has a different generation.
 *
 * It is assumed that all the page static flags have been cleared at
 * the start of a GC.
 *
 * It is also assumed that the current gc_alloc() region has been
 * flushed and the tables updated. */

static void
preserve_pointer(void *addr)
{
    page_index_t addr_page_index = find_page_index(addr);
    page_index_t first_page;
    page_index_t i;
    unsigned int region_allocation;

    /* quick check 1: Address is quite likely to have been invalid. */
    if ((addr_page_index == -1)
        || page_free_p(addr_page_index)
        || (page_table[addr_page_index].bytes_used == 0)
        || (page_table[addr_page_index].gen != from_space)
        /* Skip if already marked dont_move. */
        || (page_table[addr_page_index].dont_move != 0))
        return;
    gc_assert(!(page_table[addr_page_index].allocated&OPEN_REGION_PAGE_FLAG));
    /* (Now that we know that addr_page_index is in range, it's
     * safe to index into page_table[] with it.) */
    region_allocation = page_table[addr_page_index].allocated;

    /* quick check 2: Check the offset within the page. */
    if (((unsigned long)addr & (PAGE_BYTES - 1)) >
        page_table[addr_page_index].bytes_used)
        return;

    /* Filter out anything which can't be a pointer to a Lisp object
     * (or, as a special case which also requires dont_move, a return
     * address referring to something in a CodeObject). This is
     * expensive but important, since it vastly reduces the
     * probability that random garbage will be bogusly interpreted as
     * a pointer which prevents a page from moving. */
    if (!(code_page_p(addr_page_index)
          || (is_lisp_pointer((lispobj)addr) &&
              possibly_valid_dynamic_space_pointer(addr))))
        return;

    /* Find the beginning of the region.  Note that there may be
     * objects in the region preceding the one that we were passed a
     * pointer to: if this is the case, we will write-protect all the
     * previous objects' pages too. */

#if 0
    /* I think this'd work just as well, but without the assertions.
     * -dan 2004.01.01 */
    first_page = find_page_index(page_region_start(addr_page_index))
#else
    first_page = addr_page_index;
    while (page_table[first_page].region_start_offset != 0) {
        --first_page;
        /* Do some checks. */
        gc_assert(page_table[first_page].bytes_used == PAGE_BYTES);
        gc_assert(page_table[first_page].gen == from_space);
        gc_assert(page_table[first_page].allocated == region_allocation);
    }
#endif

    /* Adjust any large objects before promotion as they won't be
     * copied after promotion. */
    if (page_table[first_page].large_object) {
        maybe_adjust_large_object(page_address(first_page));
        /* If a large object has shrunk then addr may now point to a
         * free area in which case it's ignored here. Note it gets
         * through the valid pointer test above because the tail looks
         * like fixnums. */
        if (page_free_p(addr_page_index)
            || (page_table[addr_page_index].bytes_used == 0)
            /* Check the offset within the page. */
            || (((unsigned long)addr & (PAGE_BYTES - 1))
                > page_table[addr_page_index].bytes_used)) {
            FSHOW((stderr,
                   "weird? ignore ptr 0x%x to freed area of large object\n",
                   addr));
            return;
        }
        /* It may have moved to unboxed pages. */
        region_allocation = page_table[first_page].allocated;
    }

    /* Now work forward until the end of this contiguous area is found,
     * marking all pages as dont_move. */
    for (i = first_page; ;i++) {
        gc_assert(page_table[i].allocated == region_allocation);

        /* Mark the page static. */
        page_table[i].dont_move = 1;

        /* Move the page to the new_space. XX I'd rather not do this
         * but the GC logic is not quite able to cope with the static
         * pages remaining in the from space. This also requires the
         * generation bytes_allocated counters be updated. */
        page_table[i].gen = new_space;
        generations[new_space].bytes_allocated += page_table[i].bytes_used;
        generations[from_space].bytes_allocated -= page_table[i].bytes_used;

        /* It is essential that the pages are not write protected as
         * they may have pointers into the old-space which need
         * scavenging. They shouldn't be write protected at this
         * stage. */
        gc_assert(!page_table[i].write_protected);

        /* Check whether this is the last page in this contiguous block.. */
        if ((page_table[i].bytes_used < PAGE_BYTES)
            /* ..or it is PAGE_BYTES and is the last in the block */
            || (page_free_p(i+1))
            || (page_table[i+1].bytes_used == 0) /* next page free */
            || (page_table[i+1].gen != from_space) /* diff. gen */
            || (page_table[i+1].region_start_offset == 0))
            break;
    }

    /* Check that the page is now static. */
    gc_assert(page_table[addr_page_index].dont_move != 0);
}

#endif  // defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
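
/* Illustrative sketch (not compiled): preserve_pointer() is the sink
 * for ambiguous roots. At GC time every word in a conservative root
 * area (e.g. the C stack) is offered to it, and non-pointers simply
 * fail the checks above. 'preserve_area_sketch' and its bounds are
 * hypothetical. */
#if 0
static void
preserve_area_sketch(void **low, void **high)
{
    void **p;
    for (p = low; p < high; p++)
        preserve_pointer(*p);
}
#endif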

/* If the given page is not write-protected, then scan it for pointers
 * to younger generations or the top temp. generation; if no
 * suspicious pointers are found then the page is write-protected.
 *
 * Care is taken to check for pointers to the current gc_alloc()
 * region if it is a younger generation or the temp. generation. This
 * frees the caller from doing a gc_alloc_update_page_tables(). Actually
 * the gc_alloc_generation does not need to be checked as this is only
 * called from scavenge_generation() when the gc_alloc generation is
 * younger, so it just checks if there is a pointer to the current
 * region.
 *
 * We return 1 if the page was write-protected, else 0. */
static int
update_page_write_prot(page_index_t page)
{
    generation_index_t gen = page_table[page].gen;
    long j;
    int wp_it = 1;
    void **page_addr = (void **)page_address(page);
    long num_words = page_table[page].bytes_used / N_WORD_BYTES;

    /* Shouldn't be a free page. */
    gc_assert(page_allocated_p(page));
    gc_assert(page_table[page].bytes_used != 0);

    /* Skip if it's already write-protected, pinned, or unboxed */
    if (page_table[page].write_protected
        /* FIXME: What's the reason for not write-protecting pinned pages? */
        || page_table[page].dont_move
        || page_unboxed_p(page))
        return (0);

    /* Scan the page for pointers to younger generations or the
     * top temp. generation. */

    for (j = 0; j < num_words; j++) {
        void *ptr = *(page_addr+j);
        page_index_t index = find_page_index(ptr);

        /* Check that it's in the dynamic space */
        if (index != -1)
            if (/* Does it point to a younger or the temp. generation? */
                (page_allocated_p(index)
                 && (page_table[index].bytes_used != 0)
                 && ((page_table[index].gen < gen)
                     || (page_table[index].gen == SCRATCH_GENERATION)))

                /* Or does it point within a current gc_alloc() region? */
                || ((boxed_region.start_addr <= ptr)
                    && (ptr <= boxed_region.free_pointer))
                || ((unboxed_region.start_addr <= ptr)
                    && (ptr <= unboxed_region.free_pointer))) {
                wp_it = 0;
                break;
            }
    }

    if (wp_it == 1) {
        /* Write-protect the page. */
        /*FSHOW((stderr, "/write-protecting page %d gen %d\n", page, gen));*/

        os_protect((void *)page_addr,
                   PAGE_BYTES,
                   OS_VM_PROT_READ|OS_VM_PROT_EXECUTE);

        /* Note the page as protected in the page tables. */
        page_table[page].write_protected = 1;
    }

    return (wp_it);
}
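
/* Illustrative sketch (not compiled): the counterpart of the
 * protection set here is the write-fault handler, which reopens a
 * protected page on its first write and records the event, roughly as
 * below. The function name is hypothetical; the real handler lives
 * with the signal-handling code. */
#if 0
static void
unprotect_on_write_fault_sketch(page_index_t page)
{
    os_protect(page_address(page), PAGE_BYTES, OS_VM_PROT_ALL);
    page_table[page].write_protected = 0;
    page_table[page].write_protected_cleared = 1;
}
#endif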

/* Scavenge all generations from FROM to TO, inclusive, except for
 * new_space which needs special handling, as new objects may be
 * added which are not checked here - use scavenge_newspace_generation.
 *
 * Write-protected pages should not have any pointers to the
 * from_space so do not need scavenging; thus write-protected pages are
 * not always scavenged. There is some code to check that these pages
 * are not written; but to check fully the write-protected pages need
 * to be scavenged by disabling the code to skip them.
 *
 * Under the current scheme when a generation is GCed the younger
 * generations will be empty. So, when a generation is being GCed it
 * is only necessary to scavenge the older generations for pointers
 * not the younger. So a page that does not have pointers to younger
 * generations does not need to be scavenged.
 *
 * The write-protection can be used to note pages that don't have
 * pointers to younger pages. But pages can be written without having
 * pointers to younger generations. After the pages are scavenged here
 * they can be scanned for pointers to younger generations and if
 * there are none the page can be write-protected.
 *
 * One complication is when the newspace is the top temp. generation.
 *
 * Enabling SC_GEN_CK scavenges the write-protected pages and checks
 * that none were written, which they shouldn't be as they should have
 * no pointers to younger generations. This breaks down for weak
 * pointers as the objects contain a link to the next and are written
 * if a weak pointer is scavenged. Still it's a useful check. */
#define SC_GEN_CK 0

static void
scavenge_generations(generation_index_t from, generation_index_t to)
{
    page_index_t i;
    page_index_t num_wp = 0;

#if SC_GEN_CK
    /* Clear the write_protected_cleared flags on all pages. */
    for (i = 0; i < page_table_pages; i++)
        page_table[i].write_protected_cleared = 0;
#endif

    for (i = 0; i < last_free_page; i++) {
        generation_index_t generation = page_table[i].gen;
        if (page_boxed_p(i)
            && (page_table[i].bytes_used != 0)
            && (generation != new_space)
            && (generation >= from)
            && (generation <= to)) {
            page_index_t last_page,j;
            int write_protected=1;

            /* This should be the start of a region */
            gc_assert(page_table[i].region_start_offset == 0);

            /* Now work forward until the end of the region */
            for (last_page = i; ; last_page++) {
                write_protected =
                    write_protected && page_table[last_page].write_protected;
                if ((page_table[last_page].bytes_used < PAGE_BYTES)
                    /* Or it is PAGE_BYTES and is the last in the block */
                    || (!page_boxed_p(last_page+1))
                    || (page_table[last_page+1].bytes_used == 0)
                    || (page_table[last_page+1].gen != generation)
                    || (page_table[last_page+1].region_start_offset == 0))
                    break;
            }
            if (!write_protected) {
                scavenge(page_address(i),
                         ((unsigned long)(page_table[last_page].bytes_used
                                          + npage_bytes(last_page-i)))
                         /N_WORD_BYTES);

                /* Now scan the pages and write protect those that
                 * don't have pointers to younger generations. */
                if (enable_page_protection) {
                    for (j = i; j <= last_page; j++) {
                        num_wp += update_page_write_prot(j);
                    }
                }
                if ((gencgc_verbose > 1) && (num_wp != 0)) {
                    FSHOW((stderr,
                           "/write protected %d pages within generation %d\n",
                           num_wp, generation));
                }
            }
            i = last_page;
        }
    }

#if SC_GEN_CK
    /* Check that none of the write_protected pages in this generation
     * have been written to. */
    for (i = 0; i < page_table_pages; i++) {
        if (page_allocated_p(i)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)
            && (page_table[i].write_protected_cleared != 0)) {
            FSHOW((stderr, "/scavenge_generation() %d\n", generation));
            FSHOW((stderr,
                   "/page bytes_used=%d region_start_offset=%lu dont_move=%d\n",
                   page_table[i].bytes_used,
                   page_table[i].region_start_offset,
                   page_table[i].dont_move));
            lose("write to protected page %d in scavenge_generation()\n", i);
        }
    }
#endif
}
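
/* Illustrative sketch (not compiled): the per-page test implied by the
 * loop above, stated as a predicate - a boxed, non-empty page of the
 * generation being scanned needs scavenging unless it is still
 * write-protected. 'page_needs_scavenge_p' is hypothetical. */
#if 0
static int
page_needs_scavenge_p(page_index_t i, generation_index_t gen)
{
    return page_boxed_p(i)
        && (page_table[i].bytes_used != 0)
        && (page_table[i].gen == gen)
        && !page_table[i].write_protected;
}
#endif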

/* Scavenge a newspace generation. As it is scavenged new objects may
 * be allocated to it; these will also need to be scavenged. This
 * repeats until there are no more objects unscavenged in the
 * newspace generation.
 *
 * To help improve the efficiency, areas written are recorded by
 * gc_alloc() and only these scavenged. Sometimes a little more will be
 * scavenged, but this causes no harm. An easy check is done that the
 * scavenged bytes equal the number allocated in the previous
 * scavenge.
 *
 * Write-protected pages are not scanned except if they are marked
 * dont_move in which case they may have been promoted and still have
 * pointers to the from space.
 *
 * Write-protected pages could potentially be written by alloc however
 * to avoid having to handle re-scavenging of write-protected pages
 * gc_alloc() does not write to write-protected pages.
 *
 * New areas of objects allocated are recorded alternately in the two
 * new_areas arrays below. */
static struct new_area new_areas_1[NUM_NEW_AREAS];
static struct new_area new_areas_2[NUM_NEW_AREAS];

/* Do one full scan of the new space generation. This is not enough to
 * complete the job as new objects may be added to the generation in
 * the process which are not scavenged. */
static void
scavenge_newspace_generation_one_scan(generation_index_t generation)
{
    page_index_t i;

    FSHOW((stderr,
           "/starting one full scan of newspace generation %d\n",
           generation));
    for (i = 0; i < last_free_page; i++) {
        /* Note that this skips over open regions when it encounters them. */
        if (page_boxed_p(i)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)
            && ((page_table[i].write_protected == 0)
                /* (This may be redundant as write_protected is now
                 * cleared before promotion.) */
                || (page_table[i].dont_move == 1))) {
            page_index_t last_page;
            int all_wp=1;

            /* The scavenge will start at the region_start_offset of
             * page i.
             *
             * We need to find the full extent of this contiguous
             * block in case objects span pages.
             *
             * Now work forward until the end of this contiguous area
             * is found. A small area is preferred as there is a
             * better chance of its pages being write-protected. */
            for (last_page = i; ;last_page++) {
                /* If all pages are write-protected and movable,
                 * then no need to scavenge */
                all_wp=all_wp && page_table[last_page].write_protected &&
                    !page_table[last_page].dont_move;

                /* Check whether this is the last page in this
                 * contiguous block */
                if ((page_table[last_page].bytes_used < PAGE_BYTES)
                    /* Or it is PAGE_BYTES and is the last in the block */
                    || (!page_boxed_p(last_page+1))
                    || (page_table[last_page+1].bytes_used == 0)
                    || (page_table[last_page+1].gen != generation)
                    || (page_table[last_page+1].region_start_offset == 0))
                    break;
            }

            /* Do a limited check for write-protected pages. */
            if (!all_wp) {
                long nwords = (((unsigned long)
                               (page_table[last_page].bytes_used
                                + npage_bytes(last_page-i)
                                + page_table[i].region_start_offset))
                               / N_WORD_BYTES);
                new_areas_ignore_page = last_page;

                scavenge(page_region_start(i), nwords);
            }
            i = last_page;
        }
    }
    FSHOW((stderr,
           "/done with one full scan of newspace generation %d\n",
           generation));
}

/* Do a complete scavenge of the newspace generation. */
#define SC_NS_GEN_CK 0

static void
scavenge_newspace_generation(generation_index_t generation)
{
    long i;

    /* the new_areas array currently being written to by gc_alloc() */
    struct new_area (*current_new_areas)[] = &new_areas_1;
    long current_new_areas_index;

    /* the new_areas created by the previous scavenge cycle */
    struct new_area (*previous_new_areas)[] = NULL;
    long previous_new_areas_index;

    /* Flush the current regions updating the tables. */
    gc_alloc_update_all_page_tables();

    /* Turn on the recording of new areas by gc_alloc(). */
    new_areas = current_new_areas;
    new_areas_index = 0;

    /* Don't need to record new areas that get scavenged anyway during
     * scavenge_newspace_generation_one_scan. */
    record_new_objects = 1;

    /* Start with a full scavenge. */
    scavenge_newspace_generation_one_scan(generation);

    /* Record all new areas now. */
    record_new_objects = 2;

    /* Give a chance to weak hash tables to make other objects live.
     * FIXME: The algorithm implemented here for weak hash table gcing
     * is O(W^2+N) as Bruno Haible warns in
     * http://www.haible.de/bruno/papers/cs/weak/WeakDatastructures-writeup.html
     * see "Implementation 2". */
    scav_weak_hash_tables();

    /* Flush the current regions updating the tables. */
    gc_alloc_update_all_page_tables();

    /* Grab new_areas_index. */
    current_new_areas_index = new_areas_index;

    /*FSHOW((stderr,
             "The first scan is finished; current_new_areas_index=%d.\n",
             current_new_areas_index));*/

    while (current_new_areas_index > 0) {
        /* Move the current to the previous new areas */
        previous_new_areas = current_new_areas;
        previous_new_areas_index = current_new_areas_index;

        /* Scavenge all the areas in previous new areas. Any new areas
         * allocated are saved in current_new_areas. */

        /* Allocate an array for current_new_areas; alternating between
         * new_areas_1 and 2 */
        if (previous_new_areas == &new_areas_1)
            current_new_areas = &new_areas_2;
        else
            current_new_areas = &new_areas_1;

        /* Set up for gc_alloc(). */
        new_areas = current_new_areas;
        new_areas_index = 0;

        /* Check whether previous_new_areas had overflowed. */
        if (previous_new_areas_index >= NUM_NEW_AREAS) {

            /* New areas of objects allocated have been lost so need to do a
             * full scan to be sure! If this becomes a problem try
             * increasing NUM_NEW_AREAS. */
            if (gencgc_verbose)
                SHOW("new_areas overflow, doing full scavenge");

            /* Don't need to record new areas that get scavenged
             * anyway during scavenge_newspace_generation_one_scan. */
            record_new_objects = 1;

            scavenge_newspace_generation_one_scan(generation);

            /* Record all new areas now. */
            record_new_objects = 2;

            scav_weak_hash_tables();

            /* Flush the current regions updating the tables. */
            gc_alloc_update_all_page_tables();

        } else {

            /* Work through previous_new_areas. */
            for (i = 0; i < previous_new_areas_index; i++) {
                page_index_t page = (*previous_new_areas)[i].page;
                size_t offset = (*previous_new_areas)[i].offset;
                size_t size = (*previous_new_areas)[i].size / N_WORD_BYTES;
                gc_assert((*previous_new_areas)[i].size % N_WORD_BYTES == 0);
                scavenge(page_address(page)+offset, size);
            }

            scav_weak_hash_tables();

            /* Flush the current regions updating the tables. */
            gc_alloc_update_all_page_tables();
        }

        current_new_areas_index = new_areas_index;

        /*FSHOW((stderr,
                 "The re-scan has finished; current_new_areas_index=%d.\n",
                 current_new_areas_index));*/
    }

    /* Turn off recording of areas allocated by gc_alloc(). */
    record_new_objects = 0;

#if SC_NS_GEN_CK
    /* Check that none of the write_protected pages in this generation
     * have been written to. */
    for (i = 0; i < page_table_pages; i++) {
        if (page_allocated_p(i)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)
            && (page_table[i].write_protected_cleared != 0)
            && (page_table[i].dont_move == 0)) {
            lose("write protected page %d written to in scavenge_newspace_generation\ngeneration=%d dont_move=%d\n",
                 i, generation, page_table[i].dont_move);
        }
    }
#endif
}
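
/* Illustrative sketch (not compiled): what gc_alloc() records while
 * record_new_objects is enabled - approximately an append to the
 * current new_areas array, with overflow left for the caller to
 * detect (triggering the full rescan above). The real recorder also
 * coalesces adjacent areas; 'record_new_area_sketch' is hypothetical. */
#if 0
static void
record_new_area_sketch(page_index_t page, size_t offset, size_t size)
{
    if (new_areas_index >= NUM_NEW_AREAS) {
        new_areas_index++;    /* overflow is noticed by the caller */
        return;
    }
    (*new_areas)[new_areas_index].page = page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
    new_areas_index++;
}
#endif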

/* Un-write-protect all the pages in from_space. This is done at the
 * start of a GC else there may be many page faults while scavenging
 * the newspace (I've seen it drive system time to 99%). These pages
 * would need to be unprotected anyway before unmapping in
 * free_oldspace; not sure what effect this has on paging.. */
static void
unprotect_oldspace(void)
{
    page_index_t i;

    for (i = 0; i < last_free_page; i++) {
        if (page_allocated_p(i)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == from_space)) {
            void *page_start;

            page_start = (void *)page_address(i);

            /* Remove any write-protection. We should be able to rely
             * on the write-protect flag to avoid redundant calls. */
            if (page_table[i].write_protected) {
                os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
                page_table[i].write_protected = 0;
            }
        }
    }
}

/* Work through all the pages and free any in from_space. This
 * assumes that all objects have been copied or promoted to an older
 * generation. Bytes_allocated and the generation bytes_allocated
 * counter are updated. The number of bytes freed is returned. */
static unsigned long
free_oldspace(void)
{
    unsigned long bytes_freed = 0;
    page_index_t first_page, last_page;

    first_page = 0;

    do {
        /* Find a first page for the next region of pages. */
        while ((first_page < last_free_page)
               && (page_free_p(first_page)
                   || (page_table[first_page].bytes_used == 0)
                   || (page_table[first_page].gen != from_space)))
            first_page++;

        if (first_page >= last_free_page)
            break;

        /* Find the last page of this region. */
        last_page = first_page;

        do {
            /* Free the page. */
            bytes_freed += page_table[last_page].bytes_used;
            generations[page_table[last_page].gen].bytes_allocated -=
                page_table[last_page].bytes_used;
            page_table[last_page].allocated = FREE_PAGE_FLAG;
            page_table[last_page].bytes_used = 0;

            /* Remove any write-protection. We should be able to rely
             * on the write-protect flag to avoid redundant calls. */
            {
                void *page_start = (void *)page_address(last_page);

                if (page_table[last_page].write_protected) {
                    os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
                    page_table[last_page].write_protected = 0;
                }
            }
            last_page++;
        }
        while ((last_page < last_free_page)
               && page_allocated_p(last_page)
               && (page_table[last_page].bytes_used != 0)
               && (page_table[last_page].gen == from_space));

#ifdef READ_PROTECT_FREE_PAGES
        os_protect(page_address(first_page),
                   npage_bytes(last_page-first_page),
                   OS_VM_PROT_NONE);
#endif
        first_page = last_page;
    } while (first_page < last_free_page);

    bytes_allocated -= bytes_freed;
    return bytes_freed;
}

/* Print some information about a pointer at the given address. */
static void
print_ptr(lispobj *addr)
{
    /* If addr is in the dynamic space then print out the page information. */
    page_index_t pi1 = find_page_index((void*)addr);

    if (pi1 != -1)
        fprintf(stderr,"  %x: page %d  alloc %d  gen %d  bytes_used %d  offset %lu  dont_move %d\n",
                (unsigned long) addr,
                pi1,
                page_table[pi1].allocated,
                page_table[pi1].gen,
                page_table[pi1].bytes_used,
                page_table[pi1].region_start_offset,
                page_table[pi1].dont_move);
    fprintf(stderr,"  %x %x %x %x (%x) %x %x %x %x\n",
            *(addr-4),
            *(addr-3),
            *(addr-2),
            *(addr-1),
            *(addr-0),
            *(addr+1),
            *(addr+2),
            *(addr+3),
            *(addr+4));
}

static void
verify_space(lispobj *start, size_t words)
{
    int is_in_dynamic_space = (find_page_index((void*)start) != -1);
    int is_in_readonly_space =
        (READ_ONLY_SPACE_START <= (unsigned long)start &&
         (unsigned long)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));

    while (words > 0) {
        long count = 1;
        lispobj thing = *(lispobj *)start;

        if (is_lisp_pointer(thing)) {
            page_index_t page_index = find_page_index((void*)thing);
            long to_readonly_space =
                (READ_ONLY_SPACE_START <= thing &&
                 thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
            long to_static_space =
                (STATIC_SPACE_START <= thing &&
                 thing < SymbolValue(STATIC_SPACE_FREE_POINTER,0));

            /* Does it point to the dynamic space? */
            if (page_index != -1) {
                /* If it's within the dynamic space it should point to a used
                 * page. XX Could check the offset too. */
                if (page_allocated_p(page_index)
                    && (page_table[page_index].bytes_used == 0))
                    lose ("Ptr %x @ %x sees free page.\n", thing, start);
                /* Check that it doesn't point to a forwarding pointer! */
                if (*((lispobj *)native_pointer(thing)) == 0x01) {
                    lose("Ptr %x @ %x sees forwarding ptr.\n", thing, start);
                }
                /* Check that its not in the RO space as it would then be a
                 * pointer from the RO to the dynamic space. */
                if (is_in_readonly_space) {
                    lose("ptr to dynamic space %x from RO space %x\n",
                         thing, start);
                }
                /* Does it point to a plausible object? This check slows
                 * it down a lot (so it's commented out).
                 *
                 * "a lot" is serious: it ate 50 minutes cpu time on
                 * my duron 950 before I came back from lunch and
                 * killed it.
                 *
                 * FIXME: Add a variable to enable this
                 * dynamically. */
                /*
                if (!possibly_valid_dynamic_space_pointer((lispobj *)thing)) {
                    lose("ptr %x to invalid object %x\n", thing, start);
                }
                */
            } else {
                /* Verify that it points to another valid space. */
                if (!to_readonly_space && !to_static_space) {
                    lose("Ptr %x @ %x sees junk.\n", thing, start);
                }
            }
        } else {
            if (!(fixnump(thing))) {
                /* skip fixnums */
                switch(widetag_of(*start)) {

                    /* boxed objects */
                case SIMPLE_VECTOR_WIDETAG:
                case RATIO_WIDETAG:
                case COMPLEX_WIDETAG:
                case SIMPLE_ARRAY_WIDETAG:
                case COMPLEX_BASE_STRING_WIDETAG:
#ifdef COMPLEX_CHARACTER_STRING_WIDETAG
                case COMPLEX_CHARACTER_STRING_WIDETAG:
#endif
                case COMPLEX_VECTOR_NIL_WIDETAG:
                case COMPLEX_BIT_VECTOR_WIDETAG:
                case COMPLEX_VECTOR_WIDETAG:
                case COMPLEX_ARRAY_WIDETAG:
                case CLOSURE_HEADER_WIDETAG:
                case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
                case VALUE_CELL_HEADER_WIDETAG:
                case SYMBOL_HEADER_WIDETAG:
                case CHARACTER_WIDETAG:
#if N_WORD_BITS == 64
                case SINGLE_FLOAT_WIDETAG:
#endif
                case UNBOUND_MARKER_WIDETAG:
                case FDEFN_WIDETAG:
                    count = 1;
                    break;

                case INSTANCE_HEADER_WIDETAG:
                    {
                        lispobj nuntagged;
                        long ntotal = HeaderValue(thing);
                        lispobj layout = ((struct instance *)start)->slots[0];
                        if (!layout) {
                            count = 1;
                            break;
                        }
                        nuntagged = ((struct layout *)
                                     native_pointer(layout))->n_untagged_slots;
                        verify_space(start + 1,
                                     ntotal - fixnum_value(nuntagged));
                        count = ntotal + 1;
                        break;
                    }
                case CODE_HEADER_WIDETAG:
                    {
                        lispobj object = *start;
                        struct code *code;
                        long nheader_words, ncode_words, nwords;
                        lispobj fheaderl;
                        struct simple_fun *fheaderp;

                        code = (struct code *) start;

                        /* Check that it's not in the dynamic space.
                         * FIXME: Isn't it supposed to be OK for code
                         * objects to be in the dynamic space these days? */
                        if (is_in_dynamic_space
                            /* It's ok if it's byte compiled code. The trace
                             * table offset will be a fixnum if it's x86
                             * compiled code - check.
                             *
                             * FIXME: #^#@@! lack of abstraction here..
                             * This line can probably go away now that
                             * there's no byte compiler, but I've got
                             * too much to worry about right now to try
                             * to make sure. -- WHN 2001-10-06 */
                            && fixnump(code->trace_table_offset)
                            /* Only when enabled */
                            && verify_dynamic_code_check) {
                            FSHOW((stderr,
                                   "/code object at %x in the dynamic space\n",
                                   start));
                        }

                        ncode_words = fixnum_value(code->code_size);
                        nheader_words = HeaderValue(object);
                        nwords = ncode_words + nheader_words;
                        nwords = CEILING(nwords, 2);
                        /* Scavenge the boxed section of the code data block */
                        verify_space(start + 1, nheader_words - 1);

                        /* Scavenge the boxed section of each function
                         * object in the code data block. */
                        fheaderl = code->entry_points;
                        while (fheaderl != NIL) {
                            fheaderp =
                                (struct simple_fun *) native_pointer(fheaderl);
                            gc_assert(widetag_of(fheaderp->header) ==
                                      SIMPLE_FUN_HEADER_WIDETAG);
                            verify_space(&fheaderp->name, 1);
                            verify_space(&fheaderp->arglist, 1);
                            verify_space(&fheaderp->type, 1);
                            fheaderl = fheaderp->next;
                        }
                        count = nwords;
                        break;
                    }

                    /* unboxed objects */
                case BIGNUM_WIDETAG:
#if N_WORD_BITS != 64
                case SINGLE_FLOAT_WIDETAG:
#endif
                case DOUBLE_FLOAT_WIDETAG:
#ifdef LONG_FLOAT_WIDETAG
                case LONG_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_SINGLE_FLOAT_WIDETAG
                case COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_DOUBLE_FLOAT_WIDETAG
                case COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_LONG_FLOAT_WIDETAG
                case COMPLEX_LONG_FLOAT_WIDETAG:
#endif
                case SIMPLE_BASE_STRING_WIDETAG:
#ifdef SIMPLE_CHARACTER_STRING_WIDETAG
                case SIMPLE_CHARACTER_STRING_WIDETAG:
#endif
                case SIMPLE_BIT_VECTOR_WIDETAG:
                case SIMPLE_ARRAY_NIL_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG
                case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
#endif
                case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG
                case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
                case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
                case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
#endif
                case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
                case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
#ifdef SIMPLE_ARRAY_LONG_FLOAT_WIDETAG
                case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
                case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
                case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
                case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
#endif
                case SAP_WIDETAG:
                case WEAK_POINTER_WIDETAG:
#ifdef LUTEX_WIDETAG
                case LUTEX_WIDETAG:
#endif
#ifdef NO_TLS_VALUE_MARKER_WIDETAG
                case NO_TLS_VALUE_MARKER_WIDETAG:
#endif
                    count = (sizetab[widetag_of(*start)])(start);
                    break;

                default:
                    lose("Unhandled widetag 0x%x at 0x%x\n",
                         widetag_of(*start), start);
                }
            }
        }
        start += count;
        words -= count;
    }
}

static void
verify_gc(void)
{
    /* FIXME: It would be nice to make names consistent so that
     * foo_size meant size *in* *bytes* instead of size in some
     * arbitrary units. (Yes, this caused a bug, how did you guess?:-)
     * Some counts of lispobjs are called foo_count; it might be good
     * to grep for all foo_size and rename the appropriate ones to
     * foo_count. */
    long read_only_space_size =
        (lispobj *)SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0)
        - (lispobj *)READ_ONLY_SPACE_START;
    long static_space_size =
        (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0)
        - (lispobj *)STATIC_SPACE_START;
    struct thread *th;
    for_each_thread(th) {
        long binding_stack_size =
            (lispobj *)get_binding_stack_pointer(th)
            - (lispobj *)th->binding_stack_start;
        verify_space(th->binding_stack_start, binding_stack_size);
    }
    verify_space((lispobj *)READ_ONLY_SPACE_START, read_only_space_size);
    verify_space((lispobj *)STATIC_SPACE_START, static_space_size);
}

static void
verify_generation(generation_index_t generation)
{
    page_index_t i;

    for (i = 0; i < last_free_page; i++) {
        if (page_allocated_p(i)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)) {
            page_index_t last_page;
            int region_allocation = page_table[i].allocated;

            /* This should be the start of a contiguous block */
            gc_assert(page_table[i].region_start_offset == 0);

            /* Need to find the full extent of this contiguous block in case
               objects span pages. */

            /* Now work forward until the end of this contiguous area is
               found. */
            for (last_page = i; ;last_page++)
                /* Check whether this is the last page in this contiguous
                 * block. */
                if ((page_table[last_page].bytes_used < PAGE_BYTES)
                    /* Or it is PAGE_BYTES and is the last in the block */
                    || (page_table[last_page+1].allocated != region_allocation)
                    || (page_table[last_page+1].bytes_used == 0)
                    || (page_table[last_page+1].gen != generation)
                    || (page_table[last_page+1].region_start_offset == 0))
                    break;

            verify_space(page_address(i),
                         ((unsigned long)
                          (page_table[last_page].bytes_used
                           + npage_bytes(last_page-i)))
                         / N_WORD_BYTES);
            i = last_page;
        }
    }
}

/* Check that all the free space is zero filled. */
static void
verify_zero_fill(void)
{
    page_index_t page;

    for (page = 0; page < last_free_page; page++) {
        if (page_free_p(page)) {
            /* The whole page should be zero filled. */
            long *start_addr = (long *)page_address(page);
            long size = PAGE_BYTES / N_WORD_BYTES;
            long i;
            for (i = 0; i < size; i++) {
                if (start_addr[i] != 0) {
                    lose("free page not zero at %x\n", start_addr + i);
                }
            }
        } else {
            long free_bytes = PAGE_BYTES - page_table[page].bytes_used;
            if (free_bytes > 0) {
                long *start_addr = (long *)((unsigned long)page_address(page)
                                          + page_table[page].bytes_used);
                long size = free_bytes / N_WORD_BYTES;
                long i;
                for (i = 0; i < size; i++) {
                    if (start_addr[i] != 0) {
                        lose("free region not zero at %x\n", start_addr + i);
                    }
                }
            }
        }
    }
}

/* External entry point for verify_zero_fill */
void
gencgc_verify_zero_fill(void)
{
    /* Flush the alloc regions updating the tables. */
    gc_alloc_update_all_page_tables();
    SHOW("verifying zero fill");
    verify_zero_fill();
}

static void
verify_dynamic_space(void)
{
    generation_index_t i;

    for (i = 0; i <= HIGHEST_NORMAL_GENERATION; i++)
        verify_generation(i);

    if (gencgc_enable_verify_zero_fill)
        verify_zero_fill();
}

/* Write-protect all the dynamic boxed pages in the given generation. */
static void
write_protect_generation_pages(generation_index_t generation)
{
    page_index_t start;

    gc_assert(generation < SCRATCH_GENERATION);

    for (start = 0; start < last_free_page; start++) {
        if (protect_page_p(start, generation)) {
            void *page_start;
            page_index_t last;

            /* Note the page as protected in the page tables. */
            page_table[start].write_protected = 1;

            for (last = start + 1; last < last_free_page; last++) {
                if (!protect_page_p(last, generation))
                    break;
                page_table[last].write_protected = 1;
            }

            page_start = (void *)page_address(start);

            os_protect(page_start,
                       npage_bytes(last - start),
                       OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);

            start = last;
        }
    }

    if (gencgc_verbose > 1) {
        FSHOW((stderr,
               "/write protected %d of %d pages in generation %d\n",
               count_write_protect_generation_pages(generation),
               count_generation_pages(generation),
               generation));
    }
}

#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)

static void
scavenge_control_stack()
{
    unsigned long control_stack_size;

    /* This is going to be a big problem when we try to port threads
     * to more than one platform. */
    struct thread *th = arch_os_get_current_thread();
    lispobj *control_stack =
        (lispobj *)(th->control_stack_start);

    control_stack_size = current_control_stack_pointer - control_stack;
    scavenge(control_stack, control_stack_size);
}
3768 /* Scavenging Interrupt Contexts */
3770 static int boxed_registers
[] = BOXED_REGISTERS
;
3773 scavenge_interrupt_context(os_context_t
* context
)
3779 unsigned long lip_offset
;
3780 int lip_register_pair
;
3782 unsigned long pc_code_offset
;
3784 #ifdef ARCH_HAS_LINK_REGISTER
3785 unsigned long lr_code_offset
;
3787 #ifdef ARCH_HAS_NPC_REGISTER
3788 unsigned long npc_code_offset
;
3792 /* Find the LIP's register pair and calculate it's offset */
3793 /* before we scavenge the context. */
3796 * I (RLT) think this is trying to find the boxed register that is
3797 * closest to the LIP address, without going past it. Usually, it's
3798 * reg_CODE or reg_LRA. But sometimes, nothing can be found.
3800 lip
= *os_context_register_addr(context
, reg_LIP
);
3801 lip_offset
= 0x7FFFFFFF;
3802 lip_register_pair
= -1;
3803 for (i
= 0; i
< (sizeof(boxed_registers
) / sizeof(int)); i
++) {
3808 index
= boxed_registers
[i
];
3809 reg
= *os_context_register_addr(context
, index
);
3810 if ((reg
& ~((1L<<N_LOWTAG_BITS
)-1)) <= lip
) {
3812 if (offset
< lip_offset
) {
3813 lip_offset
= offset
;
3814 lip_register_pair
= index
;
3818 #endif /* reg_LIP */
3820 /* Compute the PC's offset from the start of the CODE */
3822 pc_code_offset
= *os_context_pc_addr(context
)
3823 - *os_context_register_addr(context
, reg_CODE
);
3824 #ifdef ARCH_HAS_NPC_REGISTER
3825 npc_code_offset
= *os_context_npc_addr(context
)
3826 - *os_context_register_addr(context
, reg_CODE
);
3827 #endif /* ARCH_HAS_NPC_REGISTER */
3829 #ifdef ARCH_HAS_LINK_REGISTER
3831 *os_context_lr_addr(context
) -
3832 *os_context_register_addr(context
, reg_CODE
);
    /* Scavenge all boxed registers in the context. */
    for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
        int index;
        lispobj foo;

        index = boxed_registers[i];
        foo = *os_context_register_addr(context, index);
        scavenge(&foo, 1);
        *os_context_register_addr(context, index) = foo;

        scavenge((lispobj *) &(*os_context_register_addr(context, index)), 1);
    }
#ifdef reg_LIP
    /* Fix the LIP */

    /*
     * But what happens if lip_register_pair is -1?
     * *os_context_register_addr on Solaris (see
     * solaris_register_address in solaris-os.c) will return
     * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
     * that what we really want? My guess is that that is not what we
     * want, so if lip_register_pair is -1, we don't touch reg_LIP at
     * all. But maybe it doesn't really matter if LIP is trashed?
     */
    if (lip_register_pair >= 0) {
        *os_context_register_addr(context, reg_LIP) =
            *os_context_register_addr(context, lip_register_pair)
            + lip_offset;
    }
#endif /* reg_LIP */
    /* Fix the PC if it was in from space */
    if (from_space_p(*os_context_pc_addr(context)))
        *os_context_pc_addr(context) =
            *os_context_register_addr(context, reg_CODE) + pc_code_offset;

#ifdef ARCH_HAS_LINK_REGISTER
    /* Fix the LR ditto; important if we're being called from
     * an assembly routine that expects to return using blr, otherwise
     * we would return into the old space. */
    if (from_space_p(*os_context_lr_addr(context)))
        *os_context_lr_addr(context) =
            *os_context_register_addr(context, reg_CODE) + lr_code_offset;
#endif

#ifdef ARCH_HAS_NPC_REGISTER
    if (from_space_p(*os_context_npc_addr(context)))
        *os_context_npc_addr(context) =
            *os_context_register_addr(context, reg_CODE) + npc_code_offset;
#endif /* ARCH_HAS_NPC_REGISTER */
}
void
scavenge_interrupt_contexts(void)
{
    int i, index;
    os_context_t *context;

    struct thread *th=arch_os_get_current_thread();

    index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,0));

#if defined(DEBUG_PRINT_CONTEXT_INDEX)
    printf("Number of active contexts: %d\n", index);
#endif

    for (i = 0; i < index; i++) {
        context = th->interrupt_contexts[i];
        scavenge_interrupt_context(context);
    }
}
#endif /* !LISP_FEATURE_X86 && !LISP_FEATURE_X86_64 */
#if defined(LISP_FEATURE_SB_THREAD)
static void
preserve_context_registers (os_context_t *c)
{
    void **ptr;
    /* On Darwin the signal context isn't a contiguous block of memory,
     * so just preserve_pointer()ing its contents won't be sufficient.
     */
#if defined(LISP_FEATURE_DARWIN)
#if defined LISP_FEATURE_X86
    preserve_pointer((void*)*os_context_register_addr(c,reg_EAX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_ECX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_EDX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_EBX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_ESI));
    preserve_pointer((void*)*os_context_register_addr(c,reg_EDI));
    preserve_pointer((void*)*os_context_pc_addr(c));
#elif defined LISP_FEATURE_X86_64
    preserve_pointer((void*)*os_context_register_addr(c,reg_RAX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_RCX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_RDX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_RBX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_RSI));
    preserve_pointer((void*)*os_context_register_addr(c,reg_RDI));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R8));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R9));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R10));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R11));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R12));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R13));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R14));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R15));
    preserve_pointer((void*)*os_context_pc_addr(c));
#else
    #error "preserve_context_registers needs to be tweaked for non-x86 Darwin"
#endif
#endif
    for(ptr = ((void **)(c+1))-1; ptr>=(void **)c; ptr--) {
        preserve_pointer(*ptr);
    }
}
#endif
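/* Note on the word-by-word loop above (illustrative comment, not from
 * the original source): every word of the saved context is handed to
 * preserve_pointer(), which treats it as an ambiguous root. A raw
 * register value that merely looks like a dynamic-space address will
 * therefore pin the page it appears to point at (setting dont_move),
 * which may retain some garbage but is always safe. */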
/* Garbage collect a generation. If raise is 0 then the remains of the
 * generation are not raised to the next generation. */
static void
garbage_collect_generation(generation_index_t generation, int raise)
{
    unsigned long bytes_freed;
    page_index_t i;
    unsigned long static_space_size;
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    struct thread *th;
#endif
    gc_assert(generation <= HIGHEST_NORMAL_GENERATION);

    /* The oldest generation can't be raised. */
    gc_assert((generation != HIGHEST_NORMAL_GENERATION) || (raise == 0));

    /* Check if weak hash tables were processed in the previous GC. */
    gc_assert(weak_hash_tables == NULL);

    /* Initialize the weak pointer list. */
    weak_pointers = NULL;

#ifdef LUTEX_WIDETAG
    unmark_lutexes(generation);
#endif
    /* When a generation is not being raised it is transported to a
     * temporary generation (NUM_GENERATIONS), and lowered when
     * done. Set up this new generation. There should be no pages
     * allocated to it yet. */
    if (!raise) {
        gc_assert(generations[SCRATCH_GENERATION].bytes_allocated == 0);
    }

    /* Set the global src and dest. generations */
    from_space = generation;
    if (raise)
        new_space = generation+1;
    else
        new_space = SCRATCH_GENERATION;

    /* Change to a new space for allocation, resetting the alloc_start_page */
    gc_alloc_generation = new_space;
    generations[new_space].alloc_start_page = 0;
    generations[new_space].alloc_unboxed_start_page = 0;
    generations[new_space].alloc_large_start_page = 0;
    generations[new_space].alloc_large_unboxed_start_page = 0;
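    /* Worked example (illustrative only): collecting generation 1 with
     * raise=1 transports survivors into new_space = 2, where they stay.
     * With raise=0 survivors go to SCRATCH_GENERATION and, after
     * oldspace is freed, the scratch pages are relabelled back to
     * generation 1 by the !raise fixup near the end of this function. */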
    /* Before any pointers are preserved, the dont_move flags on the
     * pages need to be cleared. */
    for (i = 0; i < last_free_page; i++)
        if(page_table[i].gen==from_space)
            page_table[i].dont_move = 0;

    /* Un-write-protect the old-space pages. This is essential for the
     * promoted pages as they may contain pointers into the old-space
     * which need to be scavenged. It also helps avoid unnecessary page
     * faults as forwarding pointers are written into them. They need to
     * be un-protected anyway before unmapping later. */
    unprotect_oldspace();
    /* Scavenge the stacks' conservative roots. */

    /* there are potentially two stacks for each thread: the main
     * stack, which may contain Lisp pointers, and the alternate stack.
     * We don't ever run Lisp code on the altstack, but it may
     * host a sigcontext with lisp objects in it */

    /* what we need to do: (1) find the stack pointer for the main
     * stack; scavenge it (2) find the interrupt context on the
     * alternate stack that might contain lisp values, and scavenge
     * that */

    /* we assume that none of the preceding applies to the thread that
     * initiates GC. If you ever call GC from inside an altstack
     * handler, you will lose. */
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    /* And if we're saving a core, there's no point in being conservative. */
    if (conservative_stack) {
        for_each_thread(th) {
            void **ptr;
            void **esp=(void **)-1;
#ifdef LISP_FEATURE_SB_THREAD
            long i,free;
            if(th==arch_os_get_current_thread()) {
                /* Somebody is going to burn in hell for this, but casting
                 * it in two steps shuts gcc up about strict aliasing. */
                esp = (void **)((void *)&raise);
            } else {
                void **esp1;
                free=fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
                for(i=free-1;i>=0;i--) {
                    os_context_t *c=th->interrupt_contexts[i];
                    esp1 = (void **) *os_context_register_addr(c,reg_SP);
                    if (esp1>=(void **)th->control_stack_start &&
                        esp1<(void **)th->control_stack_end) {
                        if(esp1<esp) esp=esp1;
                        preserve_context_registers(c);
                    }
                }
            }
#else
            esp = (void **)((void *)&raise);
#endif
            for (ptr = ((void **)th->control_stack_end)-1; ptr >= esp; ptr--) {
                preserve_pointer(*ptr);
            }
        }
    }
#endif

    if (gencgc_verbose > 1) {
        long num_dont_move_pages = count_dont_move_pages();
        fprintf(stderr,
                "/non-movable pages due to conservative pointers = %d (%d bytes)\n",
                num_dont_move_pages,
                npage_bytes(num_dont_move_pages));
    }
    /* Scavenge all the rest of the roots. */

#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
    /*
     * If not x86, we need to scavenge the interrupt context(s) and the
     * control stack.
     */
    scavenge_interrupt_contexts();
    scavenge_control_stack();
#endif

    /* Scavenge the Lisp functions of the interrupt handlers, taking
     * care to avoid SIG_DFL and SIG_IGN. */
    for (i = 0; i < NSIG; i++) {
        union interrupt_handler handler = interrupt_handlers[i];
        if (!ARE_SAME_HANDLER(handler.c, SIG_IGN) &&
            !ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
            scavenge((lispobj *)(interrupt_handlers + i), 1);
        }
    }
    /* Scavenge the binding stacks. */
    {
        struct thread *th;
        for_each_thread(th) {
            long len= (lispobj *)get_binding_stack_pointer(th) -
                th->binding_stack_start;
            scavenge((lispobj *) th->binding_stack_start,len);
#ifdef LISP_FEATURE_SB_THREAD
            /* do the tls as well */
            len=fixnum_value(SymbolValue(FREE_TLS_INDEX,0)) -
                (sizeof (struct thread))/(sizeof (lispobj));
            scavenge((lispobj *) (th+1),len);
#endif
        }
    }
    /* The original CMU CL code had scavenge-read-only-space code
     * controlled by the Lisp-level variable
     * *SCAVENGE-READ-ONLY-SPACE*. It was disabled by default, and it
     * wasn't documented under what circumstances it was useful or
     * safe to turn it on, so it's been turned off in SBCL. If you
     * want/need this functionality, and can test and document it,
     * please submit a patch. */
#if 0
    if (SymbolValue(SCAVENGE_READ_ONLY_SPACE) != NIL) {
        unsigned long read_only_space_size =
            (lispobj *)SymbolValue(READ_ONLY_SPACE_FREE_POINTER) -
            (lispobj *)READ_ONLY_SPACE_START;
        FSHOW((stderr,
               "/scavenge read only space: %d bytes\n",
               read_only_space_size * sizeof(lispobj)));
        scavenge( (lispobj *) READ_ONLY_SPACE_START, read_only_space_size);
    }
#endif
    /* Scavenge static space. */
    static_space_size =
        (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0) -
        (lispobj *)STATIC_SPACE_START;
    if (gencgc_verbose > 1) {
        FSHOW((stderr,
               "/scavenge static space: %d bytes\n",
               static_space_size * sizeof(lispobj)));
    }
    scavenge( (lispobj *) STATIC_SPACE_START, static_space_size);
    /* All generations but the generation being GCed need to be
     * scavenged. The new_space generation needs special handling as
     * objects may be moved in - it is handled separately below. */
    scavenge_generations(generation+1, PSEUDO_STATIC_GENERATION);

    /* Finally scavenge the new_space generation. Keep going until no
     * more objects are moved into the new generation */
    scavenge_newspace_generation(new_space);
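    /* Why that call must iterate (illustrative note): scavenging
     * newspace can itself transport more objects into newspace -- a
     * scavenged cons may point at a not-yet-copied vector, which then
     * gets copied -- so scavenge_newspace_generation() rescans until a
     * complete pass moves nothing, i.e. until it reaches a fixpoint. */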
    /* FIXME: I tried reenabling this check when debugging unrelated
     * GC weirdness ca. sbcl-0.6.12.45, and it failed immediately.
     * Since the current GC code seems to work well, I'm guessing that
     * this debugging code is just stale, but I haven't tried to
     * figure it out. It should be figured out and then either made to
     * work or just deleted. */
#define RESCAN_CHECK 0
#if RESCAN_CHECK
    /* As a check re-scavenge the newspace once; no new objects should
     * be found. */
    {
        long old_bytes_allocated = bytes_allocated;
        long bytes_allocated;

        /* Start with a full scavenge. */
        scavenge_newspace_generation_one_scan(new_space);

        /* Flush the current regions, updating the tables. */
        gc_alloc_update_all_page_tables();

        bytes_allocated = bytes_allocated - old_bytes_allocated;

        if (bytes_allocated != 0) {
            lose("Rescan of new_space allocated %d more bytes.\n",
                 bytes_allocated);
        }
    }
#endif
    scan_weak_hash_tables();
    scan_weak_pointers();

    /* Flush the current regions, updating the tables. */
    gc_alloc_update_all_page_tables();

    /* Free the pages in oldspace, but not those marked dont_move. */
    bytes_freed = free_oldspace();

    /* If the GC is not raising the age then lower the generation back
     * to its normal generation number */
    if (!raise) {
        for (i = 0; i < last_free_page; i++)
            if ((page_table[i].bytes_used != 0)
                && (page_table[i].gen == SCRATCH_GENERATION))
                page_table[i].gen = generation;
        gc_assert(generations[generation].bytes_allocated == 0);
        generations[generation].bytes_allocated =
            generations[SCRATCH_GENERATION].bytes_allocated;
        generations[SCRATCH_GENERATION].bytes_allocated = 0;
    }
    /* Reset the alloc_start_page for generation. */
    generations[generation].alloc_start_page = 0;
    generations[generation].alloc_unboxed_start_page = 0;
    generations[generation].alloc_large_start_page = 0;
    generations[generation].alloc_large_unboxed_start_page = 0;

    if (generation >= verify_gens) {
        if (gencgc_verbose)
            SHOW("verifying");
        verify_gc();
        verify_dynamic_space();
    }
    /* Set the new gc trigger for the GCed generation. */
    generations[generation].gc_trigger =
        generations[generation].bytes_allocated
        + generations[generation].bytes_consed_between_gc;
    if (raise)
        generations[generation].num_gc = 0;
    else
        ++generations[generation].num_gc;

#ifdef LUTEX_WIDETAG
    reap_lutexes(generation);
    if (raise)
        move_lutexes(generation, generation+1);
#endif
}
/* Update last_free_page, then SymbolValue(ALLOCATION_POINTER). */
long
update_dynamic_space_free_pointer(void)
{
    page_index_t last_page = -1, i;

    for (i = 0; i < last_free_page; i++)
        if (page_allocated_p(i) && (page_table[i].bytes_used != 0))
            last_page = i;

    last_free_page = last_page+1;

    set_alloc_pointer((lispobj)(page_address(last_free_page)));
    return 0; /* dummy value: return something ... */
}
static void
remap_free_pages (page_index_t from, page_index_t to)
{
    page_index_t first_page, last_page;

    for (first_page = from; first_page <= to; first_page++) {
        if (page_allocated_p(first_page) ||
            (page_table[first_page].need_to_zero == 0)) {
            continue;
        }

        last_page = first_page + 1;
        while (page_free_p(last_page) &&
               (last_page < to) &&
               (page_table[last_page].need_to_zero == 1)) {
            last_page++;
        }

        /* There's a mysterious Solaris/x86 problem with using mmap
         * tricks for memory zeroing. See sbcl-devel thread
         * "Re: patch: standalone executable redux".
         */
#if defined(LISP_FEATURE_SUNOS)
        zero_pages(first_page, last_page-1);
#else
        zero_pages_with_mmap(first_page, last_page-1);
#endif

        first_page = last_page;
    }
}
generation_index_t small_generation_limit = 1;

/* GC all generations newer than last_gen, raising the objects in each
 * to the next older generation - we finish when all generations below
 * last_gen are empty. Then if last_gen is due for a GC, or if
 * last_gen==NUM_GENERATIONS (the scratch generation? eh?) we GC that
 * too. The valid range for last_gen is: 0,1,...,NUM_GENERATIONS.
 *
 * We stop collecting at gencgc_oldest_gen_to_gc, even if this is less than
 * last_gen (oh, and note that by default it is NUM_GENERATIONS-1) */
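/* Usage sketch (illustrative only): collect_garbage(0) performs a
 * nursery collection, raising survivors into generation 1, while
 * collect_garbage(HIGHEST_NORMAL_GENERATION+1) requests a full GC, as
 * gc_and_save() does below. */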
void
collect_garbage(generation_index_t last_gen)
{
    generation_index_t gen = 0, i;
    int raise;
    int gen_to_wp;
    /* The largest value of last_free_page seen since the time
     * remap_free_pages was called. */
    static page_index_t high_water_mark = 0;

    FSHOW((stderr, "/entering collect_garbage(%d)\n", last_gen));
    if (last_gen > HIGHEST_NORMAL_GENERATION+1) {
        FSHOW((stderr,
               "/collect_garbage: last_gen = %d, doing a level 0 GC\n",
               last_gen));
        last_gen = 0;
    }

    /* Flush the alloc regions updating the tables. */
    gc_alloc_update_all_page_tables();

    /* Verify the new objects created by Lisp code. */
    if (pre_verify_gen_0) {
        FSHOW((stderr, "pre-checking generation 0\n"));
        verify_generation(0);
    }

    if (gencgc_verbose > 1)
        print_generation_stats(0);
    do {
        /* Collect the generation. */

        if (gen >= gencgc_oldest_gen_to_gc) {
            /* Never raise the oldest generation. */
            raise = 0;
        } else {
            raise =
                (gen < last_gen)
                || (generations[gen].num_gc >= generations[gen].trigger_age);
        }

        if (gencgc_verbose > 1) {
            FSHOW((stderr,
                   "starting GC of generation %d with raise=%d alloc=%d trig=%d GCs=%d\n",
                   gen,
                   raise,
                   generations[gen].bytes_allocated,
                   generations[gen].gc_trigger,
                   generations[gen].num_gc));
        }
        /* If an older generation is being filled, then update its
         * memory age. */
        if (raise == 1) {
            generations[gen+1].cum_sum_bytes_allocated +=
                generations[gen+1].bytes_allocated;
        }

        garbage_collect_generation(gen, raise);

        /* Reset the memory age cum_sum. */
        generations[gen].cum_sum_bytes_allocated = 0;

        if (gencgc_verbose > 1) {
            FSHOW((stderr, "GC of generation %d finished:\n", gen));
            print_generation_stats(0);
        }

        gen++;
    } while ((gen <= gencgc_oldest_gen_to_gc)
             && ((gen < last_gen)
                 || ((gen <= gencgc_oldest_gen_to_gc)
                     && raise
                     && (generations[gen].bytes_allocated
                         > generations[gen].gc_trigger)
                     && (gen_av_mem_age(gen)
                         > generations[gen].min_av_mem_age))));
    /* Now if gen-1 was raised all generations before gen are empty.
     * If it wasn't raised then all generations before gen-1 are empty.
     *
     * Now objects within this gen's pages cannot point to younger
     * generations unless they are written to. This can be exploited
     * by write-protecting the pages of gen; then when younger
     * generations are GCed only the pages which have been written
     * need scanning. */
    if (raise)
        gen_to_wp = gen;
    else
        gen_to_wp = gen - 1;

    /* There's not much point in WPing pages in generation 0 as it is
     * never scavenged (except promoted pages). */
    if ((gen_to_wp > 0) && enable_page_protection) {
        /* Check that they are all empty. */
        for (i = 0; i < gen_to_wp; i++) {
            if (generations[i].bytes_allocated)
                lose("trying to write-protect gen. %d when gen. %d nonempty\n",
                     gen_to_wp, i);
        }

        write_protect_generation_pages(gen_to_wp);
    }
    /* Set gc_alloc() back to generation 0. The current regions should
     * be flushed after the above GCs. */
    gc_assert((boxed_region.free_pointer - boxed_region.start_addr) == 0);
    gc_alloc_generation = 0;

    /* Save the high-water mark before updating last_free_page */
    if (last_free_page > high_water_mark)
        high_water_mark = last_free_page;

    update_dynamic_space_free_pointer();

    auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
    if(gencgc_verbose)
        fprintf(stderr,"Next gc when %ld bytes have been consed\n",
                auto_gc_trigger);

    /* If we did a big GC (arbitrarily defined as gen > 1), release memory
     * back to the OS.
     */
    if (gen > small_generation_limit) {
        if (last_free_page > high_water_mark)
            high_water_mark = last_free_page;
        remap_free_pages(0, high_water_mark);
        high_water_mark = 0;
    }

    SHOW("returning from collect_garbage");
}
/* This is called by Lisp PURIFY when it is finished. All live objects
 * will have been moved to the RO and Static heaps. The dynamic space
 * will need a full re-initialization. We don't bother having Lisp
 * PURIFY flush the current gc_alloc() region, as the page_tables are
 * re-initialized, and every page is zeroed to be sure. */
void
gc_free_heap(void)
{
    page_index_t page;

    if (gencgc_verbose > 1)
        SHOW("entering gc_free_heap");
    for (page = 0; page < page_table_pages; page++) {
        /* Skip free pages which should already be zero filled. */
        if (page_allocated_p(page)) {
            void *page_start, *addr;

            /* Mark the page free. The other slots are assumed invalid
             * when it is a FREE_PAGE_FLAG and bytes_used is 0 and it
             * should not be write-protected -- except that the
             * generation is used for the current region but it sets
             * it to 0 anyway. */
            page_table[page].allocated = FREE_PAGE_FLAG;
            page_table[page].bytes_used = 0;

#ifndef LISP_FEATURE_WIN32 /* Pages already zeroed on win32? Not sure
                            * about this change. */
            /* Zero the page. */
            page_start = (void *)page_address(page);

            /* First, remove any write-protection. */
            os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
            page_table[page].write_protected = 0;

            os_invalidate(page_start,PAGE_BYTES);
            addr = os_validate(page_start,PAGE_BYTES);
            if (addr == NULL || addr != page_start) {
                lose("gc_free_heap: page moved, 0x%08x ==> 0x%08x\n",
                     page_start, addr);
            }
#else
            page_table[page].write_protected = 0;
#endif
        } else if (gencgc_zero_check_during_free_heap) {
            /* Double-check that the page is zero filled. */
            long *page_start;
            page_index_t i;
            gc_assert(page_free_p(page));
            gc_assert(page_table[page].bytes_used == 0);
            page_start = (long *)page_address(page);
            for (i=0; i<1024; i++) {
                if (page_start[i] != 0) {
                    lose("free region not zero at %x\n", page_start + i);
                }
            }
        }
    }
    bytes_allocated = 0;

    /* Initialize the generations. */
    for (page = 0; page < NUM_GENERATIONS; page++) {
        generations[page].alloc_start_page = 0;
        generations[page].alloc_unboxed_start_page = 0;
        generations[page].alloc_large_start_page = 0;
        generations[page].alloc_large_unboxed_start_page = 0;
        generations[page].bytes_allocated = 0;
        generations[page].gc_trigger = 2000000;
        generations[page].num_gc = 0;
        generations[page].cum_sum_bytes_allocated = 0;
        generations[page].lutexes = NULL;
    }

    if (gencgc_verbose > 1)
        print_generation_stats(0);

    /* Initialize gc_alloc(). */
    gc_alloc_generation = 0;

    gc_set_region_empty(&boxed_region);
    gc_set_region_empty(&unboxed_region);

    last_free_page = 0;
    set_alloc_pointer((lispobj)((char *)heap_base));

    if (verify_after_free_heap) {
        /* Check whether purify has left any bad pointers. */
        FSHOW((stderr, "checking after free_heap\n"));
        verify_gc();
    }
}
void
gc_init(void)
{
    page_index_t i;

    /* Compute the number of pages needed for the dynamic space.
     * Dynamic space size should be aligned on page size. */
    page_table_pages = dynamic_space_size/PAGE_BYTES;
    gc_assert(dynamic_space_size == npage_bytes(page_table_pages));
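    /* Example of the arithmetic (illustrative only): with a 512MB
     * dynamic space and 4096-byte pages, page_table_pages comes to
     * 536870912/4096 = 131072 entries; the assertion above catches a
     * dynamic space size that is not an exact multiple of PAGE_BYTES. */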
    page_table = calloc(page_table_pages, sizeof(struct page));
    gc_assert(page_table);
    gc_init_tables();
    scavtab[WEAK_POINTER_WIDETAG] = scav_weak_pointer;
    transother[SIMPLE_ARRAY_WIDETAG] = trans_boxed_large;

#ifdef LUTEX_WIDETAG
    scavtab[LUTEX_WIDETAG] = scav_lutex;
    transother[LUTEX_WIDETAG] = trans_lutex;
    sizetab[LUTEX_WIDETAG] = size_lutex;
#endif

    heap_base = (void*)DYNAMIC_SPACE_START;
    /* Initialize each page structure. */
    for (i = 0; i < page_table_pages; i++) {
        /* Initialize all pages as free. */
        page_table[i].allocated = FREE_PAGE_FLAG;
        page_table[i].bytes_used = 0;

        /* Pages are not write-protected at startup. */
        page_table[i].write_protected = 0;
    }

    bytes_allocated = 0;

    /* Initialize the generations.
     *
     * FIXME: very similar to code in gc_free_heap(), should be shared */
    for (i = 0; i < NUM_GENERATIONS; i++) {
        generations[i].alloc_start_page = 0;
        generations[i].alloc_unboxed_start_page = 0;
        generations[i].alloc_large_start_page = 0;
        generations[i].alloc_large_unboxed_start_page = 0;
        generations[i].bytes_allocated = 0;
        generations[i].gc_trigger = 2000000;
        generations[i].num_gc = 0;
        generations[i].cum_sum_bytes_allocated = 0;
        /* the tune-able parameters */
        generations[i].bytes_consed_between_gc = 2000000;
        generations[i].trigger_age = 1;
        generations[i].min_av_mem_age = 0.75;
        generations[i].lutexes = NULL;
    }

    /* Initialize gc_alloc. */
    gc_alloc_generation = 0;
    gc_set_region_empty(&boxed_region);
    gc_set_region_empty(&unboxed_region);

    last_free_page = 0;
}
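/* Tuning note (illustrative, not from the original source): with the
 * defaults set above, a generation becomes due for collection after
 * roughly 2000000 bytes are consed into it (gc_trigger), is raised
 * after one GC (trigger_age = 1), and an older generation is collected
 * ahead of schedule only when its average memory age, computed as
 * cum_sum_bytes_allocated / bytes_allocated, exceeds min_av_mem_age =
 * 0.75. */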
/* Pick up the dynamic space from after a core load.
 *
 * The ALLOCATION_POINTER points to the end of the dynamic space.
 */
static void
gencgc_pickup_dynamic(void)
{
    page_index_t page = 0;
    void *alloc_ptr = (void *)get_alloc_pointer();
    lispobj *prev=(lispobj *)page_address(page);
    generation_index_t gen = PSEUDO_STATIC_GENERATION;
    do {
        lispobj *first,*ptr= (lispobj *)page_address(page);
        page_table[page].allocated = BOXED_PAGE_FLAG;
        page_table[page].gen = gen;
        page_table[page].bytes_used = PAGE_BYTES;
        page_table[page].large_object = 0;
        page_table[page].write_protected = 0;
        page_table[page].write_protected_cleared = 0;
        page_table[page].dont_move = 0;
        page_table[page].need_to_zero = 1;

        if (!gencgc_partial_pickup) {
            first=gc_search_space(prev,(ptr+2)-prev,ptr);
            if(ptr == first) prev=ptr;
            page_table[page].region_start_offset =
                page_address(page) - (void *)prev;
        }
        page++;
    } while (page_address(page) < alloc_ptr);

#ifdef LUTEX_WIDETAG
    /* Lutexes have been registered in generation 0 by coreparse, and
     * need to be moved to the right one manually.
     */
    move_lutexes(0, PSEUDO_STATIC_GENERATION);
#endif

    last_free_page = page;

    generations[gen].bytes_allocated = npage_bytes(page);
    bytes_allocated = npage_bytes(page);

    gc_alloc_update_all_page_tables();
    write_protect_generation_pages(gen);
}
void
gc_initialize_pointers(void)
{
    gencgc_pickup_dynamic();
}
/* alloc(..) is the external interface for memory allocation. It
 * allocates to generation 0. It is not called from within the garbage
 * collector as it is only external uses that need the check for heap
 * size (GC trigger) and to disable the interrupts (interrupts are
 * always disabled during a GC).
 *
 * The vops that call alloc(..) assume that the returned space is zero-filled.
 * (E.g. the most significant word of a 2-word bignum in MOVE-FROM-UNSIGNED.)
 *
 * The check for a GC trigger is only performed when the current
 * region is full, so in most cases it's not needed. */
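/* Fast-path sketch (illustrative only; it mirrors the code below): an
 * allocation first tries a simple pointer bump within the current
 * region,
 *
 *     new_free_pointer = region->free_pointer + nbytes;
 *     if (new_free_pointer <= region->end_addr) {
 *         new_obj = region->free_pointer;
 *         region->free_pointer = new_free_pointer;
 *         return new_obj;
 *     }
 *
 * and only falls into gc_alloc_with_region() -- possibly noting a
 * pending GC first -- when the region is exhausted. */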
static inline lispobj *
general_alloc_internal(long nbytes, int page_type_flag, struct alloc_region *region,
                       struct thread *thread)
{
#ifndef LISP_FEATURE_WIN32
    lispobj alloc_signal;
#endif
    void *new_obj;
    void *new_free_pointer;

    gc_assert(nbytes>0);

    /* Check for alignment allocation problems. */
    gc_assert((((unsigned long)region->free_pointer & LOWTAG_MASK) == 0)
              && ((nbytes & LOWTAG_MASK) == 0));

    /* Must be inside a PA section. */
    gc_assert(get_pseudo_atomic_atomic(thread));

    /* maybe we can do this quickly ... */
    new_free_pointer = region->free_pointer + nbytes;
    if (new_free_pointer <= region->end_addr) {
        new_obj = (void*)(region->free_pointer);
        region->free_pointer = new_free_pointer;
        return(new_obj);        /* yup */
    }
    /* we have to go the long way around, it seems. Check whether we
     * should GC in the near future
     */
    if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) {
        /* Don't flood the system with interrupts if the need to gc is
         * already noted. This can happen for example when SUB-GC
         * allocates or after a gc triggered in a WITHOUT-GCING. */
        if (SymbolValue(GC_PENDING,thread) == NIL) {
            /* set things up so that GC happens when we finish the PA
             * section */
            SetSymbolValue(GC_PENDING,T,thread);
            if (SymbolValue(GC_INHIBIT,thread) == NIL)
                set_pseudo_atomic_interrupted(thread);
        }
    }
    new_obj = gc_alloc_with_region(nbytes, page_type_flag, region, 0);

#ifndef LISP_FEATURE_WIN32
    alloc_signal = SymbolValue(ALLOC_SIGNAL,thread);
    if ((alloc_signal & FIXNUM_TAG_MASK) == 0) {
        if ((signed long) alloc_signal <= 0) {
            SetSymbolValue(ALLOC_SIGNAL, T, thread);
#ifdef LISP_FEATURE_SB_THREAD
            kill_thread_safely(thread->os_thread, SIGPROF);
#else
            raise(SIGPROF);
#endif
        } else {
            SetSymbolValue(ALLOC_SIGNAL,
                           alloc_signal - (1 << N_FIXNUM_TAG_BITS),
                           thread);
        }
    }
#endif

    return (new_obj);
}
lispobj *
general_alloc(long nbytes, int page_type_flag)
{
    struct thread *thread = arch_os_get_current_thread();
    /* Select correct region, and call general_alloc_internal with it.
     * For other than boxed allocation we must lock first, since the
     * region is shared. */
    if (BOXED_PAGE_FLAG & page_type_flag) {
#ifdef LISP_FEATURE_SB_THREAD
        struct alloc_region *region = (thread ? &(thread->alloc_region) : &boxed_region);
#else
        struct alloc_region *region = &boxed_region;
#endif
        return general_alloc_internal(nbytes, page_type_flag, region, thread);
    } else if (UNBOXED_PAGE_FLAG == page_type_flag) {
        lispobj * obj;
        gc_assert(0 == thread_mutex_lock(&allocation_lock));
        obj = general_alloc_internal(nbytes, page_type_flag, &unboxed_region, thread);
        gc_assert(0 == thread_mutex_unlock(&allocation_lock));
        return obj;
    } else {
        lose("bad page type flag: %d", page_type_flag);
    }
}

lispobj *
alloc(long nbytes)
{
    return general_alloc(nbytes, BOXED_PAGE_FLAG);
}
/*
 * shared support for the OS-dependent signal handlers which
 * catch GENCGC-related write-protect violations
 */
void unhandled_sigmemoryfault(void* addr);

/* Depending on which OS we're running under, different signals might
 * be raised for a violation of write protection in the heap. This
 * function factors out the common generational GC magic which needs
 * to be invoked in this case, and should be called from whatever signal
 * handler is appropriate for the OS we're running under.
 *
 * Return true if this signal is a normal generational GC thing that
 * we were able to handle, or false if it was abnormal and control
 * should fall through to the general SIGSEGV/SIGBUS/whatever logic. */
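/* Usage sketch (illustrative only; the real callers live in the
 * OS-specific signal handling code): a SIGSEGV handler might do
 *
 *     if (gencgc_handle_wp_violation(fault_addr))
 *         return;   (the page was unprotected; the write is retried)
 *
 * and otherwise fall through to its generic SIGSEGV logic, so that
 * write-barrier faults are consumed silently while everything else
 * keeps its normal meaning. */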
int
gencgc_handle_wp_violation(void* fault_addr)
{
    page_index_t page_index = find_page_index(fault_addr);

#ifdef QSHOW_SIGNALS
    FSHOW((stderr, "heap WP violation? fault_addr=%x, page_index=%d\n",
           fault_addr, page_index));
#endif

    /* Check whether the fault is within the dynamic space. */
    if (page_index == (-1)) {

        /* It can be helpful to be able to put a breakpoint on this
         * case to help diagnose low-level problems. */
        unhandled_sigmemoryfault(fault_addr);

        /* not within the dynamic space -- not our responsibility */
        return 0;

    } else {
        if (page_table[page_index].write_protected) {
            /* Unprotect the page. */
            os_protect(page_address(page_index), PAGE_BYTES, OS_VM_PROT_ALL);
            page_table[page_index].write_protected_cleared = 1;
            page_table[page_index].write_protected = 0;
        } else {
            /* The only acceptable reason for this signal on a heap
             * access is that GENCGC write-protected the page.
             * However, if two CPUs hit a wp page near-simultaneously,
             * we had better not have the second one lose here if it
             * does this test after the first one has already set wp=0
             */
            if(page_table[page_index].write_protected_cleared != 1)
                lose("fault in heap page %d not marked as write-protected\nboxed_region.first_page: %d, boxed_region.last_page %d\n",
                     page_index, boxed_region.first_page,
                     boxed_region.last_page);
        }
        /* Don't worry, we can handle it. */
        return 1;
    }
}
/* This is to be called when we catch a SIGSEGV/SIGBUS, determine that
 * it's not just a case of the program hitting the write barrier, and
 * are about to let Lisp deal with it. It's basically just a
 * convenient place to set a gdb breakpoint. */
void
unhandled_sigmemoryfault(void *addr)
{}
void gc_alloc_update_all_page_tables(void)
{
    /* Flush the alloc regions updating the tables. */
    struct thread *th;
    for_each_thread(th)
        gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->alloc_region);
    gc_alloc_update_page_tables(UNBOXED_PAGE_FLAG, &unboxed_region);
    gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &boxed_region);
}
void
gc_set_region_empty(struct alloc_region *region)
{
    region->first_page = 0;
    region->last_page = -1;
    region->start_addr = page_address(0);
    region->free_pointer = page_address(0);
    region->end_addr = page_address(0);
}
static void
zero_all_free_pages()
{
    page_index_t i;

    for (i = 0; i < last_free_page; i++) {
        if (page_free_p(i)) {
#ifdef READ_PROTECT_FREE_PAGES
            os_protect(page_address(i),
                       PAGE_BYTES,
                       OS_VM_PROT_ALL);
#endif
            zero_pages(i, i);
        }
    }
}
/* Things to do before doing a final GC before saving a core (without
 * other threads).
 *
 * + Pages in large_object pages aren't moved by the GC, so we need to
 *   unset that flag from all pages.
 * + The pseudo-static generation isn't normally collected, but it seems
 *   reasonable to collect it at least when saving a core. So move the
 *   pages to a normal generation.
 */
static void
prepare_for_final_gc ()
{
    page_index_t i;
    for (i = 0; i < last_free_page; i++) {
        page_table[i].large_object = 0;
        if (page_table[i].gen == PSEUDO_STATIC_GENERATION) {
            int used = page_table[i].bytes_used;
            page_table[i].gen = HIGHEST_NORMAL_GENERATION;
            generations[PSEUDO_STATIC_GENERATION].bytes_allocated -= used;
            generations[HIGHEST_NORMAL_GENERATION].bytes_allocated += used;
        }
    }
}
/* Do a non-conservative GC, and then save a core with the initial
 * function being set to the value of the static symbol
 * SB!VM:RESTART-LISP-FUNCTION */
void
gc_and_save(char *filename, boolean prepend_runtime,
            boolean save_runtime_options)
{
    FILE *file;
    void *runtime_bytes = NULL;
    size_t runtime_size;

    file = prepare_to_save(filename, prepend_runtime, &runtime_bytes,
                           &runtime_size);
    if (file == NULL)
        return;

    conservative_stack = 0;

    /* The filename might come from Lisp, and be moved by the now
     * non-conservative GC. */
    filename = strdup(filename);

    /* Collect twice: once into relatively high memory, and then back
     * into low memory. This compacts the retained data into the lower
     * pages, minimizing the size of the core file.
     */
    prepare_for_final_gc();
    gencgc_alloc_start_page = last_free_page;
    collect_garbage(HIGHEST_NORMAL_GENERATION+1);

    prepare_for_final_gc();
    gencgc_alloc_start_page = -1;
    collect_garbage(HIGHEST_NORMAL_GENERATION+1);

    if (prepend_runtime)
        save_runtime_to_filehandle(file, runtime_bytes, runtime_size);

    /* The dumper doesn't know that pages need to be zeroed before use. */
    zero_all_free_pages();
    save_to_filehandle(file, filename, SymbolValue(RESTART_LISP_FUNCTION,0),
                       prepend_runtime, save_runtime_options);
    /* Oops. Save still managed to fail. Since we've mangled the stack
     * beyond hope, there's not much we can do.
     * (beyond FUNCALLing RESTART_LISP_FUNCTION, but I suspect that's
     * going to be rather unsatisfactory too... */
    lose("Attempt to save core after non-conservative GC failed.\n");
}