/*
 * GENerational Conservative Garbage Collector for SBCL
 */

/*
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 */
/*
 * For a review of garbage collection techniques (e.g. generational
 * GC) and terminology (e.g. "scavenging") see Paul R. Wilson,
 * "Uniprocessor Garbage Collection Techniques". As of 20000618, this
 * had been accepted for _ACM Computing Surveys_ and was available
 * as a PostScript preprint through
 *   <http://www.cs.utexas.edu/users/oops/papers.html>
 * as
 *   <ftp://ftp.cs.utexas.edu/pub/garbage/bigsurv.ps>.
 */
#include "interrupt.h"
#include "gc-internal.h"
#include "genesis/vector.h"
#include "genesis/weak-pointer.h"
#include "genesis/fdefn.h"
#include "genesis/simple-fun.h"
#include "genesis/hash-table.h"
#include "genesis/instance.h"
#include "genesis/layout.h"

#if defined(LUTEX_WIDETAG)
#include "pthread-lutex.h"
#endif
/* forward declarations */
page_index_t gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes,
                                   int page_type_flag);
/* Generations 0-5 are normal collected generations, 6 is only used as
 * scratch space by the collector, and should never get collected.
 */
enum {
    HIGHEST_NORMAL_GENERATION = 5,
    PSEUDO_STATIC_GENERATION,
    SCRATCH_GENERATION,
    NUM_GENERATIONS
};
/* Should we use page protection to help avoid the scavenging of pages
 * that don't have pointers to younger generations? */
boolean enable_page_protection = 1;

/* the minimum size (in bytes) for a large object */
long large_object_size = 4 * PAGE_BYTES;
/* the verbosity level. All non-error messages are disabled at level 0;
 * and only a few rare messages are printed at level 1. */
#ifdef QSHOW
boolean gencgc_verbose = 1;
#else
boolean gencgc_verbose = 0;
#endif
/* FIXME: At some point enable the various error-checking things below
 * and see what they say. */

/* We hunt for pointers to old-space, when GCing generations >= verify_gens.
 * Set verify_gens to HIGHEST_NORMAL_GENERATION + 1 to disable this kind of
 * check. */
generation_index_t verify_gens = HIGHEST_NORMAL_GENERATION + 1;
/* Should we do a pre-scan verify of generation 0 before it's GCed? */
boolean pre_verify_gen_0 = 0;

/* Should we check for bad pointers after gc_free_heap is called
 * from Lisp PURIFY? */
boolean verify_after_free_heap = 0;

/* Should we print a note when code objects are found in the dynamic space
 * during a heap verify? */
boolean verify_dynamic_code_check = 0;

/* Should we check code objects for fixup errors after they are transported? */
boolean check_code_fixups = 0;

/* Should we check that newly allocated regions are zero filled? */
boolean gencgc_zero_check = 0;

/* Should we check that the free space is zero filled? */
boolean gencgc_enable_verify_zero_fill = 0;

/* Should we check that free pages are zero filled during gc_free_heap
 * called after Lisp PURIFY? */
boolean gencgc_zero_check_during_free_heap = 0;

/* When loading a core, don't do a full scan of the memory for the
 * memory region boundaries. (Set to true by coreparse.c if the core
 * contained a pagetable entry). */
boolean gencgc_partial_pickup = 0;
/* If defined, free pages are read-protected to ensure that nothing
 * accesses them. */
/* #define READ_PROTECT_FREE_PAGES */
/*
 * GC structures and variables
 */
/* the total bytes allocated. These are seen by Lisp DYNAMIC-USAGE. */
unsigned long bytes_allocated = 0;
unsigned long auto_gc_trigger = 0;
/* the source and destination generations. These are set before a GC starts
 * scavenging. */
generation_index_t from_space;
generation_index_t new_space;
/* Set to 1 when in GC */
boolean gc_active_p = 0;
/* Should the GC be conservative on stack? If false (only right before
 * saving a core), don't scan the stack / mark pages dont_move. */
static boolean conservative_stack = 1;
/* An array of page structures is allocated on gc initialization.
 * This helps quickly map between an address and its page structure.
 * page_table_pages is set from the size of the dynamic space. */
page_index_t page_table_pages;
struct page *page_table;
static inline boolean
page_allocated_p(page_index_t page) {
    return (page_table[page].allocated != FREE_PAGE_FLAG);
}

static inline boolean
page_no_region_p(page_index_t page) {
    return !(page_table[page].allocated & OPEN_REGION_PAGE_FLAG);
}

static inline boolean
page_allocated_no_region_p(page_index_t page) {
    return ((page_table[page].allocated & (UNBOXED_PAGE_FLAG | BOXED_PAGE_FLAG))
            && page_no_region_p(page));
}

static inline boolean
page_free_p(page_index_t page) {
    return (page_table[page].allocated == FREE_PAGE_FLAG);
}

static inline boolean
page_boxed_p(page_index_t page) {
    return (page_table[page].allocated & BOXED_PAGE_FLAG);
}

static inline boolean
code_page_p(page_index_t page) {
    return (page_table[page].allocated & CODE_PAGE_FLAG);
}

static inline boolean
page_boxed_no_region_p(page_index_t page) {
    return page_boxed_p(page) && page_no_region_p(page);
}

static inline boolean
page_unboxed_p(page_index_t page) {
    /* Both flags set == boxed code page */
    return ((page_table[page].allocated & UNBOXED_PAGE_FLAG)
            && !page_boxed_p(page));
}

static inline boolean
protect_page_p(page_index_t page, generation_index_t generation) {
    return (page_boxed_no_region_p(page)
            && (page_table[page].bytes_used != 0)
            && !page_table[page].dont_move
            && (page_table[page].gen == generation));
}
/* To map addresses to page structures the address of the first page
 * is needed. */
static void *heap_base = NULL;
/* Calculate the start address for the given page number. */
inline void *
page_address(page_index_t page_num)
{
    return (heap_base + (page_num * PAGE_BYTES));
}

/* Calculate the address where the allocation region associated with
 * the page starts. */
static inline void *
page_region_start(page_index_t page_index)
{
    return page_address(page_index)-page_table[page_index].region_start_offset;
}

/* Find the page index within the page_table for the given
 * address. Return -1 on failure. */
inline page_index_t
find_page_index(void *addr)
{
    if (addr >= heap_base) {
        page_index_t index = ((pointer_sized_uint_t)addr -
                              (pointer_sized_uint_t)heap_base) / PAGE_BYTES;
        if (index < page_table_pages)
            return (index);
    }
    return (-1);
}
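/* Illustrative sketch, not part of the collector proper: page_address()
 * and find_page_index() are inverses for addresses inside the dynamic
 * space, which is what makes the O(1) address-to-page mapping above
 * work. The page number used below is hypothetical. */
#if 0
static void
example_page_mapping_roundtrip(void)
{
    page_index_t page = 3;              /* hypothetical page number */
    void *addr = page_address(page);    /* first byte of that page */

    /* Any address within the page maps back to the same index. */
    gc_assert(find_page_index(addr) == page);
    gc_assert(find_page_index(addr + PAGE_BYTES - 1) == page);

    /* Addresses below heap_base map to -1. */
    gc_assert(find_page_index(NULL) == -1);
}
#endif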
static size_t
npage_bytes(long npages)
{
    gc_assert(npages >= 0);
    return ((unsigned long)npages)*PAGE_BYTES;
}
/* Check that X is a higher address than Y and return offset from Y to
 * X. */
static inline
size_t void_diff(void *x, void *y)
{
    gc_assert(x >= y);
    return (pointer_sized_uint_t)x - (pointer_sized_uint_t)y;
}
/* a structure to hold the state of a generation */
struct generation {

    /* the first page that gc_alloc() checks on its next call */
    page_index_t alloc_start_page;

    /* the first page that gc_alloc_unboxed() checks on its next call */
    page_index_t alloc_unboxed_start_page;

    /* the first page that gc_alloc_large (boxed) considers on its next
     * call. (Although it always allocates after the boxed_region.) */
    page_index_t alloc_large_start_page;

    /* the first page that gc_alloc_large (unboxed) considers on its
     * next call. (Although it always allocates after the
     * current_unboxed_region.) */
    page_index_t alloc_large_unboxed_start_page;

    /* the bytes allocated to this generation */
    unsigned long bytes_allocated;

    /* the number of bytes at which to trigger a GC */
    unsigned long gc_trigger;

    /* to calculate a new level for gc_trigger */
    unsigned long bytes_consed_between_gc;

    /* the number of GCs since the last raise */
    int num_gc;

    /* the average age after which a GC will raise objects to the
     * next generation */
    int trigger_age;

    /* the cumulative sum of the bytes allocated to this generation. It is
     * cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation. Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC. */
    unsigned long cum_sum_bytes_allocated;

    /* a minimum average memory age before a GC will occur helps
     * prevent a GC when a large number of new live objects have been
     * added, in which case a GC could be a waste of time */
    double min_av_mem_age;

    /* A linked list of lutex structures in this generation, used for
     * implementing lutex finalization. */
    struct lutex *lutexes;
};
/* an array of generation structures. There needs to be one more
 * generation structure than actual generations as the oldest
 * generation is temporarily raised then lowered. */
struct generation generations[NUM_GENERATIONS];
/* the oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... HIGHEST_NORMAL_GENERATION
 *
 * The default of HIGHEST_NORMAL_GENERATION enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided. */
generation_index_t gencgc_oldest_gen_to_gc = HIGHEST_NORMAL_GENERATION;
/* The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the Lisp code. */
page_index_t last_free_page;
#ifdef LISP_FEATURE_SB_THREAD
/* This lock is to prevent multiple threads from simultaneously
 * allocating new regions which overlap each other. Note that the
 * majority of GC is single-threaded, but alloc() may be called from
 * >1 thread at a time and must be thread-safe. This lock must be
 * seized before all accesses to generations[] or to parts of
 * page_table[] that other threads may want to see */
static pthread_mutex_t free_pages_lock = PTHREAD_MUTEX_INITIALIZER;

/* This lock is used to protect non-thread-local allocation. */
static pthread_mutex_t allocation_lock = PTHREAD_MUTEX_INITIALIZER;
#endif
/*
 * miscellaneous heap functions
 */
/* Count the number of pages which are write-protected within the
 * given generation. */
static unsigned long
count_write_protect_generation_pages(generation_index_t generation)
{
    page_index_t i;
    unsigned long count = 0;

    for (i = 0; i < last_free_page; i++)
        if (page_allocated_p(i)
            && (page_table[i].gen == generation)
            && (page_table[i].write_protected == 1))
            count++;
    return count;
}
/* Count the number of pages within the given generation. */
static long
count_generation_pages(generation_index_t generation)
{
    page_index_t i;
    long count = 0;

    for (i = 0; i < last_free_page; i++)
        if (page_allocated_p(i)
            && (page_table[i].gen == generation))
            count++;
    return count;
}
static long
count_dont_move_pages(void)
{
    page_index_t i;
    long count = 0;
    for (i = 0; i < last_free_page; i++) {
        if (page_allocated_p(i)
            && (page_table[i].dont_move != 0)) {
            count++;
        }
    }
    return count;
}
/* Work through the pages and add up the number of bytes used for the
 * given generation. */
static unsigned long
count_generation_bytes_allocated (generation_index_t gen)
{
    page_index_t i;
    unsigned long result = 0;
    for (i = 0; i < last_free_page; i++) {
        if (page_allocated_p(i)
            && (page_table[i].gen == gen))
            result += page_table[i].bytes_used;
    }
    return result;
}
/* Return the average age of the memory in a generation. */
static double
gen_av_mem_age(generation_index_t gen)
{
    if (generations[gen].bytes_allocated == 0)
        return 0.0;

    return
        ((double)generations[gen].cum_sum_bytes_allocated)
        / ((double)generations[gen].bytes_allocated);
}
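/* Worked example (illustrative): with cum_sum_bytes_allocated = 3000
 * and bytes_allocated = 6000, the average age is 3000/6000 = 0.5,
 * i.e. on average each byte in the generation has been present for
 * half of one GC cycle of this generation. */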
/* The verbose argument controls how much to print: 0 for normal
 * level of detail; 1 for debugging. */
static void
print_generation_stats(int verbose) /* FIXME: should take FILE argument */
{
    generation_index_t i, gens;

#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
#define FPU_STATE_SIZE 27
    int fpu_state[FPU_STATE_SIZE];
#elif defined(LISP_FEATURE_PPC)
#define FPU_STATE_SIZE 32
    long long fpu_state[FPU_STATE_SIZE];
#endif

    /* This code uses the FP instructions which may be set up for Lisp
     * so they need to be saved and reset for C. */
    fpu_save(fpu_state);

    /* highest generation to print */
    if (verbose)
        gens = SCRATCH_GENERATION;
    else
        gens = PSEUDO_STATIC_GENERATION;

    /* Print the heap stats. */
    fprintf(stderr,
            " Gen StaPg UbSta LaSta LUbSt Boxed Unboxed LB LUB !move Alloc Waste Trig WP GCs Mem-age\n");

    for (i = 0; i < gens; i++) {
        page_index_t j;
        long boxed_cnt = 0;
        long unboxed_cnt = 0;
        long large_boxed_cnt = 0;
        long large_unboxed_cnt = 0;
        long pinned_cnt = 0;

        for (j = 0; j < last_free_page; j++)
            if (page_table[j].gen == i) {

                /* Count the number of boxed pages within the given
                 * generation. */
                if (page_boxed_p(j)) {
                    if (page_table[j].large_object)
                        large_boxed_cnt++;
                    else
                        boxed_cnt++;
                }
                if(page_table[j].dont_move) pinned_cnt++;
                /* Count the number of unboxed pages within the given
                 * generation. */
                if (page_unboxed_p(j)) {
                    if (page_table[j].large_object)
                        large_unboxed_cnt++;
                    else
                        unboxed_cnt++;
                }
            }

        gc_assert(generations[i].bytes_allocated
                  == count_generation_bytes_allocated(i));
        fprintf(stderr,
                " %1d: %5ld %5ld %5ld %5ld %5ld %5ld %5ld %5ld %5ld %8ld %5ld %8ld %4ld %3d %7.4f\n",
                i,
                generations[i].alloc_start_page,
                generations[i].alloc_unboxed_start_page,
                generations[i].alloc_large_start_page,
                generations[i].alloc_large_unboxed_start_page,
                boxed_cnt,
                unboxed_cnt,
                large_boxed_cnt,
                large_unboxed_cnt,
                pinned_cnt,
                generations[i].bytes_allocated,
                (npage_bytes(count_generation_pages(i))
                 - generations[i].bytes_allocated),
                generations[i].gc_trigger,
                count_write_protect_generation_pages(i),
                generations[i].num_gc,
                gen_av_mem_age(i));
    }
    fprintf(stderr,"   Total bytes allocated    = %lu\n", bytes_allocated);
    fprintf(stderr,"   Dynamic-space-size bytes = %lu\n", dynamic_space_size);

    fpu_restore(fpu_state);
}
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
void fast_bzero(void*, size_t); /* in <arch>-assem.S */
#endif
/* Zero the pages from START to END (inclusive), but use mmap/munmap instead
 * of zeroing it ourselves, i.e. in practice give the memory back to the
 * OS. Generally done after a large GC.
 */
void zero_pages_with_mmap(page_index_t start, page_index_t end) {
    page_index_t i;
    void *addr = page_address(start), *new_addr;
    size_t length = npage_bytes(1+end-start);

    if (start > end)
      return;

    os_invalidate(addr, length);
    new_addr = os_validate(addr, length);
    if (new_addr == NULL || new_addr != addr) {
        lose("remap_free_pages: page moved, 0x%08x ==> 0x%08x",
             start, new_addr);
    }

    for (i = start; i <= end; i++) {
        page_table[i].need_to_zero = 0;
    }
}
/* Zero the pages from START to END (inclusive). Generally done just after
 * a new region has been allocated.
 */
static void
zero_pages(page_index_t start, page_index_t end) {
    if (start > end)
      return;

#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    fast_bzero(page_address(start), npage_bytes(1+end-start));
#else
    bzero(page_address(start), npage_bytes(1+end-start));
#endif
}
/* Zero the pages from START to END (inclusive), except for those
 * pages that are known to be already zeroed. Mark all pages in the
 * ranges as non-zeroed.
 */
static void
zero_dirty_pages(page_index_t start, page_index_t end) {
    page_index_t i;

    for (i = start; i <= end; i++) {
        if (page_table[i].need_to_zero == 1) {
            zero_pages(start, end);
            break;
        }
    }

    for (i = start; i <= end; i++) {
        page_table[i].need_to_zero = 1;
    }
}
/*
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties
 * e.g. boxed/unboxed, generation, ages; there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page. Many
 * features of memory use are noted on a page wise basis, e.g. the
 * generation; so if a region starts within an existing allocated page
 * it must be consistent with this page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region. It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list which jumps all over the place to clean up the list. It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g. to detect pointers to the oldspace.
 * Thus it's important that the allocation regions have the correct
 * properties set when allocated, and not just set when closed. The
 * region allocation routines return regions with the specified
 * properties, and grab all the pages, setting their properties
 * appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer. The actual space used by the region is not reflected
 * in the page tables until it is closed. It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used, returning unused
 * space. Further it may be noted in the new areas, which is
 * necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region; the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects and so
 * don't need scavenging. Further they can't contain pointers to
 * younger generations so WP is not needed. By allocating pages to
 * unboxed objects the whole page never needs scavenging or
 * write-protecting. */
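/* Illustrative sketch, not part of the collector proper: the lifecycle
 * of an allocation region as described above, using routines defined
 * later in this file. Locking and error handling are elided; the
 * 128-byte request size is arbitrary. */
#if 0
static void
example_region_lifecycle(void)
{
    struct alloc_region region;

    /* Put the region into the reset state gc_alloc_new_region() expects. */
    gc_set_region_empty(&region);

    /* Open: grab pages and set their properties (generation etc.). */
    gc_alloc_new_region(128, BOXED_PAGE_FLAG, &region);

    /* Allocate inline by bumping region.free_pointer against
     * region.end_addr, as gc_alloc_with_region() does below. */

    /* Close: commit the space actually used to the page tables and
     * return the rest. */
    gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &region);
}
#endif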
/* We are only using two regions at present. Both are for the current
 * newspace generation. */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

/* The generation currently being allocated to. */
static generation_index_t gc_alloc_generation;
static inline page_index_t
generation_alloc_start_page(generation_index_t generation, int page_type_flag, int large)
{
    if (large) {
        if (UNBOXED_PAGE_FLAG == page_type_flag) {
            return generations[generation].alloc_large_unboxed_start_page;
        } else if (BOXED_PAGE_FLAG & page_type_flag) {
            /* Both code and data. */
            return generations[generation].alloc_large_start_page;
        } else {
            lose("bad page type flag: %d", page_type_flag);
        }
    } else {
        if (UNBOXED_PAGE_FLAG == page_type_flag) {
            return generations[generation].alloc_unboxed_start_page;
        } else if (BOXED_PAGE_FLAG & page_type_flag) {
            /* Both code and data. */
            return generations[generation].alloc_start_page;
        } else {
            lose("bad page_type_flag: %d", page_type_flag);
        }
    }
}

static inline void
set_generation_alloc_start_page(generation_index_t generation, int page_type_flag, int large,
                                page_index_t page)
{
    if (large) {
        if (UNBOXED_PAGE_FLAG == page_type_flag) {
            generations[generation].alloc_large_unboxed_start_page = page;
        } else if (BOXED_PAGE_FLAG & page_type_flag) {
            /* Both code and data. */
            generations[generation].alloc_large_start_page = page;
        } else {
            lose("bad page type flag: %d", page_type_flag);
        }
    } else {
        if (UNBOXED_PAGE_FLAG == page_type_flag) {
            generations[generation].alloc_unboxed_start_page = page;
        } else if (BOXED_PAGE_FLAG & page_type_flag) {
            /* Both code and data. */
            generations[generation].alloc_start_page = page;
        } else {
            lose("bad page type flag: %d", page_type_flag);
        }
    }
}
/* Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page. So
 * may pick up from the previous region if there is enough space. This
 * keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables(), and will thus be in an empty state.
 *
 * To assist the scavenging functions write-protected pages are not
 * used. Free pages should not be write-protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found to within the current
 * region and the page generation must be set so that pointers to the
 * from space can be recognized. Therefore the generation of pages in
 * the region are set to gc_alloc_generation. To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void
gc_alloc_new_region(long nbytes, int page_type_flag, struct alloc_region *alloc_region)
{
    page_index_t first_page;
    page_index_t last_page;
    unsigned long bytes_found;
    page_index_t i;
    int ret;

    FSHOW((stderr,
           "/alloc_new_region for %d bytes from gen %d\n",
           nbytes, gc_alloc_generation));

    /* Check that the region is in a reset state. */
    gc_assert((alloc_region->first_page == 0)
              && (alloc_region->last_page == -1)
              && (alloc_region->free_pointer == alloc_region->end_addr));
    ret = thread_mutex_lock(&free_pages_lock);
    gc_assert(ret == 0);
    first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0);
    last_page = gc_find_freeish_pages(&first_page, nbytes, page_type_flag);
    bytes_found = (PAGE_BYTES - page_table[first_page].bytes_used)
        + npage_bytes(last_page-first_page);

    /* Set up the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
        + page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;

    /* Set up the pages. */

    /* The first page may have already been in use. */
    if (page_table[first_page].bytes_used == 0) {
        page_table[first_page].allocated = page_type_flag;
        page_table[first_page].gen = gc_alloc_generation;
        page_table[first_page].large_object = 0;
        page_table[first_page].region_start_offset = 0;
    }

    gc_assert(page_table[first_page].allocated == page_type_flag);
    page_table[first_page].allocated |= OPEN_REGION_PAGE_FLAG;

    gc_assert(page_table[first_page].gen == gc_alloc_generation);
    gc_assert(page_table[first_page].large_object == 0);

    for (i = first_page+1; i <= last_page; i++) {
        page_table[i].allocated = page_type_flag;
        page_table[i].gen = gc_alloc_generation;
        page_table[i].large_object = 0;
        /* This may not be necessary for unboxed regions (think it was
         * broken before!) */
        page_table[i].region_start_offset =
            void_diff(page_address(i), alloc_region->start_addr);
        page_table[i].allocated |= OPEN_REGION_PAGE_FLAG;
    }
    /* Bump up last_free_page. */
    if (last_page+1 > last_free_page) {
        last_free_page = last_page+1;
        /* do we only want to call this on special occasions? like for
         * boxed_region? */
        set_alloc_pointer((lispobj)page_address(last_free_page));
    }
    ret = thread_mutex_unlock(&free_pages_lock);
    gc_assert(ret == 0);

#ifdef READ_PROTECT_FREE_PAGES
    os_protect(page_address(first_page),
               npage_bytes(1+last_page-first_page),
               OS_VM_PROT_ALL);
#endif

    /* If the first page was only partial, don't check whether it's
     * zeroed (it won't be) and don't zero it (since the parts that
     * we're interested in are guaranteed to be zeroed).
     */
    if (page_table[first_page].bytes_used) {
        first_page++;
    }

    zero_dirty_pages(first_page, last_page);

    /* we can do this after releasing free_pages_lock */
    if (gencgc_zero_check) {
        long *p;
        for (p = (long *)alloc_region->start_addr;
             p < (long *)alloc_region->end_addr; p++) {
            if (*p != 0) {
                /* KLUDGE: It would be nice to use %lx and explicit casts
                 * (long) in code like this, so that it is less likely to
                 * break randomly when running on a machine with different
                 * word sizes. -- WHN 19991129 */
                lose("The new region at %x is not zero (start=%p, end=%p).\n",
                     p, alloc_region->start_addr, alloc_region->end_addr);
            }
        }
    }
}
/* If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then it is only recorded if the first page of the
 * current region is <= new_areas_ignore_page. This helps avoid
 * unnecessary recording when doing a full scavenge pass.
 *
 * The new_object structure holds the page, byte offset, and size of
 * new regions of objects. Each new area is placed in the array of
 * these structures pointed to by new_areas. new_areas_index holds the
 * offset into new_areas.
 *
 * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation. */
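/* Illustrative sketch, not part of the collector proper: the three
 * record_new_objects modes described above, expressed as a predicate
 * over the variables defined just below. The function name is
 * hypothetical; add_new_area() contains the real logic. */
#if 0
static boolean
example_should_record_new_area(page_index_t first_page)
{
    switch (record_new_objects) {
    case 0: return 0;                                    /* never record */
    case 1: return first_page <= new_areas_ignore_page;  /* partial scavenge */
    case 2: return 1;                                    /* always record */
    default: return 0;
    }
}
#endif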
#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static page_index_t new_areas_ignore_page;
struct new_area {
    page_index_t page;
    size_t offset;
    size_t size;
};
static struct new_area (*new_areas)[];
static long new_areas_index;
static long max_new_areas;
/* Add a new area to new_areas. */
static void
add_new_area(page_index_t first_page, size_t offset, size_t size)
{
    unsigned long new_area_start,c;
    long i;

    /* Ignore if full. */
    if (new_areas_index >= NUM_NEW_AREAS)
        return;

    switch (record_new_objects) {
    case 0:
        return;
    case 1:
        if (first_page > new_areas_ignore_page)
            return;
        break;
    case 2:
        break;
    default:
        gc_abort();
    }

    new_area_start = npage_bytes(first_page) + offset;

    /* Search backwards for a prior area that this follows from. If
       found this will save adding a new area. */
    for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) {
        unsigned long area_end =
            npage_bytes((*new_areas)[i].page)
            + (*new_areas)[i].offset
            + (*new_areas)[i].size;
        /*FSHOW((stderr,
               "/add_new_area S1 %d %d %d %d\n",
               i, c, new_area_start, area_end));*/
        if (new_area_start == area_end) {
            /*FSHOW((stderr,
                   "/adding to [%d] %d %d %d with %d %d %d:\n",
                   i,
                   (*new_areas)[i].page,
                   (*new_areas)[i].offset,
                   (*new_areas)[i].size,
                   first_page,
                   offset,
                   size);*/
            (*new_areas)[i].size += size;
            return;
        }
    }

    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
    /*FSHOW((stderr,
           "/new_area %d page %d offset %d size %d\n",
           new_areas_index, first_page, offset, size));*/
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
        max_new_areas = new_areas_index;
}
/* Update the tables for the alloc_region. The region may be added to
 * the new_areas.
 *
 * When done the alloc_region is set up so that the next quick alloc
 * will fail safely and thus a new region will be allocated. Further
 * it is safe to try to re-update the page table of this reset
 * alloc_region. */
void
gc_alloc_update_page_tables(int page_type_flag, struct alloc_region *alloc_region)
{
    int more;
    page_index_t first_page;
    page_index_t next_page;
    unsigned long bytes_used;
    unsigned long orig_first_page_bytes_used;
    unsigned long region_size;
    unsigned long byte_cnt;
    int ret;

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if ((first_page == 0) && (alloc_region->last_page == -1))
        return;

    next_page = first_page+1;

    ret = thread_mutex_lock(&free_pages_lock);
    gc_assert(ret == 0);
    if (alloc_region->free_pointer != alloc_region->start_addr) {
        /* some bytes were allocated in the region */
        orig_first_page_bytes_used = page_table[first_page].bytes_used;

        gc_assert(alloc_region->start_addr ==
                  (page_address(first_page)
                   + page_table[first_page].bytes_used));

        /* All the pages used need to be updated */

        /* Update the first page. */

        /* If the page was free then set up the gen, and
         * region_start_offset. */
        if (page_table[first_page].bytes_used == 0)
            gc_assert(page_table[first_page].region_start_offset == 0);
        page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);

        gc_assert(page_table[first_page].allocated & page_type_flag);
        gc_assert(page_table[first_page].gen == gc_alloc_generation);
        gc_assert(page_table[first_page].large_object == 0);

        byte_cnt = 0;

        /* Calculate the number of bytes used in this page. This is not
         * always the number of new bytes, unless it was free. */
        more = 0;
        if ((bytes_used = void_diff(alloc_region->free_pointer,
                                    page_address(first_page)))
            >PAGE_BYTES) {
            bytes_used = PAGE_BYTES;
            more = 1;
        }
        page_table[first_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        /* All the rest of the pages should be free. We need to set
         * their region_start_offset pointer to the start of the
         * region, and set the bytes_used. */
        while (more) {
            page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
            gc_assert(page_table[next_page].allocated & page_type_flag);
            gc_assert(page_table[next_page].bytes_used == 0);
            gc_assert(page_table[next_page].gen == gc_alloc_generation);
            gc_assert(page_table[next_page].large_object == 0);

            gc_assert(page_table[next_page].region_start_offset ==
                      void_diff(page_address(next_page),
                                alloc_region->start_addr));

            /* Calculate the number of bytes used in this page. */
            more = 0;
            if ((bytes_used = void_diff(alloc_region->free_pointer,
                                        page_address(next_page)))>PAGE_BYTES) {
                bytes_used = PAGE_BYTES;
                more = 1;
            }
            page_table[next_page].bytes_used = bytes_used;
            byte_cnt += bytes_used;

            next_page++;
        }

        region_size = void_diff(alloc_region->free_pointer,
                                alloc_region->start_addr);
        bytes_allocated += region_size;
        generations[gc_alloc_generation].bytes_allocated += region_size;

        gc_assert((byte_cnt - orig_first_page_bytes_used) == region_size);

        /* Set the generations alloc restart page to the last page of
         * the region. */
        set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0, next_page-1);

        /* Add the region to the new_areas if requested. */
        if (BOXED_PAGE_FLAG & page_type_flag)
            add_new_area(first_page,orig_first_page_bytes_used, region_size);

        FSHOW((stderr,
               "/gc_alloc_update_page_tables update %d bytes to gen %d\n",
               region_size,
               gc_alloc_generation));
    } else {
        /* There are no bytes allocated. Unallocate the first_page if
         * there are 0 bytes_used. */
        page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
        if (page_table[first_page].bytes_used == 0)
            page_table[first_page].allocated = FREE_PAGE_FLAG;
    }

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
        gc_assert(page_table[next_page].bytes_used == 0);
        page_table[next_page].allocated = FREE_PAGE_FLAG;
        next_page++;
    }
    ret = thread_mutex_unlock(&free_pages_lock);
    gc_assert(ret == 0);

    /* alloc_region is per-thread, we're ok to do this unlocked */
    gc_set_region_empty(alloc_region);
}
static inline void *gc_quick_alloc(long nbytes);
/* Allocate a possibly large object. */
void *
gc_alloc_large(long nbytes, int page_type_flag, struct alloc_region *alloc_region)
{
    page_index_t first_page;
    page_index_t last_page;
    int orig_first_page_bytes_used;
    long byte_cnt;
    int more;
    unsigned long bytes_used;
    page_index_t next_page;
    int ret;

    ret = thread_mutex_lock(&free_pages_lock);
    gc_assert(ret == 0);

    first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 1);
    if (first_page <= alloc_region->last_page) {
        first_page = alloc_region->last_page+1;
    }

    last_page = gc_find_freeish_pages(&first_page, nbytes, page_type_flag);

    gc_assert(first_page > alloc_region->last_page);

    set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 1, last_page);

    /* Set up the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /* If the first page was free then set up the gen, and
     * region_start_offset. */
    if (page_table[first_page].bytes_used == 0) {
        page_table[first_page].allocated = page_type_flag;
        page_table[first_page].gen = gc_alloc_generation;
        page_table[first_page].region_start_offset = 0;
        page_table[first_page].large_object = 1;
    }

    gc_assert(page_table[first_page].allocated == page_type_flag);
    gc_assert(page_table[first_page].gen == gc_alloc_generation);
    gc_assert(page_table[first_page].large_object == 1);

    byte_cnt = 0;

    /* Calc. the number of bytes used in this page. This is not
     * always the number of new bytes, unless it was free. */
    more = 0;
    if ((bytes_used = nbytes+orig_first_page_bytes_used) > PAGE_BYTES) {
        bytes_used = PAGE_BYTES;
        more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page+1;

    /* All the rest of the pages should be free. We need to set their
     * region_start_offset pointer to the start of the region, and set
     * the bytes_used. */
    while (more) {
        gc_assert(page_free_p(next_page));
        gc_assert(page_table[next_page].bytes_used == 0);
        page_table[next_page].allocated = page_type_flag;
        page_table[next_page].gen = gc_alloc_generation;
        page_table[next_page].large_object = 1;

        page_table[next_page].region_start_offset =
            npage_bytes(next_page-first_page) - orig_first_page_bytes_used;

        /* Calculate the number of bytes used in this page. */
        more = 0;
        bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt;
        if (bytes_used > PAGE_BYTES) {
            bytes_used = PAGE_BYTES;
            more = 1;
        }
        page_table[next_page].bytes_used = bytes_used;
        page_table[next_page].write_protected=0;
        page_table[next_page].dont_move=0;
        byte_cnt += bytes_used;
        next_page++;
    }

    gc_assert((byte_cnt-orig_first_page_bytes_used) == nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (BOXED_PAGE_FLAG & page_type_flag)
        add_new_area(first_page,orig_first_page_bytes_used,nbytes);

    /* Bump up last_free_page */
    if (last_page+1 > last_free_page) {
        last_free_page = last_page+1;
        set_alloc_pointer((lispobj)(page_address(last_free_page)));
    }
    ret = thread_mutex_unlock(&free_pages_lock);
    gc_assert(ret == 0);

#ifdef READ_PROTECT_FREE_PAGES
    os_protect(page_address(first_page),
               npage_bytes(1+last_page-first_page),
               OS_VM_PROT_ALL);
#endif

    zero_dirty_pages(first_page, last_page);

    return page_address(first_page);
}
static page_index_t gencgc_alloc_start_page = -1;
void
gc_heap_exhausted_error_or_lose (long available, long requested)
{
    /* Write basic information before doing anything else: if we don't
     * call to lisp this is a must, and even if we do there is always
     * the danger that we bounce back here before the error has been
     * handled, or indeed even printed.
     */
    fprintf(stderr, "Heap exhausted during %s: %ld bytes available, %ld requested.\n",
            gc_active_p ? "garbage collection" : "allocation",
            available, requested);
    if (gc_active_p || (available == 0)) {
        /* If we are in GC, or totally out of memory there is no way
         * to sanely transfer control to the lisp-side of things.
         */
        struct thread *thread = arch_os_get_current_thread();
        print_generation_stats(1);
        fprintf(stderr, "GC control variables:\n");
        fprintf(stderr, " *GC-INHIBIT* = %s\n *GC-PENDING* = %s\n",
                SymbolValue(GC_INHIBIT,thread)==NIL ? "false" : "true",
                SymbolValue(GC_PENDING,thread)==NIL ? "false" : "true");
#ifdef LISP_FEATURE_SB_THREAD
        fprintf(stderr, " *STOP-FOR-GC-PENDING* = %s\n",
                SymbolValue(STOP_FOR_GC_PENDING,thread)==NIL ? "false" : "true");
#endif
        lose("Heap exhausted, game over.");
    }
    else {
        /* FIXME: assert free_pages_lock held */
        (void)thread_mutex_unlock(&free_pages_lock);
        funcall2(StaticSymbolFunction(HEAP_EXHAUSTED_ERROR),
                 alloc_number(available), alloc_number(requested));
        lose("HEAP-EXHAUSTED-ERROR fell through");
    }
}
page_index_t
gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int page_type_flag)
{
    page_index_t first_page, last_page;
    page_index_t restart_page = *restart_page_ptr;
    long bytes_found = 0;
    long most_bytes_found = 0;
    /* FIXME: assert(free_pages_lock is held); */

    /* Toggled by gc_and_save for heap compaction, normally -1. */
    if (gencgc_alloc_start_page != -1) {
        restart_page = gencgc_alloc_start_page;
    }

    if (nbytes >= PAGE_BYTES) {
        /* Search for a contiguous free space of at least nbytes,
         * aligned on a page boundary. The page-alignment is strictly
         * speaking needed only for objects at least large_object_size
         * bytes in size. */
        do {
            first_page = restart_page;
            while ((first_page < page_table_pages) &&
                   page_allocated_p(first_page))
                first_page++;

            last_page = first_page;
            bytes_found = PAGE_BYTES;
            while ((bytes_found < nbytes) &&
                   (last_page < (page_table_pages-1)) &&
                   page_free_p(last_page+1)) {
                last_page++;
                bytes_found += PAGE_BYTES;
                gc_assert(0 == page_table[last_page].bytes_used);
                gc_assert(0 == page_table[last_page].write_protected);
            }
            if (bytes_found > most_bytes_found)
                most_bytes_found = bytes_found;
            restart_page = last_page + 1;
        } while ((restart_page < page_table_pages) && (bytes_found < nbytes));

    } else {
        /* Search for a page with at least nbytes of space. We prefer
         * not to split small objects on multiple pages, to reduce the
         * number of contiguous allocation regions spanning multiple
         * pages: this helps avoid excessive conservatism. */
        first_page = restart_page;
        while (first_page < page_table_pages) {
            if (page_free_p(first_page))
                {
                    gc_assert(0 == page_table[first_page].bytes_used);
                    bytes_found = PAGE_BYTES;
                    break;
                }
            else if ((page_table[first_page].allocated == page_type_flag) &&
                     (page_table[first_page].large_object == 0) &&
                     (page_table[first_page].gen == gc_alloc_generation) &&
                     (page_table[first_page].write_protected == 0) &&
                     (page_table[first_page].dont_move == 0))
                {
                    bytes_found = PAGE_BYTES
                        - page_table[first_page].bytes_used;
                    if (bytes_found > most_bytes_found)
                        most_bytes_found = bytes_found;
                    if (bytes_found >= nbytes)
                        break;
                }
            first_page++;
        }
        last_page = first_page;
        restart_page = first_page + 1;
    }

    /* Check for a failure */
    if (bytes_found < nbytes) {
        gc_assert(restart_page >= page_table_pages);
        gc_heap_exhausted_error_or_lose(most_bytes_found, nbytes);
    }

    gc_assert(page_table[first_page].write_protected == 0);

    *restart_page_ptr = first_page;

    return last_page;
}
/* Allocate bytes. All the rest of the special-purpose allocation
 * functions will eventually call this */

void *
gc_alloc_with_region(long nbytes,int page_type_flag, struct alloc_region *my_region,
                     int quick_p)
{
    void *new_free_pointer;

    if (nbytes>=large_object_size)
        return gc_alloc_large(nbytes, page_type_flag, my_region);

    /* Check whether there is room in the current alloc region. */
    new_free_pointer = my_region->free_pointer + nbytes;

    /* fprintf(stderr, "alloc %d bytes from %p to %p\n", nbytes,
       my_region->free_pointer, new_free_pointer); */

    if (new_free_pointer <= my_region->end_addr) {
        /* If so then allocate from the current alloc region. */
        void *new_obj = my_region->free_pointer;
        my_region->free_pointer = new_free_pointer;

        /* Unless a `quick' alloc was requested, check whether the
           alloc region is almost empty. */
        if (!quick_p &&
            void_diff(my_region->end_addr,my_region->free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(page_type_flag, my_region);
            /* Set up a new region. */
            gc_alloc_new_region(32 /*bytes*/, page_type_flag, my_region);
        }

        return((void *)new_obj);
    }

    /* Else not enough free space in the current region: retry with a
     * new region. */

    gc_alloc_update_page_tables(page_type_flag, my_region);
    gc_alloc_new_region(nbytes, page_type_flag, my_region);
    return gc_alloc_with_region(nbytes, page_type_flag, my_region,0);
}
/* these are only used during GC: all allocation from the mutator calls
 * alloc() -> gc_alloc_with_region() with the appropriate per-thread
 * alloc region. */

static inline void *
gc_quick_alloc(long nbytes)
{
    return gc_general_alloc(nbytes, BOXED_PAGE_FLAG, ALLOC_QUICK);
}

static inline void *
gc_quick_alloc_large(long nbytes)
{
    return gc_general_alloc(nbytes, BOXED_PAGE_FLAG, ALLOC_QUICK);
}

static inline void *
gc_alloc_unboxed(long nbytes)
{
    return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, 0);
}

static inline void *
gc_quick_alloc_unboxed(long nbytes)
{
    return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, ALLOC_QUICK);
}

static inline void *
gc_quick_alloc_large_unboxed(long nbytes)
{
    return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, ALLOC_QUICK);
}
/* Copy a large boxed object. If the object is in a large object
 * region then it is simply promoted, else it is copied. If it's large
 * enough then it's copied to a large object region.
 *
 * Vectors may have shrunk. If the object is not copied the space
 * needs to be reclaimed, and the page_tables corrected. */
lispobj
copy_large_object(lispobj object, long nwords)
{
    int tag;
    lispobj *new;
    page_index_t first_page;

    gc_assert(is_lisp_pointer(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);


    /* Check whether it's in a large object region. */
    first_page = find_page_index((void *)object);
    gc_assert(first_page >= 0);

    if (page_table[first_page].large_object) {

        /* Promote the object. */

        unsigned long remaining_bytes;
        page_index_t next_page;
        unsigned long bytes_freed;
        unsigned long old_bytes_used;

        /* Note: Any page write-protection must be removed, else a
         * later scavenge_newspace may incorrectly not scavenge these
         * pages. This would not be necessary if they are added to the
         * new areas, but let's do it for them all (they'll probably
         * be written anyway?). */

        gc_assert(page_table[first_page].region_start_offset == 0);

        next_page = first_page;
        remaining_bytes = nwords*N_WORD_BYTES;
        while (remaining_bytes > PAGE_BYTES) {
            gc_assert(page_table[next_page].gen == from_space);
            gc_assert(page_boxed_p(next_page));
            gc_assert(page_table[next_page].large_object);
            gc_assert(page_table[next_page].region_start_offset ==
                      npage_bytes(next_page-first_page));
            gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);

            page_table[next_page].gen = new_space;

            /* Remove any write-protection. We should be able to rely
             * on the write-protect flag to avoid redundant calls. */
            if (page_table[next_page].write_protected) {
                os_protect(page_address(next_page), PAGE_BYTES, OS_VM_PROT_ALL);
                page_table[next_page].write_protected = 0;
            }
            remaining_bytes -= PAGE_BYTES;
            next_page++;
        }

        /* Now only one page remains, but the object may have shrunk
         * so there may be more unused pages which will be freed. */

        /* The object may have shrunk but shouldn't have grown. */
        gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

        page_table[next_page].gen = new_space;
        gc_assert(page_boxed_p(next_page));

        /* Adjust the bytes_used. */
        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].bytes_used = remaining_bytes;

        bytes_freed = old_bytes_used - remaining_bytes;

        /* Free any remaining pages; needs care. */
        next_page++;
        while ((old_bytes_used == PAGE_BYTES) &&
               (page_table[next_page].gen == from_space) &&
               page_boxed_p(next_page) &&
               page_table[next_page].large_object &&
               (page_table[next_page].region_start_offset ==
                npage_bytes(next_page - first_page))) {
            /* Checks out OK, free the page. Don't need to bother zeroing
             * pages as this should have been done before shrinking the
             * object. These pages shouldn't be write-protected as they
             * should be zero filled. */
            gc_assert(page_table[next_page].write_protected == 0);

            old_bytes_used = page_table[next_page].bytes_used;
            page_table[next_page].allocated = FREE_PAGE_FLAG;
            page_table[next_page].bytes_used = 0;
            bytes_freed += old_bytes_used;
            next_page++;
        }

        generations[from_space].bytes_allocated -= N_WORD_BYTES*nwords
            + bytes_freed;
        generations[new_space].bytes_allocated += N_WORD_BYTES*nwords;
        bytes_allocated -= bytes_freed;

        /* Add the region to the new_areas if requested. */
        add_new_area(first_page,0,nwords*N_WORD_BYTES);

        return(object);

    } else {
        /* Get tag of object. */
        tag = lowtag_of(object);

        /* Allocate space. */
        new = gc_quick_alloc_large(nwords*N_WORD_BYTES);

        memcpy(new,native_pointer(object),nwords*N_WORD_BYTES);

        /* Return Lisp pointer of new object. */
        return ((lispobj) new) | tag;
    }
}
/* to copy unboxed objects */
lispobj
copy_unboxed_object(lispobj object, long nwords)
{
    long tag;
    lispobj *new;

    gc_assert(is_lisp_pointer(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    /* Get tag of object. */
    tag = lowtag_of(object);

    /* Allocate space. */
    new = gc_quick_alloc_unboxed(nwords*N_WORD_BYTES);

    memcpy(new,native_pointer(object),nwords*N_WORD_BYTES);

    /* Return Lisp pointer of new object. */
    return ((lispobj) new) | tag;
}
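/* Worked note (illustrative): the copy routines above preserve the
 * pointer's lowtag. A lispobj is a native address with tag bits in the
 * low bits, so re-tagging the copy reconstructs a valid Lisp pointer:
 *
 *   new_lispobj = ((lispobj)new_native_address) | lowtag_of(old_lispobj);
 */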
/* to copy large unboxed objects
 *
 * If the object is in a large object region then it is simply
 * promoted, else it is copied. If it's large enough then it's copied
 * to a large object region.
 *
 * Bignums and vectors may have shrunk. If the object is not copied
 * the space needs to be reclaimed, and the page_tables corrected.
 *
 * KLUDGE: There's a lot of cut-and-paste duplication between this
 * function and copy_large_object(..). -- WHN 20000619 */
lispobj
copy_large_unboxed_object(lispobj object, long nwords)
{
    long tag;
    lispobj *new;
    page_index_t first_page;

    gc_assert(is_lisp_pointer(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    if ((nwords > 1024*1024) && gencgc_verbose)
        FSHOW((stderr, "/copy_large_unboxed_object: %d bytes\n",
               nwords*N_WORD_BYTES));

    /* Check whether it's a large object. */
    first_page = find_page_index((void *)object);
    gc_assert(first_page >= 0);

    if (page_table[first_page].large_object) {
        /* Promote the object. Note: Unboxed objects may have been
         * allocated to a BOXED region so it may be necessary to
         * change the region to UNBOXED. */
        unsigned long remaining_bytes;
        page_index_t next_page;
        unsigned long bytes_freed;
        unsigned long old_bytes_used;

        gc_assert(page_table[first_page].region_start_offset == 0);

        next_page = first_page;
        remaining_bytes = nwords*N_WORD_BYTES;
        while (remaining_bytes > PAGE_BYTES) {
            gc_assert(page_table[next_page].gen == from_space);
            gc_assert(page_allocated_no_region_p(next_page));
            gc_assert(page_table[next_page].large_object);
            gc_assert(page_table[next_page].region_start_offset ==
                      npage_bytes(next_page-first_page));
            gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);

            page_table[next_page].gen = new_space;
            page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
            remaining_bytes -= PAGE_BYTES;
            next_page++;
        }

        /* Now only one page remains, but the object may have shrunk so
         * there may be more unused pages which will be freed. */

        /* Object may have shrunk but shouldn't have grown - check. */
        gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

        page_table[next_page].gen = new_space;
        page_table[next_page].allocated = UNBOXED_PAGE_FLAG;

        /* Adjust the bytes_used. */
        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].bytes_used = remaining_bytes;

        bytes_freed = old_bytes_used - remaining_bytes;

        /* Free any remaining pages; needs care. */
        next_page++;
        while ((old_bytes_used == PAGE_BYTES) &&
               (page_table[next_page].gen == from_space) &&
               page_allocated_no_region_p(next_page) &&
               page_table[next_page].large_object &&
               (page_table[next_page].region_start_offset ==
                npage_bytes(next_page - first_page))) {
            /* Checks out OK, free the page. Don't need to bother zeroing
             * pages as this should have been done before shrinking the
             * object. These pages shouldn't be write-protected, even if
             * boxed they should be zero filled. */
            gc_assert(page_table[next_page].write_protected == 0);

            old_bytes_used = page_table[next_page].bytes_used;
            page_table[next_page].allocated = FREE_PAGE_FLAG;
            page_table[next_page].bytes_used = 0;
            bytes_freed += old_bytes_used;
            next_page++;
        }

        if ((bytes_freed > 0) && gencgc_verbose)
            FSHOW((stderr,
                   "/copy_large_unboxed bytes_freed=%d\n",
                   bytes_freed));

        generations[from_space].bytes_allocated -=
            nwords*N_WORD_BYTES + bytes_freed;
        generations[new_space].bytes_allocated += nwords*N_WORD_BYTES;
        bytes_allocated -= bytes_freed;

        return(object);

    } else {
        /* Get tag of object. */
        tag = lowtag_of(object);

        /* Allocate space. */
        new = gc_quick_alloc_large_unboxed(nwords*N_WORD_BYTES);

        /* Copy the object. */
        memcpy(new,native_pointer(object),nwords*N_WORD_BYTES);

        /* Return Lisp pointer of new object. */
        return ((lispobj) new) | tag;
    }
}
/*
 * code and code-related objects
 */
static lispobj trans_fun_header(lispobj object);
static lispobj trans_boxed(lispobj object);
/* Scan an x86 compiled code object, looking for possible fixups that
 * have been missed after a move.
 *
 * Two types of fixups are needed:
 * 1. Absolute fixups to within the code object.
 * 2. Relative fixups to outside the code object.
 *
 * Currently only absolute fixups to the constant vector, or to the
 * code area are checked. */
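/* Illustrative note: when a code object moves by DISPLACEMENT bytes
 * (new address minus old address), the two fixup kinds are adjusted in
 * opposite directions, as gencgc_apply_code_fixups() does below:
 *
 *   absolute fixup (points into the object):   value += displacement;
 *   relative fixup (points out of the object): value -= displacement;
 */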
static void
sniff_code_object(struct code *code, unsigned long displacement)
{
#ifdef LISP_FEATURE_X86
    long nheader_words, ncode_words, nwords;
    void *p;
    void *constants_start_addr = NULL, *constants_end_addr;
    void *code_start_addr, *code_end_addr;
    int fixup_found = 0;

    if (!check_code_fixups)
        return;

    FSHOW((stderr, "/sniffing code: %p, %lu\n", code, displacement));

    ncode_words = fixnum_value(code->code_size);
    nheader_words = HeaderValue(*(lispobj *)code);
    nwords = ncode_words + nheader_words;

    constants_start_addr = (void *)code + 5*N_WORD_BYTES;
    constants_end_addr = (void *)code + nheader_words*N_WORD_BYTES;
    code_start_addr = (void *)code + nheader_words*N_WORD_BYTES;
    code_end_addr = (void *)code + nwords*N_WORD_BYTES;

    /* Work through the unboxed code. */
    for (p = code_start_addr; p < code_end_addr; p++) {
        void *data = *(void **)p;
        unsigned d1 = *((unsigned char *)p - 1);
        unsigned d2 = *((unsigned char *)p - 2);
        unsigned d3 = *((unsigned char *)p - 3);
        unsigned d4 = *((unsigned char *)p - 4);
#if QSHOW
        unsigned d5 = *((unsigned char *)p - 5);
        unsigned d6 = *((unsigned char *)p - 6);
#endif

        /* Check for code references. */
        /* Check for a 32 bit word that looks like an absolute
           reference to within the code area of the code object. */
        if ((data >= (code_start_addr-displacement))
            && (data < (code_end_addr-displacement))) {
            /* function header */
            if ((d4 == 0x5e)
                && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) ==
                    (unsigned)code)) {
                /* Skip the function header */
                p += 6*4 - 4 - 1;
                continue;
            }
            /* the case of PUSH imm32 */
            if (d1 == 0x68) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/PUSH $0x%.8x\n", data));
            }
            /* the case of MOV [reg-8],imm32 */
            if ((d3 == 0xc7)
                && (d2==0x40 || d2==0x41 || d2==0x42 || d2==0x43
                    || d2==0x45 || d2==0x46 || d2==0x47)
                && (d1 == 0xf8)) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/MOV [reg-8],$0x%.8x\n", data));
            }
            /* the case of LEA reg,[disp32] */
            if ((d2 == 0x8d) && ((d1 & 0xc7) == 5)) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr,"/LEA reg,[$0x%.8x]\n", data));
            }
        }

        /* Check for constant references. */
        /* Check for a 32 bit word that looks like an absolute
           reference to within the constant vector. Constant references
           will be aligned. */
        if ((data >= (constants_start_addr-displacement))
            && (data < (constants_end_addr-displacement))
            && (((unsigned)data & 0x3) == 0)) {
            /* Mov eax,m32 */
            if (d1 == 0xa1) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr,"/MOV eax,0x%.8x\n", data));
            }

            /* the case of MOV m32,EAX */
            if (d1 == 0xa3) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/MOV 0x%.8x,eax\n", data));
            }

            /* the case of CMP m32,imm32 */
            if ((d1 == 0x3d) && (d2 == 0x81)) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/CMP 0x%.8x,immed32\n", data));
            }

            /* Check for a mod=00, r/m=101 byte. */
            if ((d1 & 0xc7) == 5) {
                /* Cmp m32,reg */
                if (d2 == 0x39) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr,"/CMP 0x%.8x,reg\n", data));
                }
                /* the case of CMP reg32,m32 */
                if (d2 == 0x3b) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/CMP reg32,0x%.8x\n", data));
                }
                /* the case of MOV m32,reg32 */
                if (d2 == 0x89) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/MOV 0x%.8x,reg32\n", data));
                }
                /* the case of MOV reg32,m32 */
                if (d2 == 0x8b) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/MOV reg32,0x%.8x\n", data));
                }
                /* the case of LEA reg32,m32 */
                if (d2 == 0x8d) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/LEA reg32,0x%.8x\n", data));
                }
            }
        }
    }

    /* If anything was found, print some information on the code
     * object. */
    if (fixup_found) {
        FSHOW((stderr,
               "/compiled code object at %x: header words = %d, code words = %d\n",
               code, nheader_words, ncode_words));
        FSHOW((stderr,
               "/const start = %x, end = %x\n",
               constants_start_addr, constants_end_addr));
        FSHOW((stderr,
               "/code start = %x, end = %x\n",
               code_start_addr, code_end_addr));
    }
#endif
}
void
gencgc_apply_code_fixups(struct code *old_code, struct code *new_code)
{
/* x86-64 uses pc-relative addressing instead of this kludge */
#ifndef LISP_FEATURE_X86_64
    long nheader_words, ncode_words, nwords;
    void *constants_start_addr, *constants_end_addr;
    void *code_start_addr, *code_end_addr;
    lispobj fixups = NIL;
    unsigned long displacement =
        (unsigned long)new_code - (unsigned long)old_code;
    struct vector *fixups_vector;

    ncode_words = fixnum_value(new_code->code_size);
    nheader_words = HeaderValue(*(lispobj *)new_code);
    nwords = ncode_words + nheader_words;
    /* FSHOW((stderr,
             "/compiled code object at %x: header words = %d, code words = %d\n",
             new_code, nheader_words, ncode_words)); */
    constants_start_addr = (void *)new_code + 5*N_WORD_BYTES;
    constants_end_addr = (void *)new_code + nheader_words*N_WORD_BYTES;
    code_start_addr = (void *)new_code + nheader_words*N_WORD_BYTES;
    code_end_addr = (void *)new_code + nwords*N_WORD_BYTES;
    /*
    FSHOW((stderr,
           "/const start = %x, end = %x\n",
           constants_start_addr,constants_end_addr));
    FSHOW((stderr,
           "/code start = %x; end = %x\n",
           code_start_addr,code_end_addr));
    */

    /* The first constant should be a pointer to the fixups for this
       code objects. Check. */
    fixups = new_code->constants[0];

    /* It will be 0 or the unbound-marker if there are no fixups (as
     * will be the case if the code object has been purified, for
     * example) and will be an other pointer if it is valid. */
    if ((fixups == 0) || (fixups == UNBOUND_MARKER_WIDETAG) ||
        !is_lisp_pointer(fixups)) {
        /* Check for possible errors. */
        if (check_code_fixups)
            sniff_code_object(new_code, displacement);

        return;
    }

    fixups_vector = (struct vector *)native_pointer(fixups);

    /* Could be pointing to a forwarding pointer. */
    /* FIXME is this always in from_space? if so, could replace this code with
     * forwarding_pointer_p/forwarding_pointer_value */
    if (is_lisp_pointer(fixups) &&
        (find_page_index((void*)fixups_vector) != -1) &&
        (fixups_vector->header == 0x01)) {
        /* If so, then follow it. */
        /*SHOW("following pointer to a forwarding pointer");*/
        fixups_vector =
            (struct vector *)native_pointer((lispobj)fixups_vector->length);
    }

    /*SHOW("got fixups");*/

    if (widetag_of(fixups_vector->header) == SIMPLE_ARRAY_WORD_WIDETAG) {
        /* Got the fixups for the code block. Now work through the vector,
           and apply a fixup at each address. */
        long length = fixnum_value(fixups_vector->length);
        long i;
        for (i = 0; i < length; i++) {
            unsigned long offset = fixups_vector->data[i];
            /* Now check the current value of offset. */
            unsigned long old_value =
                *(unsigned long *)((unsigned long)code_start_addr + offset);

            /* If it's within the old_code object then it must be an
             * absolute fixup (relative ones are not saved) */
            if ((old_value >= (unsigned long)old_code)
                && (old_value < ((unsigned long)old_code
                                 + nwords*N_WORD_BYTES)))
                /* So add the displacement. */
                *(unsigned long *)((unsigned long)code_start_addr + offset) =
                    old_value + displacement;
            else
                /* It is outside the old code object so it must be a
                 * relative fixup (absolute fixups are not saved). So
                 * subtract the displacement. */
                *(unsigned long *)((unsigned long)code_start_addr + offset) =
                    old_value - displacement;
        }
    } else {
        /* This used to just print a note to stderr, but a bogus fixup seems to
         * indicate real heap corruption, so a hard failure is in order. */
        lose("fixup vector %p has a bad widetag: %d\n",
             fixups_vector, widetag_of(fixups_vector->header));
    }

    /* Check for possible errors. */
    if (check_code_fixups) {
        sniff_code_object(new_code,displacement);
    }
#endif
}
static lispobj
trans_boxed_large(lispobj object)
{
    lispobj header;
    unsigned long length;

    gc_assert(is_lisp_pointer(object));

    header = *((lispobj *) native_pointer(object));
    length = HeaderValue(header) + 1;
    length = CEILING(length, 2);

    return copy_large_object(object, length);
}

/* Doesn't seem to be used, delete it after the grace period. */
static lispobj
trans_unboxed_large(lispobj object)
{
    lispobj header;
    unsigned long length;

    gc_assert(is_lisp_pointer(object));

    header = *((lispobj *) native_pointer(object));
    length = HeaderValue(header) + 1;
    length = CEILING(length, 2);

    return copy_large_unboxed_object(object, length);
}
/*
 * Lutexes. Using the normal finalization machinery for finalizing
 * lutexes is tricky, since the finalization depends on working lutexes.
 * So we track the lutexes in the GC and finalize them manually.
 */

#if defined(LUTEX_WIDETAG)

/*
 * Start tracking LUTEX in the GC, by adding it to the linked list of
 * lutexes in the nursery generation. The caller is responsible for
 * locking, and GCs must be inhibited until the registration is
 * complete.
 */
void
gencgc_register_lutex (struct lutex *lutex) {
    int index = find_page_index(lutex);
    generation_index_t gen;
    struct lutex *head;

    /* This lutex is in static space, so we don't need to worry about
     * finalizing it.
     */
    if (index == -1)
        return;

    gen = page_table[index].gen;

    gc_assert(gen >= 0);
    gc_assert(gen < NUM_GENERATIONS);

    head = generations[gen].lutexes;

    lutex->gen = gen;
    lutex->next = head;
    lutex->prev = NULL;
    if (head)
        head->prev = lutex;
    generations[gen].lutexes = lutex;
}

/*
 * Stop tracking LUTEX in the GC by removing it from the appropriate
 * linked lists. This will only be called during GC, so no locking is
 * needed.
 */
void
gencgc_unregister_lutex (struct lutex *lutex) {
    if (lutex->prev) {
        lutex->prev->next = lutex->next;
    } else {
        generations[lutex->gen].lutexes = lutex->next;
    }

    if (lutex->next) {
        lutex->next->prev = lutex->prev;
    }

    lutex->next = NULL;
    lutex->prev = NULL;
    lutex->gen = -1;
}

/*
 * Mark all lutexes in generation GEN as not live.
 */
static void
unmark_lutexes (generation_index_t gen) {
    struct lutex *lutex = generations[gen].lutexes;

    while (lutex) {
        lutex->live = 0;
        lutex = lutex->next;
    }
}

/*
 * Finalize all lutexes in generation GEN that have not been marked live.
 */
static void
reap_lutexes (generation_index_t gen) {
    struct lutex *lutex = generations[gen].lutexes;

    while (lutex) {
        struct lutex *next = lutex->next;
        if (!lutex->live) {
            lutex_destroy((tagged_lutex_t) lutex);
            gencgc_unregister_lutex(lutex);
        }
        lutex = next;
    }
}

/*
 * Mark LUTEX as live.
 */
static void
mark_lutex (lispobj tagged_lutex) {
    struct lutex *lutex = (struct lutex *) native_pointer(tagged_lutex);

    lutex->live = 1;
}
/*
 * Move all lutexes in generation FROM to generation TO.
 */
static void
move_lutexes (generation_index_t from, generation_index_t to) {
    struct lutex *tail = generations[from].lutexes;

    /* Nothing to move */
    if (!tail)
        return;

    /* Change the generation of the lutexes in FROM. */
    while (tail->next) {
        tail->gen = to;
        tail = tail->next;
    }
    tail->gen = to;

    /* Link the last lutex in the FROM list to the start of the TO list */
    tail->next = generations[to].lutexes;

    /* And vice versa */
    if (generations[to].lutexes) {
        generations[to].lutexes->prev = tail;
    }

    /* And update the generations structures to match this */
    generations[to].lutexes = generations[from].lutexes;
    generations[from].lutexes = NULL;
}
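
/* Worked example of the splice above: if FROM holds a <-> b and TO
 * holds c <-> d, the result is a <-> b <-> c <-> d with every element
 * retagged as generation TO and FROM's list left empty. The retagging
 * walk makes the move O(n) in the length of FROM's list. */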
static long
scav_lutex(lispobj *where, lispobj object)
{
    mark_lutex((lispobj) where);

    return CEILING(sizeof(struct lutex)/sizeof(lispobj), 2);
}

static lispobj
trans_lutex(lispobj object)
{
    struct lutex *lutex = (struct lutex *) native_pointer(object);
    lispobj copied;
    size_t words = CEILING(sizeof(struct lutex)/sizeof(lispobj), 2);
    gc_assert(is_lisp_pointer(object));
    copied = copy_object(object, words);

    /* Update the links, since the lutex moved in memory. */
    if (lutex->next) {
        lutex->next->prev = (struct lutex *) native_pointer(copied);
    }

    if (lutex->prev) {
        lutex->prev->next = (struct lutex *) native_pointer(copied);
    } else {
        generations[lutex->gen].lutexes =
            (struct lutex *) native_pointer(copied);
    }

    return copied;
}

static long
size_lutex(lispobj *where)
{
    return CEILING(sizeof(struct lutex)/sizeof(lispobj), 2);
}
#endif /* LUTEX_WIDETAG */

/*
 * weak pointers
 */

/* XX This is a hack adapted from cgc.c. These don't work too
 * efficiently with the gencgc as a list of the weak pointers is
 * maintained within the objects which causes writes to the pages. A
 * limited attempt is made to avoid unnecessary writes, but this needs
 * a re-think. */
#define WEAK_POINTER_NWORDS \
    CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)

static long
scav_weak_pointer(lispobj *where, lispobj object)
{
    /* Since we overwrite the 'next' field, we have to make
     * sure not to do so for pointers already in the list.
     * Instead of searching the list of weak_pointers each
     * time, we ensure that next is always NULL when the weak
     * pointer isn't in the list, and not NULL otherwise.
     * Since we can't use NULL to denote end of list, we
     * use a pointer back to the same weak_pointer.
     */
    struct weak_pointer * wp = (struct weak_pointer*)where;

    if (NULL == wp->next) {
        wp->next = weak_pointers;
        weak_pointers = wp;
        if (NULL == wp->next)
            wp->next = wp;
    }

    /* Do not let GC scavenge the value slot of the weak pointer.
     * (That is why it is a weak pointer.) */

    return WEAK_POINTER_NWORDS;
}
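
/* A sketch of the invariant above: pushing wp onto an empty
 * weak_pointers list leaves wp->next == wp (the self-link marks
 * end-of-list); pushing onto a non-empty list leaves wp->next pointing
 * at the old head. Either way next becomes non-NULL, so scavenging the
 * same weak pointer twice cannot corrupt the list. */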
lispobj *
search_read_only_space(void *pointer)
{
    lispobj *start = (lispobj *) READ_ONLY_SPACE_START;
    lispobj *end = (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0);
    if ((pointer < (void *)start) || (pointer >= (void *)end))
        return NULL;
    return (gc_search_space(start,
                            (((lispobj *)pointer)+2)-start,
                            (lispobj *) pointer));
}

lispobj *
search_static_space(void *pointer)
{
    lispobj *start = (lispobj *)STATIC_SPACE_START;
    lispobj *end = (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0);
    if ((pointer < (void *)start) || (pointer >= (void *)end))
        return NULL;
    return (gc_search_space(start,
                            (((lispobj *)pointer)+2)-start,
                            (lispobj *) pointer));
}

/* a faster version for searching the dynamic space. This will work even
 * if the object is in a current allocation region. */
lispobj *
search_dynamic_space(void *pointer)
{
    page_index_t page_index = find_page_index(pointer);
    lispobj *start;

    /* The address may be invalid, so do some checks. */
    if ((page_index == -1) || page_free_p(page_index))
        return NULL;
    start = (lispobj *)page_region_start(page_index);
    return (gc_search_space(start,
                            (((lispobj *)pointer)+2)-start,
                            (lispobj *)pointer));
}
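
/* Note on the bound passed to gc_search_space() above: the search
 * starts at the first object of the enclosing region and only needs to
 * examine words up to just past the candidate pointer, hence the word
 * count (((lispobj *)pointer)+2)-start. */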
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)

/* Helper for valid_lisp_pointer_p and
 * possibly_valid_dynamic_space_pointer.
 *
 * pointer is the pointer to validate, and start_addr is the address
 * of the enclosing object.
 */
static int
looks_like_valid_lisp_pointer_p(lispobj *pointer, lispobj *start_addr)
{
    if (!is_lisp_pointer((lispobj)pointer)) {
        return 0;
    }

    /* Check that the object pointed to is consistent with the pointer
     * low tag. */
    switch (lowtag_of((lispobj)pointer)) {
    case FUN_POINTER_LOWTAG:
        /* Start_addr should be the enclosing code object, or a closure
         * header. */
        switch (widetag_of(*start_addr)) {
        case CODE_HEADER_WIDETAG:
            /* This case is probably caught above. */
            break;
        case CLOSURE_HEADER_WIDETAG:
        case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
            if ((unsigned long)pointer !=
                ((unsigned long)start_addr+FUN_POINTER_LOWTAG)) {
                if (gencgc_verbose)
                    FSHOW((stderr,
                           "/Wf2: %x %x %x\n",
                           pointer, start_addr, *start_addr));
                return 0;
            }
            break;
        default:
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wf3: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
        break;
    case LIST_POINTER_LOWTAG:
        if ((unsigned long)pointer !=
            ((unsigned long)start_addr+LIST_POINTER_LOWTAG)) {
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wl1: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
        /* Is it a plausible cons? */
        if ((is_lisp_pointer(start_addr[0]) ||
             is_lisp_immediate(start_addr[0])) &&
            (is_lisp_pointer(start_addr[1]) ||
             is_lisp_immediate(start_addr[1])))
            break;
        else {
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wl2: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
    case INSTANCE_POINTER_LOWTAG:
        if ((unsigned long)pointer !=
            ((unsigned long)start_addr+INSTANCE_POINTER_LOWTAG)) {
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wi1: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
        if (widetag_of(start_addr[0]) != INSTANCE_HEADER_WIDETAG) {
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wi2: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
        break;
    case OTHER_POINTER_LOWTAG:
        if ((unsigned long)pointer !=
            ((unsigned long)start_addr+OTHER_POINTER_LOWTAG)) {
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wo1: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
        /* Is it plausible? Not a cons. XXX should check the headers. */
        if (is_lisp_pointer(start_addr[0]) || ((start_addr[0] & 3) == 0)) {
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wo2: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
        switch (widetag_of(start_addr[0])) {
        case UNBOUND_MARKER_WIDETAG:
        case NO_TLS_VALUE_MARKER_WIDETAG:
        case CHARACTER_WIDETAG:
#if N_WORD_BITS == 64
        case SINGLE_FLOAT_WIDETAG:
#endif
            if (gencgc_verbose)
                FSHOW((stderr,
                       "*Wo3: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;

            /* only pointed to by function pointers? */
        case CLOSURE_HEADER_WIDETAG:
        case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
            if (gencgc_verbose)
                FSHOW((stderr,
                       "*Wo4: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;

        case INSTANCE_HEADER_WIDETAG:
            if (gencgc_verbose)
                FSHOW((stderr,
                       "*Wo5: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;

            /* the valid other immediate pointer objects */
        case SIMPLE_VECTOR_WIDETAG:
        case RATIO_WIDETAG:
        case COMPLEX_WIDETAG:
#ifdef COMPLEX_SINGLE_FLOAT_WIDETAG
        case COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_DOUBLE_FLOAT_WIDETAG
        case COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_LONG_FLOAT_WIDETAG
        case COMPLEX_LONG_FLOAT_WIDETAG:
#endif
        case SIMPLE_ARRAY_WIDETAG:
        case COMPLEX_BASE_STRING_WIDETAG:
#ifdef COMPLEX_CHARACTER_STRING_WIDETAG
        case COMPLEX_CHARACTER_STRING_WIDETAG:
#endif
        case COMPLEX_VECTOR_NIL_WIDETAG:
        case COMPLEX_BIT_VECTOR_WIDETAG:
        case COMPLEX_VECTOR_WIDETAG:
        case COMPLEX_ARRAY_WIDETAG:
        case VALUE_CELL_HEADER_WIDETAG:
        case SYMBOL_HEADER_WIDETAG:
        case FDEFN_WIDETAG:
        case CODE_HEADER_WIDETAG:
        case BIGNUM_WIDETAG:
#if N_WORD_BITS != 64
        case SINGLE_FLOAT_WIDETAG:
#endif
        case DOUBLE_FLOAT_WIDETAG:
#ifdef LONG_FLOAT_WIDETAG
        case LONG_FLOAT_WIDETAG:
#endif
        case SIMPLE_BASE_STRING_WIDETAG:
#ifdef SIMPLE_CHARACTER_STRING_WIDETAG
        case SIMPLE_CHARACTER_STRING_WIDETAG:
#endif
        case SIMPLE_BIT_VECTOR_WIDETAG:
        case SIMPLE_ARRAY_NIL_WIDETAG:
        case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
        case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
        case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG:
        case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
        case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
        case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG
        case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
#endif
        case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
        case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG
        case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
        case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
        case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
        case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
        case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG
        case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
        case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG
        case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
        case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
#endif
        case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
        case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
#ifdef SIMPLE_ARRAY_LONG_FLOAT_WIDETAG
        case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
        case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
        case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
        case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
#endif
        case SAP_WIDETAG:
        case WEAK_POINTER_WIDETAG:
#ifdef LUTEX_WIDETAG
        case LUTEX_WIDETAG:
#endif
            break;

        default:
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wo6: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
        break;
    default:
        if (gencgc_verbose)
            FSHOW((stderr,
                   "*W?: %x %x %x\n",
                   pointer, start_addr, *start_addr));
        return 0;
    }

    /* looks good */
    return 1;
}
/* Used by the debugger to validate possibly bogus pointers before
 * calling MAKE-LISP-OBJ on them.
 *
 * FIXME: We would like to make this perfect, because if the debugger
 * constructs a reference to a bogus lisp object, and it ends up in a
 * location scavenged by the GC all hell breaks loose.
 *
 * Whereas possibly_valid_dynamic_space_pointer has to be conservative
 * and return true for all valid pointers, this could actually be eager
 * and lie about a few pointers without bad results... but that should
 * be reflected in the name.
 */
int
valid_lisp_pointer_p(lispobj *pointer)
{
    lispobj *start;
    if (((start=search_dynamic_space(pointer))!=NULL) ||
        ((start=search_static_space(pointer))!=NULL) ||
        ((start=search_read_only_space(pointer))!=NULL))
        return looks_like_valid_lisp_pointer_p(pointer, start);
    else
        return 0;
}

/* Is there any possibility that pointer is a valid Lisp object
 * reference, and/or something else (e.g. subroutine call return
 * address) which should prevent us from moving the referred-to thing?
 * This is called from preserve_pointer() */
static int
possibly_valid_dynamic_space_pointer(lispobj *pointer)
{
    lispobj *start_addr;

    /* Find the object start address. */
    if ((start_addr = search_dynamic_space(pointer)) == NULL) {
        return 0;
    }

    return looks_like_valid_lisp_pointer_p(pointer, start_addr);
}
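
/* Division of labour between the two entry points above:
 * valid_lisp_pointer_p also accepts pointers into the read-only and
 * static spaces (it exists for the debugger), while
 * possibly_valid_dynamic_space_pointer answers only for the dynamic
 * space and must err on the side of "valid", as conservative root
 * scanning requires. */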
/* Adjust large bignum and vector objects. This will adjust the
 * allocated region if the size has shrunk, and move unboxed objects
 * into unboxed pages. The pages are not promoted here, and the
 * promoted region is not added to the new_regions; this is really
 * only designed to be called from preserve_pointer(). Shouldn't fail
 * if this is missed, just may delay the moving of objects to unboxed
 * pages, and the freeing of pages. */
static void
maybe_adjust_large_object(lispobj *where)
{
    page_index_t first_page;
    page_index_t next_page;
    long nwords;

    unsigned long remaining_bytes;
    unsigned long bytes_freed;
    unsigned long old_bytes_used;

    int boxed;

    /* Check whether it's a vector or bignum object. */
    switch (widetag_of(where[0])) {
    case SIMPLE_VECTOR_WIDETAG:
        boxed = BOXED_PAGE_FLAG;
        break;
    case BIGNUM_WIDETAG:
    case SIMPLE_BASE_STRING_WIDETAG:
#ifdef SIMPLE_CHARACTER_STRING_WIDETAG
    case SIMPLE_CHARACTER_STRING_WIDETAG:
#endif
    case SIMPLE_BIT_VECTOR_WIDETAG:
    case SIMPLE_ARRAY_NIL_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG
    case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
#endif
    case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG
    case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
    case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
    case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
#endif
    case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
    case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
#ifdef SIMPLE_ARRAY_LONG_FLOAT_WIDETAG
    case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
    case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
    case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
    case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
#endif
        boxed = UNBOXED_PAGE_FLAG;
        break;
    default:
        return;
    }

    /* Find its current size. */
    nwords = (sizetab[widetag_of(where[0])])(where);

    first_page = find_page_index((void *)where);
    gc_assert(first_page >= 0);

    /* Note: Any page write-protection must be removed, else a later
     * scavenge_newspace may incorrectly not scavenge these pages.
     * This would not be necessary if they are added to the new areas,
     * but let's do it for them all (they'll probably be written
     * anyway?). */

    gc_assert(page_table[first_page].region_start_offset == 0);

    next_page = first_page;
    remaining_bytes = nwords*N_WORD_BYTES;
    while (remaining_bytes > PAGE_BYTES) {
        gc_assert(page_table[next_page].gen == from_space);
        gc_assert(page_allocated_no_region_p(next_page));
        gc_assert(page_table[next_page].large_object);
        gc_assert(page_table[next_page].region_start_offset ==
                  npage_bytes(next_page-first_page));
        gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);

        page_table[next_page].allocated = boxed;

        /* Shouldn't be write-protected at this stage. Essential that the
         * pages aren't. */
        gc_assert(!page_table[next_page].write_protected);
        remaining_bytes -= PAGE_BYTES;
        next_page++;
    }

    /* Now only one page remains, but the object may have shrunk so
     * there may be more unused pages which will be freed. */

    /* Object may have shrunk but shouldn't have grown - check. */
    gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

    page_table[next_page].allocated = boxed;
    gc_assert(page_table[next_page].allocated ==
              page_table[first_page].allocated);

    /* Adjust the bytes_used. */
    old_bytes_used = page_table[next_page].bytes_used;
    page_table[next_page].bytes_used = remaining_bytes;

    bytes_freed = old_bytes_used - remaining_bytes;

    /* Free any remaining pages; needs care. */
    next_page++;
    while ((old_bytes_used == PAGE_BYTES) &&
           (page_table[next_page].gen == from_space) &&
           page_allocated_no_region_p(next_page) &&
           page_table[next_page].large_object &&
           (page_table[next_page].region_start_offset ==
            npage_bytes(next_page - first_page))) {
        /* It checks out OK, free the page. We don't need to bother zeroing
         * pages as this should have been done before shrinking the
         * object. These pages shouldn't be write protected as they
         * should be zero filled. */
        gc_assert(page_table[next_page].write_protected == 0);

        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].allocated = FREE_PAGE_FLAG;
        page_table[next_page].bytes_used = 0;
        bytes_freed += old_bytes_used;
        next_page++;
    }

    if ((bytes_freed > 0) && gencgc_verbose) {
        FSHOW((stderr,
               "/maybe_adjust_large_object() freed %d\n",
               bytes_freed));
    }

    generations[from_space].bytes_allocated -= bytes_freed;
    bytes_allocated -= bytes_freed;

    return;
}
/* Take a possible pointer to a Lisp object and mark its page in the
 * page_table so that it will not be relocated during a GC.
 *
 * This involves locating the page it points to, then backing up to
 * the start of its region, then marking all pages dont_move from there
 * up to the first page that's not full or has a different generation.
 *
 * It is assumed that all the page static flags have been cleared at
 * the start of a GC.
 *
 * It is also assumed that the current gc_alloc() region has been
 * flushed and the tables updated. */

static void
preserve_pointer(void *addr)
{
    page_index_t addr_page_index = find_page_index(addr);
    page_index_t first_page;
    page_index_t i;
    unsigned int region_allocation;

    /* quick check 1: Address is quite likely to have been invalid. */
    if ((addr_page_index == -1)
        || page_free_p(addr_page_index)
        || (page_table[addr_page_index].bytes_used == 0)
        || (page_table[addr_page_index].gen != from_space)
        /* Skip if already marked dont_move. */
        || (page_table[addr_page_index].dont_move != 0))
        return;
    gc_assert(!(page_table[addr_page_index].allocated&OPEN_REGION_PAGE_FLAG));
    /* (Now that we know that addr_page_index is in range, it's
     * safe to index into page_table[] with it.) */
    region_allocation = page_table[addr_page_index].allocated;

    /* quick check 2: Check the offset within the page.
     *
     */
    if (((unsigned long)addr & (PAGE_BYTES - 1)) >
        page_table[addr_page_index].bytes_used)
        return;

    /* Filter out anything which can't be a pointer to a Lisp object
     * (or, as a special case which also requires dont_move, a return
     * address referring to something in a CodeObject). This is
     * expensive but important, since it vastly reduces the
     * probability that random garbage will be bogusly interpreted as
     * a pointer which prevents a page from moving. */
    if (!(code_page_p(addr_page_index)
          || (is_lisp_pointer((lispobj)addr) &&
              possibly_valid_dynamic_space_pointer(addr))))
        return;

    /* Find the beginning of the region. Note that there may be
     * objects in the region preceding the one that we were passed a
     * pointer to: if this is the case, we will write-protect all the
     * previous objects' pages too. */

#if 0
    /* I think this'd work just as well, but without the assertions.
     * -dan 2004.01.01 */
    first_page = find_page_index(page_region_start(addr_page_index))
#else
    first_page = addr_page_index;
    while (page_table[first_page].region_start_offset != 0) {
        --first_page;
        /* Do some checks. */
        gc_assert(page_table[first_page].bytes_used == PAGE_BYTES);
        gc_assert(page_table[first_page].gen == from_space);
        gc_assert(page_table[first_page].allocated == region_allocation);
    }
#endif

    /* Adjust any large objects before promotion as they won't be
     * copied after promotion. */
    if (page_table[first_page].large_object) {
        maybe_adjust_large_object(page_address(first_page));
        /* If a large object has shrunk then addr may now point to a
         * free area in which case it's ignored here. Note it gets
         * through the valid pointer test above because the tail looks
         * like conses. */
        if (page_free_p(addr_page_index)
            || (page_table[addr_page_index].bytes_used == 0)
            /* Check the offset within the page. */
            || (((unsigned long)addr & (PAGE_BYTES - 1))
                > page_table[addr_page_index].bytes_used)) {
            FSHOW((stderr,
                   "weird? ignore ptr 0x%x to freed area of large object\n",
                   addr));
            return;
        }
        /* It may have moved to unboxed pages. */
        region_allocation = page_table[first_page].allocated;
    }

    /* Now work forward until the end of this contiguous area is found,
     * marking all pages as dont_move. */
    for (i = first_page; ;i++) {
        gc_assert(page_table[i].allocated == region_allocation);

        /* Mark the page static. */
        page_table[i].dont_move = 1;

        /* Move the page to the new_space. XX I'd rather not do this
         * but the GC logic is not quite able to cope with the static
         * pages remaining in the from space. This also requires the
         * generation bytes_allocated counters be updated. */
        page_table[i].gen = new_space;
        generations[new_space].bytes_allocated += page_table[i].bytes_used;
        generations[from_space].bytes_allocated -= page_table[i].bytes_used;

        /* It is essential that the pages are not write protected as
         * they may have pointers into the old-space which need
         * scavenging. They shouldn't be write protected at this
         * stage. */
        gc_assert(!page_table[i].write_protected);

        /* Check whether this is the last page in this contiguous block.. */
        if ((page_table[i].bytes_used < PAGE_BYTES)
            /* ..or it is PAGE_BYTES and is the last in the block */
            || page_free_p(i+1)
            || (page_table[i+1].bytes_used == 0) /* next page free */
            || (page_table[i+1].gen != from_space) /* diff. gen */
            || (page_table[i+1].region_start_offset == 0))
            break;
    }

    /* Check that the page is now static. */
    gc_assert(page_table[addr_page_index].dont_move != 0);
}

#endif  // defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
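
/* An example of what the conservative contract above implies: a raw
 * integer on the C stack that happens to equal an address within a
 * from_space page will pin that page (dont_move) for the duration of
 * the collection. The possibly_valid_dynamic_space_pointer() filter
 * makes such accidents rare; it cannot eliminate them. */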
/* If the given page is not write-protected, then scan it for pointers
 * to younger generations or the top temp. generation, if no
 * suspicious pointers are found then the page is write-protected.
 *
 * Care is taken to check for pointers to the current gc_alloc()
 * region if it is a younger generation or the temp. generation. This
 * frees the caller from doing a gc_alloc_update_page_tables(). Actually
 * the gc_alloc_generation does not need to be checked as this is only
 * called from scavenge_generation() when the gc_alloc generation is
 * younger, so it just checks if there is a pointer to the current
 * region.
 *
 * We return 1 if the page was write-protected, else 0. */
static int
update_page_write_prot(page_index_t page)
{
    generation_index_t gen = page_table[page].gen;
    long j;
    int wp_it = 1;
    void **page_addr = (void **)page_address(page);
    long num_words = page_table[page].bytes_used / N_WORD_BYTES;

    /* Shouldn't be a free page. */
    gc_assert(page_allocated_p(page));
    gc_assert(page_table[page].bytes_used != 0);

    /* Skip if it's already write-protected, pinned, or unboxed */
    if (page_table[page].write_protected
        /* FIXME: What's the reason for not write-protecting pinned pages? */
        || page_table[page].dont_move
        || page_unboxed_p(page))
        return (0);

    /* Scan the page for pointers to younger generations or the
     * top temp. generation. */

    for (j = 0; j < num_words; j++) {
        void *ptr = *(page_addr+j);
        page_index_t index = find_page_index(ptr);

        /* Check that it's in the dynamic space */
        if (index != -1)
            if (/* Does it point to a younger or the temp. generation? */
                (page_allocated_p(index)
                 && (page_table[index].bytes_used != 0)
                 && ((page_table[index].gen < gen)
                     || (page_table[index].gen == SCRATCH_GENERATION)))

                /* Or does it point within a current gc_alloc() region? */
                || ((boxed_region.start_addr <= ptr)
                    && (ptr <= boxed_region.free_pointer))
                || ((unboxed_region.start_addr <= ptr)
                    && (ptr <= unboxed_region.free_pointer))) {
                wp_it = 0;
                break;
            }
    }

    if (wp_it == 1) {
        /* Write-protect the page. */
        /*FSHOW((stderr, "/write-protecting page %d gen %d\n", page, gen));*/

        os_protect((void *)page_addr,
                   PAGE_BYTES,
                   OS_VM_PROT_READ|OS_VM_PROT_EXECUTE);

        /* Note the page as protected in the page tables. */
        page_table[page].write_protected = 1;
    }

    return (wp_it);
}
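
/* The os_protect() call above is the write barrier: a later store to
 * the page faults, and the fault handler elsewhere in the runtime
 * clears write_protected (setting write_protected_cleared), putting
 * the page back into the set that scavenge_generations() must scan. */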
/* Scavenge all generations from FROM to TO, inclusive, except for
 * new_space which needs special handling, as new objects may be
 * added which are not checked here - use scavenge_newspace generation.
 *
 * Write-protected pages should not have any pointers to the
 * from_space so do not need scavenging; thus write-protected pages are
 * not always scavenged. There is some code to check that these pages
 * are not written; but to check fully the write-protected pages need
 * to be scavenged by disabling the code to skip them.
 *
 * Under the current scheme when a generation is GCed the younger
 * generations will be empty. So, when a generation is being GCed it
 * is only necessary to scavenge the older generations for pointers
 * not the younger. So a page that does not have pointers to younger
 * generations does not need to be scavenged.
 *
 * The write-protection can be used to note pages that don't have
 * pointers to younger pages. But pages can be written without having
 * pointers to younger generations. After the pages are scavenged here
 * they can be scanned for pointers to younger generations and if
 * there are none the page can be write-protected.
 *
 * One complication is when the newspace is the top temp. generation.
 *
 * Enabling SC_GEN_CK scavenges the write-protected pages and checks
 * that none were written, which they shouldn't be as they should have
 * no pointers to younger generations. This breaks down for weak
 * pointers as the objects contain a link to the next and are written
 * if a weak pointer is scavenged. Still it's a useful check. */
static void
scavenge_generations(generation_index_t from, generation_index_t to)
{
    page_index_t i;
    page_index_t num_wp = 0;

#define SC_GEN_CK 0
#if SC_GEN_CK
    /* Clear the write_protected_cleared flags on all pages. */
    for (i = 0; i < page_table_pages; i++)
        page_table[i].write_protected_cleared = 0;
#endif

    for (i = 0; i < last_free_page; i++) {
        generation_index_t generation = page_table[i].gen;
        if (page_boxed_p(i)
            && (page_table[i].bytes_used != 0)
            && (generation != new_space)
            && (generation >= from)
            && (generation <= to)) {
            page_index_t last_page,j;
            int write_protected=1;

            /* This should be the start of a region */
            gc_assert(page_table[i].region_start_offset == 0);

            /* Now work forward until the end of the region */
            for (last_page = i; ; last_page++) {
                write_protected =
                    write_protected && page_table[last_page].write_protected;
                if ((page_table[last_page].bytes_used < PAGE_BYTES)
                    /* Or it is PAGE_BYTES and is the last in the block */
                    || (!page_boxed_p(last_page+1))
                    || (page_table[last_page+1].bytes_used == 0)
                    || (page_table[last_page+1].gen != generation)
                    || (page_table[last_page+1].region_start_offset == 0))
                    break;
            }
            if (!write_protected) {
                scavenge(page_address(i),
                         ((unsigned long)(page_table[last_page].bytes_used
                                          + npage_bytes(last_page-i)))
                         /N_WORD_BYTES);

                /* Now scan the pages and write protect those that
                 * don't have pointers to younger generations. */
                if (enable_page_protection) {
                    for (j = i; j <= last_page; j++) {
                        num_wp += update_page_write_prot(j);
                    }
                }
                if ((gencgc_verbose > 1) && (num_wp != 0)) {
                    FSHOW((stderr,
                           "/write protected %d pages within generation %d\n",
                           num_wp, generation));
                }
            }
            i = last_page;
        }
    }

#if SC_GEN_CK
    /* Check that none of the write_protected pages in this generation
     * have been written to. */
    for (i = 0; i < page_table_pages; i++) {
        if (page_allocated_p(i)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)
            && (page_table[i].write_protected_cleared != 0)) {
            FSHOW((stderr, "/scavenge_generation() %d\n", generation));
            FSHOW((stderr,
                   "/page bytes_used=%d region_start_offset=%lu dont_move=%d\n",
                   page_table[i].bytes_used,
                   page_table[i].region_start_offset,
                   page_table[i].dont_move));
            lose("write to protected page %d in scavenge_generation()\n", i);
        }
    }
#endif
}
/* Scavenge a newspace generation. As it is scavenged new objects may
 * be allocated to it; these will also need to be scavenged. This
 * repeats until there are no more objects unscavenged in the
 * newspace generation.
 *
 * To help improve the efficiency, areas written are recorded by
 * gc_alloc() and only these scavenged. Sometimes a little more will be
 * scavenged, but this causes no harm. An easy check is done that the
 * scavenged bytes equals the number allocated in the previous
 * scavenge.
 *
 * Write-protected pages are not scanned except if they are marked
 * dont_move in which case they may have been promoted and still have
 * pointers to the from space.
 *
 * Write-protected pages could potentially be written by alloc however
 * to avoid having to handle re-scavenging of write-protected pages
 * gc_alloc() does not write to write-protected pages.
 *
 * New areas of objects allocated are recorded alternatively in the two
 * new_areas arrays below. */
static struct new_area new_areas_1[NUM_NEW_AREAS];
static struct new_area new_areas_2[NUM_NEW_AREAS];

/* Do one full scan of the new space generation. This is not enough to
 * complete the job as new objects may be added to the generation in
 * the process which are not scavenged. */
static void
scavenge_newspace_generation_one_scan(generation_index_t generation)
{
    page_index_t i;

    FSHOW((stderr,
           "/starting one full scan of newspace generation %d\n",
           generation));
    for (i = 0; i < last_free_page; i++) {
        /* Note that this skips over open regions when it encounters them. */
        if (page_boxed_p(i)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)
            && ((page_table[i].write_protected == 0)
                /* (This may be redundant as write_protected is now
                 * cleared before promotion.) */
                || (page_table[i].dont_move == 1))) {
            page_index_t last_page;
            int all_wp=1;

            /* The scavenge will start at the region_start_offset of
             * page i.
             *
             * We need to find the full extent of this contiguous
             * block in case objects span pages.
             *
             * Now work forward until the end of this contiguous area
             * is found. A small area is preferred as there is a
             * better chance of its pages being write-protected. */
            for (last_page = i; ;last_page++) {
                /* If all pages are write-protected and movable,
                 * then no need to scavenge */
                all_wp=all_wp && page_table[last_page].write_protected &&
                    !page_table[last_page].dont_move;

                /* Check whether this is the last page in this
                 * contiguous block */
                if ((page_table[last_page].bytes_used < PAGE_BYTES)
                    /* Or it is PAGE_BYTES and is the last in the block */
                    || (!page_boxed_p(last_page+1))
                    || (page_table[last_page+1].bytes_used == 0)
                    || (page_table[last_page+1].gen != generation)
                    || (page_table[last_page+1].region_start_offset == 0))
                    break;
            }

            /* Do a limited check for write-protected pages. */
            if (!all_wp) {
                long nwords = (((unsigned long)
                               (page_table[last_page].bytes_used
                                + npage_bytes(last_page-i)
                                + page_table[i].region_start_offset))
                               / N_WORD_BYTES);
                new_areas_ignore_page = last_page;

                scavenge(page_region_start(i), nwords);
            }
            i = last_page;
        }
    }
    FSHOW((stderr,
           "/done with one full scan of newspace generation %d\n",
           generation));
}
/* Do a complete scavenge of the newspace generation. */
static void
scavenge_newspace_generation(generation_index_t generation)
{
    page_index_t i;

    /* the new_areas array currently being written to by gc_alloc() */
    struct new_area (*current_new_areas)[] = &new_areas_1;
    long current_new_areas_index;

    /* the new_areas created by the previous scavenge cycle */
    struct new_area (*previous_new_areas)[] = NULL;
    long previous_new_areas_index;

    /* Flush the current regions updating the tables. */
    gc_alloc_update_all_page_tables();

    /* Turn on the recording of new areas by gc_alloc(). */
    new_areas = current_new_areas;
    new_areas_index = 0;

    /* Don't need to record new areas that get scavenged anyway during
     * scavenge_newspace_generation_one_scan. */
    record_new_objects = 1;

    /* Start with a full scavenge. */
    scavenge_newspace_generation_one_scan(generation);

    /* Record all new areas now. */
    record_new_objects = 2;

    /* Give a chance to weak hash tables to make other objects live.
     * FIXME: The algorithm implemented here for weak hash table gcing
     * is O(W^2+N) as Bruno Haible warns in
     * http://www.haible.de/bruno/papers/cs/weak/WeakDatastructures-writeup.html
     * see "Implementation 2". */
    scav_weak_hash_tables();

    /* Flush the current regions updating the tables. */
    gc_alloc_update_all_page_tables();

    /* Grab new_areas_index. */
    current_new_areas_index = new_areas_index;

    /*FSHOW((stderr,
             "The first scan is finished; current_new_areas_index=%d.\n",
             current_new_areas_index));*/

    while (current_new_areas_index > 0) {
        /* Move the current to the previous new areas */
        previous_new_areas = current_new_areas;
        previous_new_areas_index = current_new_areas_index;

        /* Scavenge all the areas in previous new areas. Any new areas
         * allocated are saved in current_new_areas. */

        /* Allocate an array for current_new_areas; alternating between
         * new_areas_1 and 2 */
        if (previous_new_areas == &new_areas_1)
            current_new_areas = &new_areas_2;
        else
            current_new_areas = &new_areas_1;

        /* Set up for gc_alloc(). */
        new_areas = current_new_areas;
        new_areas_index = 0;

        /* Check whether previous_new_areas had overflowed. */
        if (previous_new_areas_index >= NUM_NEW_AREAS) {

            /* New areas of objects allocated have been lost so need to do a
             * full scan to be sure! If this becomes a problem try
             * increasing NUM_NEW_AREAS. */
            if (gencgc_verbose)
                SHOW("new_areas overflow, doing full scavenge");

            /* Don't need to record new areas that get scavenged
             * anyway during scavenge_newspace_generation_one_scan. */
            record_new_objects = 1;

            scavenge_newspace_generation_one_scan(generation);

            /* Record all new areas now. */
            record_new_objects = 2;

            scav_weak_hash_tables();

            /* Flush the current regions updating the tables. */
            gc_alloc_update_all_page_tables();

        } else {

            /* Work through previous_new_areas. */
            for (i = 0; i < previous_new_areas_index; i++) {
                page_index_t page = (*previous_new_areas)[i].page;
                size_t offset = (*previous_new_areas)[i].offset;
                size_t size = (*previous_new_areas)[i].size / N_WORD_BYTES;
                gc_assert((*previous_new_areas)[i].size % N_WORD_BYTES == 0);
                scavenge(page_address(page)+offset, size);
            }

            scav_weak_hash_tables();

            /* Flush the current regions updating the tables. */
            gc_alloc_update_all_page_tables();
        }

        current_new_areas_index = new_areas_index;

        /*FSHOW((stderr,
                 "The re-scan has finished; current_new_areas_index=%d.\n",
                 current_new_areas_index));*/
    }

    /* Turn off recording of areas allocated by gc_alloc(). */
    record_new_objects = 0;

#if SC_NS_GEN_CK
    /* Check that none of the write_protected pages in this generation
     * have been written to. */
    for (i = 0; i < page_table_pages; i++) {
        if (page_allocated_p(i)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)
            && (page_table[i].write_protected_cleared != 0)
            && (page_table[i].dont_move == 0)) {
            lose("write protected page %d written to in scavenge_newspace_generation\ngeneration=%d dont_move=%d\n",
                 i, generation, page_table[i].dont_move);
        }
    }
#endif
}
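
/* The rescan loop above terminates because each pass scavenges only
 * the areas recorded during the previous pass; once a pass allocates
 * nothing new, new_areas_index remains 0 and the transitive closure of
 * reachable newspace objects is complete. */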
/* Un-write-protect all the pages in from_space. This is done at the
 * start of a GC else there may be many page faults while scavenging
 * the newspace (I've seen it drive the system time to 99%). These pages
 * would need to be unprotected anyway before unmapping in
 * free_oldspace; not sure what effect this has on paging.. */
static void
unprotect_oldspace(void)
{
    page_index_t i;

    for (i = 0; i < last_free_page; i++) {
        if (page_allocated_p(i)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == from_space)) {
            void *page_start;

            page_start = (void *)page_address(i);

            /* Remove any write-protection. We should be able to rely
             * on the write-protect flag to avoid redundant calls. */
            if (page_table[i].write_protected) {
                os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
                page_table[i].write_protected = 0;
            }
        }
    }
}
/* Work through all the pages and free any in from_space. This
 * assumes that all objects have been copied or promoted to an older
 * generation. Bytes_allocated and the generation bytes_allocated
 * counter are updated. The number of bytes freed is returned. */
static unsigned long
free_oldspace(void)
{
    unsigned long bytes_freed = 0;
    page_index_t first_page, last_page;

    first_page = 0;

    do {
        /* Find a first page for the next region of pages. */
        while ((first_page < last_free_page)
               && (page_free_p(first_page)
                   || (page_table[first_page].bytes_used == 0)
                   || (page_table[first_page].gen != from_space)))
            first_page++;

        if (first_page >= last_free_page)
            break;

        /* Find the last page of this region. */
        last_page = first_page;

        do {
            /* Free the page. */
            bytes_freed += page_table[last_page].bytes_used;
            generations[page_table[last_page].gen].bytes_allocated -=
                page_table[last_page].bytes_used;
            page_table[last_page].allocated = FREE_PAGE_FLAG;
            page_table[last_page].bytes_used = 0;

            /* Remove any write-protection. We should be able to rely
             * on the write-protect flag to avoid redundant calls. */
            {
                void *page_start = (void *)page_address(last_page);

                if (page_table[last_page].write_protected) {
                    os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
                    page_table[last_page].write_protected = 0;
                }
            }
            last_page++;
        }
        while ((last_page < last_free_page)
               && page_allocated_p(last_page)
               && (page_table[last_page].bytes_used != 0)
               && (page_table[last_page].gen == from_space));

#ifdef READ_PROTECT_FREE_PAGES
        os_protect(page_address(first_page),
                   npage_bytes(last_page-first_page),
                   OS_VM_PROT_NONE);
#endif
        first_page = last_page;
    } while (first_page < last_free_page);

    bytes_allocated -= bytes_freed;
    return bytes_freed;
}
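
/* Accounting invariant maintained above: bytes_allocated equals the
 * sum of generations[g].bytes_allocated over all generations, so the
 * global total and the per-generation counters are debited by the same
 * bytes_freed. */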
/* Print some information about a pointer at the given address. */
static void
print_ptr(lispobj *addr)
{
    /* If addr is in the dynamic space then print out the page information. */
    page_index_t pi1 = find_page_index((void*)addr);

    if (pi1 != -1)
        fprintf(stderr," %x: page %d alloc %d gen %d bytes_used %d offset %lu dont_move %d\n",
                (unsigned long) addr,
                pi1,
                page_table[pi1].allocated,
                page_table[pi1].gen,
                page_table[pi1].bytes_used,
                page_table[pi1].region_start_offset,
                page_table[pi1].dont_move);
    fprintf(stderr," %x %x %x %x (%x) %x %x %x %x\n",
            *(addr-4),
            *(addr-3),
            *(addr-2),
            *(addr-1),
            *(addr-0),
            *(addr+1),
            *(addr+2),
            *(addr+3),
            *(addr+4));
}
static void
verify_space(lispobj *start, size_t words)
{
    int is_in_dynamic_space = (find_page_index((void*)start) != -1);
    int is_in_readonly_space =
        (READ_ONLY_SPACE_START <= (unsigned long)start &&
         (unsigned long)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));

    while (words > 0) {
        size_t count = 1;
        lispobj thing = *(lispobj *)start;

        if (is_lisp_pointer(thing)) {
            page_index_t page_index = find_page_index((void*)thing);
            long to_readonly_space =
                (READ_ONLY_SPACE_START <= thing &&
                 thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
            long to_static_space =
                (STATIC_SPACE_START <= thing &&
                 thing < SymbolValue(STATIC_SPACE_FREE_POINTER,0));

            /* Does it point to the dynamic space? */
            if (page_index != -1) {
                /* If it's within the dynamic space it should point to a used
                 * page. XX Could check the offset too. */
                if (page_allocated_p(page_index)
                    && (page_table[page_index].bytes_used == 0))
                    lose ("Ptr %x @ %x sees free page.\n", thing, start);
                /* Check that it doesn't point to a forwarding pointer! */
                if (*((lispobj *)native_pointer(thing)) == 0x01) {
                    lose("Ptr %x @ %x sees forwarding ptr.\n", thing, start);
                }
                /* Check that it's not in the RO space as it would then be a
                 * pointer from the RO to the dynamic space. */
                if (is_in_readonly_space) {
                    lose("ptr to dynamic space %x from RO space %x\n",
                         thing, start);
                }
                /* Does it point to a plausible object? This check slows
                 * it down a lot (so it's commented out).
                 *
                 * "a lot" is serious: it ate 50 minutes cpu time on
                 * my duron 950 before I came back from lunch and
                 * killed it.
                 *
                 * FIXME: Add a variable to enable this
                 * dynamically. */
                /*
                if (!possibly_valid_dynamic_space_pointer((lispobj *)thing)) {
                    lose("ptr %x to invalid object %x\n", thing, start);
                }
                */
            } else {
                /* Verify that it points to another valid space. */
                if (!to_readonly_space && !to_static_space) {
                    lose("Ptr %x @ %x sees junk.\n", thing, start);
                }
            }
        } else {
            if (!(fixnump(thing))) {
                /* skip fixnums */
                switch(widetag_of(*start)) {

                    /* boxed objects */
                case SIMPLE_VECTOR_WIDETAG:
                case RATIO_WIDETAG:
                case COMPLEX_WIDETAG:
                case SIMPLE_ARRAY_WIDETAG:
                case COMPLEX_BASE_STRING_WIDETAG:
#ifdef COMPLEX_CHARACTER_STRING_WIDETAG
                case COMPLEX_CHARACTER_STRING_WIDETAG:
#endif
                case COMPLEX_VECTOR_NIL_WIDETAG:
                case COMPLEX_BIT_VECTOR_WIDETAG:
                case COMPLEX_VECTOR_WIDETAG:
                case COMPLEX_ARRAY_WIDETAG:
                case CLOSURE_HEADER_WIDETAG:
                case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
                case VALUE_CELL_HEADER_WIDETAG:
                case SYMBOL_HEADER_WIDETAG:
                case CHARACTER_WIDETAG:
#if N_WORD_BITS == 64
                case SINGLE_FLOAT_WIDETAG:
#endif
                case UNBOUND_MARKER_WIDETAG:
                case FDEFN_WIDETAG:
                    count = 1;
                    break;

                case INSTANCE_HEADER_WIDETAG:
                    {
                        lispobj nuntagged;
                        long ntotal = HeaderValue(thing);
                        lispobj layout = ((struct instance *)start)->slots[0];
                        if (!layout) {
                            count = 1;
                            break;
                        }
                        nuntagged = ((struct layout *)
                                     native_pointer(layout))->n_untagged_slots;
                        verify_space(start + 1,
                                     ntotal - fixnum_value(nuntagged));
                        count = ntotal + 1;
                        break;
                    }
                case CODE_HEADER_WIDETAG:
                    {
                        lispobj object = *start;
                        struct code *code;
                        long nheader_words, ncode_words, nwords;
                        lispobj fheaderl;
                        struct simple_fun *fheaderp;

                        code = (struct code *) start;

                        /* Check that it's not in the dynamic space.
                         * FIXME: Isn't it supposed to be OK for code
                         * objects to be in the dynamic space these days? */
                        if (is_in_dynamic_space
                            /* It's ok if it's byte compiled code. The trace
                             * table offset will be a fixnum if it's x86
                             * compiled code - check.
                             *
                             * FIXME: #^#@@! lack of abstraction here..
                             * This line can probably go away now that
                             * there's no byte compiler, but I've got
                             * too much to worry about right now to try
                             * to make sure. -- WHN 2001-10-06 */
                            && fixnump(code->trace_table_offset)
                            /* Only when enabled */
                            && verify_dynamic_code_check) {
                            FSHOW((stderr,
                                   "/code object at %x in the dynamic space\n",
                                   start));
                        }

                        ncode_words = fixnum_value(code->code_size);
                        nheader_words = HeaderValue(object);
                        nwords = ncode_words + nheader_words;
                        nwords = CEILING(nwords, 2);
                        /* Scavenge the boxed section of the code data block */
                        verify_space(start + 1, nheader_words - 1);

                        /* Scavenge the boxed section of each function
                         * object in the code data block. */
                        fheaderl = code->entry_points;
                        while (fheaderl != NIL) {
                            fheaderp =
                                (struct simple_fun *) native_pointer(fheaderl);
                            gc_assert(widetag_of(fheaderp->header) ==
                                      SIMPLE_FUN_HEADER_WIDETAG);
                            verify_space(&fheaderp->name, 1);
                            verify_space(&fheaderp->arglist, 1);
                            verify_space(&fheaderp->type, 1);
                            fheaderl = fheaderp->next;
                        }
                        count = nwords;
                        break;
                    }

                    /* unboxed objects */
                case BIGNUM_WIDETAG:
#if N_WORD_BITS != 64
                case SINGLE_FLOAT_WIDETAG:
#endif
                case DOUBLE_FLOAT_WIDETAG:
#ifdef LONG_FLOAT_WIDETAG
                case LONG_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_SINGLE_FLOAT_WIDETAG
                case COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_DOUBLE_FLOAT_WIDETAG
                case COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_LONG_FLOAT_WIDETAG
                case COMPLEX_LONG_FLOAT_WIDETAG:
#endif
                case SIMPLE_BASE_STRING_WIDETAG:
#ifdef SIMPLE_CHARACTER_STRING_WIDETAG
                case SIMPLE_CHARACTER_STRING_WIDETAG:
#endif
                case SIMPLE_BIT_VECTOR_WIDETAG:
                case SIMPLE_ARRAY_NIL_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG
                case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
#endif
                case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG
                case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
                case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
                case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
#endif
                case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
                case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
#ifdef SIMPLE_ARRAY_LONG_FLOAT_WIDETAG
                case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
                case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
                case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
                case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
#endif
                case SAP_WIDETAG:
                case WEAK_POINTER_WIDETAG:
#ifdef LUTEX_WIDETAG
                case LUTEX_WIDETAG:
#endif
#ifdef NO_TLS_VALUE_MARKER_WIDETAG
                case NO_TLS_VALUE_MARKER_WIDETAG:
#endif
                    count = (sizetab[widetag_of(*start)])(start);
                    break;

                default:
                    lose("Unhandled widetag 0x%x at 0x%x\n",
                         widetag_of(*start), start);
                }
            }
        }
        start += count;
        words -= count;
    }
}

static void
verify_gc(void)
{
    /* FIXME: It would be nice to make names consistent so that
     * foo_size meant size *in* *bytes* instead of size in some
     * arbitrary units. (Yes, this caused a bug, how did you guess?:-)
     * Some counts of lispobjs are called foo_count; it might be good
     * to grep for all foo_size and rename the appropriate ones to
     * foo_count. */
    long read_only_space_size =
        (lispobj *)SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0)
        - (lispobj *)READ_ONLY_SPACE_START;
    long static_space_size =
        (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0)
        - (lispobj *)STATIC_SPACE_START;
    struct thread *th;
    for_each_thread(th) {
        long binding_stack_size =
            (lispobj *)get_binding_stack_pointer(th)
            - (lispobj *)th->binding_stack_start;
        verify_space(th->binding_stack_start, binding_stack_size);
    }
    verify_space((lispobj *)READ_ONLY_SPACE_START, read_only_space_size);
    verify_space((lispobj *)STATIC_SPACE_START, static_space_size);
}
static void
verify_generation(generation_index_t generation)
{
    page_index_t i;

    for (i = 0; i < last_free_page; i++) {
        if (page_allocated_p(i)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)) {
            page_index_t last_page;
            int region_allocation = page_table[i].allocated;

            /* This should be the start of a contiguous block */
            gc_assert(page_table[i].region_start_offset == 0);

            /* Need to find the full extent of this contiguous block in case
               objects span pages. */

            /* Now work forward until the end of this contiguous area is
               found. */
            for (last_page = i; ;last_page++)
                /* Check whether this is the last page in this contiguous
                 * block. */
                if ((page_table[last_page].bytes_used < PAGE_BYTES)
                    /* Or it is PAGE_BYTES and is the last in the block */
                    || (page_table[last_page+1].allocated != region_allocation)
                    || (page_table[last_page+1].bytes_used == 0)
                    || (page_table[last_page+1].gen != generation)
                    || (page_table[last_page+1].region_start_offset == 0))
                    break;

            verify_space(page_address(i),
                         ((unsigned long)
                          (page_table[last_page].bytes_used
                           + npage_bytes(last_page-i)))
                         / N_WORD_BYTES);
            i = last_page;
        }
    }
}
/* Check that all the free space is zero filled. */
static void
verify_zero_fill(void)
{
    page_index_t page;

    for (page = 0; page < last_free_page; page++) {
        if (page_free_p(page)) {
            /* The whole page should be zero filled. */
            long *start_addr = (long *)page_address(page);
            long size = 1024;
            long i;
            for (i = 0; i < size; i++) {
                if (start_addr[i] != 0) {
                    lose("free page not zero at %x\n", start_addr + i);
                }
            }
        } else {
            long free_bytes = PAGE_BYTES - page_table[page].bytes_used;
            if (free_bytes > 0) {
                long *start_addr = (long *)((unsigned long)page_address(page)
                                          + page_table[page].bytes_used);
                long size = free_bytes / N_WORD_BYTES;
                long i;
                for (i = 0; i < size; i++) {
                    if (start_addr[i] != 0) {
                        lose("free region not zero at %x\n", start_addr + i);
                    }
                }
            }
        }
    }
}

/* External entry point for verify_zero_fill */
void
gencgc_verify_zero_fill(void)
{
    /* Flush the alloc regions updating the tables. */
    gc_alloc_update_all_page_tables();
    SHOW("verifying zero fill");
    verify_zero_fill();
}

static void
verify_dynamic_space(void)
{
    generation_index_t i;

    for (i = 0; i <= HIGHEST_NORMAL_GENERATION; i++)
        verify_generation(i);

    if (gencgc_enable_verify_zero_fill)
        verify_zero_fill();
}
/* Write-protect all the dynamic boxed pages in the given generation. */
static void
write_protect_generation_pages(generation_index_t generation)
{
    page_index_t start;

    gc_assert(generation < SCRATCH_GENERATION);

    for (start = 0; start < last_free_page; start++) {
        if (protect_page_p(start, generation)) {
            void *page_start;
            page_index_t last;

            /* Note the page as protected in the page tables. */
            page_table[start].write_protected = 1;

            for (last = start + 1; last < last_free_page; last++) {
                if (!protect_page_p(last, generation))
                    break;
                page_table[last].write_protected = 1;
            }

            page_start = (void *)page_address(start);

            os_protect(page_start,
                       npage_bytes(last - start),
                       OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);

            start = last;
        }
    }

    if (gencgc_verbose > 1) {
        FSHOW((stderr,
               "/write protected %d of %d pages in generation %d\n",
               count_write_protect_generation_pages(generation),
               count_generation_pages(generation),
               generation));
    }
}
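
/* Runs of contiguous protectable pages are grouped into a single
 * os_protect() call above, e.g. pages 10..17 of a generation cost one
 * call rather than eight; this matters when protecting a large old
 * generation after a collection. */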
#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
static void
scavenge_control_stack()
{
    unsigned long control_stack_size;

    /* This is going to be a big problem when we try to port threads
     * to PPC... CLH */
    struct thread *th = arch_os_get_current_thread();
    lispobj *control_stack =
        (lispobj *)(th->control_stack_start);

    control_stack_size = current_control_stack_pointer - control_stack;
    scavenge(control_stack, control_stack_size);
}
/* Scavenging Interrupt Contexts */

static int boxed_registers[] = BOXED_REGISTERS;

static void
scavenge_interrupt_context(os_context_t * context)
{
    int i;

#ifdef reg_LIP
    unsigned long lip;
    unsigned long lip_offset;
    int lip_register_pair;
#endif
    unsigned long pc_code_offset;

#ifdef ARCH_HAS_LINK_REGISTER
    unsigned long lr_code_offset;
#endif
#ifdef ARCH_HAS_NPC_REGISTER
    unsigned long npc_code_offset;
#endif

#ifdef reg_LIP
    /* Find the LIP's register pair and calculate its offset */
    /* before we scavenge the context. */

    /*
     * I (RLT) think this is trying to find the boxed register that is
     * closest to the LIP address, without going past it. Usually, it's
     * reg_CODE or reg_LRA. But sometimes, nothing can be found.
     */
    lip = *os_context_register_addr(context, reg_LIP);
    lip_offset = 0x7FFFFFFF;
    lip_register_pair = -1;
    for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
        unsigned long reg;
        unsigned long offset;
        int index;

        index = boxed_registers[i];
        reg = *os_context_register_addr(context, index);
        if ((reg & ~((1L<<N_LOWTAG_BITS)-1)) <= lip) {
            offset = lip - reg;
            if (offset < lip_offset) {
                lip_offset = offset;
                lip_register_pair = index;
            }
        }
    }
#endif /* reg_LIP */

    /* Compute the PC's offset from the start of the CODE */
    /* register. */
    pc_code_offset = *os_context_pc_addr(context)
        - *os_context_register_addr(context, reg_CODE);
#ifdef ARCH_HAS_NPC_REGISTER
    npc_code_offset = *os_context_npc_addr(context)
        - *os_context_register_addr(context, reg_CODE);
#endif /* ARCH_HAS_NPC_REGISTER */
#ifdef ARCH_HAS_LINK_REGISTER
    lr_code_offset =
        *os_context_lr_addr(context) -
        *os_context_register_addr(context, reg_CODE);
#endif

    /* Scavenge all boxed registers in the context. */
    for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
        int index;
        lispobj foo;

        index = boxed_registers[i];
        foo = *os_context_register_addr(context, index);
        scavenge(&foo, 1);
        *os_context_register_addr(context, index) = foo;

        scavenge((lispobj *) &(*os_context_register_addr(context, index)), 1);
    }

#ifdef reg_LIP
    /* Fix the LIP */

    /*
     * But what happens if lip_register_pair is -1?
     * *os_context_register_addr on Solaris (see
     * solaris_register_address in solaris-os.c) will return
     * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
     * that what we really want? My guess is that that is not what we
     * want, so if lip_register_pair is -1, we don't touch reg_LIP at
     * all. But maybe it doesn't really matter if LIP is trashed?
     */
    if (lip_register_pair >= 0) {
        *os_context_register_addr(context, reg_LIP) =
            *os_context_register_addr(context, lip_register_pair)
            + lip_offset;
    }
#endif /* reg_LIP */

    /* Fix the PC if it was in from space */
    if (from_space_p(*os_context_pc_addr(context)))
        *os_context_pc_addr(context) =
            *os_context_register_addr(context, reg_CODE) + pc_code_offset;

#ifdef ARCH_HAS_LINK_REGISTER
    /* Fix the LR ditto; important if we're being called from
     * an assembly routine that expects to return using blr, otherwise
     * the return would land in the stale from-space copy of the code. */
    if (from_space_p(*os_context_lr_addr(context)))
        *os_context_lr_addr(context) =
            *os_context_register_addr(context, reg_CODE) + lr_code_offset;
#endif /* ARCH_HAS_LINK_REGISTER */

#ifdef ARCH_HAS_NPC_REGISTER
    if (from_space_p(*os_context_npc_addr(context)))
        *os_context_npc_addr(context) =
            *os_context_register_addr(context, reg_CODE) + npc_code_offset;
#endif /* ARCH_HAS_NPC_REGISTER */
}
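
/* The displacement trick above, shown in isolation (a sketch, not
 * runtime code, for ports that define reg_CODE): an interior pointer
 * such as the PC is recorded as an offset from a boxed base register
 * before the scavenge, the base register is allowed to be forwarded,
 * and the interior pointer is then rebuilt from the possibly-moved
 * base. */
#if 0
static void
relocate_interior_pointer_example(os_context_t *context)
{
    /* capture the displacement before objects move */
    unsigned long pc_offset = *os_context_pc_addr(context)
        - *os_context_register_addr(context, reg_CODE);

    /* ... scavenging may forward the code object here ... */

    /* rebuild the interior pointer from the new base */
    *os_context_pc_addr(context) =
        *os_context_register_addr(context, reg_CODE) + pc_offset;
}
#endif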
static void
scavenge_interrupt_contexts(void)
{
    int i, index;
    os_context_t *context;

    struct thread *th = arch_os_get_current_thread();

    index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,0));

#if defined(DEBUG_PRINT_CONTEXT_INDEX)
    printf("Number of active contexts: %d\n", index);
#endif

    for (i = 0; i < index; i++) {
        context = th->interrupt_contexts[i];
        scavenge_interrupt_context(context);
    }
}
#endif /* !LISP_FEATURE_X86 && !LISP_FEATURE_X86_64 */
#if defined(LISP_FEATURE_SB_THREAD)
static void
preserve_context_registers (os_context_t *c)
{
    void **ptr;

    /* On Darwin the signal context isn't a contiguous block of memory,
     * so just calling preserve_pointer() on its contents won't be
     * sufficient.
     */
#if defined(LISP_FEATURE_DARWIN)
#if defined LISP_FEATURE_X86
    preserve_pointer((void*)*os_context_register_addr(c,reg_EAX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_ECX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_EDX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_EBX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_ESI));
    preserve_pointer((void*)*os_context_register_addr(c,reg_EDI));
    preserve_pointer((void*)*os_context_pc_addr(c));
#elif defined LISP_FEATURE_X86_64
    preserve_pointer((void*)*os_context_register_addr(c,reg_RAX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_RCX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_RDX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_RBX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_RSI));
    preserve_pointer((void*)*os_context_register_addr(c,reg_RDI));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R8));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R9));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R10));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R11));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R12));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R13));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R14));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R15));
    preserve_pointer((void*)*os_context_pc_addr(c));
#else
    #error "preserve_context_registers needs to be tweaked for non-x86 Darwin"
#endif
#endif
    for(ptr = ((void **)(c+1))-1; ptr>=(void **)c; ptr--) {
        preserve_pointer(*ptr);
    }
}
#endif
/* Garbage collect a generation. If raise is 0 then the remains of the
 * generation are not raised to the next generation. */
static void
garbage_collect_generation(generation_index_t generation, int raise)
{
    unsigned long bytes_freed;
    page_index_t i;
    unsigned long static_space_size;
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    struct thread *th;
#endif

    gc_assert(generation <= HIGHEST_NORMAL_GENERATION);

    /* The oldest generation can't be raised. */
    gc_assert((generation != HIGHEST_NORMAL_GENERATION) || (raise == 0));

    /* Check if weak hash tables were processed in the previous GC. */
    gc_assert(weak_hash_tables == NULL);

    /* Initialize the weak pointer list. */
    weak_pointers = NULL;

#ifdef LUTEX_WIDETAG
    unmark_lutexes(generation);
#endif

    /* When a generation is not being raised it is transported to a
     * temporary generation (NUM_GENERATIONS), and lowered when
     * done. Set up this new generation. There should be no pages
     * allocated to it yet. */
    if (!raise) {
        gc_assert(generations[SCRATCH_GENERATION].bytes_allocated == 0);
    }

    /* Set the global src and dest. generations */
    from_space = generation;
    if (raise)
        new_space = generation+1;
    else
        new_space = SCRATCH_GENERATION;

    /* Change to a new space for allocation, resetting the alloc_start_page */
    gc_alloc_generation = new_space;
    generations[new_space].alloc_start_page = 0;
    generations[new_space].alloc_unboxed_start_page = 0;
    generations[new_space].alloc_large_start_page = 0;
    generations[new_space].alloc_large_unboxed_start_page = 0;

    /* Before any pointers are preserved, the dont_move flags on the
     * pages need to be cleared. */
    for (i = 0; i < last_free_page; i++)
        if(page_table[i].gen==from_space)
            page_table[i].dont_move = 0;

    /* Un-write-protect the old-space pages. This is essential for the
     * promoted pages as they may contain pointers into the old-space
     * which need to be scavenged. It also helps avoid unnecessary page
     * faults as forwarding pointers are written into them. They need to
     * be un-protected anyway before unmapping later. */
    unprotect_oldspace();

    /* Scavenge the stacks' conservative roots. */

    /* there are potentially two stacks for each thread: the main
     * stack, which may contain Lisp pointers, and the alternate stack.
     * We don't ever run Lisp code on the altstack, but it may
     * host a sigcontext with lisp objects in it */

    /* what we need to do: (1) find the stack pointer for the main
     * stack; scavenge it (2) find the interrupt context on the
     * alternate stack that might contain lisp values, and scavenge
     * that */

    /* we assume that none of the preceding applies to the thread that
     * initiates GC. If you ever call GC from inside an altstack
     * handler, you will lose. */

#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    /* And if we're saving a core, there's no point in being conservative. */
    if (conservative_stack) {
        for_each_thread(th) {
            void **ptr;
            void **esp=(void **)-1;
#ifdef LISP_FEATURE_SB_THREAD
            long i,free;
            if(th==arch_os_get_current_thread()) {
                /* Somebody is going to burn in hell for this, but casting
                 * it in two steps shuts gcc up about strict aliasing. */
                esp = (void **)((void *)&raise);
            } else {
                void **esp1;
                free=fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
                for(i=free-1;i>=0;i--) {
                    os_context_t *c=th->interrupt_contexts[i];
                    esp1 = (void **) *os_context_register_addr(c,reg_SP);
                    if (esp1>=(void **)th->control_stack_start &&
                        esp1<(void **)th->control_stack_end) {
                        if(esp1<esp) esp=esp1;
                        preserve_context_registers(c);
                    }
                }
            }
#else
            esp = (void **)((void *)&raise);
#endif
            for (ptr = ((void **)th->control_stack_end)-1; ptr >= esp; ptr--) {
                preserve_pointer(*ptr);
            }
        }
    }
#endif

    if (gencgc_verbose > 1) {
        long num_dont_move_pages = count_dont_move_pages();
        fprintf(stderr,
                "/non-movable pages due to conservative pointers = %d (%d bytes)\n",
                num_dont_move_pages,
                npage_bytes(num_dont_move_pages));
    }

    /* Scavenge all the rest of the roots. */

#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
    /*
     * If not x86, we need to scavenge the interrupt context(s) and the
     * control stack.
     */
    scavenge_interrupt_contexts();
    scavenge_control_stack();
#endif

    /* Scavenge the Lisp functions of the interrupt handlers, taking
     * care to avoid SIG_DFL and SIG_IGN. */
    for (i = 0; i < NSIG; i++) {
        union interrupt_handler handler = interrupt_handlers[i];
        if (!ARE_SAME_HANDLER(handler.c, SIG_IGN) &&
            !ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
            scavenge((lispobj *)(interrupt_handlers + i), 1);
        }
    }
    /* Scavenge the binding stacks. */
    {
        struct thread *th;
        for_each_thread(th) {
            long len= (lispobj *)get_binding_stack_pointer(th) -
                th->binding_stack_start;
            scavenge((lispobj *) th->binding_stack_start,len);
#ifdef LISP_FEATURE_SB_THREAD
            /* do the tls as well */
            len=fixnum_value(SymbolValue(FREE_TLS_INDEX,0)) -
                (sizeof (struct thread))/(sizeof (lispobj));
            scavenge((lispobj *) (th+1),len);
#endif
        }
    }

    /* The original CMU CL code had scavenge-read-only-space code
     * controlled by the Lisp-level variable
     * *SCAVENGE-READ-ONLY-SPACE*. It was disabled by default, and it
     * wasn't documented under what circumstances it was useful or
     * safe to turn it on, so it's been turned off in SBCL. If you
     * want/need this functionality, and can test and document it,
     * please submit a patch. */
#if 0
    if (SymbolValue(SCAVENGE_READ_ONLY_SPACE) != NIL) {
        unsigned long read_only_space_size =
            (lispobj *)SymbolValue(READ_ONLY_SPACE_FREE_POINTER) -
            (lispobj *)READ_ONLY_SPACE_START;
        FSHOW((stderr,
               "/scavenge read only space: %d bytes\n",
               read_only_space_size * sizeof(lispobj)));
        scavenge( (lispobj *) READ_ONLY_SPACE_START, read_only_space_size);
    }
#endif

    /* Scavenge static space. */
    static_space_size =
        (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0) -
        (lispobj *)STATIC_SPACE_START;
    if (gencgc_verbose > 1) {
        FSHOW((stderr,
               "/scavenge static space: %d bytes\n",
               static_space_size * sizeof(lispobj)));
    }
    scavenge( (lispobj *) STATIC_SPACE_START, static_space_size);

    /* All generations but the generation being GCed need to be
     * scavenged. The new_space generation needs special handling as
     * objects may be moved in - it is handled separately below. */
    scavenge_generations(generation+1, PSEUDO_STATIC_GENERATION);

    /* Finally scavenge the new_space generation. Keep going until no
     * more objects are moved into the new generation */
    scavenge_newspace_generation(new_space);

    /* FIXME: I tried reenabling this check when debugging unrelated
     * GC weirdness ca. sbcl-0.6.12.45, and it failed immediately.
     * Since the current GC code seems to work well, I'm guessing that
     * this debugging code is just stale, but I haven't tried to
     * figure it out. It should be figured out and then either made to
     * work or just deleted. */
#define RESCAN_CHECK 0
#if RESCAN_CHECK
    /* As a check re-scavenge the newspace once; no new objects should
     * be found. */
    {
        long old_bytes_allocated = bytes_allocated;
        long new_bytes_allocated;

        /* Start with a full scavenge. */
        scavenge_newspace_generation_one_scan(new_space);

        /* Flush the current regions, updating the tables. */
        gc_alloc_update_all_page_tables();

        new_bytes_allocated = bytes_allocated - old_bytes_allocated;

        if (new_bytes_allocated != 0) {
            lose("Rescan of new_space allocated %d more bytes.\n",
                 new_bytes_allocated);
        }
    }
#endif

    scan_weak_hash_tables();
    scan_weak_pointers();

    /* Flush the current regions, updating the tables. */
    gc_alloc_update_all_page_tables();

    /* Free the pages in oldspace, but not those marked dont_move. */
    bytes_freed = free_oldspace();

    /* If the GC is not raising the age then lower the generation back
     * to its normal generation number */
    if (!raise) {
        for (i = 0; i < last_free_page; i++)
            if ((page_table[i].bytes_used != 0)
                && (page_table[i].gen == SCRATCH_GENERATION))
                page_table[i].gen = generation;
        gc_assert(generations[generation].bytes_allocated == 0);
        generations[generation].bytes_allocated =
            generations[SCRATCH_GENERATION].bytes_allocated;
        generations[SCRATCH_GENERATION].bytes_allocated = 0;
    }

    /* Reset the alloc_start_page for generation. */
    generations[generation].alloc_start_page = 0;
    generations[generation].alloc_unboxed_start_page = 0;
    generations[generation].alloc_large_start_page = 0;
    generations[generation].alloc_large_unboxed_start_page = 0;

    if (generation >= verify_gens) {
        if (gencgc_verbose) {
            SHOW("verifying");
        }
        verify_gc();
        verify_dynamic_space();
    }

    /* Set the new gc trigger for the GCed generation. */
    generations[generation].gc_trigger =
        generations[generation].bytes_allocated
        + generations[generation].bytes_consed_between_gc;

    if (raise)
        generations[generation].num_gc = 0;
    else
        ++generations[generation].num_gc;

#ifdef LUTEX_WIDETAG
    reap_lutexes(generation);
    if (raise)
        move_lutexes(generation, generation+1);
#endif
}
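
/* A sketch of the raise/scratch choice made above (illustrative only,
 * not called anywhere): a raising GC evacuates survivors directly
 * into the next older generation, while a non-raising GC still needs
 * an empty to-space, so it borrows SCRATCH_GENERATION and renumbers
 * the surviving pages back to "generation" afterwards. */
#if 0
static generation_index_t
choose_new_space_example(generation_index_t from_gen, int raise)
{
    return raise ? from_gen + 1 : SCRATCH_GENERATION;
}
#endif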
/* Update last_free_page, then SymbolValue(ALLOCATION_POINTER). */
long
update_dynamic_space_free_pointer(void)
{
    page_index_t last_page = -1, i;

    for (i = 0; i < last_free_page; i++)
        if (page_allocated_p(i) && (page_table[i].bytes_used != 0))
            last_page = i;

    last_free_page = last_page+1;

    set_alloc_pointer((lispobj)(page_address(last_free_page)));
    return 0; /* dummy value: return something ... */
}
static void
remap_free_pages (page_index_t from, page_index_t to)
{
    page_index_t first_page, last_page;

    for (first_page = from; first_page <= to; first_page++) {
        if (page_allocated_p(first_page) ||
            (page_table[first_page].need_to_zero == 0)) {
            continue;
        }

        last_page = first_page + 1;
        while (page_free_p(last_page) &&
               (last_page < to) &&
               (page_table[last_page].need_to_zero == 1)) {
            last_page++;
        }

        /* There's a mysterious Solaris/x86 problem with using mmap
         * tricks for memory zeroing. See sbcl-devel thread
         * "Re: patch: standalone executable redux".
         */
#if defined(LISP_FEATURE_SUNOS)
        zero_pages(first_page, last_page-1);
#else
        zero_pages_with_mmap(first_page, last_page-1);
#endif

        first_page = last_page;
    }
}
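
/* What zero_pages_with_mmap() relies on, sketched under POSIX-ish
 * assumptions (the real helper lives elsewhere in the runtime, and
 * the helper name below is hypothetical): mapping fresh anonymous
 * memory over a range both zero-fills it and lets the OS reclaim the
 * old frames, which beats memset for long runs of free pages.
 * MAP_FIXED over an existing mapping atomically replaces it;
 * MAP_ANONYMOUS is spelled MAP_ANON on some BSDs. */
#if 0
#include <sys/mman.h>

static void
zero_range_by_remap_example(void *start, size_t length)
{
    void *addr = mmap(start, length, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    if (addr != start)
        lose("remap example: mmap gave %p, wanted %p\n", addr, start);
}
#endif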
generation_index_t small_generation_limit = 1;

/* GC all generations newer than last_gen, raising the objects in each
 * to the next older generation - we finish when all generations below
 * last_gen are empty. Then if last_gen is due for a GC, or if
 * last_gen==NUM_GENERATIONS (the scratch generation? eh?) we GC that
 * too. The valid range for last_gen is: 0,1,...,NUM_GENERATIONS.
 *
 * We stop collecting at gencgc_oldest_gen_to_gc, even if this is less than
 * last_gen (oh, and note that by default it is NUM_GENERATIONS-1) */
void
collect_garbage(generation_index_t last_gen)
{
    generation_index_t gen = 0, i;
    int raise;
    int gen_to_wp;
    /* The largest value of last_free_page seen since the time
     * remap_free_pages was called. */
    static page_index_t high_water_mark = 0;

    FSHOW((stderr, "/entering collect_garbage(%d)\n", last_gen));

    if (last_gen > HIGHEST_NORMAL_GENERATION+1) {
        FSHOW((stderr,
               "/collect_garbage: last_gen = %d, doing a level 0 GC\n",
               last_gen));
        last_gen = 0;
    }

    /* Flush the alloc regions updating the tables. */
    gc_alloc_update_all_page_tables();

    /* Verify the new objects created by Lisp code. */
    if (pre_verify_gen_0) {
        FSHOW((stderr, "pre-checking generation 0\n"));
        verify_generation(0);
    }

    if (gencgc_verbose > 1)
        print_generation_stats(0);

    do {
        /* Collect the generation. */

        if (gen >= gencgc_oldest_gen_to_gc) {
            /* Never raise the oldest generation. */
            raise = 0;
        } else {
            raise =
                (gen < last_gen)
                || (generations[gen].num_gc >= generations[gen].trigger_age);
        }

        if (gencgc_verbose > 1) {
            FSHOW((stderr,
                   "starting GC of generation %d with raise=%d alloc=%d trig=%d GCs=%d\n",
                   gen,
                   raise,
                   generations[gen].bytes_allocated,
                   generations[gen].gc_trigger,
                   generations[gen].num_gc));
        }

        /* If an older generation is being filled, then update its
         * memory age. */
        if (raise == 1) {
            generations[gen+1].cum_sum_bytes_allocated +=
                generations[gen+1].bytes_allocated;
        }

        garbage_collect_generation(gen, raise);

        /* Reset the memory age cum_sum. */
        generations[gen].cum_sum_bytes_allocated = 0;

        if (gencgc_verbose > 1) {
            FSHOW((stderr, "GC of generation %d finished:\n", gen));
            print_generation_stats(0);
        }

        gen++;
    } while ((gen <= gencgc_oldest_gen_to_gc)
             && ((gen < last_gen)
                 || ((gen <= gencgc_oldest_gen_to_gc)
                     && raise
                     && (generations[gen].bytes_allocated
                         > generations[gen].gc_trigger)
                     && (gen_av_mem_age(gen)
                         > generations[gen].min_av_mem_age))));

    /* Now if gen-1 was raised all generations before gen are empty.
     * If it wasn't raised then all generations before gen-1 are empty.
     *
     * Now objects within this gen's pages cannot point to younger
     * generations unless they are written to. This can be exploited
     * by write-protecting the pages of gen; then when younger
     * generations are GCed only the pages which have been written
     * need scanning. */
    if (raise)
        gen_to_wp = gen;
    else
        gen_to_wp = gen - 1;

    /* There's not much point in WPing pages in generation 0 as it is
     * never scavenged (except promoted pages). */
    if ((gen_to_wp > 0) && enable_page_protection) {
        /* Check that they are all empty. */
        for (i = 0; i < gen_to_wp; i++) {
            if (generations[i].bytes_allocated)
                lose("trying to write-protect gen. %d when gen. %d nonempty\n",
                     gen_to_wp, i);
        }
        write_protect_generation_pages(gen_to_wp);
    }

    /* Set gc_alloc() back to generation 0. The current regions should
     * be flushed after the above GCs. */
    gc_assert((boxed_region.free_pointer - boxed_region.start_addr) == 0);
    gc_alloc_generation = 0;

    /* Save the high-water mark before updating last_free_page */
    if (last_free_page > high_water_mark)
        high_water_mark = last_free_page;

    update_dynamic_space_free_pointer();

    auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
    if(gencgc_verbose)
        fprintf(stderr,"Next gc when %ld bytes have been consed\n",
                auto_gc_trigger);

    /* If we did a big GC (arbitrarily defined as gen > 1), release memory
     * back to the OS. */
    if (gen > small_generation_limit) {
        if (last_free_page > high_water_mark)
            high_water_mark = last_free_page;
        remap_free_pages(0, high_water_mark);
        high_water_mark = 0;
    }

    SHOW("returning from collect_garbage");
}
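
/* Call-pattern sketch (illustrative, not called anywhere): last_gen
 * names the oldest generation that must be collected. 0 asks for a
 * nursery collection; HIGHEST_NORMAL_GENERATION+1 collects every
 * normal generation, which is how gc_and_save() below uses it; and,
 * per the guard at the top of collect_garbage(), anything larger
 * falls back to a level 0 GC. */
#if 0
static void
collect_garbage_usage_example(void)
{
    collect_garbage(0);                           /* just the nursery */
    collect_garbage(HIGHEST_NORMAL_GENERATION+1); /* all normal gens */
}
#endif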
/* This is called by Lisp PURIFY when it is finished. All live objects
 * will have been moved to the RO and Static heaps. The dynamic space
 * will need a full re-initialization. We don't bother having Lisp
 * PURIFY flush the current gc_alloc() region, as the page_tables are
 * re-initialized, and every page is zeroed to be sure. */
void
gc_free_heap(void)
{
    page_index_t page;

    if (gencgc_verbose > 1)
        SHOW("entering gc_free_heap");

    for (page = 0; page < page_table_pages; page++) {
        /* Skip free pages which should already be zero filled. */
        if (page_allocated_p(page)) {
            void *page_start, *addr;

            /* Mark the page free. The other slots are assumed invalid
             * when it is a FREE_PAGE_FLAG and bytes_used is 0 and it
             * should not be write-protected -- except that the
             * generation is used for the current region but it sets
             * others to 0. */
            page_table[page].allocated = FREE_PAGE_FLAG;
            page_table[page].bytes_used = 0;

#ifndef LISP_FEATURE_WIN32 /* Pages already zeroed on win32? Not sure
                            * about this change. */
            /* Zero the page. */
            page_start = (void *)page_address(page);

            /* First, remove any write-protection. */
            os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
            page_table[page].write_protected = 0;

            os_invalidate(page_start,PAGE_BYTES);
            addr = os_validate(page_start,PAGE_BYTES);
            if (addr == NULL || addr != page_start) {
                lose("gc_free_heap: page moved, 0x%08x ==> 0x%08x\n",
                     page_start, addr);
            }
#else
            page_table[page].write_protected = 0;
#endif
        } else if (gencgc_zero_check_during_free_heap) {
            /* Double-check that the page is zero filled. */
            long *page_start;
            page_index_t i;
            gc_assert(page_free_p(page));
            gc_assert(page_table[page].bytes_used == 0);
            page_start = (long *)page_address(page);
            for (i=0; i<1024; i++) {
                if (page_start[i] != 0) {
                    lose("free region not zero at %x\n", page_start + i);
                }
            }
        }
    }

    bytes_allocated = 0;

    /* Initialize the generations. */
    for (page = 0; page < NUM_GENERATIONS; page++) {
        generations[page].alloc_start_page = 0;
        generations[page].alloc_unboxed_start_page = 0;
        generations[page].alloc_large_start_page = 0;
        generations[page].alloc_large_unboxed_start_page = 0;
        generations[page].bytes_allocated = 0;
        generations[page].gc_trigger = 2000000;
        generations[page].num_gc = 0;
        generations[page].cum_sum_bytes_allocated = 0;
        generations[page].lutexes = NULL;
    }

    if (gencgc_verbose > 1)
        print_generation_stats(0);

    /* Initialize gc_alloc(). */
    gc_alloc_generation = 0;

    gc_set_region_empty(&boxed_region);
    gc_set_region_empty(&unboxed_region);

    last_free_page = 0;
    set_alloc_pointer((lispobj)((char *)heap_base));

    if (verify_after_free_heap) {
        /* Check whether purify has left any bad pointers. */
        FSHOW((stderr, "checking after free_heap\n"));
        verify_gc();
    }
}
void
gc_init(void)
{
    page_index_t i;

    /* Compute the number of pages needed for the dynamic space.
     * Dynamic space size should be aligned on page size. */
    page_table_pages = dynamic_space_size/PAGE_BYTES;
    gc_assert(dynamic_space_size == npage_bytes(page_table_pages));

    page_table = calloc(page_table_pages, sizeof(struct page));
    gc_assert(page_table);

    gc_init_tables();
    scavtab[WEAK_POINTER_WIDETAG] = scav_weak_pointer;
    transother[SIMPLE_ARRAY_WIDETAG] = trans_boxed_large;

#ifdef LUTEX_WIDETAG
    scavtab[LUTEX_WIDETAG] = scav_lutex;
    transother[LUTEX_WIDETAG] = trans_lutex;
    sizetab[LUTEX_WIDETAG] = size_lutex;
#endif

    heap_base = (void*)DYNAMIC_SPACE_START;

    /* Initialize each page structure. */
    for (i = 0; i < page_table_pages; i++) {
        /* Initialize all pages as free. */
        page_table[i].allocated = FREE_PAGE_FLAG;
        page_table[i].bytes_used = 0;

        /* Pages are not write-protected at startup. */
        page_table[i].write_protected = 0;
    }

    bytes_allocated = 0;

    /* Initialize the generations.
     *
     * FIXME: very similar to code in gc_free_heap(), should be shared */
    for (i = 0; i < NUM_GENERATIONS; i++) {
        generations[i].alloc_start_page = 0;
        generations[i].alloc_unboxed_start_page = 0;
        generations[i].alloc_large_start_page = 0;
        generations[i].alloc_large_unboxed_start_page = 0;
        generations[i].bytes_allocated = 0;
        generations[i].gc_trigger = 2000000;
        generations[i].num_gc = 0;
        generations[i].cum_sum_bytes_allocated = 0;
        /* the tune-able parameters */
        generations[i].bytes_consed_between_gc = 2000000;
        generations[i].trigger_age = 1;
        generations[i].min_av_mem_age = 0.75;
        generations[i].lutexes = NULL;
    }

    /* Initialize gc_alloc. */
    gc_alloc_generation = 0;
    gc_set_region_empty(&boxed_region);
    gc_set_region_empty(&unboxed_region);

    last_free_page = 0;
}
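
/* Back-of-the-envelope sketch (illustrative, not called anywhere):
 * the calloc above sizes the page table at one struct page per
 * PAGE_BYTES of dynamic space, so GC metadata overhead scales
 * linearly with the configured heap size. */
#if 0
static size_t
page_table_overhead_example(void)
{
    return page_table_pages * sizeof(struct page);
}
#endif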
/* Pick up the dynamic space from after a core load.
 *
 * The ALLOCATION_POINTER points to the end of the dynamic space.
 */
static void
gencgc_pickup_dynamic(void)
{
    page_index_t page = 0;
    void *alloc_ptr = (void *)get_alloc_pointer();
    lispobj *prev=(lispobj *)page_address(page);
    generation_index_t gen = PSEUDO_STATIC_GENERATION;

    do {
        lispobj *first,*ptr= (lispobj *)page_address(page);
        page_table[page].allocated = BOXED_PAGE_FLAG;
        page_table[page].gen = gen;
        page_table[page].bytes_used = PAGE_BYTES;
        page_table[page].large_object = 0;
        page_table[page].write_protected = 0;
        page_table[page].write_protected_cleared = 0;
        page_table[page].dont_move = 0;
        page_table[page].need_to_zero = 1;

        if (!gencgc_partial_pickup) {
            first=gc_search_space(prev,(ptr+2)-prev,ptr);
            if(ptr == first) prev=ptr;
            page_table[page].region_start_offset =
                page_address(page) - (void *)prev;
        }
        page++;
    } while (page_address(page) < alloc_ptr);

#ifdef LUTEX_WIDETAG
    /* Lutexes have been registered in generation 0 by coreparse, and
     * need to be moved to the right one manually.
     */
    move_lutexes(0, PSEUDO_STATIC_GENERATION);
#endif

    last_free_page = page;

    generations[gen].bytes_allocated = npage_bytes(page);
    bytes_allocated = npage_bytes(page);

    gc_alloc_update_all_page_tables();
    write_protect_generation_pages(gen);
}

void
gc_initialize_pointers(void)
{
    gencgc_pickup_dynamic();
}
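
/* Illustrative reading of region_start_offset as set above (a
 * disabled sketch, not runtime code): it records the byte distance
 * from a page's start back to the start of its contiguous region, so
 * a scan can recover a valid scavenge origin from any page. */
#if 0
static void *
region_start_example(page_index_t page)
{
    return page_address(page) - page_table[page].region_start_offset;
}
#endif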
/* alloc(..) is the external interface for memory allocation. It
 * allocates to generation 0. It is not called from within the garbage
 * collector as it is only external uses that need the check for heap
 * size (GC trigger) and to disable the interrupts (interrupts are
 * always disabled during a GC).
 *
 * The vops that call alloc(..) assume that the returned space is zero-filled.
 * (E.g. the most significant word of a 2-word bignum in MOVE-FROM-UNSIGNED.)
 *
 * The check for a GC trigger is only performed when the current
 * region is full, so in most cases it's not needed. */

static inline lispobj *
general_alloc_internal(long nbytes, int page_type_flag,
                       struct alloc_region *region, struct thread *thread)
{
#ifndef LISP_FEATURE_WIN32
    lispobj alloc_signal;
#endif
    void *new_obj;
    void *new_free_pointer;

    gc_assert(nbytes>0);

    /* Check for alignment allocation problems. */
    gc_assert((((unsigned long)region->free_pointer & LOWTAG_MASK) == 0)
              && ((nbytes & LOWTAG_MASK) == 0));

    /* Must be inside a PA section. */
    gc_assert(get_pseudo_atomic_atomic(thread));

    /* maybe we can do this quickly ... */
    new_free_pointer = region->free_pointer + nbytes;
    if (new_free_pointer <= region->end_addr) {
        new_obj = (void*)(region->free_pointer);
        region->free_pointer = new_free_pointer;
        return(new_obj);        /* yup */
    }

    /* we have to go the long way around, it seems. Check whether we
     * should GC in the near future
     */
    if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) {
        /* Don't flood the system with interrupts if the need to gc is
         * already noted. This can happen for example when SUB-GC
         * allocates or after a gc triggered in a WITHOUT-GCING. */
        if (SymbolValue(GC_PENDING,thread) == NIL) {
            /* set things up so that GC happens when we finish the PA
             * section */
            SetSymbolValue(GC_PENDING,T,thread);
            if (SymbolValue(GC_INHIBIT,thread) == NIL)
                set_pseudo_atomic_interrupted(thread);
        }
    }
    new_obj = gc_alloc_with_region(nbytes, page_type_flag, region, 0);

#ifndef LISP_FEATURE_WIN32
    alloc_signal = SymbolValue(ALLOC_SIGNAL,thread);
    if ((alloc_signal & FIXNUM_TAG_MASK) == 0) {
        if ((signed long) alloc_signal <= 0) {
            SetSymbolValue(ALLOC_SIGNAL, T, thread);
#ifdef LISP_FEATURE_SB_THREAD
            kill_thread_safely(thread->os_thread, SIGPROF);
#else
            raise(SIGPROF);
#endif
        } else {
            SetSymbolValue(ALLOC_SIGNAL,
                           alloc_signal - (1 << N_FIXNUM_TAG_BITS),
                           thread);
        }
    }
#endif

    return (new_obj);
}
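
/* The fast path above is a plain bump allocator. A disabled sketch of
 * the same two steps on their own, with the miss case made explicit
 * (in the real code the miss falls through to
 * gc_alloc_with_region()): */
#if 0
static void *
bump_alloc_example(struct alloc_region *region, long nbytes)
{
    void *new_free_pointer = region->free_pointer + nbytes;

    if (new_free_pointer <= region->end_addr) {
        void *new_obj = region->free_pointer;
        region->free_pointer = new_free_pointer;
        return new_obj;     /* hit: no locks, no page table updates */
    }
    return NULL;            /* miss: the region must be refilled */
}
#endif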
lispobj *
general_alloc(long nbytes, int page_type_flag)
{
    struct thread *thread = arch_os_get_current_thread();
    /* Select correct region, and call general_alloc_internal with it.
     * For other than boxed allocation we must lock first, since the
     * region is shared. */
    if (BOXED_PAGE_FLAG & page_type_flag) {
#ifdef LISP_FEATURE_SB_THREAD
        struct alloc_region *region =
            (thread ? &(thread->alloc_region) : &boxed_region);
#else
        struct alloc_region *region = &boxed_region;
#endif
        return general_alloc_internal(nbytes, page_type_flag, region, thread);
    } else if (UNBOXED_PAGE_FLAG == page_type_flag) {
        lispobj *obj;
        gc_assert(0 == thread_mutex_lock(&allocation_lock));
        obj = general_alloc_internal(nbytes, page_type_flag,
                                     &unboxed_region, thread);
        gc_assert(0 == thread_mutex_unlock(&allocation_lock));
        return obj;
    } else {
        lose("bad page type flag: %d", page_type_flag);
    }
}

lispobj *
alloc(long nbytes)
{
    return general_alloc(nbytes, BOXED_PAGE_FLAG);
}
/*
 * shared support for the OS-dependent signal handlers which
 * catch GENCGC-related write-protect violations
 */
void unhandled_sigmemoryfault(void* addr);

/* Depending on which OS we're running under, different signals might
 * be raised for a violation of write protection in the heap. This
 * function factors out the common generational GC magic which needs
 * to be invoked in this case, and should be called from whatever
 * signal handler is appropriate for the OS we're running under.
 *
 * Return true if this signal is a normal generational GC thing that
 * we were able to handle, or false if it was abnormal and control
 * should fall through to the general SIGSEGV/SIGBUS/whatever logic. */
int
gencgc_handle_wp_violation(void* fault_addr)
{
    page_index_t page_index = find_page_index(fault_addr);

#ifdef QSHOW_SIGNALS
    FSHOW((stderr, "heap WP violation? fault_addr=%x, page_index=%d\n",
           fault_addr, page_index));
#endif

    /* Check whether the fault is within the dynamic space. */
    if (page_index == (-1)) {

        /* It can be helpful to be able to put a breakpoint on this
         * case to help diagnose low-level problems. */
        unhandled_sigmemoryfault(fault_addr);

        /* not within the dynamic space -- not our responsibility */
        return 0;

    } else {
        if (page_table[page_index].write_protected) {
            /* Unprotect the page. */
            os_protect(page_address(page_index), PAGE_BYTES, OS_VM_PROT_ALL);
            page_table[page_index].write_protected_cleared = 1;
            page_table[page_index].write_protected = 0;
        } else {
            /* The only acceptable reason for this signal on a heap
             * access is that GENCGC write-protected the page.
             * However, if two CPUs hit a wp page near-simultaneously,
             * we had better not have the second one lose here if it
             * does this test after the first one has already set wp=0
             */
            if(page_table[page_index].write_protected_cleared != 1)
                lose("fault in heap page %d not marked as write-protected\nboxed_region.first_page: %d, boxed_region.last_page %d\n",
                     page_index, boxed_region.first_page,
                     boxed_region.last_page);
        }
        /* Don't worry, we can handle it. */
        return 1;
    }
}

/* This is to be called when we catch a SIGSEGV/SIGBUS, determine that
 * it's not just a case of the program hitting the write barrier, and
 * are about to let Lisp deal with it. It's basically just a
 * convenient place to set a gdb breakpoint. */
void
unhandled_sigmemoryfault(void *addr)
{}
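
/* How an OS layer typically drives the hook above (a sketch; the real
 * handlers live in the platform's *-os.c files and differ in signal
 * number and siginfo details). Assumes a POSIX SA_SIGINFO-style
 * handler; the handler name is hypothetical. */
#if 0
#include <signal.h>

static void
example_sigsegv_handler(int sig, siginfo_t *info, void *context)
{
    if (gencgc_handle_wp_violation(info->si_addr))
        return;     /* just the write barrier: page is now unprotected */
    /* otherwise fall through to the generic fault handling ... */
}
#endif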
void gc_alloc_update_all_page_tables(void)
{
    /* Flush the alloc regions updating the tables. */
    struct thread *th;
    for_each_thread(th)
        gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->alloc_region);
    gc_alloc_update_page_tables(UNBOXED_PAGE_FLAG, &unboxed_region);
    gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &boxed_region);
}

void
gc_set_region_empty(struct alloc_region *region)
{
    region->first_page = 0;
    region->last_page = -1;
    region->start_addr = page_address(0);
    region->free_pointer = page_address(0);
    region->end_addr = page_address(0);
}

static void
zero_all_free_pages()
{
    page_index_t i;

    for (i = 0; i < last_free_page; i++) {
        if (page_free_p(i)) {
#ifdef READ_PROTECT_FREE_PAGES
            os_protect(page_address(i),
                       PAGE_BYTES,
                       OS_VM_PROT_ALL);
#endif
            zero_pages(i, i);
        }
    }
}
/* Things to do before doing a final GC before saving a core (without
 * running Lisp code):
 *
 * + Pages in large_object pages aren't moved by the GC, so we need to
 *   unset that flag from all pages.
 * + The pseudo-static generation isn't normally collected, but it seems
 *   reasonable to collect it at least when saving a core. So move the
 *   pages to a normal generation.
 */
static void
prepare_for_final_gc ()
{
    page_index_t i;

    for (i = 0; i < last_free_page; i++) {
        page_table[i].large_object = 0;
        if (page_table[i].gen == PSEUDO_STATIC_GENERATION) {
            int used = page_table[i].bytes_used;
            page_table[i].gen = HIGHEST_NORMAL_GENERATION;
            generations[PSEUDO_STATIC_GENERATION].bytes_allocated -= used;
            generations[HIGHEST_NORMAL_GENERATION].bytes_allocated += used;
        }
    }
}
/* Do a non-conservative GC, and then save a core with the initial
 * function being set to the value of the static symbol
 * SB!VM:RESTART-LISP-FUNCTION */
void
gc_and_save(char *filename, boolean prepend_runtime,
            boolean save_runtime_options)
{
    FILE *file;
    void *runtime_bytes = NULL;
    size_t runtime_size;

    file = prepare_to_save(filename, prepend_runtime, &runtime_bytes,
                           &runtime_size);
    if (file == NULL)
        return;

    conservative_stack = 0;

    /* The filename might come from Lisp, and be moved by the now
     * non-conservative GC. */
    filename = strdup(filename);

    /* Collect twice: once into relatively high memory, and then back
     * into low memory. This compacts the retained data into the lower
     * pages, minimizing the size of the core file.
     */
    prepare_for_final_gc();
    gencgc_alloc_start_page = last_free_page;
    collect_garbage(HIGHEST_NORMAL_GENERATION+1);

    prepare_for_final_gc();
    gencgc_alloc_start_page = -1;
    collect_garbage(HIGHEST_NORMAL_GENERATION+1);

    if (prepend_runtime)
        save_runtime_to_filehandle(file, runtime_bytes, runtime_size);

    /* The dumper doesn't know that pages need to be zeroed before use. */
    zero_all_free_pages();
    save_to_filehandle(file, filename, SymbolValue(RESTART_LISP_FUNCTION,0),
                       prepend_runtime, save_runtime_options);
    /* Oops. Save still managed to fail. Since we've mangled the stack
     * beyond hope, there's not much we can do.
     * (beyond FUNCALLing RESTART_LISP_FUNCTION, but I suspect that's
     * going to be rather unsatisfactory too... */
    lose("Attempt to save core after non-conservative GC failed.\n");
}
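
/* Call-pattern sketch (illustrative, not called anywhere): the Lisp
 * side's save entry point is the expected caller, after other threads
 * are gone. On success save_to_filehandle() does not return, which is
 * why falling off the end above is a lose(). */
#if 0
static void
save_core_example(void)
{
    gc_and_save("sbcl.core", 0, 0);   /* never returns on success */
}
#endif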