/*
 * GENerational Conservative Garbage Collector for SBCL
 */

/*
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 */

/*
 * For a review of garbage collection techniques (e.g. generational
 * GC) and terminology (e.g. "scavenging") see Paul R. Wilson,
 * "Uniprocessor Garbage Collection Techniques". As of 20000618, this
 * had been accepted for _ACM Computing Surveys_ and was available
 * as a PostScript preprint through
 *   <http://www.cs.utexas.edu/users/oops/papers.html>
 * as
 *   <ftp://ftp.cs.utexas.edu/pub/garbage/bigsurv.ps>.
 */
#if defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD)
#include "pthreads_win32.h"
#endif

#include "interrupt.h"
#include "gc-internal.h"
#include "pseudo-atomic.h"
#include "genesis/vector.h"
#include "genesis/weak-pointer.h"
#include "genesis/fdefn.h"
#include "genesis/simple-fun.h"
#include "genesis/hash-table.h"
#include "genesis/instance.h"
#include "genesis/layout.h"

#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
#include "genesis/cons.h"
#endif
/* forward declarations */
page_index_t gc_find_freeish_pages(page_index_t *restart_page_ptr, sword_t nbytes,
                                   int page_type_flag);
/* As usually configured, generations 0-5 are normal collected generations,
   6 is pseudo-static (the objects in which are never moved nor reclaimed),
   and 7 is scratch space used when collecting a generation without promotion,
   wherein the generation being collected is moved to generation 7 and back
   again.
 */
enum {
    SCRATCH_GENERATION = PSEUDO_STATIC_GENERATION+1,
    NUM_GENERATIONS
};
/* Should we use page protection to help avoid the scavenging of pages
 * that don't have pointers to younger generations? */
boolean enable_page_protection = 1;
/* the minimum size (in bytes) for a large object */
/* NB this logic is unfortunately copied in 'compiler/x86-64/macros.lisp' */
#if (GENCGC_ALLOC_GRANULARITY >= PAGE_BYTES) && (GENCGC_ALLOC_GRANULARITY >= GENCGC_CARD_BYTES)
os_vm_size_t large_object_size = 4 * GENCGC_ALLOC_GRANULARITY;
#elif (GENCGC_CARD_BYTES >= PAGE_BYTES) && (GENCGC_CARD_BYTES >= GENCGC_ALLOC_GRANULARITY)
os_vm_size_t large_object_size = 4 * GENCGC_CARD_BYTES;
#else
os_vm_size_t large_object_size = 4 * PAGE_BYTES;
#endif
/* Largest allocation seen since last GC. */
os_vm_size_t large_allocation = 0;
/* the verbosity level. All non-error messages are disabled at level 0;
 * and only a few rare messages are printed at level 1. */
#if QSHOW == 2
boolean gencgc_verbose = 1;
#else
boolean gencgc_verbose = 0;
#endif
/* FIXME: At some point enable the various error-checking things below
 * and see what they say. */

/* We hunt for pointers to old-space, when GCing generations >= verify_gens.
 * Set verify_gens to HIGHEST_NORMAL_GENERATION + 1 to disable this kind of
 * check. */
generation_index_t verify_gens = HIGHEST_NORMAL_GENERATION + 1;
/* Should we do a pre-scan verify of generation 0 before it's GCed? */
boolean pre_verify_gen_0 = 0;

/* Should we check for bad pointers after gc_free_heap is called
 * from Lisp PURIFY? */
boolean verify_after_free_heap = 0;

/* Should we print a note when code objects are found in the dynamic space
 * during a heap verify? */
boolean verify_dynamic_code_check = 0;

#ifdef LISP_FEATURE_X86
/* Should we check code objects for fixup errors after they are transported? */
boolean check_code_fixups = 0;
#endif

/* Should we check that newly allocated regions are zero filled? */
boolean gencgc_zero_check = 0;

/* Should we check that the free space is zero filled? */
boolean gencgc_enable_verify_zero_fill = 0;

/* Should we check that free pages are zero filled during gc_free_heap
 * called after Lisp PURIFY? */
boolean gencgc_zero_check_during_free_heap = 0;
/* When loading a core, don't do a full scan of the memory for the
 * memory region boundaries. (Set to true by coreparse.c if the core
 * contained a pagetable entry.)
 */
boolean gencgc_partial_pickup = 0;

/* If defined, free pages are read-protected to ensure that nothing
 * accesses them. */

/* #define READ_PROTECT_FREE_PAGES */
/*
 * GC structures and variables
 */

/* the total bytes allocated. These are seen by Lisp DYNAMIC-USAGE. */
os_vm_size_t bytes_allocated = 0;
os_vm_size_t auto_gc_trigger = 0;
/* the source and destination generations. These are set before a GC starts
 * scavenging. */
generation_index_t from_space;
generation_index_t new_space;
/* Set to 1 when in GC */
boolean gc_active_p = 0;

/* Should the GC be conservative on the stack? If false (only right before
 * saving a core), don't scan the stack / mark pages dont_move. */
static boolean conservative_stack = 1;
/* An array of page structures is allocated on gc initialization.
 * This helps to quickly map between an address and its page structure.
 * page_table_pages is set from the size of the dynamic space. */
page_index_t page_table_pages;
struct page *page_table;
/* In GC cards that have conservative pointers to them, should we wipe out
 * dwords in there that are not used, so that they do not act as false
 * roots to other things in the heap from then on? This is a new feature,
 * but in testing it has been both reliable and caused no noticeable
 * slowdown. */

/* a value that we use to wipe out unused words in GC cards that
 * live alongside conservatively pointed-to words. */
const lispobj wipe_with = 0;
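/* Illustrative sketch only (not code used by the collector; `dword_is_live'
 * is a hypothetical helper standing in for whatever per-card liveness
 * bookkeeping is consulted): the wiping described above amounts roughly to
 * overwriting every unused word of a conservatively pinned card with
 * wipe_with, e.g.
 *
 *     lispobj *p   = (lispobj*)page_address(page);
 *     lispobj *end = (lispobj*)((char*)p + GENCGC_CARD_BYTES);
 *     for ( ; p < end; p++)
 *         if (!dword_is_live(page, p))
 *             *p = wipe_with;   // can no longer act as a false root
 */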
static inline boolean
page_allocated_p(page_index_t page) {
    return (page_table[page].allocated != FREE_PAGE_FLAG);
}

static inline boolean
page_no_region_p(page_index_t page) {
    return !(page_table[page].allocated & OPEN_REGION_PAGE_FLAG);
}

static inline boolean
page_allocated_no_region_p(page_index_t page) {
    return ((page_table[page].allocated & (UNBOXED_PAGE_FLAG | BOXED_PAGE_FLAG))
            && page_no_region_p(page));
}

static inline boolean
page_free_p(page_index_t page) {
    return (page_table[page].allocated == FREE_PAGE_FLAG);
}

static inline boolean
page_boxed_p(page_index_t page) {
    return (page_table[page].allocated & BOXED_PAGE_FLAG);
}

static inline boolean
page_boxed_no_region_p(page_index_t page) {
    return page_boxed_p(page) && page_no_region_p(page);
}

static inline boolean
page_unboxed_p(page_index_t page) {
    /* Both flags set == boxed code page */
    return ((page_table[page].allocated & UNBOXED_PAGE_FLAG)
            && !page_boxed_p(page));
}

static inline boolean
protect_page_p(page_index_t page, generation_index_t generation) {
    return (page_boxed_no_region_p(page)
            && (page_table[page].bytes_used != 0)
            && !page_table[page].dont_move
            && (page_table[page].gen == generation));
}
/* To map addresses to page structures the address of the first page
 * is needed. */
void *heap_base = NULL;

/* Calculate the start address for the given page number. */
inline void *
page_address(page_index_t page_num)
{
    return (heap_base + (page_num * GENCGC_CARD_BYTES));
}

/* Calculate the address where the allocation region associated with
 * the page starts. */
static inline void *
page_scan_start(page_index_t page_index)
{
    return page_address(page_index) - page_table[page_index].scan_start_offset;
}
/* True if the page starts a contiguous block. */
static inline boolean
page_starts_contiguous_block_p(page_index_t page_index)
{
    return page_table[page_index].scan_start_offset == 0;
}
260 static inline boolean
261 page_ends_contiguous_block_p(page_index_t page_index
, generation_index_t gen
)
263 return (/* page doesn't fill block */
264 (page_table
[page_index
].bytes_used
< GENCGC_CARD_BYTES
)
265 /* page is last allocated page */
266 || ((page_index
+ 1) >= last_free_page
)
268 || page_free_p(page_index
+ 1)
269 /* next page contains no data */
270 || (page_table
[page_index
+ 1].bytes_used
== 0)
271 /* next page is in different generation */
272 || (page_table
[page_index
+ 1].gen
!= gen
)
273 /* next page starts its own contiguous block */
274 || (page_starts_contiguous_block_p(page_index
+ 1)));
/* Find the page index within the page_table for the given
 * address. Return -1 on failure. */
page_index_t
find_page_index(void *addr)
{
    if (addr >= heap_base) {
        page_index_t index = ((pointer_sized_uint_t)addr -
                              (pointer_sized_uint_t)heap_base) / GENCGC_CARD_BYTES;
        if (index < page_table_pages)
            return (index);
    }
    return (-1);
}
static inline os_vm_size_t
npage_bytes(page_index_t npages)
{
    gc_assert(npages >= 0);
    return ((os_vm_size_t)npages)*GENCGC_CARD_BYTES;
}
/* Check that X is a higher address than Y and return offset from Y to
 * X. */
static inline os_vm_size_t
void_diff(void *x, void *y)
{
    gc_assert(x >= y);
    return (pointer_sized_uint_t)x - (pointer_sized_uint_t)y;
}
/* a structure to hold the state of a generation
 *
 * CAUTION: If you modify this, make sure to touch up the alien
 * definition in src/code/gc.lisp accordingly. ...or better yet,
 * deal with the FIXME there...
 */
struct generation {

    /* the first page that gc_alloc() checks on its next call */
    page_index_t alloc_start_page;

    /* the first page that gc_alloc_unboxed() checks on its next call */
    page_index_t alloc_unboxed_start_page;

    /* the first page that gc_alloc_large (boxed) considers on its next
     * call. (Although it always allocates after the boxed_region.) */
    page_index_t alloc_large_start_page;

    /* the first page that gc_alloc_large (unboxed) considers on its
     * next call. (Although it always allocates after the
     * current_unboxed_region.) */
    page_index_t alloc_large_unboxed_start_page;

    /* the bytes allocated to this generation */
    os_vm_size_t bytes_allocated;

    /* the number of bytes at which to trigger a GC */
    os_vm_size_t gc_trigger;

    /* to calculate a new level for gc_trigger */
    os_vm_size_t bytes_consed_between_gc;

    /* the number of GCs since the last raise */
    int num_gc;

    /* the number of GCs to run on the generations before raising objects to
     * the next generation */
    int number_of_gcs_before_promotion;

    /* the cumulative sum of the bytes allocated to this generation. It is
     * cleared after a GC of this generation, and updated before new
     * objects are added from a GC of a younger generation. Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC. */
    os_vm_size_t cum_sum_bytes_allocated;

    /* a minimum average memory age before a GC will occur helps
     * prevent a GC when a large number of new live objects have been
     * added, in which case a GC could be a waste of time */
    double minimum_age_before_gc;
};
/* an array of generation structures. There needs to be one more
 * generation structure than actual generations as the oldest
 * generation is temporarily raised then lowered. */
struct generation generations[NUM_GENERATIONS];
/* the oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... HIGHEST_NORMAL_GENERATION
 *
 * The default of HIGHEST_NORMAL_GENERATION enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided. */
generation_index_t gencgc_oldest_gen_to_gc = HIGHEST_NORMAL_GENERATION;
/* The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the Lisp code. */
page_index_t last_free_page;
#ifdef LISP_FEATURE_SB_THREAD
/* This lock is to prevent multiple threads from simultaneously
 * allocating new regions which overlap each other. Note that the
 * majority of GC is single-threaded, but alloc() may be called from
 * >1 thread at a time and must be thread-safe. This lock must be
 * seized before all accesses to generations[] or to parts of
 * page_table[] that other threads may want to see */
static pthread_mutex_t free_pages_lock = PTHREAD_MUTEX_INITIALIZER;
/* This lock is used to protect non-thread-local allocation. */
static pthread_mutex_t allocation_lock = PTHREAD_MUTEX_INITIALIZER;
#endif
extern os_vm_size_t gencgc_release_granularity;
os_vm_size_t gencgc_release_granularity = GENCGC_RELEASE_GRANULARITY;

extern os_vm_size_t gencgc_alloc_granularity;
os_vm_size_t gencgc_alloc_granularity = GENCGC_ALLOC_GRANULARITY;
/*
 * miscellaneous heap functions
 */

/* Count the number of pages which are write-protected within the
 * given generation. */
static page_index_t
count_write_protect_generation_pages(generation_index_t generation)
{
    page_index_t i, count = 0;

    for (i = 0; i < last_free_page; i++)
        if (page_allocated_p(i)
            && (page_table[i].gen == generation)
            && (page_table[i].write_protected == 1))
            count++;
    return count;
}
/* Count the number of pages within the given generation. */
static page_index_t
count_generation_pages(generation_index_t generation)
{
    page_index_t i;
    page_index_t count = 0;

    for (i = 0; i < last_free_page; i++)
        if (page_allocated_p(i)
            && (page_table[i].gen == generation))
            count++;
    return count;
}
static page_index_t
count_dont_move_pages(void)
{
    page_index_t i;
    page_index_t count = 0;
    for (i = 0; i < last_free_page; i++) {
        if (page_allocated_p(i)
            && (page_table[i].dont_move != 0)) {
            ++count;
        }
    }
    return count;
}
/* Work through the pages and add up the number of bytes used for the
 * given generation. */
static os_vm_size_t
count_generation_bytes_allocated (generation_index_t gen)
{
    page_index_t i;
    os_vm_size_t result = 0;
    for (i = 0; i < last_free_page; i++) {
        if (page_allocated_p(i)
            && (page_table[i].gen == gen))
            result += page_table[i].bytes_used;
    }
    return result;
}
/* Return the average age of the memory in a generation. */
double
generation_average_age(generation_index_t gen)
{
    if (generations[gen].bytes_allocated == 0)
        return 0.0;

    return
        ((double)generations[gen].cum_sum_bytes_allocated)
        / ((double)generations[gen].bytes_allocated);
}
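/* Worked example with made-up numbers: if a generation currently holds
 * bytes_allocated = 2MB while cum_sum_bytes_allocated = 6MB has accumulated
 * since its last collection, the average age reported here is 6/2 = 3.0,
 * i.e. the memory in this generation has on average been counted across
 * three younger-generation GCs since the generation was last collected. */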
480 write_generation_stats(FILE *file
)
482 generation_index_t i
;
484 #if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
485 #define FPU_STATE_SIZE 27
486 int fpu_state
[FPU_STATE_SIZE
];
487 #elif defined(LISP_FEATURE_PPC)
488 #define FPU_STATE_SIZE 32
489 long long fpu_state
[FPU_STATE_SIZE
];
490 #elif defined(LISP_FEATURE_SPARC)
492 * 32 (single-precision) FP registers, and the FP state register.
493 * But Sparc V9 has 32 double-precision registers (equivalent to 64
494 * single-precision, but can't be accessed), so we leave enough room
497 #define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
498 long long fpu_state
[FPU_STATE_SIZE
];
499 #elif defined(LISP_FEATURE_ARM)
500 #define FPU_STATE_SIZE 8
501 long long fpu_state
[FPU_STATE_SIZE
];
504 /* This code uses the FP instructions which may be set up for Lisp
505 * so they need to be saved and reset for C. */
508 /* Print the heap stats. */
510 " Gen StaPg UbSta LaSta LUbSt Boxed Unboxed LB LUB !move Alloc Waste Trig WP GCs Mem-age\n");
512 for (i
= 0; i
< SCRATCH_GENERATION
; i
++) {
514 page_index_t boxed_cnt
= 0;
515 page_index_t unboxed_cnt
= 0;
516 page_index_t large_boxed_cnt
= 0;
517 page_index_t large_unboxed_cnt
= 0;
518 page_index_t pinned_cnt
=0;
520 for (j
= 0; j
< last_free_page
; j
++)
521 if (page_table
[j
].gen
== i
) {
523 /* Count the number of boxed pages within the given
525 if (page_boxed_p(j
)) {
526 if (page_table
[j
].large_object
)
531 if(page_table
[j
].dont_move
) pinned_cnt
++;
532 /* Count the number of unboxed pages within the given
534 if (page_unboxed_p(j
)) {
535 if (page_table
[j
].large_object
)
542 gc_assert(generations
[i
].bytes_allocated
543 == count_generation_bytes_allocated(i
));
545 " %1d: %5ld %5ld %5ld %5ld",
547 generations
[i
].alloc_start_page
,
548 generations
[i
].alloc_unboxed_start_page
,
549 generations
[i
].alloc_large_start_page
,
550 generations
[i
].alloc_large_unboxed_start_page
);
552 " %5"PAGE_INDEX_FMT
" %5"PAGE_INDEX_FMT
" %5"PAGE_INDEX_FMT
553 " %5"PAGE_INDEX_FMT
" %5"PAGE_INDEX_FMT
,
554 boxed_cnt
, unboxed_cnt
, large_boxed_cnt
,
555 large_unboxed_cnt
, pinned_cnt
);
560 " %4"PAGE_INDEX_FMT
" %3d %7.4f\n",
561 generations
[i
].bytes_allocated
,
562 (npage_bytes(count_generation_pages(i
)) - generations
[i
].bytes_allocated
),
563 generations
[i
].gc_trigger
,
564 count_write_protect_generation_pages(i
),
565 generations
[i
].num_gc
,
566 generation_average_age(i
));
568 fprintf(file
," Total bytes allocated = %"OS_VM_SIZE_FMT
"\n", bytes_allocated
);
569 fprintf(file
," Dynamic-space-size bytes = %"OS_VM_SIZE_FMT
"\n", dynamic_space_size
);
571 fpu_restore(fpu_state
);
575 write_heap_exhaustion_report(FILE *file
, long available
, long requested
,
576 struct thread
*thread
)
579 "Heap exhausted during %s: %ld bytes available, %ld requested.\n",
580 gc_active_p
? "garbage collection" : "allocation",
583 write_generation_stats(file
);
584 fprintf(file
, "GC control variables:\n");
585 fprintf(file
, " *GC-INHIBIT* = %s\n *GC-PENDING* = %s\n",
586 SymbolValue(GC_INHIBIT
,thread
)==NIL
? "false" : "true",
587 (SymbolValue(GC_PENDING
, thread
) == T
) ?
588 "true" : ((SymbolValue(GC_PENDING
, thread
) == NIL
) ?
589 "false" : "in progress"));
590 #ifdef LISP_FEATURE_SB_THREAD
591 fprintf(file
, " *STOP-FOR-GC-PENDING* = %s\n",
592 SymbolValue(STOP_FOR_GC_PENDING
,thread
)==NIL
? "false" : "true");
597 print_generation_stats(void)
599 write_generation_stats(stderr
);
602 extern char* gc_logfile
;
603 char * gc_logfile
= NULL
;
606 log_generation_stats(char *logfile
, char *header
)
609 FILE * log
= fopen(logfile
, "a");
611 fprintf(log
, "%s\n", header
);
612 write_generation_stats(log
);
615 fprintf(stderr
, "Could not open gc logfile: %s\n", logfile
);
622 report_heap_exhaustion(long available
, long requested
, struct thread
*th
)
625 FILE * log
= fopen(gc_logfile
, "a");
627 write_heap_exhaustion_report(log
, available
, requested
, th
);
630 fprintf(stderr
, "Could not open gc logfile: %s\n", gc_logfile
);
634 /* Always to stderr as well. */
635 write_heap_exhaustion_report(stderr
, available
, requested
, th
);
639 #if defined(LISP_FEATURE_X86)
640 void fast_bzero(void*, size_t); /* in <arch>-assem.S */
643 /* Zero the pages from START to END (inclusive), but use mmap/munmap instead
644 * if zeroing it ourselves, i.e. in practice give the memory back to the
645 * OS. Generally done after a large GC.
647 void zero_pages_with_mmap(page_index_t start
, page_index_t end
) {
649 void *addr
= page_address(start
), *new_addr
;
650 os_vm_size_t length
= npage_bytes(1+end
-start
);
655 gc_assert(length
>= gencgc_release_granularity
);
656 gc_assert((length
% gencgc_release_granularity
) == 0);
658 os_invalidate(addr
, length
);
659 new_addr
= os_validate(addr
, length
);
660 if (new_addr
== NULL
|| new_addr
!= addr
) {
661 lose("remap_free_pages: page moved, 0x%08x ==> 0x%08x",
665 for (i
= start
; i
<= end
; i
++) {
666 page_table
[i
].need_to_zero
= 0;
670 /* Zero the pages from START to END (inclusive). Generally done just after
671 * a new region has been allocated.
674 zero_pages(page_index_t start
, page_index_t end
) {
678 #if defined(LISP_FEATURE_X86)
679 fast_bzero(page_address(start
), npage_bytes(1+end
-start
));
681 bzero(page_address(start
), npage_bytes(1+end
-start
));
687 zero_and_mark_pages(page_index_t start
, page_index_t end
) {
690 zero_pages(start
, end
);
691 for (i
= start
; i
<= end
; i
++)
692 page_table
[i
].need_to_zero
= 0;
/* Zero the pages from START to END (inclusive), except for those
 * pages that are known to be already zeroed. Mark all pages in the
 * range as non-zeroed. */
700 zero_dirty_pages(page_index_t start
, page_index_t end
) {
703 for (i
= start
; i
<= end
; i
++) {
704 if (!page_table
[i
].need_to_zero
) continue;
705 for (j
= i
+1; (j
<= end
) && (page_table
[j
].need_to_zero
); j
++);
710 for (i
= start
; i
<= end
; i
++) {
711 page_table
[i
].need_to_zero
= 1;
/*
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties,
 * e.g. boxed/unboxed, generation, age, there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page. Many
 * features of memory use are noted on a page-wise basis, e.g. the
 * generation; so if a region starts within an existing allocated page
 * it must be consistent with this page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region. It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list which jumps all over the place to clean up the list. It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g. to detect pointers to the oldspace.
 * Thus it's important that the allocation regions have the correct
 * properties set when allocated, and not just set when closed. The
 * region allocation routines return regions with the specified
 * properties, and grab all the pages, setting their properties
 * appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer. The actual space used by the region is not reflected
 * in the page tables until it is closed. It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used, returning unused
 * space. Further, it may be noted in the new areas, which is
 * necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region; the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects and so
 * don't need scavenging. Further they can't contain pointers to
 * younger generations so WP is not needed. By allocating pages to
 * unboxed objects the whole page never needs scavenging or
 * write-protecting. */
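/* A minimal sketch of the fast path that the region machinery enables
 * (illustration only -- the real allocators below add locking, region
 * refill, zero-fill checking and large-object handling):
 *
 *     static inline void *
 *     quick_alloc_sketch(struct alloc_region *region, sword_t nbytes)
 *     {
 *         void *obj = region->free_pointer;
 *         if ((char*)obj + nbytes > (char*)region->end_addr)
 *             return NULL;                            // region full: refill needed
 *         region->free_pointer = (char*)obj + nbytes; // bump the free pointer
 *         return obj;
 *     }
 */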
/* We are only using two regions at present. Both are for the current
 * newspace generation. */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

/* The generation currently being allocated to. */
static generation_index_t gc_alloc_generation;
770 static inline page_index_t
771 generation_alloc_start_page(generation_index_t generation
, int page_type_flag
, int large
)
774 if (UNBOXED_PAGE_FLAG
== page_type_flag
) {
775 return generations
[generation
].alloc_large_unboxed_start_page
;
776 } else if (BOXED_PAGE_FLAG
& page_type_flag
) {
777 /* Both code and data. */
778 return generations
[generation
].alloc_large_start_page
;
780 lose("bad page type flag: %d", page_type_flag
);
783 if (UNBOXED_PAGE_FLAG
== page_type_flag
) {
784 return generations
[generation
].alloc_unboxed_start_page
;
785 } else if (BOXED_PAGE_FLAG
& page_type_flag
) {
786 /* Both code and data. */
787 return generations
[generation
].alloc_start_page
;
789 lose("bad page_type_flag: %d", page_type_flag
);
795 set_generation_alloc_start_page(generation_index_t generation
, int page_type_flag
, int large
,
799 if (UNBOXED_PAGE_FLAG
== page_type_flag
) {
800 generations
[generation
].alloc_large_unboxed_start_page
= page
;
801 } else if (BOXED_PAGE_FLAG
& page_type_flag
) {
802 /* Both code and data. */
803 generations
[generation
].alloc_large_start_page
= page
;
805 lose("bad page type flag: %d", page_type_flag
);
808 if (UNBOXED_PAGE_FLAG
== page_type_flag
) {
809 generations
[generation
].alloc_unboxed_start_page
= page
;
810 } else if (BOXED_PAGE_FLAG
& page_type_flag
) {
811 /* Both code and data. */
812 generations
[generation
].alloc_start_page
= page
;
814 lose("bad page type flag: %d", page_type_flag
);
/* Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page. So
 * may pick up from the previous region if there is enough space. This
 * keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables(), and will thus be in an empty state.
 *
 * To assist the scavenging functions write-protected pages are not
 * used. Free pages should not be write-protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found to within the current
 * region and the page generation must be set so that pointers to the
 * from space can be recognized. Therefore the generation of pages in
 * the region is set to gc_alloc_generation. To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
843 gc_alloc_new_region(sword_t nbytes
, int page_type_flag
, struct alloc_region
*alloc_region
)
845 page_index_t first_page
;
846 page_index_t last_page
;
847 os_vm_size_t bytes_found
;
853 "/alloc_new_region for %d bytes from gen %d\n",
854 nbytes, gc_alloc_generation));
857 /* Check that the region is in a reset state. */
858 gc_assert((alloc_region
->first_page
== 0)
859 && (alloc_region
->last_page
== -1)
860 && (alloc_region
->free_pointer
== alloc_region
->end_addr
));
861 ret
= thread_mutex_lock(&free_pages_lock
);
863 first_page
= generation_alloc_start_page(gc_alloc_generation
, page_type_flag
, 0);
864 last_page
=gc_find_freeish_pages(&first_page
, nbytes
, page_type_flag
);
865 bytes_found
=(GENCGC_CARD_BYTES
- page_table
[first_page
].bytes_used
)
866 + npage_bytes(last_page
-first_page
);
868 /* Set up the alloc_region. */
869 alloc_region
->first_page
= first_page
;
870 alloc_region
->last_page
= last_page
;
871 alloc_region
->start_addr
= page_table
[first_page
].bytes_used
872 + page_address(first_page
);
873 alloc_region
->free_pointer
= alloc_region
->start_addr
;
874 alloc_region
->end_addr
= alloc_region
->start_addr
+ bytes_found
;
876 /* Set up the pages. */
878 /* The first page may have already been in use. */
879 if (page_table
[first_page
].bytes_used
== 0) {
880 page_table
[first_page
].allocated
= page_type_flag
;
881 page_table
[first_page
].gen
= gc_alloc_generation
;
882 page_table
[first_page
].large_object
= 0;
883 page_table
[first_page
].scan_start_offset
= 0;
884 // wiping should have free()ed and :=NULL
885 gc_assert(page_table
[first_page
].dontmove_dwords
== NULL
);
888 gc_assert(page_table
[first_page
].allocated
== page_type_flag
);
889 page_table
[first_page
].allocated
|= OPEN_REGION_PAGE_FLAG
;
891 gc_assert(page_table
[first_page
].gen
== gc_alloc_generation
);
892 gc_assert(page_table
[first_page
].large_object
== 0);
894 for (i
= first_page
+1; i
<= last_page
; i
++) {
895 page_table
[i
].allocated
= page_type_flag
;
896 page_table
[i
].gen
= gc_alloc_generation
;
897 page_table
[i
].large_object
= 0;
898 /* This may not be necessary for unboxed regions (think it was
900 page_table
[i
].scan_start_offset
=
901 void_diff(page_address(i
),alloc_region
->start_addr
);
902 page_table
[i
].allocated
|= OPEN_REGION_PAGE_FLAG
;
904 /* Bump up last_free_page. */
905 if (last_page
+1 > last_free_page
) {
906 last_free_page
= last_page
+1;
907 /* do we only want to call this on special occasions? like for
909 set_alloc_pointer((lispobj
)page_address(last_free_page
));
911 ret
= thread_mutex_unlock(&free_pages_lock
);
914 #ifdef READ_PROTECT_FREE_PAGES
915 os_protect(page_address(first_page
),
916 npage_bytes(1+last_page
-first_page
),
920 /* If the first page was only partial, don't check whether it's
921 * zeroed (it won't be) and don't zero it (since the parts that
922 * we're interested in are guaranteed to be zeroed).
924 if (page_table
[first_page
].bytes_used
) {
928 zero_dirty_pages(first_page
, last_page
);
930 /* we can do this after releasing free_pages_lock */
931 if (gencgc_zero_check
) {
933 for (p
= (word_t
*)alloc_region
->start_addr
;
934 p
< (word_t
*)alloc_region
->end_addr
; p
++) {
936 lose("The new region is not zero at %p (start=%p, end=%p).\n",
937 p
, alloc_region
->start_addr
, alloc_region
->end_addr
);
/* If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then it is only recorded if the first page of the
 * current region is <= new_areas_ignore_page. This helps avoid
 * unnecessary recording when doing a full scavenge pass.
 *
 * The new_area structure holds the page, byte offset, and size of
 * new regions of objects. Each new area is placed in the array of
 * these structures pointed to by new_areas. new_areas_index holds the
 * offset into new_areas.
 *
 * If new_areas overflows NUM_NEW_AREAS then it stops adding them. The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation. */
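/* Sketch of the bookkeeping described above, with made-up numbers: a
 * region closed on page 12 at byte offset 100 with 200 bytes used is
 * recorded as {.page = 12, .offset = 100, .size = 200}. If the next
 * region closed starts exactly at page 12, offset 300, add_new_area()
 * below simply grows that entry's size by the new region's size instead
 * of consuming another of the NUM_NEW_AREAS slots. */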
958 #define NUM_NEW_AREAS 512
959 static int record_new_objects
= 0;
960 static page_index_t new_areas_ignore_page
;
966 static struct new_area (*new_areas
)[];
967 static size_t new_areas_index
;
968 size_t max_new_areas
;
970 /* Add a new area to new_areas. */
972 add_new_area(page_index_t first_page
, size_t offset
, size_t size
)
974 size_t new_area_start
, c
;
977 /* Ignore if full. */
978 if (new_areas_index
>= NUM_NEW_AREAS
)
981 switch (record_new_objects
) {
985 if (first_page
> new_areas_ignore_page
)
994 new_area_start
= npage_bytes(first_page
) + offset
;
996 /* Search backwards for a prior area that this follows from. If
997 found this will save adding a new area. */
998 for (i
= new_areas_index
-1, c
= 0; (i
>= 0) && (c
< 8); i
--, c
++) {
1000 npage_bytes((*new_areas
)[i
].page
)
1001 + (*new_areas
)[i
].offset
1002 + (*new_areas
)[i
].size
;
1004 "/add_new_area S1 %d %d %d %d\n",
1005 i, c, new_area_start, area_end));*/
1006 if (new_area_start
== area_end
) {
1008 "/adding to [%d] %d %d %d with %d %d %d:\n",
1010 (*new_areas)[i].page,
1011 (*new_areas)[i].offset,
1012 (*new_areas)[i].size,
1016 (*new_areas
)[i
].size
+= size
;
1021 (*new_areas
)[new_areas_index
].page
= first_page
;
1022 (*new_areas
)[new_areas_index
].offset
= offset
;
1023 (*new_areas
)[new_areas_index
].size
= size
;
1025 "/new_area %d page %d offset %d size %d\n",
1026 new_areas_index, first_page, offset, size));*/
1029 /* Note the max new_areas used. */
1030 if (new_areas_index
> max_new_areas
)
1031 max_new_areas
= new_areas_index
;
/* Update the tables for the alloc_region. The region may be added to
 * the new_areas.
 *
 * When done the alloc_region is set up so that the next quick alloc
 * will fail safely and thus a new region will be allocated. Further
 * it is safe to try to re-update the page table of this reset
 * region. */
1042 gc_alloc_update_page_tables(int page_type_flag
, struct alloc_region
*alloc_region
)
1045 page_index_t first_page
;
1046 page_index_t next_page
;
1047 os_vm_size_t bytes_used
;
1048 os_vm_size_t region_size
;
1049 os_vm_size_t byte_cnt
;
1050 page_bytes_t orig_first_page_bytes_used
;
1054 first_page
= alloc_region
->first_page
;
1056 /* Catch an unused alloc_region. */
1057 if ((first_page
== 0) && (alloc_region
->last_page
== -1))
1060 next_page
= first_page
+1;
1062 ret
= thread_mutex_lock(&free_pages_lock
);
1063 gc_assert(ret
== 0);
1064 if (alloc_region
->free_pointer
!= alloc_region
->start_addr
) {
1065 /* some bytes were allocated in the region */
1066 orig_first_page_bytes_used
= page_table
[first_page
].bytes_used
;
1068 gc_assert(alloc_region
->start_addr
==
1069 (page_address(first_page
)
1070 + page_table
[first_page
].bytes_used
));
1072 /* All the pages used need to be updated */
1074 /* Update the first page. */
1076 /* If the page was free then set up the gen, and
1077 * scan_start_offset. */
1078 if (page_table
[first_page
].bytes_used
== 0)
1079 gc_assert(page_starts_contiguous_block_p(first_page
));
1080 page_table
[first_page
].allocated
&= ~(OPEN_REGION_PAGE_FLAG
);
1082 gc_assert(page_table
[first_page
].allocated
& page_type_flag
);
1083 gc_assert(page_table
[first_page
].gen
== gc_alloc_generation
);
1084 gc_assert(page_table
[first_page
].large_object
== 0);
1088 /* Calculate the number of bytes used in this page. This is not
1089 * always the number of new bytes, unless it was free. */
1091 if ((bytes_used
= void_diff(alloc_region
->free_pointer
,
1092 page_address(first_page
)))
1093 >GENCGC_CARD_BYTES
) {
1094 bytes_used
= GENCGC_CARD_BYTES
;
1097 page_table
[first_page
].bytes_used
= bytes_used
;
1098 byte_cnt
+= bytes_used
;
1101 /* All the rest of the pages should be free. We need to set
1102 * their scan_start_offset pointer to the start of the
1103 * region, and set the bytes_used. */
1105 page_table
[next_page
].allocated
&= ~(OPEN_REGION_PAGE_FLAG
);
1106 gc_assert(page_table
[next_page
].allocated
& page_type_flag
);
1107 gc_assert(page_table
[next_page
].bytes_used
== 0);
1108 gc_assert(page_table
[next_page
].gen
== gc_alloc_generation
);
1109 gc_assert(page_table
[next_page
].large_object
== 0);
1111 gc_assert(page_table
[next_page
].scan_start_offset
==
1112 void_diff(page_address(next_page
),
1113 alloc_region
->start_addr
));
1115 /* Calculate the number of bytes used in this page. */
1117 if ((bytes_used
= void_diff(alloc_region
->free_pointer
,
1118 page_address(next_page
)))>GENCGC_CARD_BYTES
) {
1119 bytes_used
= GENCGC_CARD_BYTES
;
1122 page_table
[next_page
].bytes_used
= bytes_used
;
1123 byte_cnt
+= bytes_used
;
1128 region_size
= void_diff(alloc_region
->free_pointer
,
1129 alloc_region
->start_addr
);
1130 bytes_allocated
+= region_size
;
1131 generations
[gc_alloc_generation
].bytes_allocated
+= region_size
;
1133 gc_assert((byte_cnt
- orig_first_page_bytes_used
) == region_size
);
1135 /* Set the generations alloc restart page to the last page of
1137 set_generation_alloc_start_page(gc_alloc_generation
, page_type_flag
, 0, next_page
-1);
1139 /* Add the region to the new_areas if requested. */
1140 if (BOXED_PAGE_FLAG
& page_type_flag
)
1141 add_new_area(first_page
,orig_first_page_bytes_used
, region_size
);
1145 "/gc_alloc_update_page_tables update %d bytes to gen %d\n",
1147 gc_alloc_generation));
1150 /* There are no bytes allocated. Unallocate the first_page if
1151 * there are 0 bytes_used. */
1152 page_table
[first_page
].allocated
&= ~(OPEN_REGION_PAGE_FLAG
);
1153 if (page_table
[first_page
].bytes_used
== 0)
1154 page_table
[first_page
].allocated
= FREE_PAGE_FLAG
;
1157 /* Unallocate any unused pages. */
1158 while (next_page
<= alloc_region
->last_page
) {
1159 gc_assert(page_table
[next_page
].bytes_used
== 0);
1160 page_table
[next_page
].allocated
= FREE_PAGE_FLAG
;
1163 ret
= thread_mutex_unlock(&free_pages_lock
);
1164 gc_assert(ret
== 0);
1166 /* alloc_region is per-thread, we're ok to do this unlocked */
1167 gc_set_region_empty(alloc_region
);
1170 /* Allocate a possibly large object. */
1172 gc_alloc_large(sword_t nbytes
, int page_type_flag
, struct alloc_region
*alloc_region
)
1175 page_index_t first_page
, next_page
, last_page
;
1176 page_bytes_t orig_first_page_bytes_used
;
1177 os_vm_size_t byte_cnt
;
1178 os_vm_size_t bytes_used
;
1181 ret
= thread_mutex_lock(&free_pages_lock
);
1182 gc_assert(ret
== 0);
1184 first_page
= generation_alloc_start_page(gc_alloc_generation
, page_type_flag
, 1);
1185 if (first_page
<= alloc_region
->last_page
) {
1186 first_page
= alloc_region
->last_page
+1;
1189 last_page
=gc_find_freeish_pages(&first_page
,nbytes
, page_type_flag
);
1191 gc_assert(first_page
> alloc_region
->last_page
);
1193 set_generation_alloc_start_page(gc_alloc_generation
, page_type_flag
, 1, last_page
);
1195 /* Set up the pages. */
1196 orig_first_page_bytes_used
= page_table
[first_page
].bytes_used
;
1198 /* If the first page was free then set up the gen, and
1199 * scan_start_offset. */
1200 if (page_table
[first_page
].bytes_used
== 0) {
1201 page_table
[first_page
].allocated
= page_type_flag
;
1202 page_table
[first_page
].gen
= gc_alloc_generation
;
1203 page_table
[first_page
].scan_start_offset
= 0;
1204 page_table
[first_page
].large_object
= 1;
1207 gc_assert(page_table
[first_page
].allocated
== page_type_flag
);
1208 gc_assert(page_table
[first_page
].gen
== gc_alloc_generation
);
1209 gc_assert(page_table
[first_page
].large_object
== 1);
1213 /* Calc. the number of bytes used in this page. This is not
1214 * always the number of new bytes, unless it was free. */
1216 if ((bytes_used
= nbytes
+orig_first_page_bytes_used
) > GENCGC_CARD_BYTES
) {
1217 bytes_used
= GENCGC_CARD_BYTES
;
1220 page_table
[first_page
].bytes_used
= bytes_used
;
1221 byte_cnt
+= bytes_used
;
1223 next_page
= first_page
+1;
1225 /* All the rest of the pages should be free. We need to set their
1226 * scan_start_offset pointer to the start of the region, and set
1227 * the bytes_used. */
1229 gc_assert(page_free_p(next_page
));
1230 gc_assert(page_table
[next_page
].bytes_used
== 0);
1231 page_table
[next_page
].allocated
= page_type_flag
;
1232 page_table
[next_page
].gen
= gc_alloc_generation
;
1233 page_table
[next_page
].large_object
= 1;
1235 page_table
[next_page
].scan_start_offset
=
1236 npage_bytes(next_page
-first_page
) - orig_first_page_bytes_used
;
1238 /* Calculate the number of bytes used in this page. */
1240 bytes_used
=(nbytes
+orig_first_page_bytes_used
)-byte_cnt
;
1241 if (bytes_used
> GENCGC_CARD_BYTES
) {
1242 bytes_used
= GENCGC_CARD_BYTES
;
1245 page_table
[next_page
].bytes_used
= bytes_used
;
1246 page_table
[next_page
].write_protected
=0;
1247 page_table
[next_page
].dont_move
=0;
1248 byte_cnt
+= bytes_used
;
1252 gc_assert((byte_cnt
-orig_first_page_bytes_used
) == (size_t)nbytes
);
1254 bytes_allocated
+= nbytes
;
1255 generations
[gc_alloc_generation
].bytes_allocated
+= nbytes
;
1257 /* Add the region to the new_areas if requested. */
1258 if (BOXED_PAGE_FLAG
& page_type_flag
)
1259 add_new_area(first_page
,orig_first_page_bytes_used
,nbytes
);
1261 /* Bump up last_free_page */
1262 if (last_page
+1 > last_free_page
) {
1263 last_free_page
= last_page
+1;
1264 set_alloc_pointer((lispobj
)(page_address(last_free_page
)));
1266 ret
= thread_mutex_unlock(&free_pages_lock
);
1267 gc_assert(ret
== 0);
1269 #ifdef READ_PROTECT_FREE_PAGES
1270 os_protect(page_address(first_page
),
1271 npage_bytes(1+last_page
-first_page
),
1275 zero_dirty_pages(first_page
, last_page
);
1277 return page_address(first_page
);
1280 static page_index_t gencgc_alloc_start_page
= -1;
1283 gc_heap_exhausted_error_or_lose (sword_t available
, sword_t requested
)
1285 struct thread
*thread
= arch_os_get_current_thread();
/* Write basic information before doing anything else: if we don't
 * call into Lisp this is a must, and even if we do there is always
 * the danger that we bounce back here before the error has been
 * handled, or indeed even printed. */
1291 report_heap_exhaustion(available
, requested
, thread
);
1292 if (gc_active_p
|| (available
== 0)) {
1293 /* If we are in GC, or totally out of memory there is no way
1294 * to sanely transfer control to the lisp-side of things.
1296 lose("Heap exhausted, game over.");
1299 /* FIXME: assert free_pages_lock held */
1300 (void)thread_mutex_unlock(&free_pages_lock
);
1301 #if !(defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD))
1302 gc_assert(get_pseudo_atomic_atomic(thread
));
1303 clear_pseudo_atomic_atomic(thread
);
1304 if (get_pseudo_atomic_interrupted(thread
))
1305 do_pending_interrupt();
/* Another issue is that signalling the HEAP-EXHAUSTED error leads
 * to running user code at arbitrary places, even in a
 * WITHOUT-INTERRUPTS which may lead to a deadlock without
 * running out of the heap. So at this point all bets are
 * off. */
1312 if (SymbolValue(INTERRUPTS_ENABLED
,thread
) == NIL
)
1313 corruption_warning_and_maybe_lose
1314 ("Signalling HEAP-EXHAUSTED in a WITHOUT-INTERRUPTS.");
/* available and requested should be double word aligned, thus
 * they can be passed as fixnums and shifted later. */
1317 funcall2(StaticSymbolFunction(HEAP_EXHAUSTED_ERROR
), available
, requested
);
1318 lose("HEAP-EXHAUSTED-ERROR fell through");
1323 gc_find_freeish_pages(page_index_t
*restart_page_ptr
, sword_t bytes
,
1326 page_index_t most_bytes_found_from
= 0, most_bytes_found_to
= 0;
1327 page_index_t first_page
, last_page
, restart_page
= *restart_page_ptr
;
1328 os_vm_size_t nbytes
= bytes
;
1329 os_vm_size_t nbytes_goal
= nbytes
;
1330 os_vm_size_t bytes_found
= 0;
1331 os_vm_size_t most_bytes_found
= 0;
1332 boolean small_object
= nbytes
< GENCGC_CARD_BYTES
;
1333 /* FIXME: assert(free_pages_lock is held); */
1335 if (nbytes_goal
< gencgc_alloc_granularity
)
1336 nbytes_goal
= gencgc_alloc_granularity
;
1338 /* Toggled by gc_and_save for heap compaction, normally -1. */
1339 if (gencgc_alloc_start_page
!= -1) {
1340 restart_page
= gencgc_alloc_start_page
;
1343 /* FIXME: This is on bytes instead of nbytes pending cleanup of
1344 * long from the interface. */
1345 gc_assert(bytes
>=0);
/* Search for a page with at least nbytes of space. We prefer
 * not to split small objects on multiple pages, to reduce the
 * number of contiguous allocation regions spanning multiple
 * pages: this helps avoid excessive conservatism.
 *
 * For other objects, we guarantee that they start on their own
 * page. */
1354 first_page
= restart_page
;
1355 while (first_page
< page_table_pages
) {
1357 if (page_free_p(first_page
)) {
1358 gc_assert(0 == page_table
[first_page
].bytes_used
);
1359 bytes_found
= GENCGC_CARD_BYTES
;
1360 } else if (small_object
&&
1361 (page_table
[first_page
].allocated
== page_type_flag
) &&
1362 (page_table
[first_page
].large_object
== 0) &&
1363 (page_table
[first_page
].gen
== gc_alloc_generation
) &&
1364 (page_table
[first_page
].write_protected
== 0) &&
1365 (page_table
[first_page
].dont_move
== 0)) {
1366 bytes_found
= GENCGC_CARD_BYTES
- page_table
[first_page
].bytes_used
;
1367 if (bytes_found
< nbytes
) {
1368 if (bytes_found
> most_bytes_found
)
1369 most_bytes_found
= bytes_found
;
1378 gc_assert(page_table
[first_page
].write_protected
== 0);
1379 for (last_page
= first_page
+1;
1380 ((last_page
< page_table_pages
) &&
1381 page_free_p(last_page
) &&
1382 (bytes_found
< nbytes_goal
));
1384 bytes_found
+= GENCGC_CARD_BYTES
;
1385 gc_assert(0 == page_table
[last_page
].bytes_used
);
1386 gc_assert(0 == page_table
[last_page
].write_protected
);
1389 if (bytes_found
> most_bytes_found
) {
1390 most_bytes_found
= bytes_found
;
1391 most_bytes_found_from
= first_page
;
1392 most_bytes_found_to
= last_page
;
1394 if (bytes_found
>= nbytes_goal
)
1397 first_page
= last_page
;
1400 bytes_found
= most_bytes_found
;
1401 restart_page
= first_page
+ 1;
1403 /* Check for a failure */
1404 if (bytes_found
< nbytes
) {
1405 gc_assert(restart_page
>= page_table_pages
);
1406 gc_heap_exhausted_error_or_lose(most_bytes_found
, nbytes
);
1409 gc_assert(most_bytes_found_to
);
1410 *restart_page_ptr
= most_bytes_found_from
;
1411 return most_bytes_found_to
-1;
1414 /* Allocate bytes. All the rest of the special-purpose allocation
1415 * functions will eventually call this */
1418 gc_alloc_with_region(sword_t nbytes
,int page_type_flag
, struct alloc_region
*my_region
,
1421 void *new_free_pointer
;
1423 if ((size_t)nbytes
>=large_object_size
)
1424 return gc_alloc_large(nbytes
, page_type_flag
, my_region
);
1426 /* Check whether there is room in the current alloc region. */
1427 new_free_pointer
= my_region
->free_pointer
+ nbytes
;
1429 /* fprintf(stderr, "alloc %d bytes from %p to %p\n", nbytes,
1430 my_region->free_pointer, new_free_pointer); */
1432 if (new_free_pointer
<= my_region
->end_addr
) {
1433 /* If so then allocate from the current alloc region. */
1434 void *new_obj
= my_region
->free_pointer
;
1435 my_region
->free_pointer
= new_free_pointer
;
1437 /* Unless a `quick' alloc was requested, check whether the
1438 alloc region is almost empty. */
1440 void_diff(my_region
->end_addr
,my_region
->free_pointer
) <= 32) {
1441 /* If so, finished with the current region. */
1442 gc_alloc_update_page_tables(page_type_flag
, my_region
);
1443 /* Set up a new region. */
1444 gc_alloc_new_region(32 /*bytes*/, page_type_flag
, my_region
);
1447 return((void *)new_obj
);
1450 /* Else not enough free space in the current region: retry with a
1453 gc_alloc_update_page_tables(page_type_flag
, my_region
);
1454 gc_alloc_new_region(nbytes
, page_type_flag
, my_region
);
1455 return gc_alloc_with_region(nbytes
, page_type_flag
, my_region
,0);
1458 /* Copy a large object. If the object is in a large object region then
1459 * it is simply promoted, else it is copied. If it's large enough then
1460 * it's copied to a large object region.
1462 * Bignums and vectors may have shrunk. If the object is not copied
1463 * the space needs to be reclaimed, and the page_tables corrected. */
1465 general_copy_large_object(lispobj object
, word_t nwords
, boolean boxedp
)
1469 page_index_t first_page
;
1471 gc_assert(is_lisp_pointer(object
));
1472 gc_assert(from_space_p(object
));
1473 gc_assert((nwords
& 0x01) == 0);
1475 if ((nwords
> 1024*1024) && gencgc_verbose
) {
1476 FSHOW((stderr
, "/general_copy_large_object: %d bytes\n",
1477 nwords
*N_WORD_BYTES
));
1480 /* Check whether it's a large object. */
1481 first_page
= find_page_index((void *)object
);
1482 gc_assert(first_page
>= 0);
1484 if (page_table
[first_page
].large_object
) {
1485 /* Promote the object. Note: Unboxed objects may have been
1486 * allocated to a BOXED region so it may be necessary to
1487 * change the region to UNBOXED. */
1488 os_vm_size_t remaining_bytes
;
1489 os_vm_size_t bytes_freed
;
1490 page_index_t next_page
;
1491 page_bytes_t old_bytes_used
;
1493 /* FIXME: This comment is somewhat stale.
1495 * Note: Any page write-protection must be removed, else a
1496 * later scavenge_newspace may incorrectly not scavenge these
1497 * pages. This would not be necessary if they are added to the
1498 * new areas, but let's do it for them all (they'll probably
1499 * be written anyway?). */
1501 gc_assert(page_starts_contiguous_block_p(first_page
));
1502 next_page
= first_page
;
1503 remaining_bytes
= nwords
*N_WORD_BYTES
;
1505 while (remaining_bytes
> GENCGC_CARD_BYTES
) {
1506 gc_assert(page_table
[next_page
].gen
== from_space
);
1507 gc_assert(page_table
[next_page
].large_object
);
1508 gc_assert(page_table
[next_page
].scan_start_offset
==
1509 npage_bytes(next_page
-first_page
));
1510 gc_assert(page_table
[next_page
].bytes_used
== GENCGC_CARD_BYTES
);
1511 /* Should have been unprotected by unprotect_oldspace()
1512 * for boxed objects, and after promotion unboxed ones
1513 * should not be on protected pages at all. */
1514 gc_assert(!page_table
[next_page
].write_protected
);
1517 gc_assert(page_boxed_p(next_page
));
1519 gc_assert(page_allocated_no_region_p(next_page
));
1520 page_table
[next_page
].allocated
= UNBOXED_PAGE_FLAG
;
1522 page_table
[next_page
].gen
= new_space
;
1524 remaining_bytes
-= GENCGC_CARD_BYTES
;
1528 /* Now only one page remains, but the object may have shrunk so
1529 * there may be more unused pages which will be freed. */
1531 /* Object may have shrunk but shouldn't have grown - check. */
1532 gc_assert(page_table
[next_page
].bytes_used
>= remaining_bytes
);
1534 page_table
[next_page
].gen
= new_space
;
1537 gc_assert(page_boxed_p(next_page
));
1539 page_table
[next_page
].allocated
= UNBOXED_PAGE_FLAG
;
1541 /* Adjust the bytes_used. */
1542 old_bytes_used
= page_table
[next_page
].bytes_used
;
1543 page_table
[next_page
].bytes_used
= remaining_bytes
;
1545 bytes_freed
= old_bytes_used
- remaining_bytes
;
1547 /* Free any remaining pages; needs care. */
1549 while ((old_bytes_used
== GENCGC_CARD_BYTES
) &&
1550 (page_table
[next_page
].gen
== from_space
) &&
1551 /* FIXME: It is not obvious to me why this is necessary
1552 * as a loop condition: it seems to me that the
1553 * scan_start_offset test should be sufficient, but
1554 * experimentally that is not the case. --NS
1557 page_boxed_p(next_page
) :
1558 page_allocated_no_region_p(next_page
)) &&
1559 page_table
[next_page
].large_object
&&
1560 (page_table
[next_page
].scan_start_offset
==
1561 npage_bytes(next_page
- first_page
))) {
/* Checks out OK, free the page. Don't need to bother zeroing
 * pages as this should have been done before shrinking the
 * object. These pages shouldn't be write-protected, even if
 * boxed they should be zero filled. */
1566 gc_assert(page_table
[next_page
].write_protected
== 0);
1568 old_bytes_used
= page_table
[next_page
].bytes_used
;
1569 page_table
[next_page
].allocated
= FREE_PAGE_FLAG
;
1570 page_table
[next_page
].bytes_used
= 0;
1571 bytes_freed
+= old_bytes_used
;
1575 if ((bytes_freed
> 0) && gencgc_verbose
) {
1577 "/general_copy_large_object bytes_freed=%"OS_VM_SIZE_FMT
"\n",
1581 generations
[from_space
].bytes_allocated
-= nwords
*N_WORD_BYTES
1583 generations
[new_space
].bytes_allocated
+= nwords
*N_WORD_BYTES
;
1584 bytes_allocated
-= bytes_freed
;
1586 /* Add the region to the new_areas if requested. */
1588 add_new_area(first_page
,0,nwords
*N_WORD_BYTES
);
1593 /* Get tag of object. */
1594 tag
= lowtag_of(object
);
1596 /* Allocate space. */
1597 new = gc_general_alloc(nwords
*N_WORD_BYTES
,
1598 (boxedp
? BOXED_PAGE_FLAG
: UNBOXED_PAGE_FLAG
),
1601 /* Copy the object. */
1602 memcpy(new,native_pointer(object
),nwords
*N_WORD_BYTES
);
1604 /* Return Lisp pointer of new object. */
1605 return ((lispobj
) new) | tag
;
1610 copy_large_object(lispobj object
, sword_t nwords
)
1612 return general_copy_large_object(object
, nwords
, 1);
1616 copy_large_unboxed_object(lispobj object
, sword_t nwords
)
1618 return general_copy_large_object(object
, nwords
, 0);
1621 /* to copy unboxed objects */
1623 copy_unboxed_object(lispobj object
, sword_t nwords
)
1625 return gc_general_copy_object(object
, nwords
, UNBOXED_PAGE_FLAG
);
1630 * code and code-related objects
1633 static lispobj trans_fun_header(lispobj object);
1634 static lispobj trans_boxed(lispobj object);
/* Scan an x86 compiled code object, looking for possible fixups that
 * have been missed after a move.
1640 * Two types of fixups are needed:
1641 * 1. Absolute fixups to within the code object.
1642 * 2. Relative fixups to outside the code object.
1644 * Currently only absolute fixups to the constant vector, or to the
1645 * code area are checked. */
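/* Schematically (a sketch, not the literal code): for a code object that
 * moved by `displacement' bytes, a recorded fixup at byte `offset' into
 * the code is repaired along the lines of
 *
 *     word = *(os_vm_address_t*)(code_start_addr + offset);
 *     if (word pointed into the old copy of this code object)
 *         word += displacement;   // absolute reference: moves with the object
 *     else
 *         word -= displacement;   // relative reference to something outside
 *     *(os_vm_address_t*)(code_start_addr + offset) = word;
 *
 * gencgc_apply_code_fixups() below implements this for real. */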
1646 #ifdef LISP_FEATURE_X86
1648 sniff_code_object(struct code
*code
, os_vm_size_t displacement
)
1650 sword_t nheader_words
, ncode_words
, nwords
;
1651 os_vm_address_t constants_start_addr
= NULL
, constants_end_addr
, p
;
1652 os_vm_address_t code_start_addr
, code_end_addr
;
1653 os_vm_address_t code_addr
= (os_vm_address_t
)code
;
1654 int fixup_found
= 0;
1656 if (!check_code_fixups
)
1659 FSHOW((stderr
, "/sniffing code: %p, %lu\n", code
, displacement
));
1661 ncode_words
= fixnum_word_value(code
->code_size
);
1662 nheader_words
= HeaderValue(*(lispobj
*)code
);
1663 nwords
= ncode_words
+ nheader_words
;
1665 constants_start_addr
= code_addr
+ 5*N_WORD_BYTES
;
1666 constants_end_addr
= code_addr
+ nheader_words
*N_WORD_BYTES
;
1667 code_start_addr
= code_addr
+ nheader_words
*N_WORD_BYTES
;
1668 code_end_addr
= code_addr
+ nwords
*N_WORD_BYTES
;
1670 /* Work through the unboxed code. */
1671 for (p
= code_start_addr
; p
< code_end_addr
; p
++) {
1672 void *data
= *(void **)p
;
1673 unsigned d1
= *((unsigned char *)p
- 1);
1674 unsigned d2
= *((unsigned char *)p
- 2);
1675 unsigned d3
= *((unsigned char *)p
- 3);
1676 unsigned d4
= *((unsigned char *)p
- 4);
1678 unsigned d5
= *((unsigned char *)p
- 5);
1679 unsigned d6
= *((unsigned char *)p
- 6);
1682 /* Check for code references. */
1683 /* Check for a 32 bit word that looks like an absolute
   reference to within the code area of the code object. */
1685 if ((data
>= (void*)(code_start_addr
-displacement
))
1686 && (data
< (void*)(code_end_addr
-displacement
))) {
1687 /* function header */
1689 && (((unsigned)p
- 4 - 4*HeaderValue(*((unsigned *)p
-1))) ==
1691 /* Skip the function header */
1695 /* the case of PUSH imm32 */
1699 "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1700 p
, d6
, d5
, d4
, d3
, d2
, d1
, data
));
1701 FSHOW((stderr
, "/PUSH $0x%.8x\n", data
));
1703 /* the case of MOV [reg-8],imm32 */
1705 && (d2
==0x40 || d2
==0x41 || d2
==0x42 || d2
==0x43
1706 || d2
==0x45 || d2
==0x46 || d2
==0x47)
1710 "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1711 p
, d6
, d5
, d4
, d3
, d2
, d1
, data
));
1712 FSHOW((stderr
, "/MOV [reg-8],$0x%.8x\n", data
));
1714 /* the case of LEA reg,[disp32] */
1715 if ((d2
== 0x8d) && ((d1
& 0xc7) == 5)) {
1718 "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1719 p
, d6
, d5
, d4
, d3
, d2
, d1
, data
));
1720 FSHOW((stderr
,"/LEA reg,[$0x%.8x]\n", data
));
1724 /* Check for constant references. */
1725 /* Check for a 32 bit word that looks like an absolute
1726 reference to within the constant vector. Constant references
1728 if ((data
>= (void*)(constants_start_addr
-displacement
))
1729 && (data
< (void*)(constants_end_addr
-displacement
))
1730 && (((unsigned)data
& 0x3) == 0)) {
1735 "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1736 p
, d6
, d5
, d4
, d3
, d2
, d1
, data
));
1737 FSHOW((stderr
,"/MOV eax,0x%.8x\n", data
));
1740 /* the case of MOV m32,EAX */
1744 "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1745 p
, d6
, d5
, d4
, d3
, d2
, d1
, data
));
1746 FSHOW((stderr
, "/MOV 0x%.8x,eax\n", data
));
1749 /* the case of CMP m32,imm32 */
1750 if ((d1
== 0x3d) && (d2
== 0x81)) {
1753 "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1754 p
, d6
, d5
, d4
, d3
, d2
, d1
, data
));
1756 FSHOW((stderr
, "/CMP 0x%.8x,immed32\n", data
));
1759 /* Check for a mod=00, r/m=101 byte. */
1760 if ((d1
& 0xc7) == 5) {
1765 "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1766 p
, d6
, d5
, d4
, d3
, d2
, d1
, data
));
1767 FSHOW((stderr
,"/CMP 0x%.8x,reg\n", data
));
1769 /* the case of CMP reg32,m32 */
1773 "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1774 p
, d6
, d5
, d4
, d3
, d2
, d1
, data
));
1775 FSHOW((stderr
, "/CMP reg32,0x%.8x\n", data
));
1777 /* the case of MOV m32,reg32 */
1781 "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1782 p
, d6
, d5
, d4
, d3
, d2
, d1
, data
));
1783 FSHOW((stderr
, "/MOV 0x%.8x,reg32\n", data
));
1785 /* the case of MOV reg32,m32 */
1789 "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1790 p
, d6
, d5
, d4
, d3
, d2
, d1
, data
));
1791 FSHOW((stderr
, "/MOV reg32,0x%.8x\n", data
));
1793 /* the case of LEA reg32,m32 */
1797 "abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1798 p
, d6
, d5
, d4
, d3
, d2
, d1
, data
));
1799 FSHOW((stderr
, "/LEA reg32,0x%.8x\n", data
));
1805 /* If anything was found, print some information on the code
1809 "/compiled code object at %x: header words = %d, code words = %d\n",
1810 code
, nheader_words
, ncode_words
));
1812 "/const start = %x, end = %x\n",
1813 constants_start_addr
, constants_end_addr
));
1815 "/code start = %x, end = %x\n",
1816 code_start_addr
, code_end_addr
));
#ifdef LISP_FEATURE_X86
void
gencgc_apply_code_fixups(struct code *old_code, struct code *new_code)
{
    sword_t nheader_words, ncode_words, nwords;
    os_vm_address_t constants_start_addr, constants_end_addr;
    os_vm_address_t code_start_addr, code_end_addr;
    os_vm_address_t code_addr = (os_vm_address_t)new_code;
    os_vm_address_t old_addr = (os_vm_address_t)old_code;
    os_vm_size_t displacement = code_addr - old_addr;
    lispobj fixups = NIL;
    struct vector *fixups_vector;

    ncode_words = fixnum_word_value(new_code->code_size);
    nheader_words = HeaderValue(*(lispobj *)new_code);
    nwords = ncode_words + nheader_words;

    /* FSHOW((stderr,
             "/compiled code object at %x: header words = %d, code words = %d\n",
             new_code, nheader_words, ncode_words)); */
    constants_start_addr = code_addr + 5*N_WORD_BYTES;
    constants_end_addr = code_addr + nheader_words*N_WORD_BYTES;
    code_start_addr = code_addr + nheader_words*N_WORD_BYTES;
    code_end_addr = code_addr + nwords*N_WORD_BYTES;
    FSHOW((stderr,
           "/const start = %x, end = %x\n",
           constants_start_addr,constants_end_addr));
    FSHOW((stderr,
           "/code start = %x; end = %x\n",
           code_start_addr,code_end_addr));

    /* The first constant should be a pointer to the fixups for this
       code object. Check. */
    fixups = new_code->constants[0];

    /* It will be 0 or the unbound-marker if there are no fixups (as
     * will be the case if the code object has been purified, for
     * example) and will be an other-pointer if it is valid. */
    if ((fixups == 0) || (fixups == UNBOUND_MARKER_WIDETAG) ||
        !is_lisp_pointer(fixups)) {
        /* Check for possible errors. */
        if (check_code_fixups)
            sniff_code_object(new_code, displacement);
        return;
    }

    fixups_vector = (struct vector *)native_pointer(fixups);

    /* Could be pointing to a forwarding pointer. */
    /* FIXME is this always in from_space?  if so, could replace this code with
     * forwarding_pointer_p/forwarding_pointer_value */
    if (is_lisp_pointer(fixups) &&
        (find_page_index((void*)fixups_vector) != -1) &&
        (fixups_vector->header == 0x01)) {
        /* If so, then follow it. */
        /*SHOW("following pointer to a forwarding pointer");*/
        fixups_vector =
            (struct vector *)native_pointer((lispobj)fixups_vector->length);
    }

    /*SHOW("got fixups");*/

    if (widetag_of(fixups_vector->header) == SIMPLE_ARRAY_WORD_WIDETAG) {
        /* Got the fixups for the code block. Now work through the vector,
           and apply a fixup at each address. */
        sword_t length = fixnum_value(fixups_vector->length);
        sword_t i;
        for (i = 0; i < length; i++) {
            long offset = fixups_vector->data[i];
            /* Now check the current value of offset. */
            os_vm_address_t old_value = *(os_vm_address_t *)(code_start_addr + offset);

            /* If it's within the old_code object then it must be an
             * absolute fixup (relative ones are not saved) */
            if ((old_value >= old_addr)
                && (old_value < (old_addr + nwords*N_WORD_BYTES)))
                /* So add the displacement. */
                *(os_vm_address_t *)(code_start_addr + offset) =
                    old_value + displacement;
            else
                /* It is outside the old code object so it must be a
                 * relative fixup (absolute fixups are not saved). So
                 * subtract the displacement. */
                *(os_vm_address_t *)(code_start_addr + offset) =
                    old_value - displacement;
        }
    } else {
        /* This used to just print a note to stderr, but a bogus fixup seems to
         * indicate real heap corruption, so a hard failure is in order. */
        lose("fixup vector %p has a bad widetag: %d\n",
             fixups_vector, widetag_of(fixups_vector->header));
    }

    /* Check for possible errors. */
    if (check_code_fixups) {
        sniff_code_object(new_code, displacement);
    }
}
#endif
static lispobj
trans_boxed_large(lispobj object)
{
    lispobj header;
    uword_t length;

    gc_assert(is_lisp_pointer(object));

    header = *((lispobj *) native_pointer(object));
    length = HeaderValue(header) + 1;
    length = CEILING(length, 2);

    return copy_large_object(object, length);
}

/* Doesn't seem to be used, delete it after the grace period. */
static lispobj
trans_unboxed_large(lispobj object)
{
    lispobj header;
    uword_t length;

    gc_assert(is_lisp_pointer(object));

    header = *((lispobj *) native_pointer(object));
    length = HeaderValue(header) + 1;
    length = CEILING(length, 2);

    return copy_large_unboxed_object(object, length);
}
/* XX This is a hack adapted from cgc.c. These don't work too
 * efficiently with the gencgc as a list of the weak pointers is
 * maintained within the objects which causes writes to the pages. A
 * limited attempt is made to avoid unnecessary writes, but this needs
 * a fix. */

#define WEAK_POINTER_NWORDS \
    CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)

static sword_t
scav_weak_pointer(lispobj *where, lispobj object)
{
    /* Since we overwrite the 'next' field, we have to make
     * sure not to do so for pointers already in the list.
     * Instead of searching the list of weak_pointers each
     * time, we ensure that next is always NULL when the weak
     * pointer isn't in the list, and not NULL otherwise.
     * Since we can't use NULL to denote end of list, we
     * use a pointer back to the same weak_pointer.
     */
    struct weak_pointer * wp = (struct weak_pointer*)where;

    if (NULL == wp->next) {
        wp->next = weak_pointers;
        weak_pointers = wp;
        if (NULL == wp->next)
            wp->next = wp;
    }

    /* Do not let GC scavenge the value slot of the weak pointer.
     * (That is why it is a weak pointer.) */

    return WEAK_POINTER_NWORDS;
}
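
/* The "next points back to itself" convention above is easy to misread, so
 * here is a minimal, self-contained sketch of the same bookkeeping.  It is
 * illustrative only: 'node', 'head' and 'push_once' are hypothetical names
 * and nothing below is compiled into the collector. */
#if 0
struct node { struct node *next; };
static struct node *head = NULL;        /* plays the role of weak_pointers */

static void push_once(struct node *n)
{
    if (n->next == NULL) {              /* NULL means "not enqueued yet" */
        n->next = head;                 /* old head, or NULL if list empty */
        head = n;
        if (n->next == NULL)            /* list was empty: avoid NULL, which */
            n->next = n;                /* would look like "not enqueued" */
    }
}
#endif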
lispobj *
search_read_only_space(void *pointer)
{
    lispobj *start = (lispobj *) READ_ONLY_SPACE_START;
    lispobj *end = (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0);
    if ((pointer < (void *)start) || (pointer >= (void *)end))
        return NULL;
    return (gc_search_space(start,
                            (((lispobj *)pointer)+2)-start,
                            (lispobj *) pointer));
}

lispobj *
search_static_space(void *pointer)
{
    lispobj *start = (lispobj *)STATIC_SPACE_START;
    lispobj *end = (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0);
    if ((pointer < (void *)start) || (pointer >= (void *)end))
        return NULL;
    return (gc_search_space(start,
                            (((lispobj *)pointer)+2)-start,
                            (lispobj *) pointer));
}

/* a faster version for searching the dynamic space. This will work even
 * if the object is in a current allocation region. */
lispobj *
search_dynamic_space(void *pointer)
{
    page_index_t page_index = find_page_index(pointer);
    lispobj *start;

    /* The address may be invalid, so do some checks. */
    if ((page_index == -1) || page_free_p(page_index))
        return NULL;
    start = (lispobj *)page_scan_start(page_index);
    return (gc_search_space(start,
                            (((lispobj *)pointer)+2)-start,
                            (lispobj *)pointer));
}
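
/* Note: in all three searches above the second argument to gc_search_space()
 * is a count of lispobj-sized words, not bytes; (((lispobj *)pointer)+2)-start
 * is ordinary pointer arithmetic in word units.  The sketch below is
 * illustrative only and assumes 8-byte words; search_bound_in_words() is a
 * hypothetical helper, not part of the collector. */
#if 0
#include <stdint.h>
static uintptr_t search_bound_in_words(uintptr_t start, uintptr_t pointer)
{
    /* mirrors (((lispobj *)pointer)+2) - start for word-aligned addresses */
    return (pointer / sizeof(uintptr_t) + 2) - (start / sizeof(uintptr_t));
}
/* e.g. with start == 0x1000 and pointer == 0x1058 the bound is 13 words,
 * enough to walk from the scan start up to and past the queried address. */
#endif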
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)

/* Is there any possibility that pointer is a valid Lisp object
 * reference, and/or something else (e.g. subroutine call return
 * address) which should prevent us from moving the referred-to thing?
 * This is called from preserve_pointers() */
static int
possibly_valid_dynamic_space_pointer_s(lispobj *pointer,
                                       page_index_t addr_page_index,
                                       lispobj **store_here)
{
    lispobj *start_addr;

    /* Find the object start address. */
    start_addr = search_dynamic_space(pointer);

    if (start_addr == NULL) {
        return 0;
    }
    if (store_here) {
        *store_here = start_addr;
    }

    /* If the containing object is a code object, presume that the
     * pointer is valid, simply because it could be an unboxed return
     * address. */
    if (widetag_of(*start_addr) == CODE_HEADER_WIDETAG)
        return 1;

    /* Large object pages only contain ONE object, and it will never
     * be a CONS. However, arrays and bignums can be allocated larger
     * than necessary and then shrunk to fit, leaving what look like
     * (0 . 0) CONSes at the end. These appear valid to
     * looks_like_valid_lisp_pointer_p(), so pick them off here. */
    if (page_table[addr_page_index].large_object &&
        (lowtag_of((lispobj)pointer) == LIST_POINTER_LOWTAG))
        return 0;

    return looks_like_valid_lisp_pointer_p((lispobj)pointer, start_addr);
}

#endif  // defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
static boolean
valid_conservative_root_p(void *addr, page_index_t addr_page_index,
                          lispobj **begin_ptr)
{
#ifdef GENCGC_IS_PRECISE
    /* If we're in precise gencgc (non-x86oid as of this writing) then
     * we are only called on valid object pointers in the first place,
     * so we just have to do a bounds-check against the heap, a
     * generation check, and the already-pinned check. */
    if ((addr_page_index == -1)
        || (page_table[addr_page_index].gen != from_space)
        || (page_table[addr_page_index].dont_move != 0))
        return 0;
#else
    /* quick check 1: Address is quite likely to have been invalid. */
    if ((addr_page_index == -1)
        || page_free_p(addr_page_index)
        || (page_table[addr_page_index].bytes_used == 0)
        || (page_table[addr_page_index].gen != from_space))
        return 0;
    gc_assert(!(page_table[addr_page_index].allocated&OPEN_REGION_PAGE_FLAG));

    /* quick check 2: Check the offset within the page. */
    if (((uword_t)addr & (GENCGC_CARD_BYTES - 1)) >
        page_table[addr_page_index].bytes_used)
        return 0;

    /* Filter out anything which can't be a pointer to a Lisp object
     * (or, as a special case which also requires dont_move, a return
     * address referring to something in a CodeObject). This is
     * expensive but important, since it vastly reduces the
     * probability that random garbage will be bogusly interpreted as
     * a pointer which prevents a page from moving. */
    if (!possibly_valid_dynamic_space_pointer_s(addr, addr_page_index,
                                                begin_ptr))
        return 0;
#endif

    return 1;
}

boolean
in_dontmove_dwordindex_p(page_index_t page_index, int dword_in_page)
{
    if (page_table[page_index].dontmove_dwords) {
        return page_table[page_index].dontmove_dwords[dword_in_page];
    } else {
        return 0;
    }
}

boolean
in_dontmove_nativeptr_p(page_index_t page_index, lispobj *native_ptr)
{
    if (page_table[page_index].dontmove_dwords) {
        lispobj *begin = page_address(page_index);
        int dword_in_page = (native_ptr - begin) / 2;
        return in_dontmove_dwordindex_p(page_index, dword_in_page);
    } else {
        return 0;
    }
}
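
/* A concrete picture of the dword (two-word) indexing used above may help.
 * The sketch is illustrative only and assumes 4096-byte cards with 8-byte
 * words; dword_index() is a hypothetical helper, not part of the collector. */
#if 0
#include <assert.h>
static int dword_index(char *page_base, char *native_ptr, int word_bytes)
{
    /* same computation as in_dontmove_nativeptr_p: word offset, then /2 */
    return (int)((native_ptr - page_base) / word_bytes / 2);
}
static void dword_index_example(void)
{
    char page[4096];
    /* a pointer 0x130 bytes into the card is word 38, hence dword 19 */
    assert(dword_index(page, page + 0x130, 8) == 19);
}
#endif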
/* Adjust large bignum and vector objects. This will adjust the
 * allocated region if the size has shrunk, and move unboxed objects
 * into unboxed pages. The pages are not promoted here, and the
 * promoted region is not added to the new_regions; this is really
 * only designed to be called from preserve_pointer(). Shouldn't fail
 * if this is missed, just may delay the moving of objects to unboxed
 * pages, and the freeing of pages. */
static void
maybe_adjust_large_object(lispobj *where)
{
    page_index_t first_page;
    page_index_t next_page;
    sword_t nwords;

    uword_t remaining_bytes;
    uword_t bytes_freed;
    uword_t old_bytes_used;

    int boxed;

    /* Check whether it's a vector or bignum object. */
    switch (widetag_of(where[0])) {
    case SIMPLE_VECTOR_WIDETAG:
        boxed = BOXED_PAGE_FLAG;
        break;
    case BIGNUM_WIDETAG:
    case SIMPLE_BASE_STRING_WIDETAG:
#ifdef SIMPLE_CHARACTER_STRING_WIDETAG
    case SIMPLE_CHARACTER_STRING_WIDETAG:
#endif
    case SIMPLE_BIT_VECTOR_WIDETAG:
    case SIMPLE_ARRAY_NIL_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_FIXNUM_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
    case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
    case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
#endif
    case SIMPLE_ARRAY_FIXNUM_WIDETAG:
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
#endif
    case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
    case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
#ifdef SIMPLE_ARRAY_LONG_FLOAT_WIDETAG
    case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
    case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
    case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
    case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
#endif
        boxed = UNBOXED_PAGE_FLAG;
        break;
    default:
        return;
    }

    /* Find its current size. */
    nwords = (sizetab[widetag_of(where[0])])(where);

    first_page = find_page_index((void *)where);
    gc_assert(first_page >= 0);

    /* Note: Any page write-protection must be removed, else a later
     * scavenge_newspace may incorrectly not scavenge these pages.
     * This would not be necessary if they are added to the new areas,
     * but let's do it for them all (they'll probably be written
     * anyway?). */

    gc_assert(page_starts_contiguous_block_p(first_page));

    next_page = first_page;
    remaining_bytes = nwords*N_WORD_BYTES;
    while (remaining_bytes > GENCGC_CARD_BYTES) {
        gc_assert(page_table[next_page].gen == from_space);
        gc_assert(page_allocated_no_region_p(next_page));
        gc_assert(page_table[next_page].large_object);
        gc_assert(page_table[next_page].scan_start_offset ==
                  npage_bytes(next_page-first_page));
        gc_assert(page_table[next_page].bytes_used == GENCGC_CARD_BYTES);

        page_table[next_page].allocated = boxed;

        /* Shouldn't be write-protected at this stage. Essential that the
         * pages aren't. */
        gc_assert(!page_table[next_page].write_protected);
        remaining_bytes -= GENCGC_CARD_BYTES;
        next_page++;
    }

    /* Now only one page remains, but the object may have shrunk so
     * there may be more unused pages which will be freed. */

    /* Object may have shrunk but shouldn't have grown - check. */
    gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

    page_table[next_page].allocated = boxed;
    gc_assert(page_table[next_page].allocated ==
              page_table[first_page].allocated);

    /* Adjust the bytes_used. */
    old_bytes_used = page_table[next_page].bytes_used;
    page_table[next_page].bytes_used = remaining_bytes;

    bytes_freed = old_bytes_used - remaining_bytes;

    /* Free any remaining pages; needs care. */
    next_page++;
    while ((old_bytes_used == GENCGC_CARD_BYTES) &&
           (page_table[next_page].gen == from_space) &&
           page_allocated_no_region_p(next_page) &&
           page_table[next_page].large_object &&
           (page_table[next_page].scan_start_offset ==
            npage_bytes(next_page - first_page))) {
        /* It checks out OK, free the page. We don't need to bother zeroing
         * pages as this should have been done before shrinking the
         * object. These pages shouldn't be write protected as they
         * should be zero filled. */
        gc_assert(page_table[next_page].write_protected == 0);

        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].allocated = FREE_PAGE_FLAG;
        page_table[next_page].bytes_used = 0;
        bytes_freed += old_bytes_used;
        next_page++;
    }

    if ((bytes_freed > 0) && gencgc_verbose) {
        FSHOW((stderr,
               "/maybe_adjust_large_object() freed %d\n",
               bytes_freed));
    }

    generations[from_space].bytes_allocated -= bytes_freed;
    bytes_allocated -= bytes_freed;
}
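
/* Worked example for the arithmetic above (numbers are made up for
 * illustration and assume 4096-byte cards): a vector that needs
 * 3*4096 + 40 bytes leaves the while loop with remaining_bytes == 40.
 * If its final page still records a full card (old_bytes_used == 4096),
 * then bytes_freed = 4096 - 40 = 4056 on that page, and any following
 * pages that still look like tail pages of the object are freed whole,
 * each adding GENCGC_CARD_BYTES to bytes_freed. */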
/* Why is this restricted to protected objects only?
 * Because the rest of the page has been scavenged already,
 * and since that leaves forwarding pointers in the unprotected
 * areas you cannot scavenge it again until those are gone.
 */
static void
scavenge_pages_with_conservative_pointers_to_them_protected_objects_only()
{
    page_index_t i;
    for (i = 0; i < last_free_page; i++) {
        if (!page_table[i].dontmove_dwords) {
            continue;
        }
        lispobj *begin = page_address(i);
        int dword;

        lispobj *scavme_begin = NULL;
        for (dword = 0; dword < GENCGC_CARD_BYTES / N_WORD_BYTES / 2; dword++) {
            if (in_dontmove_dwordindex_p(i, dword)) {
                if (!scavme_begin) {
                    scavme_begin = begin + dword * 2;
                }
            } else {
                // contiguous area stopped
                if (scavme_begin) {
                    scavenge(scavme_begin, (begin + dword * 2) - scavme_begin);
                    scavme_begin = NULL;
                }
            }
        }
        if (scavme_begin) {
            scavenge(scavme_begin, (begin + dword * 2) - scavme_begin);
        }
    }
}
int verbosefixes = 0;
static void
do_the_wipe()
{
    page_index_t i;
    lispobj *begin;
    int words_wiped = 0;
    int lisp_pointers_wiped = 0;
    int pages_considered = 0;
    int n_pages_cannot_wipe = 0;

    for (i = 0; i < last_free_page; i++) {
        if (!page_table[i].dont_move) {
            continue;
        }
        pages_considered++;
        if (!page_table[i].dontmove_dwords) {
            n_pages_cannot_wipe++;
            continue;
        }
        begin = page_address(i);
        int dword;
        for (dword = 0; dword < GENCGC_CARD_BYTES / N_WORD_BYTES / 2; dword++) {
            if (!in_dontmove_dwordindex_p(i, dword)) {
                if (is_lisp_pointer(*(begin + dword * 2))) {
                    lisp_pointers_wiped++;
                }
                if (is_lisp_pointer(*(begin + dword * 2 + 1))) {
                    lisp_pointers_wiped++;
                }
                *(begin + dword * 2) = wipe_with;
                *(begin + dword * 2 + 1) = wipe_with;
                words_wiped += 2;
            }
        }
        free(page_table[i].dontmove_dwords);
        page_table[i].dontmove_dwords = NULL;

        // move the page to newspace
        generations[new_space].bytes_allocated += page_table[i].bytes_used;
        generations[page_table[i].gen].bytes_allocated -= page_table[i].bytes_used;
        page_table[i].gen = new_space;
    }
    if ((verbosefixes >= 1 && lisp_pointers_wiped > 0) || verbosefixes >= 2) {
        fprintf(stderr, "Cra25a: wiped %d words (%d lisp_pointers) in %d pages, cannot wipe %d pages \n"
                , words_wiped, lisp_pointers_wiped, pages_considered, n_pages_cannot_wipe);
    }
}
static void
set_page_consi_bit(page_index_t pageindex, lispobj *mark_which_pointer)
{
    struct page *page = &page_table[pageindex];

    gc_assert(mark_which_pointer);
    if (page->dontmove_dwords == NULL) {
        const int n_dwords_in_card = GENCGC_CARD_BYTES / N_WORD_BYTES / 2;
        const int malloc_size = sizeof(in_use_marker_t) * n_dwords_in_card;
        page->dontmove_dwords = malloc(malloc_size);
        gc_assert(page->dontmove_dwords);
        bzero(page->dontmove_dwords, malloc_size);
    }

    int size = (sizetab[widetag_of(mark_which_pointer[0])])(mark_which_pointer);
    if (size == 1 &&
        (fixnump(*mark_which_pointer) ||
         is_lisp_pointer(*mark_which_pointer) ||
         lowtag_of(*mark_which_pointer) == 9 ||
         lowtag_of(*mark_which_pointer) == 2)) {
        size = 2;
    }
    // print additional debug info for now.
    if (size % 2 != 0) {
        fprintf(stderr, "WIPE ERROR !dword, size %d, lowtag %d, world 0x%lld\n",
                size,
                lowtag_of(*mark_which_pointer),
                (long long)*mark_which_pointer);
    }
    gc_assert(size % 2 == 0);
    lispobj *begin = page_address(pageindex);
    int begin_dword = (mark_which_pointer - begin) / 2;
    int dword;
    for (dword = begin_dword; dword < begin_dword + size / 2; dword++) {
        page->dontmove_dwords[dword] = 1;
    }
}
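
/* The marking loop above works in dword granularity, so an object of 'size'
 * words marks size/2 consecutive entries.  A pared-down sketch, illustrative
 * only ('marker' stands in for page->dontmove_dwords and is a hypothetical
 * byte map): */
#if 0
static void mark_object_dwords(unsigned char *marker,
                               int first_word_in_page, int size_in_words)
{
    int dword;
    int begin_dword = first_word_in_page / 2;   /* word index -> dword index */
    for (dword = begin_dword; dword < begin_dword + size_in_words / 2; dword++)
        marker[dword] = 1;
    /* e.g. an 8-word object starting at word 6 marks dwords 3, 4, 5 and 6 */
}
#endif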
/* Take a possible pointer to a Lisp object and mark its page in the
 * page_table so that it will not be relocated during a GC.
 *
 * This involves locating the page it points to, then backing up to
 * the start of its region, then marking all pages dont_move from there
 * up to the first page that's not full or has a different generation.
 *
 * It is assumed that all the page static flags have been cleared at
 * the start of a GC.
 *
 * It is also assumed that the current gc_alloc() region has been
 * flushed and the tables updated. */
static void
preserve_pointer(void *addr)
{
    page_index_t addr_page_index = find_page_index(addr);
    page_index_t first_page;
    page_index_t i;
    unsigned int region_allocation;
    lispobj *begin_ptr = NULL;

    if (!valid_conservative_root_p(addr, addr_page_index, &begin_ptr))
        return;

    /* (Now that we know that addr_page_index is in range, it's
     * safe to index into page_table[] with it.) */
    region_allocation = page_table[addr_page_index].allocated;

    /* Find the beginning of the region. Note that there may be
     * objects in the region preceding the one that we were passed a
     * pointer to: if this is the case, we will write-protect all the
     * previous objects' pages too. */

#if 0
    /* I think this'd work just as well, but without the assertions.
     * -dan 2004.01.01 */
    first_page = find_page_index(page_scan_start(addr_page_index))
#else
    first_page = addr_page_index;
    while (!page_starts_contiguous_block_p(first_page)) {
        --first_page;
        /* Do some checks. */
        gc_assert(page_table[first_page].bytes_used == GENCGC_CARD_BYTES);
        gc_assert(page_table[first_page].gen == from_space);
        gc_assert(page_table[first_page].allocated == region_allocation);
    }
#endif

    /* Adjust any large objects before promotion as they won't be
     * copied after promotion. */
    if (page_table[first_page].large_object) {
        maybe_adjust_large_object(page_address(first_page));
        /* It may have moved to unboxed pages. */
        region_allocation = page_table[first_page].allocated;
    }

    /* Now work forward until the end of this contiguous area is found,
     * marking all pages as dont_move. */
    for (i = first_page; ;i++) {
        gc_assert(page_table[i].allocated == region_allocation);

        /* Mark the page static. */
        page_table[i].dont_move = 1;

        /* It is essential that the pages are not write protected as
         * they may have pointers into the old-space which need
         * scavenging. They shouldn't be write protected at this
         * stage. */
        gc_assert(!page_table[i].write_protected);

        /* Check whether this is the last page in this contiguous block. */
        if (page_ends_contiguous_block_p(i, from_space))
            break;
    }

#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    /* Do not do this for multi-page objects. Those pages do not need
     * object wipeout anyway.
     */
    if (i == first_page) {
        /* We need the pointer to the beginning of the object.
         * We might have gotten it above but maybe not, so make sure.
         */
        if (begin_ptr == NULL) {
            possibly_valid_dynamic_space_pointer_s(addr, first_page,
                                                   &begin_ptr);
        }
        set_page_consi_bit(first_page, begin_ptr);
    }
#endif

    /* Check that the page is now static. */
    gc_assert(page_table[addr_page_index].dont_move != 0);
}
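
/* The two sweeps performed by preserve_pointer() above can be reduced to a
 * small sketch: back up to the first page of the contiguous block, then pin
 * every page forward until the block ends.  Illustrative only; the predicates
 * are hypothetical stand-ins for the page-table queries. */
#if 0
static void pin_contiguous_block(long page, char *dont_move,
                                 int (*starts_block)(long),
                                 int (*ends_block)(long))
{
    long first = page;
    while (!starts_block(first))
        first--;                        /* walk back to the block's first page */
    for (;; first++) {
        dont_move[first] = 1;           /* pin the page */
        if (ends_block(first))
            break;                      /* last page of the contiguous block */
    }
}
#endif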
/* If the given page is not write-protected, then scan it for pointers
 * to younger generations or the top temp. generation, if no
 * suspicious pointers are found then the page is write-protected.
 *
 * Care is taken to check for pointers to the current gc_alloc()
 * region if it is a younger generation or the temp. generation. This
 * frees the caller from doing a gc_alloc_update_page_tables(). Actually
 * the gc_alloc_generation does not need to be checked as this is only
 * called from scavenge_generation() when the gc_alloc generation is
 * younger, so it just checks if there is a pointer to the current
 * region.
 *
 * We return 1 if the page was write-protected, else 0. */
static int
update_page_write_prot(page_index_t page)
{
    generation_index_t gen = page_table[page].gen;
    sword_t j;
    int wp_it = 1;
    void **page_addr = (void **)page_address(page);
    sword_t num_words = page_table[page].bytes_used / N_WORD_BYTES;

    /* Shouldn't be a free page. */
    gc_assert(page_allocated_p(page));
    gc_assert(page_table[page].bytes_used != 0);

    /* Skip if it's already write-protected, pinned, or unboxed */
    if (page_table[page].write_protected
        /* FIXME: What's the reason for not write-protecting pinned pages? */
        || page_table[page].dont_move
        || page_unboxed_p(page))
        return (0);

    /* Scan the page for pointers to younger generations or the
     * top temp. generation. */

    for (j = 0; j < num_words; j++) {
        void *ptr = *(page_addr+j);
        page_index_t index;

        /* Check that it's in the dynamic space */
        if (is_lisp_pointer((lispobj)ptr) && (index = find_page_index(ptr)) != -1)
            if (/* Does it point to a younger or the temp. generation? */
                (page_allocated_p(index)
                 && (page_table[index].bytes_used != 0)
                 && ((page_table[index].gen < gen)
                     || (page_table[index].gen == SCRATCH_GENERATION)))

                /* Or does it point within a current gc_alloc() region? */
                || ((boxed_region.start_addr <= ptr)
                    && (ptr <= boxed_region.free_pointer))
                || ((unboxed_region.start_addr <= ptr)
                    && (ptr <= unboxed_region.free_pointer))) {
                wp_it = 0;
                break;
            }
    }

    if (wp_it == 1) {
        /* Write-protect the page. */
        /*FSHOW((stderr, "/write-protecting page %d gen %d\n", page, gen));*/

        os_protect((void *)page_addr,
                   GENCGC_CARD_BYTES,
                   OS_VM_PROT_READ|OS_VM_PROT_EXECUTE);

        /* Note the page as protected in the page tables. */
        page_table[page].write_protected = 1;
    }

    return (wp_it);
}
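
/* The decision above boils down to: a page may be write-protected exactly
 * when no word on it points at a younger (or scratch) generation or into an
 * open allocation region.  A reduced sketch, illustrative only (the predicate
 * is a hypothetical stand-in for the page-table and region checks): */
#if 0
static int page_can_be_protected(unsigned long *words, long nwords,
                                 int (*points_at_younger_or_open)(unsigned long))
{
    long j;
    for (j = 0; j < nwords; j++)
        if (points_at_younger_or_open(words[j]))
            return 0;   /* one suspicious word keeps the page writable */
    return 1;
}
#endif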
/* Scavenge all generations from FROM to TO, inclusive, except for
 * new_space which needs special handling, as new objects may be
 * added which are not checked here - use scavenge_newspace generation.
 *
 * Write-protected pages should not have any pointers to the
 * from_space so do not need scavenging; thus write-protected pages are
 * not always scavenged. There is some code to check that these pages
 * are not written; but to check fully the write-protected pages need
 * to be scavenged by disabling the code to skip them.
 *
 * Under the current scheme when a generation is GCed the younger
 * generations will be empty. So, when a generation is being GCed it
 * is only necessary to scavenge the older generations for pointers
 * not the younger. So a page that does not have pointers to younger
 * generations does not need to be scavenged.
 *
 * The write-protection can be used to note pages that don't have
 * pointers to younger pages. But pages can be written without having
 * pointers to younger generations. After the pages are scavenged here
 * they can be scanned for pointers to younger generations and if
 * there are none the page can be write-protected.
 *
 * One complication is when the newspace is the top temp. generation.
 *
 * Enabling SC_GEN_CK scavenges the write-protected pages and checks
 * that none were written, which they shouldn't be as they should have
 * no pointers to younger generations. This breaks down for weak
 * pointers as the objects contain a link to the next and are written
 * if a weak pointer is scavenged. Still it's a useful check. */
static void
scavenge_generations(generation_index_t from, generation_index_t to)
{
    page_index_t i;
    page_index_t num_wp = 0;

#if SC_GEN_CK
    /* Clear the write_protected_cleared flags on all pages. */
    for (i = 0; i < page_table_pages; i++)
        page_table[i].write_protected_cleared = 0;
#endif

    for (i = 0; i < last_free_page; i++) {
        generation_index_t generation = page_table[i].gen;
        if (page_boxed_p(i)
            && (page_table[i].bytes_used != 0)
            && (generation != new_space)
            && (generation >= from)
            && (generation <= to)) {
            page_index_t last_page,j;
            int write_protected=1;

            /* This should be the start of a region */
            gc_assert(page_starts_contiguous_block_p(i));

            /* Now work forward until the end of the region */
            for (last_page = i; ; last_page++) {
                write_protected =
                    write_protected && page_table[last_page].write_protected;
                if (page_ends_contiguous_block_p(last_page, generation))
                    break;
            }
            if (!write_protected) {
                scavenge(page_address(i),
                         ((uword_t)(page_table[last_page].bytes_used
                                    + npage_bytes(last_page-i)))
                         /N_WORD_BYTES);

                /* Now scan the pages and write protect those that
                 * don't have pointers to younger generations. */
                if (enable_page_protection) {
                    for (j = i; j <= last_page; j++) {
                        num_wp += update_page_write_prot(j);
                    }
                }
                if ((gencgc_verbose > 1) && (num_wp != 0)) {
                    FSHOW((stderr,
                           "/write protected %d pages within generation %d\n",
                           num_wp, generation));
                }
            }
            i = last_page;
        }
    }

#if SC_GEN_CK
    /* Check that none of the write_protected pages in this generation
     * have been written to. */
    for (i = 0; i < page_table_pages; i++) {
        if (page_allocated_p(i)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)
            && (page_table[i].write_protected_cleared != 0)) {
            FSHOW((stderr, "/scavenge_generation() %d\n", generation));
            FSHOW((stderr,
                   "/page bytes_used=%d scan_start_offset=%lu dont_move=%d\n",
                   page_table[i].bytes_used,
                   page_table[i].scan_start_offset,
                   page_table[i].dont_move));
            lose("write to protected page %d in scavenge_generation()\n", i);
        }
    }
#endif
}
/* Scavenge a newspace generation. As it is scavenged new objects may
 * be allocated to it; these will also need to be scavenged. This
 * repeats until there are no more objects unscavenged in the
 * newspace generation.
 *
 * To help improve the efficiency, areas written are recorded by
 * gc_alloc() and only these scavenged. Sometimes a little more will be
 * scavenged, but this causes no harm. An easy check is done that the
 * scavenged bytes equals the number allocated in the previous
 * scavenge.
 *
 * Write-protected pages are not scanned except if they are marked
 * dont_move in which case they may have been promoted and still have
 * pointers to the from space.
 *
 * Write-protected pages could potentially be written by alloc however
 * to avoid having to handle re-scavenging of write-protected pages
 * gc_alloc() does not write to write-protected pages.
 *
 * New areas of objects allocated are recorded alternatively in the two
 * new_areas arrays below. */
static struct new_area new_areas_1[NUM_NEW_AREAS];
static struct new_area new_areas_2[NUM_NEW_AREAS];
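
/* The two arrays above are used as a ping-pong pair by
 * scavenge_newspace_generation(): one collects the areas written by
 * gc_alloc() while the batch recorded in the other is being scavenged, then
 * the roles swap until a pass records nothing new.  A reduced sketch,
 * illustrative only ('scavenge_areas' is a hypothetical stand-in that
 * returns how many new areas the pass itself produced): */
#if 0
#define NAREAS 512
struct area { long page, offset, size; };
static long scavenge_areas(struct area *in, long n, struct area *out);

static void drain_new_areas(void)
{
    static struct area buf_a[NAREAS], buf_b[NAREAS];
    struct area *current = buf_a, *previous;
    long n = scavenge_areas(NULL, 0, current);       /* the first full scan */

    while (n > 0) {
        previous = current;                          /* swap the two buffers */
        current = (previous == buf_a) ? buf_b : buf_a;
        n = scavenge_areas(previous, n, current);    /* may record new areas */
    }
}
#endif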
/* Do one full scan of the new space generation. This is not enough to
 * complete the job as new objects may be added to the generation in
 * the process which are not scavenged. */
static void
scavenge_newspace_generation_one_scan(generation_index_t generation)
{
    page_index_t i;

    FSHOW((stderr,
           "/starting one full scan of newspace generation %d\n",
           generation));
    for (i = 0; i < last_free_page; i++) {
        /* Note that this skips over open regions when it encounters them. */
        if (page_boxed_p(i)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)
            && ((page_table[i].write_protected == 0)
                /* (This may be redundant as write_protected is now
                 * cleared before promotion.) */
                || (page_table[i].dont_move == 1))) {
            page_index_t last_page;
            int all_wp=1;

            /* The scavenge will start at the scan_start_offset of
             * page i.
             *
             * We need to find the full extent of this contiguous
             * block in case objects span pages.
             *
             * Now work forward until the end of this contiguous area
             * is found. A small area is preferred as there is a
             * better chance of its pages being write-protected. */
            for (last_page = i; ;last_page++) {
                /* If all pages are write-protected and movable,
                 * then no need to scavenge */
                all_wp=all_wp && page_table[last_page].write_protected &&
                    !page_table[last_page].dont_move;

                /* Check whether this is the last page in this
                 * contiguous block */
                if (page_ends_contiguous_block_p(last_page, generation))
                    break;
            }

            /* Do a limited check for write-protected pages. */
            if (!all_wp) {
                sword_t nwords = (((uword_t)
                                   (page_table[last_page].bytes_used
                                    + npage_bytes(last_page-i)
                                    + page_table[i].scan_start_offset))
                                  / N_WORD_BYTES);
                new_areas_ignore_page = last_page;

                scavenge(page_scan_start(i), nwords);
            }
            i = last_page;
        }
    }
    FSHOW((stderr,
           "/done with one full scan of newspace generation %d\n",
           generation));
}
/* Do a complete scavenge of the newspace generation. */
static void
scavenge_newspace_generation(generation_index_t generation)
{
    size_t i;

    /* the new_areas array currently being written to by gc_alloc() */
    struct new_area (*current_new_areas)[] = &new_areas_1;
    size_t current_new_areas_index;

    /* the new_areas created by the previous scavenge cycle */
    struct new_area (*previous_new_areas)[] = NULL;
    size_t previous_new_areas_index;

    /* Flush the current regions updating the tables. */
    gc_alloc_update_all_page_tables();

    /* Turn on the recording of new areas by gc_alloc(). */
    new_areas = current_new_areas;
    new_areas_index = 0;

    /* Don't need to record new areas that get scavenged anyway during
     * scavenge_newspace_generation_one_scan. */
    record_new_objects = 1;

    /* Start with a full scavenge. */
    scavenge_newspace_generation_one_scan(generation);

    /* Record all new areas now. */
    record_new_objects = 2;

    /* Give a chance to weak hash tables to make other objects live.
     * FIXME: The algorithm implemented here for weak hash table gcing
     * is O(W^2+N) as Bruno Haible warns in
     * http://www.haible.de/bruno/papers/cs/weak/WeakDatastructures-writeup.html
     * see "Implementation 2". */
    scav_weak_hash_tables();

    /* Flush the current regions updating the tables. */
    gc_alloc_update_all_page_tables();

    /* Grab new_areas_index. */
    current_new_areas_index = new_areas_index;

    /*FSHOW((stderr,
             "The first scan is finished; current_new_areas_index=%d.\n",
             current_new_areas_index));*/

    while (current_new_areas_index > 0) {
        /* Move the current to the previous new areas */
        previous_new_areas = current_new_areas;
        previous_new_areas_index = current_new_areas_index;

        /* Scavenge all the areas in previous new areas. Any new areas
         * allocated are saved in current_new_areas. */

        /* Allocate an array for current_new_areas; alternating between
         * new_areas_1 and 2 */
        if (previous_new_areas == &new_areas_1)
            current_new_areas = &new_areas_2;
        else
            current_new_areas = &new_areas_1;

        /* Set up for gc_alloc(). */
        new_areas = current_new_areas;
        new_areas_index = 0;

        /* Check whether previous_new_areas had overflowed. */
        if (previous_new_areas_index >= NUM_NEW_AREAS) {

            /* New areas of objects allocated have been lost so need to do a
             * full scan to be sure! If this becomes a problem try
             * increasing NUM_NEW_AREAS. */
            if (gencgc_verbose) {
                SHOW("new_areas overflow, doing full scavenge");
            }

            /* Don't need to record new areas that get scavenged
             * anyway during scavenge_newspace_generation_one_scan. */
            record_new_objects = 1;

            scavenge_newspace_generation_one_scan(generation);

            /* Record all new areas now. */
            record_new_objects = 2;

            scav_weak_hash_tables();

            /* Flush the current regions updating the tables. */
            gc_alloc_update_all_page_tables();

        } else {

            /* Work through previous_new_areas. */
            for (i = 0; i < previous_new_areas_index; i++) {
                page_index_t page = (*previous_new_areas)[i].page;
                size_t offset = (*previous_new_areas)[i].offset;
                size_t size = (*previous_new_areas)[i].size / N_WORD_BYTES;
                gc_assert((*previous_new_areas)[i].size % N_WORD_BYTES == 0);
                scavenge(page_address(page)+offset, size);
            }

            scav_weak_hash_tables();

            /* Flush the current regions updating the tables. */
            gc_alloc_update_all_page_tables();
        }

        current_new_areas_index = new_areas_index;

        /*FSHOW((stderr,
                 "The re-scan has finished; current_new_areas_index=%d.\n",
                 current_new_areas_index));*/
    }

    /* Turn off recording of areas allocated by gc_alloc(). */
    record_new_objects = 0;

#if SC_NS_GEN_CK
    {
        page_index_t i;
        /* Check that none of the write_protected pages in this generation
         * have been written to. */
        for (i = 0; i < page_table_pages; i++) {
            if (page_allocated_p(i)
                && (page_table[i].bytes_used != 0)
                && (page_table[i].gen == generation)
                && (page_table[i].write_protected_cleared != 0)
                && (page_table[i].dont_move == 0)) {
                lose("write protected page %d written to in scavenge_newspace_generation\ngeneration=%d dont_move=%d\n",
                     i, generation, page_table[i].dont_move);
            }
        }
    }
#endif
}
/* Un-write-protect all the pages in from_space. This is done at the
 * start of a GC else there may be many page faults while scavenging
 * the newspace (I've seen this drive the system time to 99%). These
 * pages would need to be unprotected anyway before unmapping in
 * free_oldspace; not sure what effect this has on paging. */
static void
unprotect_oldspace(void)
{
    page_index_t i;
    void *region_addr = 0;
    void *page_addr = 0;
    uword_t region_bytes = 0;

    for (i = 0; i < last_free_page; i++) {
        if (page_allocated_p(i)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == from_space)) {

            /* Remove any write-protection. We should be able to rely
             * on the write-protect flag to avoid redundant calls. */
            if (page_table[i].write_protected) {
                page_table[i].write_protected = 0;
                page_addr = page_address(i);
                if (!region_addr) {
                    /* First region. */
                    region_addr = page_addr;
                    region_bytes = GENCGC_CARD_BYTES;
                } else if (region_addr + region_bytes == page_addr) {
                    /* Region continues. */
                    region_bytes += GENCGC_CARD_BYTES;
                } else {
                    /* Unprotect previous region. */
                    os_protect(region_addr, region_bytes, OS_VM_PROT_ALL);
                    /* First page in new region. */
                    region_addr = page_addr;
                    region_bytes = GENCGC_CARD_BYTES;
                }
            }
        }
    }
    if (region_addr) {
        /* Unprotect last region. */
        os_protect(region_addr, region_bytes, OS_VM_PROT_ALL);
    }
}
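
/* The run-coalescing above turns per-page unprotect requests into one
 * os_protect() call per contiguous run of pages.  The same pattern in
 * isolation, illustrative only (the callback and page size are parameters of
 * the sketch, not of the collector): */
#if 0
static void unprotect_runs(char **pages, long npages, long page_bytes,
                           void (*unprotect)(char *addr, long bytes))
{
    char *run_addr = 0;
    long run_bytes = 0;
    long i;
    for (i = 0; i < npages; i++) {
        if (!run_addr) {                          /* first page of first run */
            run_addr = pages[i];
            run_bytes = page_bytes;
        } else if (run_addr + run_bytes == pages[i]) {
            run_bytes += page_bytes;              /* page extends current run */
        } else {
            unprotect(run_addr, run_bytes);       /* flush the finished run */
            run_addr = pages[i];
            run_bytes = page_bytes;
        }
    }
    if (run_addr)
        unprotect(run_addr, run_bytes);           /* flush the final run */
}
#endif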
/* Work through all the pages and free any in from_space. This
 * assumes that all objects have been copied or promoted to an older
 * generation. Bytes_allocated and the generation bytes_allocated
 * counter are updated. The number of bytes freed is returned. */
static uword_t
free_oldspace(void)
{
    uword_t bytes_freed = 0;
    page_index_t first_page, last_page;

    first_page = 0;

    do {
        /* Find a first page for the next region of pages. */
        while ((first_page < last_free_page)
               && (page_free_p(first_page)
                   || (page_table[first_page].bytes_used == 0)
                   || (page_table[first_page].gen != from_space)))
            first_page++;

        if (first_page >= last_free_page)
            break;

        /* Find the last page of this region. */
        last_page = first_page;

        do {
            /* Free the page. */
            bytes_freed += page_table[last_page].bytes_used;
            generations[page_table[last_page].gen].bytes_allocated -=
                page_table[last_page].bytes_used;
            page_table[last_page].allocated = FREE_PAGE_FLAG;
            page_table[last_page].bytes_used = 0;
            /* Should already be unprotected by unprotect_oldspace(). */
            gc_assert(!page_table[last_page].write_protected);
            last_page++;
        }
        while ((last_page < last_free_page)
               && page_allocated_p(last_page)
               && (page_table[last_page].bytes_used != 0)
               && (page_table[last_page].gen == from_space));

#ifdef READ_PROTECT_FREE_PAGES
        os_protect(page_address(first_page),
                   npage_bytes(last_page-first_page),
                   OS_VM_PROT_NONE);
#endif
        first_page = last_page;
    } while (first_page < last_free_page);

    bytes_allocated -= bytes_freed;
    return bytes_freed;
}
/* Print some information about a pointer at the given address. */
static void
print_ptr(lispobj *addr)
{
    /* If addr is in the dynamic space then print the page information. */
    page_index_t pi1 = find_page_index((void*)addr);

    if (pi1 != -1)
        fprintf(stderr," %p: page %d alloc %d gen %d bytes_used %d offset %lu dont_move %d\n",
                addr,
                pi1,
                page_table[pi1].allocated,
                page_table[pi1].gen,
                page_table[pi1].bytes_used,
                page_table[pi1].scan_start_offset,
                page_table[pi1].dont_move);
    fprintf(stderr," %x %x %x %x (%x) %x %x %x %x\n",
            *(addr-4),
            *(addr-3),
            *(addr-2),
            *(addr-1),
            *(addr-0),
            *(addr+1),
            *(addr+2),
            *(addr+3),
            *(addr+4));
}
static int
is_in_stack_space(lispobj ptr)
{
    /* For space verification: Pointers can be valid if they point
     * to a thread stack space. This would be faster if the thread
     * structures had page-table entries as if they were part of
     * the heap space. */
    struct thread *th;
    for_each_thread(th) {
        if ((th->control_stack_start <= (lispobj *)ptr) &&
            (th->control_stack_end >= (lispobj *)ptr)) {
            return 1;
        }
    }
    return 0;
}
static void
verify_space(lispobj *start, size_t words)
{
    int is_in_dynamic_space = (find_page_index((void*)start) != -1);
    int is_in_readonly_space =
        (READ_ONLY_SPACE_START <= (uword_t)start &&
         (uword_t)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));

    while (words > 0) {
        size_t count = 1;
        lispobj thing = *(lispobj*)start;

        if (is_lisp_pointer(thing)) {
            page_index_t page_index = find_page_index((void*)thing);
            sword_t to_readonly_space =
                (READ_ONLY_SPACE_START <= thing &&
                 thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
            sword_t to_static_space =
                (STATIC_SPACE_START <= thing &&
                 thing < SymbolValue(STATIC_SPACE_FREE_POINTER,0));

            /* Does it point to the dynamic space? */
            if (page_index != -1) {
                /* If it's within the dynamic space it should point to a used
                 * page. XX Could check the offset too. */
                if (page_allocated_p(page_index)
                    && (page_table[page_index].bytes_used == 0))
                    lose ("Ptr %p @ %p sees free page.\n", thing, start);
                /* Check that it doesn't point to a forwarding pointer! */
                if (*((lispobj *)native_pointer(thing)) == 0x01) {
                    lose("Ptr %p @ %p sees forwarding ptr.\n", thing, start);
                }
                /* Check that it's not in the RO space as it would then be a
                 * pointer from the RO to the dynamic space. */
                if (is_in_readonly_space) {
                    lose("ptr to dynamic space %p from RO space %x\n",
                         thing, start);
                }
                /* Does it point to a plausible object? This check slows
                 * it down a lot (so it's commented out).
                 *
                 * "a lot" is serious: it ate 50 minutes cpu time on
                 * my duron 950 before I came back from lunch and
                 * killed it.
                 *
                 * FIXME: Add a variable to enable this
                 * dynamically. */
                /*
                if (!possibly_valid_dynamic_space_pointer_s((lispobj *)thing, page_index, NULL)) {
                    lose("ptr %p to invalid object %p\n", thing, start);
                }
                */
            } else {
                extern void funcallable_instance_tramp;
                /* Verify that it points to another valid space. */
                if (!to_readonly_space && !to_static_space
                    && (thing != (lispobj)&funcallable_instance_tramp)
                    && !is_in_stack_space(thing)) {
                    lose("Ptr %p @ %p sees junk.\n", thing, start);
                }
            }
        } else {
            if (!(fixnump(thing))) {
                /* skip fixnums */
                switch(widetag_of(*start)) {

                /* boxed objects */
                case SIMPLE_VECTOR_WIDETAG:
                case RATIO_WIDETAG:
                case COMPLEX_WIDETAG:
                case SIMPLE_ARRAY_WIDETAG:
                case COMPLEX_BASE_STRING_WIDETAG:
#ifdef COMPLEX_CHARACTER_STRING_WIDETAG
                case COMPLEX_CHARACTER_STRING_WIDETAG:
#endif
                case COMPLEX_VECTOR_NIL_WIDETAG:
                case COMPLEX_BIT_VECTOR_WIDETAG:
                case COMPLEX_VECTOR_WIDETAG:
                case COMPLEX_ARRAY_WIDETAG:
                case CLOSURE_HEADER_WIDETAG:
                case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
                case VALUE_CELL_HEADER_WIDETAG:
                case SYMBOL_HEADER_WIDETAG:
                case CHARACTER_WIDETAG:
#if N_WORD_BITS == 64
                case SINGLE_FLOAT_WIDETAG:
#endif
                case UNBOUND_MARKER_WIDETAG:
                case FDEFN_WIDETAG:
                    count = 1;
                    break;

                case INSTANCE_HEADER_WIDETAG:
                    {
                        lispobj nuntagged;
                        sword_t ntotal = HeaderValue(thing);
                        lispobj layout = ((struct instance *)start)->slots[0];
                        if (!layout) {
                            count = 1;
                            break;
                        }
                        nuntagged = ((struct layout *)
                                     native_pointer(layout))->n_untagged_slots;
                        verify_space(start + 1,
                                     ntotal - fixnum_value(nuntagged));
                        count = ntotal + 1;
                        break;
                    }
                case CODE_HEADER_WIDETAG:
                    {
                        lispobj object = *start;
                        struct code *code;
                        sword_t nheader_words, ncode_words, nwords;
                        lispobj fheaderl;
                        struct simple_fun *fheaderp;

                        code = (struct code *) start;

                        /* Check that it's not in the dynamic space.
                         * FIXME: Isn't it supposed to be OK for code
                         * objects to be in the dynamic space these days? */
                        /* It is for byte compiled code, but there's
                         * no byte compilation in SBCL anymore. */
                        if (is_in_dynamic_space
                            /* Only when enabled */
                            && verify_dynamic_code_check) {
                            FSHOW((stderr,
                                   "/code object at %p in the dynamic space\n",
                                   start));
                        }

                        ncode_words = fixnum_word_value(code->code_size);
                        nheader_words = HeaderValue(object);
                        nwords = ncode_words + nheader_words;
                        nwords = CEILING(nwords, 2);
                        /* Scavenge the boxed section of the code data block */
                        verify_space(start + 1, nheader_words - 1);

                        /* Scavenge the boxed section of each function
                         * object in the code data block. */
                        fheaderl = code->entry_points;
                        while (fheaderl != NIL) {
                            fheaderp =
                                (struct simple_fun *) native_pointer(fheaderl);
                            gc_assert(widetag_of(fheaderp->header) ==
                                      SIMPLE_FUN_HEADER_WIDETAG);
                            verify_space(&fheaderp->name, 1);
                            verify_space(&fheaderp->arglist, 1);
                            verify_space(&fheaderp->type, 1);
                            fheaderl = fheaderp->next;
                        }
                        count = nwords;
                        break;
                    }

                /* unboxed objects */
                case BIGNUM_WIDETAG:
#if N_WORD_BITS != 64
                case SINGLE_FLOAT_WIDETAG:
#endif
                case DOUBLE_FLOAT_WIDETAG:
#ifdef COMPLEX_LONG_FLOAT_WIDETAG
                case LONG_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_SINGLE_FLOAT_WIDETAG
                case COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_DOUBLE_FLOAT_WIDETAG
                case COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_LONG_FLOAT_WIDETAG
                case COMPLEX_LONG_FLOAT_WIDETAG:
#endif
#ifdef SIMD_PACK_WIDETAG
                case SIMD_PACK_WIDETAG:
#endif
                case SIMPLE_BASE_STRING_WIDETAG:
#ifdef SIMPLE_CHARACTER_STRING_WIDETAG
                case SIMPLE_CHARACTER_STRING_WIDETAG:
#endif
                case SIMPLE_BIT_VECTOR_WIDETAG:
                case SIMPLE_ARRAY_NIL_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_FIXNUM_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
                case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
                case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
#endif
                case SIMPLE_ARRAY_FIXNUM_WIDETAG:
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
#endif
                case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
                case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
                case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
                case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
                case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
                case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
#endif
                case SAP_WIDETAG:
                case WEAK_POINTER_WIDETAG:
#ifdef NO_TLS_VALUE_MARKER_WIDETAG
                case NO_TLS_VALUE_MARKER_WIDETAG:
#endif
                    count = (sizetab[widetag_of(*start)])(start);
                    break;

                default:
                    lose("Unhandled widetag %p at %p\n",
                         widetag_of(*start), start);
                }
            }
        }
        start += count;
        words -= count;
    }
}
static void
verify_gc(void)
{
    /* FIXME: It would be nice to make names consistent so that
     * foo_size meant size *in* *bytes* instead of size in some
     * arbitrary units. (Yes, this caused a bug, how did you guess?:-)
     * Some counts of lispobjs are called foo_count; it might be good
     * to grep for all foo_size and rename the appropriate ones to
     * foo_count. */
    sword_t read_only_space_size =
        (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0)
        - (lispobj*)READ_ONLY_SPACE_START;
    sword_t static_space_size =
        (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER,0)
        - (lispobj*)STATIC_SPACE_START;
    struct thread *th;
    for_each_thread(th) {
        sword_t binding_stack_size =
            (lispobj*)get_binding_stack_pointer(th)
            - (lispobj*)th->binding_stack_start;
        verify_space(th->binding_stack_start, binding_stack_size);
    }
    verify_space((lispobj*)READ_ONLY_SPACE_START, read_only_space_size);
    verify_space((lispobj*)STATIC_SPACE_START, static_space_size);
}
static void
verify_generation(generation_index_t generation)
{
    page_index_t i;

    for (i = 0; i < last_free_page; i++) {
        if (page_allocated_p(i)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)) {
            page_index_t last_page;

            /* This should be the start of a contiguous block */
            gc_assert(page_starts_contiguous_block_p(i));

            /* Need to find the full extent of this contiguous block in case
               objects span pages. */

            /* Now work forward until the end of this contiguous area is
               found. */
            for (last_page = i; ;last_page++)
                /* Check whether this is the last page in this contiguous
                 * block. */
                if (page_ends_contiguous_block_p(last_page, generation))
                    break;

            verify_space(page_address(i),
                         ((uword_t)
                          (page_table[last_page].bytes_used
                           + npage_bytes(last_page-i)))
                         / N_WORD_BYTES);
            i = last_page;
        }
    }
}
/* Check that all the free space is zero filled. */
static void
verify_zero_fill(void)
{
    page_index_t page;

    for (page = 0; page < last_free_page; page++) {
        if (page_free_p(page)) {
            /* The whole page should be zero filled. */
            sword_t *start_addr = (sword_t *)page_address(page);
            sword_t size = 1024;
            sword_t i;
            for (i = 0; i < size; i++) {
                if (start_addr[i] != 0) {
                    lose("free page not zero at %x\n", start_addr + i);
                }
            }
        } else {
            sword_t free_bytes = GENCGC_CARD_BYTES - page_table[page].bytes_used;
            if (free_bytes > 0) {
                sword_t *start_addr = (sword_t *)((uword_t)page_address(page)
                                          + page_table[page].bytes_used);
                sword_t size = free_bytes / N_WORD_BYTES;
                sword_t i;
                for (i = 0; i < size; i++) {
                    if (start_addr[i] != 0) {
                        lose("free region not zero at %x\n", start_addr + i);
                    }
                }
            }
        }
    }
}

/* External entry point for verify_zero_fill */
void
gencgc_verify_zero_fill(void)
{
    /* Flush the alloc regions updating the tables. */
    gc_alloc_update_all_page_tables();
    SHOW("verifying zero fill");
    verify_zero_fill();
}
static void
verify_dynamic_space(void)
{
    generation_index_t i;

    for (i = 0; i <= HIGHEST_NORMAL_GENERATION; i++)
        verify_generation(i);

    if (gencgc_enable_verify_zero_fill)
        verify_zero_fill();
}
/* Write-protect all the dynamic boxed pages in the given generation. */
static void
write_protect_generation_pages(generation_index_t generation)
{
    page_index_t start;

    gc_assert(generation < SCRATCH_GENERATION);

    for (start = 0; start < last_free_page; start++) {
        if (protect_page_p(start, generation)) {
            void *page_start;
            page_index_t last;

            /* Note the page as protected in the page tables. */
            page_table[start].write_protected = 1;

            for (last = start + 1; last < last_free_page; last++) {
                if (!protect_page_p(last, generation))
                    break;
                page_table[last].write_protected = 1;
            }

            page_start = (void *)page_address(start);

            os_protect(page_start,
                       npage_bytes(last - start),
                       OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);

            start = last;
        }
    }

    if (gencgc_verbose > 1) {
        FSHOW((stderr,
               "/write protected %d of %d pages in generation %d\n",
               count_write_protect_generation_pages(generation),
               count_generation_pages(generation),
               generation));
    }
}
#if defined(LISP_FEATURE_SB_THREAD) && (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
static void
preserve_context_registers (os_context_t *c)
{
    void **ptr;
    /* On Darwin the signal context isn't a contiguous block of memory,
     * so just preserve_pointering its contents won't be sufficient.
     */
#if defined(LISP_FEATURE_DARWIN)||defined(LISP_FEATURE_WIN32)
#if defined LISP_FEATURE_X86
    preserve_pointer((void*)*os_context_register_addr(c,reg_EAX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_ECX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_EDX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_EBX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_ESI));
    preserve_pointer((void*)*os_context_register_addr(c,reg_EDI));
    preserve_pointer((void*)*os_context_pc_addr(c));
#elif defined LISP_FEATURE_X86_64
    preserve_pointer((void*)*os_context_register_addr(c,reg_RAX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_RCX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_RDX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_RBX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_RSI));
    preserve_pointer((void*)*os_context_register_addr(c,reg_RDI));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R8));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R9));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R10));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R11));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R12));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R13));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R14));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R15));
    preserve_pointer((void*)*os_context_pc_addr(c));
#else
    #error "preserve_context_registers needs to be tweaked for non-x86 Darwin"
#endif
#endif
#if !defined(LISP_FEATURE_WIN32)
    for(ptr = ((void **)(c+1))-1; ptr>=(void **)c; ptr--) {
        preserve_pointer(*ptr);
    }
#endif
}
#endif
static void
move_pinned_pages_to_newspace()
{
    page_index_t i;

    /* scavenge() will evacuate all oldspace pages, but no newspace
     * pages. Pinned pages are precisely those pages which must not
     * be evacuated, so move them to newspace directly. */

    for (i = 0; i < last_free_page; i++) {
        if (page_table[i].dont_move &&
            /* dont_move is cleared lazily, so validate the space as well. */
            page_table[i].gen == from_space) {
            if (page_table[i].dontmove_dwords && do_wipe_p) {
                // do not move to newspace after all, this will be word-wiped
                continue;
            }
            page_table[i].gen = new_space;
            /* And since we're moving the pages wholesale, also adjust
             * the generation allocation counters. */
            generations[new_space].bytes_allocated += page_table[i].bytes_used;
            generations[from_space].bytes_allocated -= page_table[i].bytes_used;
        }
    }
}
3559 /* Garbage collect a generation. If raise is 0 then the remains of the
3560 * generation are not raised to the next generation. */
3562 garbage_collect_generation(generation_index_t generation
, int raise
)
3564 uword_t bytes_freed
;
3566 uword_t static_space_size
;
3569 gc_assert(generation
<= HIGHEST_NORMAL_GENERATION
);
3571 /* The oldest generation can't be raised. */
3572 gc_assert((generation
!= HIGHEST_NORMAL_GENERATION
) || (raise
== 0));
3574 /* Check if weak hash tables were processed in the previous GC. */
3575 gc_assert(weak_hash_tables
== NULL
);
3577 /* Initialize the weak pointer list. */
3578 weak_pointers
= NULL
;
3580 /* When a generation is not being raised it is transported to a
3581 * temporary generation (NUM_GENERATIONS), and lowered when
3582 * done. Set up this new generation. There should be no pages
3583 * allocated to it yet. */
3585 gc_assert(generations
[SCRATCH_GENERATION
].bytes_allocated
== 0);
3588 /* Set the global src and dest. generations */
3589 from_space
= generation
;
3591 new_space
= generation
+1;
3593 new_space
= SCRATCH_GENERATION
;
3595 /* Change to a new space for allocation, resetting the alloc_start_page */
3596 gc_alloc_generation
= new_space
;
3597 generations
[new_space
].alloc_start_page
= 0;
3598 generations
[new_space
].alloc_unboxed_start_page
= 0;
3599 generations
[new_space
].alloc_large_start_page
= 0;
3600 generations
[new_space
].alloc_large_unboxed_start_page
= 0;
3602 /* Before any pointers are preserved, the dont_move flags on the
3603 * pages need to be cleared. */
3604 for (i
= 0; i
< last_free_page
; i
++)
3605 if(page_table
[i
].gen
==from_space
) {
3606 page_table
[i
].dont_move
= 0;
3607 gc_assert(page_table
[i
].dontmove_dwords
== NULL
);
3610 /* Un-write-protect the old-space pages. This is essential for the
3611 * promoted pages as they may contain pointers into the old-space
3612 * which need to be scavenged. It also helps avoid unnecessary page
3613 * faults as forwarding pointers are written into them. They need to
3614 * be un-protected anyway before unmapping later. */
3615 unprotect_oldspace();
    /* Scavenge the stacks' conservative roots. */

    /* there are potentially two stacks for each thread: the main
     * stack, which may contain Lisp pointers, and the alternate stack.
     * We don't ever run Lisp code on the altstack, but it may
     * host a sigcontext with lisp objects in it */

    /* what we need to do: (1) find the stack pointer for the main
     * stack; scavenge it (2) find the interrupt context on the
     * alternate stack that might contain lisp values, and scavenge
     * that */

    /* we assume that none of the preceding applies to the thread that
     * initiates GC. If you ever call GC from inside an altstack
     * handler, you will lose. */

#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    /* And if we're saving a core, there's no point in being conservative. */
    if (conservative_stack) {
        for_each_thread(th) {
            void **ptr;
            void **esp=(void **)-1;
            if (th->state == STATE_DEAD)
                continue;
# if defined(LISP_FEATURE_SB_SAFEPOINT)
            /* Conservative collect_garbage is always invoked with a
             * foreign C call or an interrupt handler on top of every
             * existing thread, so the stored SP in each thread
             * structure is valid, no matter which thread we are looking
             * at. For threads that were running Lisp code, the pitstop
             * and edge functions maintain this value within the
             * interrupt or exception handler. */
            esp = os_get_csp(th);
            assert_on_stack(th, esp);

            /* In addition to pointers on the stack, also preserve the
             * return PC, the only value from the context that we need
             * in addition to the SP. The return PC gets saved by the
             * foreign call wrapper, and removed from the control stack
             * into a register. */
            preserve_pointer(th->pc_around_foreign_call);

            /* And on platforms with interrupts: scavenge ctx registers. */

            /* Disabled on Windows, because it does not have an explicit
             * stack of `interrupt_contexts'. The reported CSP has been
             * chosen so that the current context on the stack is
             * covered by the stack scan. See also set_csp_from_context(). */
# ifndef LISP_FEATURE_WIN32
            if (th != arch_os_get_current_thread()) {
                long k = fixnum_value(
                    SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
                while (k > 0)
                    preserve_context_registers(th->interrupt_contexts[--k]);
            }
# endif
# elif defined(LISP_FEATURE_SB_THREAD)
            sword_t i,free;
            if(th==arch_os_get_current_thread()) {
                /* Somebody is going to burn in hell for this, but casting
                 * it in two steps shuts gcc up about strict aliasing. */
                esp = (void **)((void *)&raise);
            } else {
                void **esp1;
                free=fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
                for(i=free-1;i>=0;i--) {
                    os_context_t *c=th->interrupt_contexts[i];
                    esp1 = (void **) *os_context_register_addr(c,reg_SP);
                    if (esp1>=(void **)th->control_stack_start &&
                        esp1<(void **)th->control_stack_end) {
                        if(esp1<esp) esp=esp1;
                        preserve_context_registers(c);
                    }
                }
            }
# else
            esp = (void **)((void *)&raise);
# endif
            if (!esp || esp == (void*) -1)
                lose("garbage_collect: no SP known for thread %x (OS %x)",
                     th, th->os_thread);
            for (ptr = ((void **)th->control_stack_end)-1; ptr >= esp; ptr--) {
                preserve_pointer(*ptr);
            }
        }
    }
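    /* For illustration only: the essence of the conservative scan above is
     * to treat every word-aligned value between the stack pointer and the
     * stack base as if it might be a Lisp pointer, e.g.:
     *
     *   // hypothetical sketch; page_for() and pin_page() stand in for
     *   // find_page_index() and the dont_move machinery
     *   static void conservative_scan(void **low, void **high) {
     *       void **p;
     *       for (p = high - 1; p >= low; p--) {
     *           long page = page_for(*p);   // -1 when outside dynamic space
     *           if (page >= 0)
     *               pin_page(page);         // object may not be moved
     *       }
     *   }
     *
     * False positives merely pin a page unnecessarily; false negatives
     * cannot happen because every word is examined. */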
#else
    /* Non-x86oid systems don't have "conservative roots" as such, but
     * the same mechanism is used for objects pinned for use by alien
     * code. */
    for_each_thread(th) {
        lispobj pin_list = SymbolTlValue(PINNED_OBJECTS,th);
        while (pin_list != NIL) {
            struct cons *list_entry =
                (struct cons *)native_pointer(pin_list);
            preserve_pointer((void*)list_entry->car);
            pin_list = list_entry->cdr;
        }
    }
#endif
    if (gencgc_verbose > 1) {
        sword_t num_dont_move_pages = count_dont_move_pages();
        fprintf(stderr,
                "/non-movable pages due to conservative pointers = %ld (%lu bytes)\n",
                num_dont_move_pages,
                npage_bytes(num_dont_move_pages));
    }
    /* Now that all of the pinned (dont_move) pages are known, and
     * before we start to scavenge (and thus relocate) objects,
     * relocate the pinned pages to newspace, so that the scavenger
     * will not attempt to relocate their contents. */
    move_pinned_pages_to_newspace();
    /* Scavenge all the rest of the roots. */

#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
    /*
     * If not x86, we need to scavenge the interrupt context(s) and the
     * control stacks.
     */
    for_each_thread(th) {
        scavenge_interrupt_contexts(th);
        scavenge_control_stack(th);
    }

# ifdef LISP_FEATURE_SB_SAFEPOINT
    /* In this case, scrub all stacks right here from the GCing thread
     * instead of doing what the comment below says. Suboptimal, but
     * easier. */
    for_each_thread(th)
        scrub_thread_control_stack(th);
# else
    /* Scrub the unscavenged control stack space, so that we can't run
     * into any stale pointers in a later GC (this is done by the
     * stop-for-gc handler in the other threads). */
    scrub_control_stack();
# endif
#endif
    /* Scavenge the Lisp functions of the interrupt handlers, taking
     * care to avoid SIG_DFL and SIG_IGN. */
    for (i = 0; i < NSIG; i++) {
        union interrupt_handler handler = interrupt_handlers[i];
        if (!ARE_SAME_HANDLER(handler.c, SIG_IGN) &&
            !ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
            scavenge((lispobj *)(interrupt_handlers + i), 1);
        }
    }
    /* Scavenge the binding stacks. */
    for_each_thread(th) {
        sword_t len = (lispobj *)get_binding_stack_pointer(th) -
            th->binding_stack_start;
        scavenge((lispobj *) th->binding_stack_start,len);
#ifdef LISP_FEATURE_SB_THREAD
        /* do the tls as well */
        len=(SymbolValue(FREE_TLS_INDEX,0) >> WORD_SHIFT) -
            (sizeof (struct thread))/(sizeof (lispobj));
        scavenge((lispobj *) (th+1),len);
#endif
    }
    /* The original CMU CL code had scavenge-read-only-space code
     * controlled by the Lisp-level variable
     * *SCAVENGE-READ-ONLY-SPACE*. It was disabled by default, and it
     * wasn't documented under what circumstances it was useful or
     * safe to turn it on, so it's been turned off in SBCL. If you
     * want/need this functionality, and can test and document it,
     * please submit a patch. */
#if 0
    if (SymbolValue(SCAVENGE_READ_ONLY_SPACE) != NIL) {
        uword_t read_only_space_size =
            (lispobj *)SymbolValue(READ_ONLY_SPACE_FREE_POINTER) -
            (lispobj *)READ_ONLY_SPACE_START;
        FSHOW((stderr,
               "/scavenge read only space: %d bytes\n",
               read_only_space_size * sizeof(lispobj)));
        scavenge( (lispobj *) READ_ONLY_SPACE_START, read_only_space_size);
    }
#endif
    /* Scavenge static space. */
    static_space_size =
        (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0) -
        (lispobj *)STATIC_SPACE_START;
    if (gencgc_verbose > 1) {
        FSHOW((stderr,
               "/scavenge static space: %d bytes\n",
               static_space_size * sizeof(lispobj)));
    }
    scavenge( (lispobj *) STATIC_SPACE_START, static_space_size);
    /* All generations but the generation being GCed need to be
     * scavenged. The new_space generation needs special handling as
     * objects may be moved in - it is handled separately below. */
    scavenge_generations(generation+1, PSEUDO_STATIC_GENERATION);

    scavenge_pages_with_conservative_pointers_to_them_protected_objects_only();

    /* Finally scavenge the new_space generation. Keep going until no
     * more objects are moved into the new generation */
    scavenge_newspace_generation(new_space);
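    /* For illustration only: scavenging newspace to a fixpoint has the
     * shape of a worklist loop (a simplified sketch; scan_newspace_once()
     * is a hypothetical stand-in for the real page-table walk, which may
     * transport further objects into newspace as it runs):
     *
     *   os_vm_size_t before;
     *   do {
     *       before = bytes_allocated;
     *       scan_newspace_once();   // may copy more objects into newspace
     *   } while (bytes_allocated != before);
     */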
    /* FIXME: I tried reenabling this check when debugging unrelated
     * GC weirdness ca. sbcl-0.6.12.45, and it failed immediately.
     * Since the current GC code seems to work well, I'm guessing that
     * this debugging code is just stale, but I haven't tried to
     * figure it out. It should be figured out and then either made to
     * work or just deleted. */
#define RESCAN_CHECK 0
#if RESCAN_CHECK
    /* As a check re-scavenge the newspace once; no new objects should
     * be found. */
    {
        os_vm_size_t old_bytes_allocated = bytes_allocated;
        os_vm_size_t bytes_allocated;

        /* Start with a full scavenge. */
        scavenge_newspace_generation_one_scan(new_space);

        /* Flush the current regions, updating the tables. */
        gc_alloc_update_all_page_tables();

        bytes_allocated = bytes_allocated - old_bytes_allocated;

        if (bytes_allocated != 0) {
            lose("Rescan of new_space allocated %d more bytes.\n",
                 bytes_allocated);
        }
    }
#endif

    scan_weak_hash_tables();
    scan_weak_pointers();
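    /* For illustration only: breaking weak pointers once the reachable
     * graph has been copied follows this pattern (a simplified sketch;
     * survived_gc() and forwarded_value() are hypothetical stand-ins for
     * the forwarding-pointer checks the real scan_weak_pointers() does):
     *
     *   struct weak_pointer *wp;
     *   for (wp = weak_pointers; wp != NULL; wp = wp->next) {
     *       if (survived_gc(wp->value)) {
     *           wp->value = forwarded_value(wp->value); // follow the copy
     *       } else {
     *           wp->value = NIL;                        // referent died
     *           wp->broken = T;
     *       }
     *   }
     */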
    /* Flush the current regions, updating the tables. */
    gc_alloc_update_all_page_tables();

    /* Free the pages in oldspace, but not those marked dont_move. */
    bytes_freed = free_oldspace();

    /* If the GC is not raising the age then lower the generation back
     * to its normal generation number */
    if (!raise) {
        for (i = 0; i < last_free_page; i++)
            if ((page_table[i].bytes_used != 0)
                && (page_table[i].gen == SCRATCH_GENERATION))
                page_table[i].gen = generation;
        gc_assert(generations[generation].bytes_allocated == 0);
        generations[generation].bytes_allocated =
            generations[SCRATCH_GENERATION].bytes_allocated;
        generations[SCRATCH_GENERATION].bytes_allocated = 0;
    }
    /* Reset the alloc_start_page for generation. */
    generations[generation].alloc_start_page = 0;
    generations[generation].alloc_unboxed_start_page = 0;
    generations[generation].alloc_large_start_page = 0;
    generations[generation].alloc_large_unboxed_start_page = 0;

    if (generation >= verify_gens) {
        if (gencgc_verbose) {
            SHOW("verifying");
        }
        verify_gc();
        verify_dynamic_space();
    }
    /* Set the new gc trigger for the GCed generation. */
    generations[generation].gc_trigger =
        generations[generation].bytes_allocated
        + generations[generation].bytes_consed_between_gc;

    if (raise)
        generations[generation].num_gc = 0;
    else
        ++generations[generation].num_gc;
}
/* Update last_free_page, then SymbolValue(ALLOCATION_POINTER). */
sword_t
update_dynamic_space_free_pointer(void)
{
    page_index_t last_page = -1, i;

    for (i = 0; i < last_free_page; i++)
        if (page_allocated_p(i) && (page_table[i].bytes_used != 0))
            last_page = i;

    last_free_page = last_page+1;

    set_alloc_pointer((lispobj)(page_address(last_free_page)));
    return 0; /* dummy value: return something ... */
}
static void
remap_page_range (page_index_t from, page_index_t to)
{
    /* There's a mysterious Solaris/x86 problem with using mmap
     * tricks for memory zeroing. See sbcl-devel thread
     * "Re: patch: standalone executable redux".
     */
#if defined(LISP_FEATURE_SUNOS)
    zero_and_mark_pages(from, to);
#else
    const page_index_t
            release_granularity = gencgc_release_granularity/GENCGC_CARD_BYTES,
                   release_mask = release_granularity-1,
                            end = to+1,
                   aligned_from = (from+release_mask)&~release_mask,
                    aligned_end = (end&~release_mask);

    if (aligned_from < aligned_end) {
        zero_pages_with_mmap(aligned_from, aligned_end-1);
        if (aligned_from != from)
            zero_and_mark_pages(from, aligned_from-1);
        if (aligned_end != end)
            zero_and_mark_pages(aligned_end, end-1);
    } else {
        zero_and_mark_pages(from, to);
    }
#endif
}
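/* For illustration only, with hypothetical values: if GENCGC_CARD_BYTES
 * is 32768 and gencgc_release_granularity is 131072, then
 * release_granularity is 4 pages and release_mask is 3. Remapping pages
 * 5..14 gives end = 15, aligned_from = (5+3)&~3 = 8 and
 * aligned_end = 15&~3 = 12, so pages 8..11 are handed back to the OS via
 * zero_pages_with_mmap() while the unaligned fringes 5..7 and 12..14 are
 * merely zeroed in place by zero_and_mark_pages(). */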
static void
remap_free_pages (page_index_t from, page_index_t to, int forcibly)
{
    page_index_t first_page, last_page;

    if (forcibly)
        return remap_page_range(from, to);

    for (first_page = from; first_page <= to; first_page++) {
        if (page_allocated_p(first_page) ||
            (page_table[first_page].need_to_zero == 0))
            continue;

        last_page = first_page + 1;
        while (page_free_p(last_page) &&
               (last_page <= to) &&
               (page_table[last_page].need_to_zero == 1))
            last_page++;

        remap_page_range(first_page, last_page-1);

        first_page = last_page;
    }
}
generation_index_t small_generation_limit = 1;

/* GC all generations newer than last_gen, raising the objects in each
 * to the next older generation - we finish when all generations below
 * last_gen are empty. Then if last_gen is due for a GC, or if
 * last_gen==NUM_GENERATIONS (the scratch generation? eh?) we GC that
 * too. The valid range for last_gen is: 0,1,...,NUM_GENERATIONS.
 *
 * We stop collecting at gencgc_oldest_gen_to_gc, even if this is less than
 * last_gen (oh, and note that by default it is NUM_GENERATIONS-1) */
void
collect_garbage(generation_index_t last_gen)
{
    generation_index_t gen = 0, i;
    int raise, more = 0;
    int gen_to_wp;
    /* The largest value of last_free_page seen since the time
     * remap_free_pages was called. */
    static page_index_t high_water_mark = 0;

    FSHOW((stderr, "/entering collect_garbage(%d)\n", last_gen));
    log_generation_stats(gc_logfile, "=== GC Start ===");

    if (last_gen > HIGHEST_NORMAL_GENERATION+1) {
        FSHOW((stderr,
               "/collect_garbage: last_gen = %d, doing a level 0 GC\n",
               last_gen));
        last_gen = 0;
    }

    /* Flush the alloc regions updating the tables. */
    gc_alloc_update_all_page_tables();

    /* Verify the new objects created by Lisp code. */
    if (pre_verify_gen_0) {
        FSHOW((stderr, "pre-checking generation 0\n"));
        verify_generation(0);
    }

    if (gencgc_verbose > 1)
        print_generation_stats();

    do {
        /* Collect the generation. */

        if (more || (gen >= gencgc_oldest_gen_to_gc)) {
            /* Never raise the oldest generation. Never raise the extra generation
             * collected due to more-flag. */
            raise = 0;
            more = 0;
        } else {
            raise =
                (gen < last_gen)
                || (generations[gen].num_gc >= generations[gen].number_of_gcs_before_promotion);
            /* If we would not normally raise this one, but we're
             * running low on space in comparison to the object-sizes
             * we've been seeing, raise it and collect the next one
             * too. */
            if (!raise && gen == last_gen) {
                more = (2*large_allocation) >= (dynamic_space_size - bytes_allocated);
                raise = more;
            }
        }

        if (gencgc_verbose > 1) {
            FSHOW((stderr,
                   "starting GC of generation %d with raise=%d alloc=%d trig=%d GCs=%d\n",
                   gen,
                   raise,
                   generations[gen].bytes_allocated,
                   generations[gen].gc_trigger,
                   generations[gen].num_gc));
        }

        /* If an older generation is being filled, then update its
         * memory age. */
        if (raise) {
            generations[gen+1].cum_sum_bytes_allocated +=
                generations[gen+1].bytes_allocated;
        }

        garbage_collect_generation(gen, raise);

        /* Reset the memory age cum_sum. */
        generations[gen].cum_sum_bytes_allocated = 0;

        if (gencgc_verbose > 1) {
            FSHOW((stderr, "GC of generation %d finished:\n", gen));
            print_generation_stats();
        }

        gen++;
    } while ((gen <= gencgc_oldest_gen_to_gc)
             && ((gen < last_gen)
                 || more
                 || (raise
                     && (generations[gen].bytes_allocated
                         > generations[gen].gc_trigger)
                     && (generation_average_age(gen)
                         > generations[gen].minimum_age_before_gc))));
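    /* For illustration only, with hypothetical numbers: if the dynamic
     * space is 512 MB, 480 MB are allocated, and the largest single
     * allocation since the last GC was 20 MB, then for gen == last_gen
     * with raise still 0 the test above compares 2*20 MB = 40 MB against
     * 512-480 = 32 MB of headroom; 40 >= 32, so more (and hence raise)
     * becomes 1 and one extra, older generation is collected by the loop. */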
    /* Now if gen-1 was raised all generations before gen are empty.
     * If it wasn't raised then all generations before gen-1 are empty.
     *
     * Now objects within this gen's pages cannot point to younger
     * generations unless they are written to. This can be exploited
     * by write-protecting the pages of gen; then when younger
     * generations are GCed only the pages which have been written
     * need scanning. */
    if (raise)
        gen_to_wp = gen;
    else
        gen_to_wp = gen - 1;

    /* There's not much point in WPing pages in generation 0 as it is
     * never scavenged (except promoted pages). */
    if ((gen_to_wp > 0) && enable_page_protection) {
        /* Check that they are all empty. */
        for (i = 0; i < gen_to_wp; i++) {
            if (generations[i].bytes_allocated)
                lose("trying to write-protect gen. %d when gen. %d nonempty\n",
                     gen_to_wp, i);
        }
        write_protect_generation_pages(gen_to_wp);
    }
    /* Set gc_alloc() back to generation 0. The current regions should
     * be flushed after the above GCs. */
    gc_assert((boxed_region.free_pointer - boxed_region.start_addr) == 0);
    gc_alloc_generation = 0;

    /* Save the high-water mark before updating last_free_page */
    if (last_free_page > high_water_mark)
        high_water_mark = last_free_page;

    update_dynamic_space_free_pointer();

    /* Update auto_gc_trigger. Make sure we trigger the next GC before
     * running out of heap! */
    if (bytes_consed_between_gcs <= (dynamic_space_size - bytes_allocated))
        auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
    else
        auto_gc_trigger = bytes_allocated + (dynamic_space_size - bytes_allocated)/2;

    if (gencgc_verbose)
        fprintf(stderr,"Next gc when %"OS_VM_SIZE_FMT" bytes have been consed\n",
                auto_gc_trigger);

    /* If we did a big GC (arbitrarily defined as gen > 1), release memory
     * back to the OS. */
    if (gen > small_generation_limit) {
        if (last_free_page > high_water_mark)
            high_water_mark = last_free_page;
        remap_free_pages(0, high_water_mark, 0);
        high_water_mark = 0;
    }

    large_allocation = 0;

    log_generation_stats(gc_logfile, "=== GC End ===");
    SHOW("returning from collect_garbage");
}
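/* For illustration only, with hypothetical numbers: given a 1000 MB
 * dynamic space and bytes_consed_between_gcs of 50 MB, a post-GC
 * bytes_allocated of 900 MB leaves 100 MB of headroom, so the next
 * trigger is simply 900+50 = 950 MB. With 990 MB allocated only 10 MB
 * remain, so the fallback branch sets the trigger halfway into the
 * remaining space, at 990+5 = 995 MB. */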
/* This is called by Lisp PURIFY when it is finished. All live objects
 * will have been moved to the RO and Static heaps. The dynamic space
 * will need a full re-initialization. We don't bother having Lisp
 * PURIFY flush the current gc_alloc() region, as the page_tables are
 * re-initialized, and every page is zeroed to be sure. */
void
gc_free_heap(void)
{
    page_index_t page, last_page;

    if (gencgc_verbose > 1) {
        SHOW("entering gc_free_heap");
    }

    for (page = 0; page < page_table_pages; page++) {
        /* Skip free pages which should already be zero filled. */
        if (page_allocated_p(page)) {
            void *page_start;
            for (last_page = page;
                 (last_page < page_table_pages) && page_allocated_p(last_page);
                 last_page++) {
                /* Mark the page free. The other slots are assumed invalid
                 * when it is a FREE_PAGE_FLAG and bytes_used is 0 and it
                 * should not be write-protected -- except that the
                 * generation is used for the current region but it sets
                 * it to 0 anyway. */
                page_table[last_page].allocated = FREE_PAGE_FLAG;
                page_table[last_page].bytes_used = 0;
                page_table[last_page].write_protected = 0;
            }

#ifndef LISP_FEATURE_WIN32 /* Pages already zeroed on win32? Not sure
                            * about this change. */
            page_start = (void *)page_address(page);
            os_protect(page_start, npage_bytes(last_page-page), OS_VM_PROT_ALL);
            remap_free_pages(page, last_page-1, 1);
            page = last_page-1;
#endif
        } else if (gencgc_zero_check_during_free_heap) {
            /* Double-check that the page is zero filled. */
            sword_t *page_start;
            page_index_t i;
            gc_assert(page_free_p(page));
            gc_assert(page_table[page].bytes_used == 0);
            page_start = (sword_t *)page_address(page);
            for (i=0; i<GENCGC_CARD_BYTES/sizeof(sword_t); i++) {
                if (page_start[i] != 0) {
                    lose("free region not zero at %x\n", page_start + i);
                }
            }
        }
    }

    bytes_allocated = 0;
    /* Initialize the generations. */
    for (page = 0; page < NUM_GENERATIONS; page++) {
        generations[page].alloc_start_page = 0;
        generations[page].alloc_unboxed_start_page = 0;
        generations[page].alloc_large_start_page = 0;
        generations[page].alloc_large_unboxed_start_page = 0;
        generations[page].bytes_allocated = 0;
        generations[page].gc_trigger = 2000000;
        generations[page].num_gc = 0;
        generations[page].cum_sum_bytes_allocated = 0;
    }

    if (gencgc_verbose > 1)
        print_generation_stats();

    /* Initialize gc_alloc(). */
    gc_alloc_generation = 0;

    gc_set_region_empty(&boxed_region);
    gc_set_region_empty(&unboxed_region);

    last_free_page = 0;
    set_alloc_pointer((lispobj)((char *)heap_base));

    if (verify_after_free_heap) {
        /* Check whether purify has left any bad pointers. */
        FSHOW((stderr, "checking after free_heap\n"));
        verify_gc();
    }
}
void
gc_init(void)
{
    page_index_t i;

#if defined(LISP_FEATURE_SB_SAFEPOINT)
    alloc_gc_page();
#endif

    /* Compute the number of pages needed for the dynamic space.
     * Dynamic space size should be aligned on page size. */
    page_table_pages = dynamic_space_size/GENCGC_CARD_BYTES;
    gc_assert(dynamic_space_size == npage_bytes(page_table_pages));

    /* Default nursery size to 5% of the total dynamic space size,
     * min 1Mb. */
    bytes_consed_between_gcs = dynamic_space_size/(os_vm_size_t)20;
    if (bytes_consed_between_gcs < (1024*1024))
        bytes_consed_between_gcs = 1024*1024;
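    /* For illustration only: with a hypothetical 512 MB dynamic space the
     * default nursery is 512/20 = 25.6 MB, well above the 1 MB floor; with
     * an 8 MB dynamic space the 5% figure would be only 0.4 MB, so the
     * clamp raises bytes_consed_between_gcs to 1 MB. */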
    /* The page_table must be allocated using "calloc" to initialize
     * the page structures correctly. There used to be a separate
     * initialization loop (now commented out; see below) but that was
     * unnecessary and did hurt startup time. */
    page_table = calloc(page_table_pages, sizeof(struct page));
    gc_assert(page_table);
    scavtab[WEAK_POINTER_WIDETAG] = scav_weak_pointer;
    transother[SIMPLE_ARRAY_WIDETAG] = trans_boxed_large;

    heap_base = (void*)DYNAMIC_SPACE_START;

    /* The page structures are initialized implicitly when page_table
     * is allocated with "calloc" above. Formerly we had the following
     * explicit initialization here (comments converted to C99 style
     * for readability as C's block comments don't nest):
     *
     * // Initialize each page structure.
     * for (i = 0; i < page_table_pages; i++) {
     *     // Initialize all pages as free.
     *     page_table[i].allocated = FREE_PAGE_FLAG;
     *     page_table[i].bytes_used = 0;
     *
     *     // Pages are not write-protected at startup.
     *     page_table[i].write_protected = 0;
     * }
     *
     * Without this loop the image starts up much faster when dynamic
     * space is large -- which it is on 64-bit platforms already by
     * default -- and when "calloc" for large arrays is implemented
     * using copy-on-write of a page of zeroes -- which it is at least
     * on Linux. In this case the pages that the page_table is stored
     * in are mapped and cleared only when the corresponding part of
     * dynamic space is used. For example, this saves clearing 16 MB of
     * memory at startup if the page size is 4 KB and the size of
     * dynamic space is 4 GB.
     * FREE_PAGE_FLAG must be 0 for this to work correctly which is
     * asserted below: */
    {
        /* Compile time assertion: If triggered, declares an array
         * of dimension -1 forcing a syntax error. The intent of the
         * assignment is to avoid an "unused variable" warning. */
        char assert_free_page_flag_0[(FREE_PAGE_FLAG) ? -1 : 1];
        assert_free_page_flag_0[0] = assert_free_page_flag_0[0];
    }
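    /* For illustration only: compilers targeting C11 could express the same
     * compile-time check directly; this is not what the code above does,
     * since the runtime also builds with older C dialects:
     *
     *   _Static_assert(FREE_PAGE_FLAG == 0,
     *                  "calloc'd page_table relies on FREE_PAGE_FLAG == 0");
     */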
    bytes_allocated = 0;

    /* Initialize the generations.
     *
     * FIXME: very similar to code in gc_free_heap(), should be shared */
    for (i = 0; i < NUM_GENERATIONS; i++) {
        generations[i].alloc_start_page = 0;
        generations[i].alloc_unboxed_start_page = 0;
        generations[i].alloc_large_start_page = 0;
        generations[i].alloc_large_unboxed_start_page = 0;
        generations[i].bytes_allocated = 0;
        generations[i].gc_trigger = 2000000;
        generations[i].num_gc = 0;
        generations[i].cum_sum_bytes_allocated = 0;
        /* the tune-able parameters */
        generations[i].bytes_consed_between_gc
            = bytes_consed_between_gcs/(os_vm_size_t)HIGHEST_NORMAL_GENERATION;
        generations[i].number_of_gcs_before_promotion = 1;
        generations[i].minimum_age_before_gc = 0.75;
    }

    /* Initialize gc_alloc. */
    gc_alloc_generation = 0;
    gc_set_region_empty(&boxed_region);
    gc_set_region_empty(&unboxed_region);

    last_free_page = 0;
}
/* Pick up the dynamic space from after a core load.
 *
 * The ALLOCATION_POINTER points to the end of the dynamic space.
 */
static void
gencgc_pickup_dynamic(void)
{
    page_index_t page = 0;
    void *alloc_ptr = (void *)get_alloc_pointer();
    lispobj *prev=(lispobj *)page_address(page);
    generation_index_t gen = PSEUDO_STATIC_GENERATION;

    bytes_allocated = 0;

    do {
        lispobj *first,*ptr= (lispobj *)page_address(page);

        if (!gencgc_partial_pickup || page_allocated_p(page)) {
            /* It is possible, though rare, for the saved page table
             * to contain free pages below alloc_ptr. */
            page_table[page].gen = gen;
            page_table[page].bytes_used = GENCGC_CARD_BYTES;
            page_table[page].large_object = 0;
            page_table[page].write_protected = 0;
            page_table[page].write_protected_cleared = 0;
            page_table[page].dont_move = 0;
            page_table[page].need_to_zero = 1;

            bytes_allocated += GENCGC_CARD_BYTES;
        }

        if (!gencgc_partial_pickup) {
            page_table[page].allocated = BOXED_PAGE_FLAG;
            first=gc_search_space(prev,(ptr+2)-prev,ptr);
            if(ptr == first)
                prev=ptr;
            page_table[page].scan_start_offset =
                page_address(page) - (void *)prev;
        }
        page++;
    } while (page_address(page) < alloc_ptr);

    last_free_page = page;

    generations[gen].bytes_allocated = bytes_allocated;

    gc_alloc_update_all_page_tables();
    write_protect_generation_pages(gen);
}

void
gc_initialize_pointers(void)
{
    gencgc_pickup_dynamic();
}
/* alloc(..) is the external interface for memory allocation. It
 * allocates to generation 0. It is not called from within the garbage
 * collector as it is only external uses that need the check for heap
 * size (GC trigger) and to disable the interrupts (interrupts are
 * always disabled during a GC).
 *
 * The vops that call alloc(..) assume that the returned space is zero-filled.
 * (E.g. the most significant word of a 2-word bignum in MOVE-FROM-UNSIGNED.)
 *
 * The check for a GC trigger is only performed when the current
 * region is full, so in most cases it's not needed. */

static inline lispobj *
general_alloc_internal(sword_t nbytes, int page_type_flag, struct alloc_region *region,
                       struct thread *thread)
{
#ifndef LISP_FEATURE_WIN32
    lispobj alloc_signal;
#endif
    void *new_obj;
    void *new_free_pointer;
    os_vm_size_t trigger_bytes = 0;

    gc_assert(nbytes > 0);

    /* Check for alignment allocation problems. */
    gc_assert((((uword_t)region->free_pointer & LOWTAG_MASK) == 0)
              && ((nbytes & LOWTAG_MASK) == 0));

#if !(defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD))
    /* Must be inside a PA section. */
    gc_assert(get_pseudo_atomic_atomic(thread));
#endif

    if (nbytes > large_allocation)
        large_allocation = nbytes;

    /* maybe we can do this quickly ... */
    new_free_pointer = region->free_pointer + nbytes;
    if (new_free_pointer <= region->end_addr) {
        new_obj = (void*)(region->free_pointer);
        region->free_pointer = new_free_pointer;
        return(new_obj); /* yup */
    }
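    /* For illustration only: the fast path above is plain pointer-bump
     * allocation. Stripped of the GC bookkeeping it amounts to:
     *
     *   // hypothetical stand-alone sketch of a bump allocator
     *   struct bump_region { char *free_pointer, *end_addr; };
     *   static void *bump_alloc(struct bump_region *r, size_t nbytes) {
     *       char *p = r->free_pointer;
     *       if (p + nbytes > r->end_addr)
     *           return NULL;              // region full: take the slow path
     *       r->free_pointer = p + nbytes; // claim the space
     *       return p;
     *   }
     *
     * Only when the region is exhausted does the slow path below consult
     * the GC trigger and refill the region from fresh pages. */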
    /* We don't want to count nbytes against auto_gc_trigger unless we
     * have to: it speeds up the tenuring of objects and slows down
     * allocation. However, unless we do so when allocating _very_
     * large objects we are in danger of exhausting the heap without
     * running sufficient GCs. */
    if (nbytes >= bytes_consed_between_gcs)
        trigger_bytes = nbytes;

    /* we have to go the long way around, it seems. Check whether we
     * should GC in the near future
     */
    if (auto_gc_trigger && (bytes_allocated+trigger_bytes > auto_gc_trigger)) {
        /* Don't flood the system with interrupts if the need to gc is
         * already noted. This can happen for example when SUB-GC
         * allocates or after a gc triggered in a WITHOUT-GCING. */
        if (SymbolValue(GC_PENDING,thread) == NIL) {
            /* set things up so that GC happens when we finish the PA
             * section */
            SetSymbolValue(GC_PENDING,T,thread);
            if (SymbolValue(GC_INHIBIT,thread) == NIL) {
#ifdef LISP_FEATURE_SB_SAFEPOINT
                thread_register_gc_trigger();
#else
                set_pseudo_atomic_interrupted(thread);
#ifdef GENCGC_IS_PRECISE
                /* PPC calls alloc() from a trap or from pa_alloc(),
                 * look up the most recent context if it's from a trap. */
                {
                    os_context_t *context =
                        thread->interrupt_data->allocation_trap_context;
                    maybe_save_gc_mask_and_block_deferrables
                        (context ? os_context_sigmask_addr(context) : NULL);
                }
#else
                maybe_save_gc_mask_and_block_deferrables(NULL);
#endif
#endif
            }
        }
    }
    new_obj = gc_alloc_with_region(nbytes, page_type_flag, region, 0);

#ifndef LISP_FEATURE_WIN32
    /* for sb-prof, and not supported on Windows yet */
    alloc_signal = SymbolValue(ALLOC_SIGNAL,thread);
    if ((alloc_signal & FIXNUM_TAG_MASK) == 0) {
        if ((sword_t) alloc_signal <= 0) {
            SetSymbolValue(ALLOC_SIGNAL, T, thread);
            raise(SIGPROF);
        } else {
            SetSymbolValue(ALLOC_SIGNAL,
                           alloc_signal - (1 << N_FIXNUM_TAG_BITS),
                           thread);
        }
    }
#endif

    return (new_obj);
}
lispobj *
general_alloc(sword_t nbytes, int page_type_flag)
{
    struct thread *thread = arch_os_get_current_thread();
    /* Select correct region, and call general_alloc_internal with it.
     * For other than boxed allocation we must lock first, since the
     * region is shared. */
    if (BOXED_PAGE_FLAG & page_type_flag) {
#ifdef LISP_FEATURE_SB_THREAD
        struct alloc_region *region = (thread ? &(thread->alloc_region) : &boxed_region);
#else
        struct alloc_region *region = &boxed_region;
#endif
        return general_alloc_internal(nbytes, page_type_flag, region, thread);
    } else if (UNBOXED_PAGE_FLAG == page_type_flag) {
        lispobj *obj;
        gc_assert(0 == thread_mutex_lock(&allocation_lock));
        obj = general_alloc_internal(nbytes, page_type_flag, &unboxed_region, thread);
        gc_assert(0 == thread_mutex_unlock(&allocation_lock));
        return obj;
    } else {
        lose("bad page type flag: %d", page_type_flag);
    }
}
lispobj AMD64_SYSV_ABI *
alloc(sword_t nbytes)
{
#ifdef LISP_FEATURE_SB_SAFEPOINT_STRICTLY
    struct thread *self = arch_os_get_current_thread();
    int was_pseudo_atomic = get_pseudo_atomic_atomic(self);
    if (!was_pseudo_atomic)
        set_pseudo_atomic_atomic(self);
#else
    gc_assert(get_pseudo_atomic_atomic(arch_os_get_current_thread()));
#endif

    lispobj *result = general_alloc(nbytes, BOXED_PAGE_FLAG);

#ifdef LISP_FEATURE_SB_SAFEPOINT_STRICTLY
    if (!was_pseudo_atomic)
        clear_pseudo_atomic_atomic(self);
#endif

    return result;
}
/*
 * shared support for the OS-dependent signal handlers which
 * catch GENCGC-related write-protect violations
 */
void unhandled_sigmemoryfault(void* addr);

/* Depending on which OS we're running under, different signals might
 * be raised for a violation of write protection in the heap. This
 * function factors out the common generational GC magic which needs
 * to be invoked in this case, and should be called from whatever signal
 * handler is appropriate for the OS we're running under.
 *
 * Return true if this signal is a normal generational GC thing that
 * we were able to handle, or false if it was abnormal and control
 * should fall through to the general SIGSEGV/SIGBUS/whatever logic.
 *
 * We have two control flags for this: one causes us to ignore faults
 * on unprotected pages completely, and the second complains to stderr
 * but allows us to continue without losing.
 */
extern boolean ignore_memoryfaults_on_unprotected_pages;
boolean ignore_memoryfaults_on_unprotected_pages = 0;

extern boolean continue_after_memoryfault_on_unprotected_pages;
boolean continue_after_memoryfault_on_unprotected_pages = 0;
int
gencgc_handle_wp_violation(void* fault_addr)
{
    page_index_t page_index = find_page_index(fault_addr);

    FSHOW((stderr, "heap WP violation? fault_addr=%x, page_index=%d\n",
           fault_addr, page_index));

    /* Check whether the fault is within the dynamic space. */
    if (page_index == (-1)) {

        /* It can be helpful to be able to put a breakpoint on this
         * case to help diagnose low-level problems. */
        unhandled_sigmemoryfault(fault_addr);

        /* not within the dynamic space -- not our responsibility */
        return 0;

    } else {
        int ret;
        ret = thread_mutex_lock(&free_pages_lock);
        gc_assert(ret == 0);
        if (page_table[page_index].write_protected) {
            /* Unprotect the page. */
            os_protect(page_address(page_index), GENCGC_CARD_BYTES, OS_VM_PROT_ALL);
            page_table[page_index].write_protected_cleared = 1;
            page_table[page_index].write_protected = 0;
        } else if (!ignore_memoryfaults_on_unprotected_pages) {
            /* The only acceptable reason for this signal on a heap
             * access is that GENCGC write-protected the page.
             * However, if two CPUs hit a wp page near-simultaneously,
             * we had better not have the second one lose here if it
             * does this test after the first one has already set wp=0
             */
            if(page_table[page_index].write_protected_cleared != 1) {
                void lisp_backtrace(int frames);
                lisp_backtrace(10);
                fprintf(stderr,
                        "Fault @ %p, page %"PAGE_INDEX_FMT" not marked as write-protected:\n"
                        "  boxed_region.first_page: %"PAGE_INDEX_FMT","
                        "  boxed_region.last_page %"PAGE_INDEX_FMT"\n"
                        "  page.scan_start_offset: %"OS_VM_SIZE_FMT"\n"
                        "  page.bytes_used: %"PAGE_BYTES_FMT"\n"
                        "  page.allocated: %d\n"
                        "  page.write_protected: %d\n"
                        "  page.write_protected_cleared: %d\n"
                        "  page.generation: %d\n",
                        fault_addr,
                        page_index,
                        boxed_region.first_page,
                        boxed_region.last_page,
                        page_table[page_index].scan_start_offset,
                        page_table[page_index].bytes_used,
                        page_table[page_index].allocated,
                        page_table[page_index].write_protected,
                        page_table[page_index].write_protected_cleared,
                        page_table[page_index].gen);
                if (!continue_after_memoryfault_on_unprotected_pages)
                    lose("Feh.\n");
            }
        }
        ret = thread_mutex_unlock(&free_pages_lock);
        gc_assert(ret == 0);
        /* Don't worry, we can handle it. */
        return 1;
    }
}
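/* For illustration only: a POSIX runtime would typically reach
 * gencgc_handle_wp_violation() from its SIGSEGV handler along these
 * lines (a simplified sketch; the real handlers live in the per-OS
 * files and also deal with stack exhaustion and Lisp-level handlers):
 *
 *   #include <signal.h>
 *
 *   static void sigsegv_handler(int sig, siginfo_t *info, void *ctx) {
 *       if (gencgc_handle_wp_violation(info->si_addr))
 *           return;                // just the write barrier; resume
 *       // otherwise fall through to the general fault handling
 *   }
 *
 *   static void install_handler(void) {
 *       struct sigaction sa;
 *       sa.sa_sigaction = sigsegv_handler;
 *       sigemptyset(&sa.sa_mask);
 *       sa.sa_flags = SA_SIGINFO;
 *       sigaction(SIGSEGV, &sa, NULL);
 *   }
 */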
/* This is to be called when we catch a SIGSEGV/SIGBUS, determine that
 * it's not just a case of the program hitting the write barrier, and
 * are about to let Lisp deal with it. It's basically just a
 * convenient place to set a gdb breakpoint. */
void
unhandled_sigmemoryfault(void *addr)
{}

void gc_alloc_update_all_page_tables(void)
{
    /* Flush the alloc regions updating the tables. */
    struct thread *th;
    for_each_thread(th) {
        gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->alloc_region);
#if defined(LISP_FEATURE_SB_SAFEPOINT_STRICTLY) && !defined(LISP_FEATURE_WIN32)
        gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->sprof_alloc_region);
#endif
    }
    gc_alloc_update_page_tables(UNBOXED_PAGE_FLAG, &unboxed_region);
    gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &boxed_region);
}

void
gc_set_region_empty(struct alloc_region *region)
{
    region->first_page = 0;
    region->last_page = -1;
    region->start_addr = page_address(0);
    region->free_pointer = page_address(0);
    region->end_addr = page_address(0);
}

static void
zero_all_free_pages()
{
    page_index_t i;

    for (i = 0; i < last_free_page; i++) {
        if (page_free_p(i)) {
#ifdef READ_PROTECT_FREE_PAGES
            os_protect(page_address(i),
                       GENCGC_CARD_BYTES,
                       OS_VM_PROT_ALL);
#endif
            zero_pages(i, i);
        }
    }
}
/* Things to do before doing a final GC before saving a core (without
 * other threads).
 *
 * + Pages in large_object pages aren't moved by the GC, so we need to
 *   unset that flag from all pages.
 * + The pseudo-static generation isn't normally collected, but it seems
 *   reasonable to collect it at least when saving a core. So move the
 *   pages to a normal generation.
 */
static void
prepare_for_final_gc ()
{
    page_index_t i;
    for (i = 0; i < last_free_page; i++) {
        page_table[i].large_object = 0;
        if (page_table[i].gen == PSEUDO_STATIC_GENERATION) {
            int used = page_table[i].bytes_used;
            page_table[i].gen = HIGHEST_NORMAL_GENERATION;
            generations[PSEUDO_STATIC_GENERATION].bytes_allocated -= used;
            generations[HIGHEST_NORMAL_GENERATION].bytes_allocated += used;
        }
    }
}
/* Do a non-conservative GC, and then save a core with the initial
 * function being set to the value of the static symbol
 * SB!VM:RESTART-LISP-FUNCTION */
void
gc_and_save(char *filename, boolean prepend_runtime,
            boolean save_runtime_options, boolean compressed,
            int compression_level, int application_type)
{
    FILE *file;
    void *runtime_bytes = NULL;
    size_t runtime_size;

    file = prepare_to_save(filename, prepend_runtime, &runtime_bytes,
                           &runtime_size);
    if (file == NULL)
        return;

    conservative_stack = 0;

    /* The filename might come from Lisp, and be moved by the now
     * non-conservative GC. */
    filename = strdup(filename);

    /* Collect twice: once into relatively high memory, and then back
     * into low memory. This compacts the retained data into the lower
     * pages, minimizing the size of the core file.
     */
    prepare_for_final_gc();
    gencgc_alloc_start_page = last_free_page;
    collect_garbage(HIGHEST_NORMAL_GENERATION+1);

    prepare_for_final_gc();
    gencgc_alloc_start_page = -1;
    collect_garbage(HIGHEST_NORMAL_GENERATION+1);

    if (prepend_runtime)
        save_runtime_to_filehandle(file, runtime_bytes, runtime_size,
                                   application_type);

    /* The dumper doesn't know that pages need to be zeroed before use. */
    zero_all_free_pages();
    save_to_filehandle(file, filename, SymbolValue(RESTART_LISP_FUNCTION,0),
                       prepend_runtime, save_runtime_options,
                       compressed ? compression_level : COMPRESSION_LEVEL_NONE);
    /* Oops. Save still managed to fail. Since we've mangled the stack
     * beyond hope, there's not much we can do.
     * (beyond FUNCALLing RESTART_LISP_FUNCTION, but I suspect that's
     * going to be rather unsatisfactory too... */
    lose("Attempt to save core after non-conservative GC failed.\n");
}