/*
 * GENerational Conservative Garbage Collector for SBCL
 */

/*
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 */

/*
 * For a review of garbage collection techniques (e.g. generational
 * GC) and terminology (e.g. "scavenging") see Paul R. Wilson,
 * "Uniprocessor Garbage Collection Techniques". As of 20000618, this
 * had been accepted for _ACM Computing Surveys_ and was available
 * as a PostScript preprint through
 *   <http://www.cs.utexas.edu/users/oops/papers.html>
 * and as
 *   <ftp://ftp.cs.utexas.edu/pub/garbage/bigsurv.ps>.
 */
#if defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD)
#include "pthreads_win32.h"
#endif

#include "interrupt.h"
#include "gc-internal.h"
#include "pseudo-atomic.h"
#include "genesis/vector.h"
#include "genesis/weak-pointer.h"
#include "genesis/fdefn.h"
#include "genesis/simple-fun.h"
#include "genesis/hash-table.h"
#include "genesis/instance.h"
#include "genesis/layout.h"

#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
#include "genesis/cons.h"
#endif
/* forward declarations */
page_index_t gc_find_freeish_pages(page_index_t *restart_page_ptr, sword_t nbytes,
                                   int page_type_flag);
/* As usually configured, generations 0-5 are normal collected generations,
 * 6 is pseudo-static (the objects in which are never moved nor reclaimed),
 * and 7 is scratch space used when collecting a generation without promotion,
 * wherein the generation being collected is moved to generation 7 and back
 * again. */
enum {
    SCRATCH_GENERATION = PSEUDO_STATIC_GENERATION+1,
    NUM_GENERATIONS
};
/* Should we use page protection to help avoid the scavenging of pages
 * that don't have pointers to younger generations? */
boolean enable_page_protection = 1;

/* Largest allocation seen since last GC. */
os_vm_size_t large_allocation = 0;

/* the verbosity level. All non-error messages are disabled at level 0;
 * only a few rare messages are printed at level 1. */
#if QSHOW == 2
boolean gencgc_verbose = 1;
#else
boolean gencgc_verbose = 0;
#endif
/* FIXME: At some point enable the various error-checking things below
 * and see what they say. */

/* We hunt for pointers to old-space, when GCing generations >= verify_gens.
 * Set verify_gens to HIGHEST_NORMAL_GENERATION + 1 to disable this kind of
 * check. */
generation_index_t verify_gens = HIGHEST_NORMAL_GENERATION + 1;

/* Should we do a pre-scan verify of generation 0 before it's GCed? */
boolean pre_verify_gen_0 = 0;

/* Should we check for bad pointers after gc_free_heap is called
 * from Lisp PURIFY? */
boolean verify_after_free_heap = 0;

/* Should we print a note when code objects are found in the dynamic space
 * during a heap verify? */
boolean verify_dynamic_code_check = 0;

#ifdef LISP_FEATURE_X86
/* Should we check code objects for fixup errors after they are transported? */
boolean check_code_fixups = 0;
#endif

/* Should we check that newly allocated regions are zero filled? */
boolean gencgc_zero_check = 0;

/* Should we check that the free space is zero filled? */
boolean gencgc_enable_verify_zero_fill = 0;

/* Should we check that free pages are zero filled during gc_free_heap
 * called after Lisp PURIFY? */
boolean gencgc_zero_check_during_free_heap = 0;

/* When loading a core, don't do a full scan of the memory for the
 * memory region boundaries. (Set to true by coreparse.c if the core
 * contained a pagetable entry.) */
boolean gencgc_partial_pickup = 0;

/* If defined, free pages are read-protected to ensure that nothing
 * accesses them. */

/* #define READ_PROTECT_FREE_PAGES */
/*
 * GC structures and variables
 */

/* the total bytes allocated. These are seen by Lisp DYNAMIC-USAGE. */
os_vm_size_t bytes_allocated = 0;
os_vm_size_t auto_gc_trigger = 0;

/* the source and destination generations. These are set before a GC starts
 * scavenging. */
generation_index_t from_space;
generation_index_t new_space;

/* Set to 1 when in GC */
boolean gc_active_p = 0;

/* should the GC be conservative on stack. If false (only right before
 * saving a core), don't scan the stack / mark pages dont_move. */
static boolean conservative_stack = 1;

/* An array of page structures is allocated on gc initialization.
 * This helps to quickly map between an address and its page structure.
 * page_table_pages is set from the size of the dynamic space. */
page_index_t page_table_pages;
struct page *page_table;

in_use_marker_t *page_table_dontmove_dwords;
size_t page_table_dontmove_dwords_size_in_bytes;
/* In GC cards that have conservative pointers to them, should we wipe out
 * dwords in there that are not used, so that they do not act as false
 * roots to other things in the heap from then on? This is a new feature,
 * but in testing it has been both reliable and has caused no noticeable
 * slowdown. */

/* a value that we use to wipe out unused words in GC cards that
 * live alongside conservatively pointed-to words. */
const lispobj wipe_with = 0;
static inline boolean page_allocated_p(page_index_t page) {
    return (page_table[page].allocated != FREE_PAGE_FLAG);
}

static inline boolean page_no_region_p(page_index_t page) {
    return !(page_table[page].allocated & OPEN_REGION_PAGE_FLAG);
}

static inline boolean page_allocated_no_region_p(page_index_t page) {
    return ((page_table[page].allocated & (UNBOXED_PAGE_FLAG | BOXED_PAGE_FLAG))
            && page_no_region_p(page));
}

static inline boolean page_free_p(page_index_t page) {
    return (page_table[page].allocated == FREE_PAGE_FLAG);
}

static inline boolean page_boxed_p(page_index_t page) {
    return (page_table[page].allocated & BOXED_PAGE_FLAG);
}

static inline boolean page_boxed_no_region_p(page_index_t page) {
    return page_boxed_p(page) && page_no_region_p(page);
}

static inline boolean page_unboxed_p(page_index_t page) {
    /* Both flags set == boxed code page */
    return ((page_table[page].allocated & UNBOXED_PAGE_FLAG)
            && !page_boxed_p(page));
}

static inline boolean protect_page_p(page_index_t page, generation_index_t generation) {
    return (page_boxed_no_region_p(page)
            && (page_table[page].bytes_used != 0)
            && !page_table[page].dont_move
            && (page_table[page].gen == generation));
}
/* To map addresses to page structures the address of the first page
 * is needed. */
void *heap_base = NULL;

/* Calculate the start address for the given page number. */
inline void *
page_address(page_index_t page_num)
{
    return (heap_base + (page_num * GENCGC_CARD_BYTES));
}

/* Calculate the address where the allocation region associated with
 * the page starts. */
static inline void *
page_scan_start(page_index_t page_index)
{
    return page_address(page_index)-page_table[page_index].scan_start_offset;
}
/* True if the page starts a contiguous block. */
static inline boolean
page_starts_contiguous_block_p(page_index_t page_index)
{
    return page_table[page_index].scan_start_offset == 0;
}

/* True if the page is the last page in a contiguous block. */
static inline boolean
page_ends_contiguous_block_p(page_index_t page_index, generation_index_t gen)
{
    return (/* page doesn't fill block */
            (page_table[page_index].bytes_used < GENCGC_CARD_BYTES)
            /* page is last allocated page */
            || ((page_index + 1) >= last_free_page)
            /* next page is free */
            || page_free_p(page_index + 1)
            /* next page contains no data */
            || (page_table[page_index + 1].bytes_used == 0)
            /* next page is in different generation */
            || (page_table[page_index + 1].gen != gen)
            /* next page starts its own contiguous block */
            || (page_starts_contiguous_block_p(page_index + 1)));
}
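/* Illustrative sketch (not part of the original source): how the two
 * predicates above can be used together to walk one contiguous block,
 * starting from a page known to begin a block. The helper name is
 * hypothetical; all other identifiers exist in this file. */
static os_vm_size_t
count_contiguous_block_bytes(page_index_t first, generation_index_t gen)
{
    page_index_t p = first;
    os_vm_size_t bytes = 0;
    gc_assert(page_starts_contiguous_block_p(first));
    for (;;) {
        bytes += page_table[p].bytes_used;
        if (page_ends_contiguous_block_p(p, gen))
            break;
        p++;
    }
    return bytes;
}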
/* Find the page index within the page_table for the given
 * address. Return -1 on failure. */
inline page_index_t
find_page_index(void *addr)
{
    if (addr >= heap_base) {
        page_index_t index = ((pointer_sized_uint_t)addr -
                              (pointer_sized_uint_t)heap_base) / GENCGC_CARD_BYTES;
        if (index < page_table_pages)
            return index;
    }
    return -1;
}

static inline os_vm_size_t
npage_bytes(page_index_t npages)
{
    gc_assert(npages>=0);
    return ((os_vm_size_t)npages)*GENCGC_CARD_BYTES;
}
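/* Illustrative sketch (not part of the original source): page_address()
 * and find_page_index() are inverses for in-range pages, and any address
 * within a page maps back to that page's index. The helper is
 * hypothetical and exists only to make that relationship concrete. */
static void
check_page_mapping_roundtrip(page_index_t page)
{
    char *start = (char*)page_address(page);
    gc_assert(find_page_index(start) == page);
    gc_assert(find_page_index(start + GENCGC_CARD_BYTES - 1) == page);
}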
/* Check that X is a higher address than Y and return offset from Y to
 * X. */
static inline os_vm_size_t
void_diff(void *x, void *y)
{
    gc_assert(x >= y);
    return (pointer_sized_uint_t)x - (pointer_sized_uint_t)y;
}
/* a structure to hold the state of a generation
 *
 * CAUTION: If you modify this, make sure to touch up the alien
 * definition in src/code/gc.lisp accordingly. ...or better yet,
 * deal with the FIXME there... */
struct generation {

    /* the first page that gc_alloc() checks on its next call */
    page_index_t alloc_start_page;

    /* the first page that gc_alloc_unboxed() checks on its next call */
    page_index_t alloc_unboxed_start_page;

    /* the first page that gc_alloc_large (boxed) considers on its next
     * call. (Although it always allocates after the boxed_region.) */
    page_index_t alloc_large_start_page;

    /* the first page that gc_alloc_large (unboxed) considers on its
     * next call. (Although it always allocates after the
     * current_unboxed_region.) */
    page_index_t alloc_large_unboxed_start_page;

    /* the bytes allocated to this generation */
    os_vm_size_t bytes_allocated;

    /* the number of bytes at which to trigger a GC */
    os_vm_size_t gc_trigger;

    /* to calculate a new level for gc_trigger */
    os_vm_size_t bytes_consed_between_gc;

    /* the number of GCs since the last raise */
    int num_gc;

    /* the number of GCs to run on the generations before raising objects
     * to the next generation */
    int number_of_gcs_before_promotion;

    /* the cumulative sum of the bytes allocated to this generation. It is
     * cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation. Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC. */
    os_vm_size_t cum_sum_bytes_allocated;

    /* a minimum average memory age before a GC will occur; this helps
     * prevent a GC when a large number of new live objects have been
     * added, in which case a GC could be a waste of time */
    double minimum_age_before_gc;
};

/* an array of generation structures. There needs to be one more
 * generation structure than actual generations as the oldest
 * generation is temporarily raised then lowered. */
struct generation generations[NUM_GENERATIONS];
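/* Illustrative sketch (not part of the original source): a hypothetical
 * helper that sums the per-generation byte counters kept in
 * generations[]. The file's global bytes_allocated (reported to Lisp as
 * DYNAMIC-USAGE) is maintained to track the same total. */
static os_vm_size_t
sum_generation_bytes_allocated(void)
{
    generation_index_t g;
    os_vm_size_t total = 0;
    for (g = 0; g < NUM_GENERATIONS; g++)
        total += generations[g].bytes_allocated;
    return total;
}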
/* the oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... HIGHEST_NORMAL_GENERATION
 *
 * The default of HIGHEST_NORMAL_GENERATION enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided. */
generation_index_t gencgc_oldest_gen_to_gc = HIGHEST_NORMAL_GENERATION;

/* META: Is nobody aside from me bothered by this especially misleading
 * use of the word "last"? It could mean either "ultimate" or "prior",
 * but in fact means neither. It is the *FIRST* page that should be grabbed
 * for more space, so it is min free page, or 1+ the max used page. */
/* The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the Lisp code. */
page_index_t last_free_page;
#ifdef LISP_FEATURE_SB_THREAD
/* This lock is to prevent multiple threads from simultaneously
 * allocating new regions which overlap each other. Note that the
 * majority of GC is single-threaded, but alloc() may be called from
 * >1 thread at a time and must be thread-safe. This lock must be
 * seized before all accesses to generations[] or to parts of
 * page_table[] that other threads may want to see. */
static pthread_mutex_t free_pages_lock = PTHREAD_MUTEX_INITIALIZER;
/* This lock is used to protect non-thread-local allocation. */
static pthread_mutex_t allocation_lock = PTHREAD_MUTEX_INITIALIZER;
#endif

extern os_vm_size_t gencgc_release_granularity;
os_vm_size_t gencgc_release_granularity = GENCGC_RELEASE_GRANULARITY;

extern os_vm_size_t gencgc_alloc_granularity;
os_vm_size_t gencgc_alloc_granularity = GENCGC_ALLOC_GRANULARITY;
/*
 * miscellaneous heap functions
 */

/* Count the number of pages which are write-protected within the
 * given generation. */
static page_index_t
count_write_protect_generation_pages(generation_index_t generation)
{
    page_index_t i, count = 0;

    for (i = 0; i < last_free_page; i++)
        if (page_allocated_p(i)
            && (page_table[i].gen == generation)
            && (page_table[i].write_protected == 1))
            count++;
    return count;
}

/* Count the number of pages within the given generation. */
static page_index_t
count_generation_pages(generation_index_t generation)
{
    page_index_t i;
    page_index_t count = 0;

    for (i = 0; i < last_free_page; i++)
        if (page_allocated_p(i)
            && (page_table[i].gen == generation))
            count++;
    return count;
}

static page_index_t
count_dont_move_pages(void)
{
    page_index_t i;
    page_index_t count = 0;
    for (i = 0; i < last_free_page; i++) {
        if (page_allocated_p(i)
            && (page_table[i].dont_move != 0)) {
            ++count;
        }
    }
    return count;
}

/* Work through the pages and add up the number of bytes used for the
 * given generation. */
static os_vm_size_t
count_generation_bytes_allocated (generation_index_t gen)
{
    page_index_t i;
    os_vm_size_t result = 0;
    for (i = 0; i < last_free_page; i++) {
        if (page_allocated_p(i)
            && (page_table[i].gen == gen))
            result += page_table[i].bytes_used;
    }
    return result;
}

/* Return the average age of the memory in a generation. */
double
generation_average_age(generation_index_t gen)
{
    if (generations[gen].bytes_allocated == 0)
        return 0.0;

    return
        ((double)generations[gen].cum_sum_bytes_allocated)
        / ((double)generations[gen].bytes_allocated);
}
extern void
write_generation_stats(FILE *file)
{
    generation_index_t i;

#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
#define FPU_STATE_SIZE 27
    int fpu_state[FPU_STATE_SIZE];
#elif defined(LISP_FEATURE_PPC)
#define FPU_STATE_SIZE 32
    long long fpu_state[FPU_STATE_SIZE];
#elif defined(LISP_FEATURE_SPARC)
    /*
     * 32 (single-precision) FP registers, and the FP state register.
     * But Sparc V9 has 32 double-precision registers (equivalent to 64
     * single-precision, but can't be accessed), so we leave enough room
     * for that.
     */
#define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
    long long fpu_state[FPU_STATE_SIZE];
#elif defined(LISP_FEATURE_ARM)
#define FPU_STATE_SIZE 8
    long long fpu_state[FPU_STATE_SIZE];
#elif defined(LISP_FEATURE_ARM64)
#define FPU_STATE_SIZE 64
    long fpu_state[FPU_STATE_SIZE];
#endif

    /* This code uses the FP instructions which may be set up for Lisp
     * so they need to be saved and reset for C. */
    fpu_save(fpu_state);
    /* Print the heap stats. */
    fprintf(file,
            " Gen StaPg UbSta LaSta LUbSt Boxed Unboxed LB LUB !move Alloc Waste Trig WP GCs Mem-age\n");

    for (i = 0; i < SCRATCH_GENERATION; i++) {
        page_index_t j;
        page_index_t boxed_cnt = 0;
        page_index_t unboxed_cnt = 0;
        page_index_t large_boxed_cnt = 0;
        page_index_t large_unboxed_cnt = 0;
        page_index_t pinned_cnt=0;

        for (j = 0; j < last_free_page; j++)
            if (page_table[j].gen == i) {

                /* Count the number of boxed pages within the given
                 * generation. */
                if (page_boxed_p(j)) {
                    if (page_table[j].large_object)
                        large_boxed_cnt++;
                    else
                        boxed_cnt++;
                }
                if(page_table[j].dont_move) pinned_cnt++;
                /* Count the number of unboxed pages within the given
                 * generation. */
                if (page_unboxed_p(j)) {
                    if (page_table[j].large_object)
                        large_unboxed_cnt++;
                    else
                        unboxed_cnt++;
                }
            }

        gc_assert(generations[i].bytes_allocated
                  == count_generation_bytes_allocated(i));
        fprintf(file,
                " %1d: %5ld %5ld %5ld %5ld",
                i,
                generations[i].alloc_start_page,
                generations[i].alloc_unboxed_start_page,
                generations[i].alloc_large_start_page,
                generations[i].alloc_large_unboxed_start_page);
        fprintf(file,
                " %5"PAGE_INDEX_FMT" %5"PAGE_INDEX_FMT" %5"PAGE_INDEX_FMT
                " %5"PAGE_INDEX_FMT" %5"PAGE_INDEX_FMT,
                boxed_cnt, unboxed_cnt, large_boxed_cnt,
                large_unboxed_cnt, pinned_cnt);
        fprintf(file,
                " %8"OS_VM_SIZE_FMT" %5"OS_VM_SIZE_FMT" %8"OS_VM_SIZE_FMT
                " %4"PAGE_INDEX_FMT" %3d %7.4f\n",
                generations[i].bytes_allocated,
                (npage_bytes(count_generation_pages(i)) - generations[i].bytes_allocated),
                generations[i].gc_trigger,
                count_write_protect_generation_pages(i),
                generations[i].num_gc,
                generation_average_age(i));
    }
    fprintf(file," Total bytes allocated = %"OS_VM_SIZE_FMT"\n", bytes_allocated);
    fprintf(file," Dynamic-space-size bytes = %"OS_VM_SIZE_FMT"\n", dynamic_space_size);

    fpu_restore(fpu_state);
}
extern void
write_heap_exhaustion_report(FILE *file, long available, long requested,
                             struct thread *thread)
{
    fprintf(file,
            "Heap exhausted during %s: %ld bytes available, %ld requested.\n",
            gc_active_p ? "garbage collection" : "allocation",
            available,
            requested);
    write_generation_stats(file);
    fprintf(file, "GC control variables:\n");
    fprintf(file, " *GC-INHIBIT* = %s\n *GC-PENDING* = %s\n",
            SymbolValue(GC_INHIBIT,thread)==NIL ? "false" : "true",
            (SymbolValue(GC_PENDING, thread) == T) ?
            "true" : ((SymbolValue(GC_PENDING, thread) == NIL) ?
                      "false" : "in progress"));
#ifdef LISP_FEATURE_SB_THREAD
    fprintf(file, " *STOP-FOR-GC-PENDING* = %s\n",
            SymbolValue(STOP_FOR_GC_PENDING,thread)==NIL ? "false" : "true");
#endif
}
extern void
print_generation_stats(void)
{
    write_generation_stats(stderr);
}

extern char* gc_logfile;
char * gc_logfile = NULL;

extern void
log_generation_stats(char *logfile, char *header)
{
    if (logfile) {
        FILE * log = fopen(logfile, "a");
        if (log) {
            fprintf(log, "%s\n", header);
            write_generation_stats(log);
            fclose(log);
        } else {
            fprintf(stderr, "Could not open gc logfile: %s\n", logfile);
            fflush(stderr);
        }
    }
}

extern void
report_heap_exhaustion(long available, long requested, struct thread *th)
{
    if (gc_logfile) {
        FILE * log = fopen(gc_logfile, "a");
        if (log) {
            write_heap_exhaustion_report(log, available, requested, th);
            fclose(log);
        } else {
            fprintf(stderr, "Could not open gc logfile: %s\n", gc_logfile);
            fflush(stderr);
        }
    }
    /* Always to stderr as well. */
    write_heap_exhaustion_report(stderr, available, requested, th);
}
#if defined(LISP_FEATURE_X86)
void fast_bzero(void*, size_t); /* in <arch>-assem.S */
#endif

/* Zero the pages from START to END (inclusive), but use mmap/munmap instead
 * of zeroing it ourselves, i.e. in practice give the memory back to the
 * OS. Generally done after a large GC.
 */
void zero_pages_with_mmap(page_index_t start, page_index_t end) {
    page_index_t i;
    void *addr = page_address(start), *new_addr;
    os_vm_size_t length = npage_bytes(1+end-start);

    if (start > end)
        return;

    gc_assert(length >= gencgc_release_granularity);
    gc_assert((length % gencgc_release_granularity) == 0);

    os_invalidate(addr, length);
    new_addr = os_validate(addr, length);
    if (new_addr == NULL || new_addr != addr) {
        lose("remap_free_pages: page moved, 0x%08x ==> 0x%08x",
             start, new_addr);
    }

    for (i = start; i <= end; i++) {
        page_table[i].need_to_zero = 0;
    }
}
/* Zero the pages from START to END (inclusive). Generally done just after
 * a new region has been allocated.
 */
static void
zero_pages(page_index_t start, page_index_t end) {
    if (start > end)
        return;

#if defined(LISP_FEATURE_X86)
    fast_bzero(page_address(start), npage_bytes(1+end-start));
#else
    bzero(page_address(start), npage_bytes(1+end-start));
#endif
}

static void
zero_and_mark_pages(page_index_t start, page_index_t end) {
    page_index_t i;

    zero_pages(start, end);
    for (i = start; i <= end; i++)
        page_table[i].need_to_zero = 0;
}

/* Zero the pages from START to END (inclusive), except for those
 * pages that are known to already be zeroed. Mark all pages in the
 * range as non-zeroed.
 */
static void
zero_dirty_pages(page_index_t start, page_index_t end) {
    page_index_t i, j;

    for (i = start; i <= end; i++) {
        if (!page_table[i].need_to_zero) continue;
        for (j = i+1; (j <= end) && (page_table[j].need_to_zero); j++);
        zero_pages(i, j-1);
        i = j;
    }

    for (i = start; i <= end; i++) {
        page_table[i].need_to_zero = 1;
    }
}
/*
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties
 * e.g. boxed/unboxed, generation, ages; there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page. Many
 * features of memory use are noted on a page wise basis, e.g. the
 * generation; so if a region starts within an existing allocated page
 * it must be consistent with this page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region. It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list which jumps all over the place to cleanup the list. It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g to detect pointers to the oldspace.
 * Thus it's important that the allocation regions have the correct
 * properties set when allocated, and not just set when closed. The
 * region allocation routines return regions with the specified
 * properties, and grab all the pages, setting their properties
 * appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer. The actual space used by the region is not reflected
 * in the page tables until it is closed. It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used, returning unused
 * space. Further, the region may be recorded as a new area, which is
 * necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region; the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects and so
 * don't need scavenging. Further they can't contain pointers to
 * younger generations so WP is not needed. By allocating pages to
 * unboxed objects the whole page never needs scavenging or
 * write-protecting. */

/* We are only using two regions at present. Both are for the current
 * newspace generation. */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

/* The generation currently being allocated to. */
static generation_index_t gc_alloc_generation;
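/* Illustrative sketch (not part of the original source): the inline
 * allocation fast path described above is just a bump of the region's
 * free pointer with an overflow check; closing the region and opening a
 * new one is what the functions below implement. This hypothetical
 * helper returns NULL where the real allocator would fall back to
 * gc_alloc_with_region(). */
static void *
try_bump_allocate(struct alloc_region *region, sword_t nbytes)
{
    void *obj = region->free_pointer;
    void *new_free_pointer = (char*)obj + nbytes;
    if (new_free_pointer > region->end_addr)
        return NULL;            /* region full: caller must take the slow path */
    region->free_pointer = new_free_pointer;
    return obj;
}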
static inline page_index_t
generation_alloc_start_page(generation_index_t generation, int page_type_flag, int large)
{
    if (large) {
        if (UNBOXED_PAGE_FLAG == page_type_flag) {
            return generations[generation].alloc_large_unboxed_start_page;
        } else if (BOXED_PAGE_FLAG & page_type_flag) {
            /* Both code and data. */
            return generations[generation].alloc_large_start_page;
        } else {
            lose("bad page type flag: %d", page_type_flag);
        }
    } else {
        if (UNBOXED_PAGE_FLAG == page_type_flag) {
            return generations[generation].alloc_unboxed_start_page;
        } else if (BOXED_PAGE_FLAG & page_type_flag) {
            /* Both code and data. */
            return generations[generation].alloc_start_page;
        } else {
            lose("bad page_type_flag: %d", page_type_flag);
        }
    }
}

static inline void
set_generation_alloc_start_page(generation_index_t generation, int page_type_flag, int large,
                                page_index_t page)
{
    if (large) {
        if (UNBOXED_PAGE_FLAG == page_type_flag) {
            generations[generation].alloc_large_unboxed_start_page = page;
        } else if (BOXED_PAGE_FLAG & page_type_flag) {
            /* Both code and data. */
            generations[generation].alloc_large_start_page = page;
        } else {
            lose("bad page type flag: %d", page_type_flag);
        }
    } else {
        if (UNBOXED_PAGE_FLAG == page_type_flag) {
            generations[generation].alloc_unboxed_start_page = page;
        } else if (BOXED_PAGE_FLAG & page_type_flag) {
            /* Both code and data. */
            generations[generation].alloc_start_page = page;
        } else {
            lose("bad page type flag: %d", page_type_flag);
        }
    }
}
const int n_dwords_in_card = GENCGC_CARD_BYTES / N_WORD_BYTES / 2;
in_use_marker_t *
dontmove_dwords(page_index_t page)
{
    if (page_table[page].has_dontmove_dwords)
        return &page_table_dontmove_dwords[page * n_dwords_in_card];
    return NULL;
}
/* Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page. So
 * may pick up from the previous region if there is enough space. This
 * keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables(), and will thus be in an empty state.
 *
 * To assist the scavenging functions write-protected pages are not
 * used. Free pages should not be write-protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found to within the current
 * region and the page generation must be set so that pointers to the
 * from space can be recognized. Therefore the generation of pages in
 * the region are set to gc_alloc_generation. To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void
gc_alloc_new_region(sword_t nbytes, int page_type_flag, struct alloc_region *alloc_region)
{
    page_index_t first_page;
    page_index_t last_page;
    os_vm_size_t bytes_found;
    page_index_t i;
    int ret;

    FSHOW((stderr,
           "/alloc_new_region for %d bytes from gen %d\n",
           nbytes, gc_alloc_generation));

    /* Check that the region is in a reset state. */
    gc_assert((alloc_region->first_page == 0)
              && (alloc_region->last_page == -1)
              && (alloc_region->free_pointer == alloc_region->end_addr));
    ret = thread_mutex_lock(&free_pages_lock);
    gc_assert(ret == 0);
    first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0);
    last_page=gc_find_freeish_pages(&first_page, nbytes, page_type_flag);
    bytes_found=(GENCGC_CARD_BYTES - page_table[first_page].bytes_used)
            + npage_bytes(last_page-first_page);

    /* Set up the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
        + page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;

    /* Set up the pages. */

    /* The first page may have already been in use. */
    if (page_table[first_page].bytes_used == 0) {
        page_table[first_page].allocated = page_type_flag;
        page_table[first_page].gen = gc_alloc_generation;
        page_table[first_page].large_object = 0;
        page_table[first_page].scan_start_offset = 0;
        // wiping should have free()ed and :=NULL
        gc_assert(dontmove_dwords(first_page) == NULL);
    }

    gc_assert(page_table[first_page].allocated == page_type_flag);
    page_table[first_page].allocated |= OPEN_REGION_PAGE_FLAG;

    gc_assert(page_table[first_page].gen == gc_alloc_generation);
    gc_assert(page_table[first_page].large_object == 0);

    for (i = first_page+1; i <= last_page; i++) {
        page_table[i].allocated = page_type_flag;
        page_table[i].gen = gc_alloc_generation;
        page_table[i].large_object = 0;
        /* This may not be necessary for unboxed regions (think it was
         * broken before!) */
        page_table[i].scan_start_offset =
            void_diff(page_address(i),alloc_region->start_addr);
        page_table[i].allocated |= OPEN_REGION_PAGE_FLAG;
    }
    /* Bump up last_free_page. */
    if (last_page+1 > last_free_page) {
        last_free_page = last_page+1;
        /* do we only want to call this on special occasions? like for
         * boxed_region? */
        set_alloc_pointer((lispobj)page_address(last_free_page));
    }
    ret = thread_mutex_unlock(&free_pages_lock);
    gc_assert(ret == 0);

#ifdef READ_PROTECT_FREE_PAGES
    os_protect(page_address(first_page),
               npage_bytes(1+last_page-first_page),
               OS_VM_PROT_ALL);
#endif

    /* If the first page was only partial, don't check whether it's
     * zeroed (it won't be) and don't zero it (since the parts that
     * we're interested in are guaranteed to be zeroed).
     */
    if (page_table[first_page].bytes_used) {
        first_page++;
    }

    zero_dirty_pages(first_page, last_page);

    /* we can do this after releasing free_pages_lock */
    if (gencgc_zero_check) {
        word_t *p;
        for (p = (word_t *)alloc_region->start_addr;
             p < (word_t *)alloc_region->end_addr; p++) {
            if (*p != 0) {
                lose("The new region is not zero at %p (start=%p, end=%p).\n",
                     p, alloc_region->start_addr, alloc_region->end_addr);
            }
        }
    }
}
/* If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then it is only recorded if the first page of the
 * current region is <= new_areas_ignore_page. This helps avoid
 * unnecessary recording when doing a full scavenge pass.
 *
 * The new_object structure holds the page, byte offset, and size of
 * new regions of objects. Each new area is placed in the array of
 * these structures pointed to by new_areas. new_areas_index holds the
 * offset into new_areas.
 *
 * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation. */
#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static page_index_t new_areas_ignore_page;
struct new_area {
    page_index_t page;
    size_t offset;
    size_t size;
};
static struct new_area (*new_areas)[];
static size_t new_areas_index;
size_t max_new_areas;
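/* Illustrative sketch (not part of the original source): how a consumer
 * of the new_areas array might walk the areas recorded so far. The real
 * newspace scavenger does this with extra bookkeeping; this hypothetical
 * helper shows only the traversal. */
static void
walk_recorded_new_areas(void (*visit)(page_index_t page, size_t offset, size_t size))
{
    size_t i;
    /* If the array overflowed, callers must fall back to a full
     * newspace scavenge instead of trusting this list. */
    size_t limit = (new_areas_index < NUM_NEW_AREAS) ? new_areas_index : NUM_NEW_AREAS;
    for (i = 0; i < limit; i++)
        visit((*new_areas)[i].page, (*new_areas)[i].offset, (*new_areas)[i].size);
}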
/* Add a new area to new_areas. */
static void
add_new_area(page_index_t first_page, size_t offset, size_t size)
{
    size_t new_area_start, c;
    ssize_t i;

    /* Ignore if full. */
    if (new_areas_index >= NUM_NEW_AREAS)
        return;

    switch (record_new_objects) {
    case 0:
        return;
    case 1:
        if (first_page > new_areas_ignore_page)
            return;
        break;
    case 2:
        break;
    default:
        gc_abort();
    }

    new_area_start = npage_bytes(first_page) + offset;

    /* Search backwards for a prior area that this follows from. If
       found this will save adding a new area. */
    for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) {
        size_t area_end =
            npage_bytes((*new_areas)[i].page)
            + (*new_areas)[i].offset
            + (*new_areas)[i].size;
        /*FSHOW((stderr,
               "/add_new_area S1 %d %d %d %d\n",
               i, c, new_area_start, area_end));*/
        if (new_area_start == area_end) {
            /*FSHOW((stderr,
                   "/adding to [%d] %d %d %d with %d %d %d:\n",
                   i,
                   (*new_areas)[i].page,
                   (*new_areas)[i].offset,
                   (*new_areas)[i].size,
                   first_page,
                   offset,
                   size);*/
            (*new_areas)[i].size += size;
            return;
        }
    }

    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
    /*FSHOW((stderr,
           "/new_area %d page %d offset %d size %d\n",
           new_areas_index, first_page, offset, size));*/
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
        max_new_areas = new_areas_index;
}
/* Update the tables for the alloc_region. The region may be added to
 * the new_areas.
 *
 * When done the alloc_region is set up so that the next quick alloc
 * will fail safely and thus a new region will be allocated. Further
 * it is safe to try to re-update the page table of this reset
 * alloc_region. */
void
gc_alloc_update_page_tables(int page_type_flag, struct alloc_region *alloc_region)
{
    boolean more;
    page_index_t first_page;
    page_index_t next_page;
    os_vm_size_t bytes_used;
    os_vm_size_t region_size;
    os_vm_size_t byte_cnt;
    page_bytes_t orig_first_page_bytes_used;
    int ret;

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if ((first_page == 0) && (alloc_region->last_page == -1))
        return;

    next_page = first_page+1;

    ret = thread_mutex_lock(&free_pages_lock);
    gc_assert(ret == 0);
    if (alloc_region->free_pointer != alloc_region->start_addr) {
        /* some bytes were allocated in the region */
        orig_first_page_bytes_used = page_table[first_page].bytes_used;

        gc_assert(alloc_region->start_addr ==
                  (page_address(first_page)
                   + page_table[first_page].bytes_used));

        /* All the pages used need to be updated */

        /* Update the first page. */

        /* If the page was free then set up the gen, and
         * scan_start_offset. */
        if (page_table[first_page].bytes_used == 0)
            gc_assert(page_starts_contiguous_block_p(first_page));
        page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);

        gc_assert(page_table[first_page].allocated & page_type_flag);
        gc_assert(page_table[first_page].gen == gc_alloc_generation);
        gc_assert(page_table[first_page].large_object == 0);

        byte_cnt = 0;

        /* Calculate the number of bytes used in this page. This is not
         * always the number of new bytes, unless it was free. */
        more = 0;
        if ((bytes_used = void_diff(alloc_region->free_pointer,
                                    page_address(first_page)))
            >GENCGC_CARD_BYTES) {
            bytes_used = GENCGC_CARD_BYTES;
            more = 1;
        }
        page_table[first_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        /* All the rest of the pages should be free. We need to set
         * their scan_start_offset pointer to the start of the
         * region, and set the bytes_used. */
        while (more) {
            page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
            gc_assert(page_table[next_page].allocated & page_type_flag);
            gc_assert(page_table[next_page].bytes_used == 0);
            gc_assert(page_table[next_page].gen == gc_alloc_generation);
            gc_assert(page_table[next_page].large_object == 0);

            gc_assert(page_table[next_page].scan_start_offset ==
                      void_diff(page_address(next_page),
                                alloc_region->start_addr));

            /* Calculate the number of bytes used in this page. */
            more = 0;
            if ((bytes_used = void_diff(alloc_region->free_pointer,
                                        page_address(next_page)))>GENCGC_CARD_BYTES) {
                bytes_used = GENCGC_CARD_BYTES;
                more = 1;
            }
            page_table[next_page].bytes_used = bytes_used;
            byte_cnt += bytes_used;

            next_page++;
        }

        region_size = void_diff(alloc_region->free_pointer,
                                alloc_region->start_addr);
        bytes_allocated += region_size;
        generations[gc_alloc_generation].bytes_allocated += region_size;

        gc_assert((byte_cnt- orig_first_page_bytes_used) == region_size);

        /* Set the generations alloc restart page to the last page of
         * the region. */
        set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0, next_page-1);

        /* Add the region to the new_areas if requested. */
        if (BOXED_PAGE_FLAG & page_type_flag)
            add_new_area(first_page,orig_first_page_bytes_used, region_size);

        FSHOW((stderr,
               "/gc_alloc_update_page_tables update %d bytes to gen %d\n",
               region_size,
               gc_alloc_generation));
    } else {
        /* There are no bytes allocated. Unallocate the first_page if
         * there are 0 bytes_used. */
        page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
        if (page_table[first_page].bytes_used == 0)
            page_table[first_page].allocated = FREE_PAGE_FLAG;
    }

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
        gc_assert(page_table[next_page].bytes_used == 0);
        page_table[next_page].allocated = FREE_PAGE_FLAG;
        next_page++;
    }
    ret = thread_mutex_unlock(&free_pages_lock);
    gc_assert(ret == 0);

    /* alloc_region is per-thread, we're ok to do this unlocked */
    gc_set_region_empty(alloc_region);
}
/* Allocate a possibly large object. */
void *
gc_alloc_large(sword_t nbytes, int page_type_flag, struct alloc_region *alloc_region)
{
    boolean more;
    page_index_t first_page, next_page, last_page;
    page_bytes_t orig_first_page_bytes_used;
    os_vm_size_t byte_cnt;
    os_vm_size_t bytes_used;
    int ret;

    ret = thread_mutex_lock(&free_pages_lock);
    gc_assert(ret == 0);

    first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 1);
    if (first_page <= alloc_region->last_page) {
        first_page = alloc_region->last_page+1;
    }

    last_page=gc_find_freeish_pages(&first_page,nbytes, page_type_flag);

    gc_assert(first_page > alloc_region->last_page);

    set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 1, last_page);

    /* Set up the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /* If the first page was free then set up the gen, and
     * scan_start_offset. */
    if (page_table[first_page].bytes_used == 0) {
        page_table[first_page].allocated = page_type_flag;
        page_table[first_page].gen = gc_alloc_generation;
        page_table[first_page].scan_start_offset = 0;
        page_table[first_page].large_object = 1;
    }

    gc_assert(page_table[first_page].allocated == page_type_flag);
    gc_assert(page_table[first_page].gen == gc_alloc_generation);
    gc_assert(page_table[first_page].large_object == 1);

    byte_cnt = 0;

    /* Calc. the number of bytes used in this page. This is not
     * always the number of new bytes, unless it was free. */
    more = 0;
    if ((bytes_used = nbytes+orig_first_page_bytes_used) > GENCGC_CARD_BYTES) {
        bytes_used = GENCGC_CARD_BYTES;
        more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page+1;

    /* All the rest of the pages should be free. We need to set their
     * scan_start_offset pointer to the start of the region, and set
     * the bytes_used. */
    while (more) {
        gc_assert(page_free_p(next_page));
        gc_assert(page_table[next_page].bytes_used == 0);
        page_table[next_page].allocated = page_type_flag;
        page_table[next_page].gen = gc_alloc_generation;
        page_table[next_page].large_object = 1;

        page_table[next_page].scan_start_offset =
            npage_bytes(next_page-first_page) - orig_first_page_bytes_used;

        /* Calculate the number of bytes used in this page. */
        more = 0;
        bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt;
        if (bytes_used > GENCGC_CARD_BYTES) {
            bytes_used = GENCGC_CARD_BYTES;
            more = 1;
        }
        page_table[next_page].bytes_used = bytes_used;
        page_table[next_page].write_protected=0;
        page_table[next_page].dont_move=0;
        byte_cnt += bytes_used;
        next_page++;
    }

    gc_assert((byte_cnt-orig_first_page_bytes_used) == (size_t)nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (BOXED_PAGE_FLAG & page_type_flag)
        add_new_area(first_page,orig_first_page_bytes_used,nbytes);

    /* Bump up last_free_page */
    if (last_page+1 > last_free_page) {
        last_free_page = last_page+1;
        set_alloc_pointer((lispobj)(page_address(last_free_page)));
    }
    ret = thread_mutex_unlock(&free_pages_lock);
    gc_assert(ret == 0);

#ifdef READ_PROTECT_FREE_PAGES
    os_protect(page_address(first_page),
               npage_bytes(1+last_page-first_page),
               OS_VM_PROT_ALL);
#endif

    zero_dirty_pages(first_page, last_page);

    return page_address(first_page);
}
static page_index_t gencgc_alloc_start_page = -1;

void
gc_heap_exhausted_error_or_lose (sword_t available, sword_t requested)
{
    struct thread *thread = arch_os_get_current_thread();
    /* Write basic information before doing anything else: if we don't
     * call to lisp this is a must, and even if we do there is always
     * the danger that we bounce back here before the error has been
     * handled, or indeed even printed.
     */
    report_heap_exhaustion(available, requested, thread);
    if (gc_active_p || (available == 0)) {
        /* If we are in GC, or totally out of memory there is no way
         * to sanely transfer control to the lisp-side of things.
         */
        lose("Heap exhausted, game over.");
    }
    else {
        /* FIXME: assert free_pages_lock held */
        (void)thread_mutex_unlock(&free_pages_lock);
#if !(defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD))
        gc_assert(get_pseudo_atomic_atomic(thread));
        clear_pseudo_atomic_atomic(thread);
        if (get_pseudo_atomic_interrupted(thread))
            do_pending_interrupt();
#endif
        /* Another issue is that signalling HEAP-EXHAUSTED error leads
         * to running user code at arbitrary places, even in a
         * WITHOUT-INTERRUPTS which may lead to a deadlock without
         * running out of the heap. So at this point all bets are
         * off. */
        if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL)
            corruption_warning_and_maybe_lose
                ("Signalling HEAP-EXHAUSTED in a WITHOUT-INTERRUPTS.");
        /* available and requested should be double word aligned, thus
           they can be passed as fixnums and shifted later. */
        funcall2(StaticSymbolFunction(HEAP_EXHAUSTED_ERROR), available, requested);
        lose("HEAP-EXHAUSTED-ERROR fell through");
    }
}
page_index_t
gc_find_freeish_pages(page_index_t *restart_page_ptr, sword_t bytes,
                      int page_type_flag)
{
    page_index_t most_bytes_found_from = 0, most_bytes_found_to = 0;
    page_index_t first_page, last_page, restart_page = *restart_page_ptr;
    os_vm_size_t nbytes = bytes;
    os_vm_size_t nbytes_goal = nbytes;
    os_vm_size_t bytes_found = 0;
    os_vm_size_t most_bytes_found = 0;
    boolean small_object = nbytes < GENCGC_CARD_BYTES;
    /* FIXME: assert(free_pages_lock is held); */

    if (nbytes_goal < gencgc_alloc_granularity)
        nbytes_goal = gencgc_alloc_granularity;

    /* Toggled by gc_and_save for heap compaction, normally -1. */
    if (gencgc_alloc_start_page != -1) {
        restart_page = gencgc_alloc_start_page;
    }

    /* FIXME: This is on bytes instead of nbytes pending cleanup of
     * long from the interface. */
    gc_assert(bytes>=0);
    /* Search for a page with at least nbytes of space. We prefer
     * not to split small objects on multiple pages, to reduce the
     * number of contiguous allocation regions spanning multiple
     * pages: this helps avoid excessive conservatism.
     *
     * For other objects, we guarantee that they start on their own
     * page boundary.
     */
    first_page = restart_page;
    while (first_page < page_table_pages) {
        bytes_found = 0;
        if (page_free_p(first_page)) {
            gc_assert(0 == page_table[first_page].bytes_used);
            bytes_found = GENCGC_CARD_BYTES;
        } else if (small_object &&
                   (page_table[first_page].allocated == page_type_flag) &&
                   (page_table[first_page].large_object == 0) &&
                   (page_table[first_page].gen == gc_alloc_generation) &&
                   (page_table[first_page].write_protected == 0) &&
                   (page_table[first_page].dont_move == 0)) {
            bytes_found = GENCGC_CARD_BYTES - page_table[first_page].bytes_used;
            if (bytes_found < nbytes) {
                if (bytes_found > most_bytes_found)
                    most_bytes_found = bytes_found;
                first_page++;
                continue;
            }
        } else {
            first_page++;
            continue;
        }

        gc_assert(page_table[first_page].write_protected == 0);
        for (last_page = first_page+1;
             ((last_page < page_table_pages) &&
              page_free_p(last_page) &&
              (bytes_found < nbytes_goal));
             last_page++) {
            bytes_found += GENCGC_CARD_BYTES;
            gc_assert(0 == page_table[last_page].bytes_used);
            gc_assert(0 == page_table[last_page].write_protected);
        }

        if (bytes_found > most_bytes_found) {
            most_bytes_found = bytes_found;
            most_bytes_found_from = first_page;
            most_bytes_found_to = last_page;
        }
        if (bytes_found >= nbytes_goal)
            break;

        first_page = last_page;
    }

    bytes_found = most_bytes_found;
    restart_page = first_page + 1;

    /* Check for a failure */
    if (bytes_found < nbytes) {
        gc_assert(restart_page >= page_table_pages);
        gc_heap_exhausted_error_or_lose(most_bytes_found, nbytes);
    }

    gc_assert(most_bytes_found_to);
    *restart_page_ptr = most_bytes_found_from;
    return most_bytes_found_to-1;
}
/* Allocate bytes. All the rest of the special-purpose allocation
 * functions will eventually call this. */
void *
gc_alloc_with_region(sword_t nbytes,int page_type_flag, struct alloc_region *my_region,
                     int quick_p)
{
    void *new_free_pointer;

    if (nbytes>=LARGE_OBJECT_SIZE)
        return gc_alloc_large(nbytes, page_type_flag, my_region);

    /* Check whether there is room in the current alloc region. */
    new_free_pointer = my_region->free_pointer + nbytes;

    /* fprintf(stderr, "alloc %d bytes from %p to %p\n", nbytes,
       my_region->free_pointer, new_free_pointer); */

    if (new_free_pointer <= my_region->end_addr) {
        /* If so then allocate from the current alloc region. */
        void *new_obj = my_region->free_pointer;
        my_region->free_pointer = new_free_pointer;

        /* Unless a `quick' alloc was requested, check whether the
           alloc region is almost empty. */
        if (!quick_p &&
            void_diff(my_region->end_addr,my_region->free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(page_type_flag, my_region);
            /* Set up a new region. */
            gc_alloc_new_region(32 /*bytes*/, page_type_flag, my_region);
        }

        return((void *)new_obj);
    }

    /* Else not enough free space in the current region: retry with a
     * new region. */

    gc_alloc_update_page_tables(page_type_flag, my_region);
    gc_alloc_new_region(nbytes, page_type_flag, my_region);
    return gc_alloc_with_region(nbytes, page_type_flag, my_region,0);
}
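/* Illustrative sketch (not part of the original source): the
 * special-purpose allocators mentioned above reduce to calls of this
 * shape, choosing a page type flag and the matching GC region. The
 * wrapper names are hypothetical; gc_alloc_with_region() and the two
 * region variables are the real identifiers. */
static void *
gc_alloc_boxed_example(sword_t nbytes)
{
    return gc_alloc_with_region(nbytes, BOXED_PAGE_FLAG, &boxed_region, 0);
}

static void *
gc_alloc_unboxed_example(sword_t nbytes)
{
    return gc_alloc_with_region(nbytes, UNBOXED_PAGE_FLAG, &unboxed_region, 0);
}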
/* Copy a large object. If the object is in a large object region then
 * it is simply promoted, else it is copied. If it's large enough then
 * it's copied to a large object region.
 *
 * Bignums and vectors may have shrunk. If the object is not copied
 * the space needs to be reclaimed, and the page_tables corrected. */
static lispobj
general_copy_large_object(lispobj object, word_t nwords, boolean boxedp)
{
    lispobj *new;
    int tag;
    page_index_t first_page;

    gc_assert(is_lisp_pointer(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    if ((nwords > 1024*1024) && gencgc_verbose) {
        FSHOW((stderr, "/general_copy_large_object: %d bytes\n",
               nwords*N_WORD_BYTES));
    }

    /* Check whether it's a large object. */
    first_page = find_page_index((void *)object);
    gc_assert(first_page >= 0);

    if (page_table[first_page].large_object) {
        /* Promote the object. Note: Unboxed objects may have been
         * allocated to a BOXED region so it may be necessary to
         * change the region to UNBOXED. */
        os_vm_size_t remaining_bytes;
        os_vm_size_t bytes_freed;
        page_index_t next_page;
        page_bytes_t old_bytes_used;

        /* FIXME: This comment is somewhat stale.
         *
         * Note: Any page write-protection must be removed, else a
         * later scavenge_newspace may incorrectly not scavenge these
         * pages. This would not be necessary if they are added to the
         * new areas, but let's do it for them all (they'll probably
         * be written anyway?). */

        gc_assert(page_starts_contiguous_block_p(first_page));
        next_page = first_page;
        remaining_bytes = nwords*N_WORD_BYTES;

        while (remaining_bytes > GENCGC_CARD_BYTES) {
            gc_assert(page_table[next_page].gen == from_space);
            gc_assert(page_table[next_page].large_object);
            gc_assert(page_table[next_page].scan_start_offset ==
                      npage_bytes(next_page-first_page));
            gc_assert(page_table[next_page].bytes_used == GENCGC_CARD_BYTES);
            /* Should have been unprotected by unprotect_oldspace()
             * for boxed objects, and after promotion unboxed ones
             * should not be on protected pages at all. */
            gc_assert(!page_table[next_page].write_protected);

            if (boxedp)
                gc_assert(page_boxed_p(next_page));
            else {
                gc_assert(page_allocated_no_region_p(next_page));
                page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
            }
            page_table[next_page].gen = new_space;

            remaining_bytes -= GENCGC_CARD_BYTES;
            next_page++;
        }

        /* Now only one page remains, but the object may have shrunk so
         * there may be more unused pages which will be freed. */

        /* Object may have shrunk but shouldn't have grown - check. */
        gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

        page_table[next_page].gen = new_space;

        if (boxedp)
            gc_assert(page_boxed_p(next_page));
        else
            page_table[next_page].allocated = UNBOXED_PAGE_FLAG;

        /* Adjust the bytes_used. */
        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].bytes_used = remaining_bytes;

        bytes_freed = old_bytes_used - remaining_bytes;

        /* Free any remaining pages; needs care. */
        next_page++;
        while ((old_bytes_used == GENCGC_CARD_BYTES) &&
               (page_table[next_page].gen == from_space) &&
               /* FIXME: It is not obvious to me why this is necessary
                * as a loop condition: it seems to me that the
                * scan_start_offset test should be sufficient, but
                * experimentally that is not the case. --NS */
               (boxedp ?
                page_boxed_p(next_page) :
                page_allocated_no_region_p(next_page)) &&
               page_table[next_page].large_object &&
               (page_table[next_page].scan_start_offset ==
                npage_bytes(next_page - first_page))) {
            /* Checks out OK, free the page. Don't need to bother zeroing
             * pages as this should have been done before shrinking the
             * object. These pages shouldn't be write-protected, even if
             * boxed they should be zero filled. */
            gc_assert(page_table[next_page].write_protected == 0);

            old_bytes_used = page_table[next_page].bytes_used;
            page_table[next_page].allocated = FREE_PAGE_FLAG;
            page_table[next_page].bytes_used = 0;
            bytes_freed += old_bytes_used;
            next_page++;
        }

        if ((bytes_freed > 0) && gencgc_verbose) {
            FSHOW((stderr,
                   "/general_copy_large_object bytes_freed=%"OS_VM_SIZE_FMT"\n",
                   bytes_freed));
        }

        generations[from_space].bytes_allocated -= nwords*N_WORD_BYTES
            + bytes_freed;
        generations[new_space].bytes_allocated += nwords*N_WORD_BYTES;
        bytes_allocated -= bytes_freed;

        /* Add the region to the new_areas if requested. */
        if (boxedp)
            add_new_area(first_page,0,nwords*N_WORD_BYTES);

        return(object);

    } else {
        /* Get tag of object. */
        tag = lowtag_of(object);

        /* Allocate space. */
        new = gc_general_alloc(nwords*N_WORD_BYTES,
                               (boxedp ? BOXED_PAGE_FLAG : UNBOXED_PAGE_FLAG),
                               ALLOC_QUICK);

        /* Copy the object. */
        memcpy(new,native_pointer(object),nwords*N_WORD_BYTES);

        /* Return Lisp pointer of new object. */
        return ((lispobj) new) | tag;
    }
}

lispobj
copy_large_object(lispobj object, sword_t nwords)
{
    return general_copy_large_object(object, nwords, 1);
}

lispobj
copy_large_unboxed_object(lispobj object, sword_t nwords)
{
    return general_copy_large_object(object, nwords, 0);
}

/* to copy unboxed objects */
lispobj
copy_unboxed_object(lispobj object, sword_t nwords)
{
    return gc_general_copy_object(object, nwords, UNBOXED_PAGE_FLAG);
}
/*
 * code and code-related objects
 */

static lispobj trans_fun_header(lispobj object);
static lispobj trans_boxed(lispobj object);

/* Scan an x86 compiled code object, looking for possible fixups that
 * have been missed after a move.
 *
 * Two types of fixups are needed:
 * 1. Absolute fixups to within the code object.
 * 2. Relative fixups to outside the code object.
 *
 * Currently only absolute fixups to the constant vector, or to the
 * code area are checked. */
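/* Illustrative sketch (not part of the original source): when a code
 * object moves by `displacement` bytes, the two kinds of fixups
 * described above are adjusted in opposite directions, which is exactly
 * what gencgc_apply_code_fixups() below does for each recorded offset.
 * This hypothetical helper only shows the arithmetic. */
static os_vm_address_t
adjust_fixup_example(os_vm_address_t old_value, os_vm_size_t displacement,
                     boolean absolute_p)
{
    /* Absolute fixup: the target moved with the code, so add the
     * displacement. Relative fixup: the target stayed put while the
     * referencing instruction moved, so subtract the displacement. */
    return absolute_p ? old_value + displacement : old_value - displacement;
}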
#ifdef LISP_FEATURE_X86
void
sniff_code_object(struct code *code, os_vm_size_t displacement)
{
    sword_t nheader_words, ncode_words, nwords;
    os_vm_address_t constants_start_addr = NULL, constants_end_addr, p;
    os_vm_address_t code_start_addr, code_end_addr;
    os_vm_address_t code_addr = (os_vm_address_t)code;
    int fixup_found = 0;

    if (!check_code_fixups)
        return;

    FSHOW((stderr, "/sniffing code: %p, %lu\n", code, displacement));

    ncode_words = code_instruction_words(code->code_size);
    nheader_words = code_header_words(*(lispobj *)code);
    nwords = ncode_words + nheader_words;

    constants_start_addr = code_addr + 5*N_WORD_BYTES;
    constants_end_addr = code_addr + nheader_words*N_WORD_BYTES;
    code_start_addr = code_addr + nheader_words*N_WORD_BYTES;
    code_end_addr = code_addr + nwords*N_WORD_BYTES;

    /* Work through the unboxed code. */
    for (p = code_start_addr; p < code_end_addr; p++) {
        void *data = *(void **)p;
        unsigned d1 = *((unsigned char *)p - 1);
        unsigned d2 = *((unsigned char *)p - 2);
        unsigned d3 = *((unsigned char *)p - 3);
        unsigned d4 = *((unsigned char *)p - 4);
#if QSHOW
        unsigned d5 = *((unsigned char *)p - 5);
        unsigned d6 = *((unsigned char *)p - 6);
#endif

        /* Check for code references. */
        /* Check for a 32 bit word that looks like an absolute
           reference to within the code area of the code object. */
        if ((data >= (void*)(code_start_addr-displacement))
            && (data < (void*)(code_end_addr-displacement))) {
            /* function header */
            if ((d4 == SIMPLE_FUN_HEADER_WIDETAG)
                && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) ==
                    (unsigned)code)) {
                /* Skip the function header */
                p += 6*4 - 4 - 1;
                continue;
            }
            /* the case of PUSH imm32 */
            if (d1 == 0x68) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/PUSH $0x%.8x\n", data));
            }
            /* the case of MOV [reg-8],imm32 */
            if ((d3 == 0xc7) && (d1 == 0xf8)
                && (d2==0x40 || d2==0x41 || d2==0x42 || d2==0x43
                    || d2==0x45 || d2==0x46 || d2==0x47)) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/MOV [reg-8],$0x%.8x\n", data));
            }
            /* the case of LEA reg,[disp32] */
            if ((d2 == 0x8d) && ((d1 & 0xc7) == 5)) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr,"/LEA reg,[$0x%.8x]\n", data));
            }
        }

        /* Check for constant references. */
        /* Check for a 32 bit word that looks like an absolute
           reference to within the constant vector. Constant references
           will be aligned. */
        if ((data >= (void*)(constants_start_addr-displacement))
            && (data < (void*)(constants_end_addr-displacement))
            && (((unsigned)data & 0x3) == 0)) {
            /* the case of MOV eax,m32 */
            if (d1 == 0xa1) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr,"/MOV eax,0x%.8x\n", data));
            }
            /* the case of MOV m32,EAX */
            if (d1 == 0xa3) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/MOV 0x%.8x,eax\n", data));
            }
            /* the case of CMP m32,imm32 */
            if ((d1 == 0x3d) && (d2 == 0x81)) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/CMP 0x%.8x,immed32\n", data));
            }
            /* Check for a mod=00, r/m=101 byte. */
            if ((d1 & 0xc7) == 5) {
                /* the case of CMP m32,reg */
                if (d2 == 0x39) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr,"/CMP 0x%.8x,reg\n", data));
                }
                /* the case of CMP reg32,m32 */
                if (d2 == 0x3b) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/CMP reg32,0x%.8x\n", data));
                }
                /* the case of MOV m32,reg32 */
                if (d2 == 0x89) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/MOV 0x%.8x,reg32\n", data));
                }
                /* the case of MOV reg32,m32 */
                if (d2 == 0x8b) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/MOV reg32,0x%.8x\n", data));
                }
                /* the case of LEA reg32,m32 */
                if (d2 == 0x8d) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/LEA reg32,0x%.8x\n", data));
                }
            }
        }
    }

    /* If anything was found, print some information on the code
     * object. */
    if (fixup_found) {
        FSHOW((stderr,
               "/compiled code object at %x: header words = %d, code words = %d\n",
               code, nheader_words, ncode_words));
        FSHOW((stderr,
               "/const start = %x, end = %x\n",
               constants_start_addr, constants_end_addr));
        FSHOW((stderr,
               "/code start = %x, end = %x\n",
               code_start_addr, code_end_addr));
    }
}
#endif
#ifdef LISP_FEATURE_X86
void
gencgc_apply_code_fixups(struct code *old_code, struct code *new_code)
{
    sword_t nheader_words, ncode_words, nwords;
    os_vm_address_t constants_start_addr, constants_end_addr;
    os_vm_address_t code_start_addr, code_end_addr;
    os_vm_address_t code_addr = (os_vm_address_t)new_code;
    os_vm_address_t old_addr = (os_vm_address_t)old_code;
    os_vm_size_t displacement = code_addr - old_addr;
    lispobj fixups = NIL;
    struct vector *fixups_vector;

    ncode_words = code_instruction_words(new_code->code_size);
    nheader_words = code_header_words(*(lispobj *)new_code);
    nwords = ncode_words + nheader_words;
    /* FSHOW((stderr,
             "/compiled code object at %x: header words = %d, code words = %d\n",
             new_code, nheader_words, ncode_words)); */
    constants_start_addr = code_addr + 5*N_WORD_BYTES;
    constants_end_addr = code_addr + nheader_words*N_WORD_BYTES;
    code_start_addr = code_addr + nheader_words*N_WORD_BYTES;
    code_end_addr = code_addr + nwords*N_WORD_BYTES;
    /*
    FSHOW((stderr,
           "/const start = %x, end = %x\n",
           constants_start_addr,constants_end_addr));
    FSHOW((stderr,
           "/code start = %x; end = %x\n",
           code_start_addr,code_end_addr));
    */

    /* The first constant should be a pointer to the fixups for this
       code object. Check. */
    fixups = new_code->constants[0];

    /* It will be 0 or the unbound-marker if there are no fixups (as
     * will be the case if the code object has been purified, for
     * example) and will be an other pointer if it is valid. */
    if ((fixups == 0) || (fixups == UNBOUND_MARKER_WIDETAG) ||
        !is_lisp_pointer(fixups)) {
        /* Check for possible errors. */
        if (check_code_fixups)
            sniff_code_object(new_code, displacement);

        return;
    }

    fixups_vector = (struct vector *)native_pointer(fixups);

    /* Could be pointing to a forwarding pointer. */
    /* FIXME is this always in from_space? if so, could replace this code with
     * forwarding_pointer_p/forwarding_pointer_value */
    if (is_lisp_pointer(fixups) &&
        (find_page_index((void*)fixups_vector) != -1) &&
        (fixups_vector->header == 0x01)) {
        /* If so, then follow it. */
        /*SHOW("following pointer to a forwarding pointer");*/
        fixups_vector =
            (struct vector *)native_pointer((lispobj)fixups_vector->length);
    }

    /*SHOW("got fixups");*/

    if (widetag_of(fixups_vector->header) == SIMPLE_ARRAY_WORD_WIDETAG) {
        /* Got the fixups for the code block. Now work through the vector,
           and apply a fixup at each address. */
        sword_t length = fixnum_value(fixups_vector->length);
        sword_t i;
        for (i = 0; i < length; i++) {
            long offset = fixups_vector->data[i];
            /* Now check the current value of offset. */
            os_vm_address_t old_value = *(os_vm_address_t *)(code_start_addr + offset);

            /* If it's within the old_code object then it must be an
             * absolute fixup (relative ones are not saved) */
            if ((old_value >= old_addr)
                && (old_value < (old_addr + nwords*N_WORD_BYTES)))
                /* So add the displacement. */
                *(os_vm_address_t *)(code_start_addr + offset) =
                    old_value + displacement;
            else
                /* It is outside the old code object so it must be a
                 * relative fixup (absolute fixups are not saved). So
                 * subtract the displacement. */
                *(os_vm_address_t *)(code_start_addr + offset) =
                    old_value - displacement;
        }
    } else {
        /* This used to just print a note to stderr, but a bogus fixup seems to
         * indicate real heap corruption, so a hard failure is in order. */
        lose("fixup vector %p has a bad widetag: %d\n",
             fixups_vector, widetag_of(fixups_vector->header));
    }

    /* Check for possible errors. */
    if (check_code_fixups) {
        sniff_code_object(new_code,displacement);
    }
}
#endif
static lispobj
trans_boxed_large(lispobj object)
{
    lispobj header;
    uword_t length;

    gc_assert(is_lisp_pointer(object));

    header = *((lispobj *) native_pointer(object));
    length = HeaderValue(header) + 1;
    length = CEILING(length, 2);

    return copy_large_object(object, length);
}
/*
 * weak pointers
 */

/* XX This is a hack adapted from cgc.c. These don't work too
 * efficiently with the gencgc as a list of the weak pointers is
 * maintained within the objects which causes writes to the pages. A
 * limited attempt is made to avoid unnecessary writes, but this needs
 * a re-think. */
#define WEAK_POINTER_NWORDS \
    CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)

static sword_t
scav_weak_pointer(lispobj *where, lispobj object)
{
    /* Since we overwrite the 'next' field, we have to make
     * sure not to do so for pointers already in the list.
     * Instead of searching the list of weak_pointers each
     * time, we ensure that next is always NULL when the weak
     * pointer isn't in the list, and not NULL otherwise.
     * Since we can't use NULL to denote end of list, we
     * use a pointer back to the same weak_pointer.
     */
    struct weak_pointer * wp = (struct weak_pointer*)where;

    if (NULL == wp->next) {
        wp->next = weak_pointers;
        weak_pointers = wp;
        if (NULL == wp->next)
            wp->next = wp;
    }

    /* Do not let GC scavenge the value slot of the weak pointer.
     * (That is why it is a weak pointer.) */

    return WEAK_POINTER_NWORDS;
}
lispobj *
search_read_only_space(void *pointer)
{
    lispobj *start = (lispobj *) READ_ONLY_SPACE_START;
    lispobj *end = (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0);
    if ((pointer < (void *)start) || (pointer >= (void *)end))
        return NULL;
    return (gc_search_space(start,
                            (((lispobj *)pointer)+2)-start,
                            (lispobj *) pointer));
}
lispobj *
search_static_space(void *pointer)
{
    lispobj *start = (lispobj *)STATIC_SPACE_START;
    lispobj *end = (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0);
    if ((pointer < (void *)start) || (pointer >= (void *)end))
        return NULL;
    return (gc_search_space(start,
                            (((lispobj *)pointer)+2)-start,
                            (lispobj *) pointer));
}
/* a faster version for searching the dynamic space. This will work even
 * if the object is in a current allocation region. */
lispobj *
search_dynamic_space(void *pointer)
{
    page_index_t page_index = find_page_index(pointer);
    lispobj *start;

    /* The address may be invalid, so do some checks. */
    if ((page_index == -1) || page_free_p(page_index))
        return NULL;
    start = (lispobj *)page_scan_start(page_index);
    return (gc_search_space(start,
                            (((lispobj *)pointer)+2)-start,
                            (lispobj *)pointer));
}
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)

/* Is there any possibility that pointer is a valid Lisp object
 * reference, and/or something else (e.g. subroutine call return
 * address) which should prevent us from moving the referred-to thing?
 * This is called from preserve_pointer() */
static int
possibly_valid_dynamic_space_pointer_s(lispobj *pointer,
                                       page_index_t addr_page_index,
                                       lispobj **store_here)
{
    lispobj *start_addr;

    /* Find the object start address. */
    start_addr = search_dynamic_space(pointer);

    if (start_addr == NULL) {
        return 0;
    }
    if (store_here) {
        *store_here = start_addr;
    }

    /* If the containing object is a code object, presume that the
     * pointer is valid, simply because it could be an unboxed return
     * address. */
    if (widetag_of(*start_addr) == CODE_HEADER_WIDETAG)
        return 1;

    /* Large object pages only contain ONE object, and it will never
     * be a CONS.  However, arrays and bignums can be allocated larger
     * than necessary and then shrunk to fit, leaving what look like
     * (0 . 0) CONSes at the end.  These appear valid to
     * looks_like_valid_lisp_pointer_p(), so pick them off here. */
    if (page_table[addr_page_index].large_object &&
        (lowtag_of((lispobj)pointer) == LIST_POINTER_LOWTAG))
        return 0;

    return looks_like_valid_lisp_pointer_p((lispobj)pointer, start_addr);
}

#endif  // defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
static boolean
valid_conservative_root_p(void *addr, page_index_t addr_page_index,
                          lispobj **begin_ptr)
{
#ifdef GENCGC_IS_PRECISE
    /* If we're in precise gencgc (non-x86oid as of this writing) then
     * we are only called on valid object pointers in the first place,
     * so we just have to do a bounds-check against the heap, a
     * generation check, and the already-pinned check. */
    if ((addr_page_index == -1)
        || (page_table[addr_page_index].gen != from_space)
        || (page_table[addr_page_index].dont_move != 0))
        return 0;
#else
    /* quick check 1: Address is quite likely to have been invalid. */
    if ((addr_page_index == -1)
        || page_free_p(addr_page_index)
        || (page_table[addr_page_index].bytes_used == 0)
        || (page_table[addr_page_index].gen != from_space))
        return 0;
    gc_assert(!(page_table[addr_page_index].allocated&OPEN_REGION_PAGE_FLAG));

    /* quick check 2: Check the offset within the page. */
    if (((uword_t)addr & (GENCGC_CARD_BYTES - 1)) >
        page_table[addr_page_index].bytes_used)
        return 0;

    /* Filter out anything which can't be a pointer to a Lisp object
     * (or, as a special case which also requires dont_move, a return
     * address referring to something in a CodeObject). This is
     * expensive but important, since it vastly reduces the
     * probability that random garbage will be bogusly interpreted as
     * a pointer which prevents a page from moving. */
    if (!possibly_valid_dynamic_space_pointer_s(addr, addr_page_index,
                                                begin_ptr))
        return 0;
#endif

    return 1;
}
boolean
in_dontmove_dwordindex_p(page_index_t page_index, int dword_in_page)
{
    in_use_marker_t *marker;
    marker = dontmove_dwords(page_index);
    if (marker)
        return marker[dword_in_page];
    return 0;
}

boolean
in_dontmove_nativeptr_p(page_index_t page_index, lispobj *native_ptr)
{
    if (dontmove_dwords(page_index)) {
        lispobj *begin = page_address(page_index);
        int dword_in_page = (native_ptr - begin) / 2;
        return in_dontmove_dwordindex_p(page_index, dword_in_page);
    } else {
        return 0;
    }
}
/* Adjust large bignum and vector objects. This will adjust the
 * allocated region if the size has shrunk, and move unboxed objects
 * into unboxed pages. The pages are not promoted here, and the
 * promoted region is not added to the new_regions; this is really
 * only designed to be called from preserve_pointer(). Shouldn't fail
 * if this is missed, just may delay the moving of objects to unboxed
 * pages, and the freeing of pages. */
static void
maybe_adjust_large_object(lispobj *where)
{
    page_index_t first_page;
    page_index_t next_page;
    sword_t nwords;

    uword_t remaining_bytes;
    uword_t bytes_freed;
    uword_t old_bytes_used;

    int boxed;

    /* Check whether it's a vector or bignum object. */
    switch (widetag_of(where[0])) {
    case SIMPLE_VECTOR_WIDETAG:
        boxed = BOXED_PAGE_FLAG;
        break;
    case BIGNUM_WIDETAG:
    case SIMPLE_BASE_STRING_WIDETAG:
#ifdef SIMPLE_CHARACTER_STRING_WIDETAG
    case SIMPLE_CHARACTER_STRING_WIDETAG:
#endif
    case SIMPLE_BIT_VECTOR_WIDETAG:
    case SIMPLE_ARRAY_NIL_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:

    case SIMPLE_ARRAY_UNSIGNED_FIXNUM_WIDETAG:

    case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
    case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
    case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
#endif

    case SIMPLE_ARRAY_FIXNUM_WIDETAG:

#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
#endif
    case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
    case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
#ifdef SIMPLE_ARRAY_LONG_FLOAT_WIDETAG
    case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
    case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
    case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
    case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
#endif
        boxed = UNBOXED_PAGE_FLAG;
        break;
    default:
        return;
    }

    /* Find its current size. */
    nwords = (sizetab[widetag_of(where[0])])(where);

    first_page = find_page_index((void *)where);
    gc_assert(first_page >= 0);

    /* Note: Any page write-protection must be removed, else a later
     * scavenge_newspace may incorrectly not scavenge these pages.
     * This would not be necessary if they are added to the new areas,
     * but lets do it for them all (they'll probably be written
     * anyway?). */

    gc_assert(page_starts_contiguous_block_p(first_page));

    next_page = first_page;
    remaining_bytes = nwords*N_WORD_BYTES;
    while (remaining_bytes > GENCGC_CARD_BYTES) {
        gc_assert(page_table[next_page].gen == from_space);
        gc_assert(page_allocated_no_region_p(next_page));
        gc_assert(page_table[next_page].large_object);
        gc_assert(page_table[next_page].scan_start_offset ==
                  npage_bytes(next_page-first_page));
        gc_assert(page_table[next_page].bytes_used == GENCGC_CARD_BYTES);

        page_table[next_page].allocated = boxed;

        /* Shouldn't be write-protected at this stage. Essential that the
         * pages aren't. */
        gc_assert(!page_table[next_page].write_protected);
        remaining_bytes -= GENCGC_CARD_BYTES;
        next_page++;
    }

    /* Now only one page remains, but the object may have shrunk so
     * there may be more unused pages which will be freed. */

    /* Object may have shrunk but shouldn't have grown - check. */
    gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

    page_table[next_page].allocated = boxed;
    gc_assert(page_table[next_page].allocated ==
              page_table[first_page].allocated);

    /* Adjust the bytes_used. */
    old_bytes_used = page_table[next_page].bytes_used;
    page_table[next_page].bytes_used = remaining_bytes;

    bytes_freed = old_bytes_used - remaining_bytes;

    /* Free any remaining pages; needs care. */
    next_page++;
    while ((old_bytes_used == GENCGC_CARD_BYTES) &&
           (page_table[next_page].gen == from_space) &&
           page_allocated_no_region_p(next_page) &&
           page_table[next_page].large_object &&
           (page_table[next_page].scan_start_offset ==
            npage_bytes(next_page - first_page))) {
        /* It checks out OK, free the page. We don't need to bother zeroing
         * pages as this should have been done before shrinking the
         * object. These pages shouldn't be write protected as they
         * should be zero filled. */
        gc_assert(page_table[next_page].write_protected == 0);

        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].allocated = FREE_PAGE_FLAG;
        page_table[next_page].bytes_used = 0;
        bytes_freed += old_bytes_used;
        next_page++;
    }

    if ((bytes_freed > 0) && gencgc_verbose) {
        FSHOW((stderr,
               "/maybe_adjust_large_object() freed %d\n",
               bytes_freed));
    }

    generations[from_space].bytes_allocated -= bytes_freed;
    bytes_allocated -= bytes_freed;
}
/*
 * Why is this restricted to protected objects only?
 * Because the rest of the page has been scavenged already,
 * and since that leaves forwarding pointers in the unprotected
 * areas you cannot scavenge it again until those are gone.
 */
static void
scavenge_pages_with_conservative_pointers_to_them_protected_objects_only()
{
    page_index_t i;
    for (i = 0; i < last_free_page; i++) {
        if (!dontmove_dwords(i)) {
            continue;
        }
        lispobj *begin = page_address(i);
        unsigned int dword;

        lispobj *scavme_begin = NULL;
        for (dword = 0; dword < GENCGC_CARD_BYTES / N_WORD_BYTES / 2; dword++) {
            if (in_dontmove_dwordindex_p(i, dword)) {
                if (!scavme_begin) {
                    scavme_begin = begin + dword * 2;
                }
            } else {
                // contiguous area stopped
                if (scavme_begin) {
                    scavenge(scavme_begin, (begin + dword * 2) - scavme_begin);
                }
                scavme_begin = NULL;
            }
        }
        if (scavme_begin) {
            scavenge(scavme_begin, (begin + dword * 2) - scavme_begin);
        }
    }
}
int verbosefixes = 0;
void
do_the_wipe()
{
    page_index_t i;
    lispobj *begin;
    int words_wiped = 0;
    int lisp_pointers_wiped = 0;
    int pages_considered = 0;
    int n_pages_cannot_wipe = 0;

    for (i = 0; i < last_free_page; i++) {
        if (!page_table[i].dont_move) {
            continue;
        }
        pages_considered++;
        if (!dontmove_dwords(i)) {
            n_pages_cannot_wipe++;
            continue;
        }
        begin = page_address(i);
        unsigned int dword;
        for (dword = 0; dword < GENCGC_CARD_BYTES / N_WORD_BYTES / 2; dword++) {
            if (!in_dontmove_dwordindex_p(i, dword)) {
                if (is_lisp_pointer(*(begin + dword * 2))) {
                    lisp_pointers_wiped++;
                }
                if (is_lisp_pointer(*(begin + dword * 2 + 1))) {
                    lisp_pointers_wiped++;
                }
                *(begin + dword * 2) = wipe_with;
                *(begin + dword * 2 + 1) = wipe_with;
                words_wiped += 2;
            }
        }
        page_table[i].has_dontmove_dwords = 0;

        // move the page to newspace
        generations[new_space].bytes_allocated += page_table[i].bytes_used;
        generations[page_table[i].gen].bytes_allocated -= page_table[i].bytes_used;
        page_table[i].gen = new_space;
    }
#ifndef LISP_FEATURE_WIN32
    madvise(page_table_dontmove_dwords, page_table_dontmove_dwords_size_in_bytes, MADV_DONTNEED);
#endif
    if ((verbosefixes >= 1 && lisp_pointers_wiped > 0) || verbosefixes >= 2) {
        fprintf(stderr, "gencgc: wiped %d words (%d lisp_pointers) in %d pages, cannot wipe %d pages \n"
                , words_wiped, lisp_pointers_wiped, pages_considered, n_pages_cannot_wipe);
    }
}
void
set_page_consi_bit(page_index_t pageindex, lispobj *mark_which_pointer)
{
    struct page *page = &page_table[pageindex];

    if (!do_wipe_p)
        return;

    gc_assert(mark_which_pointer);
    if (!page->has_dontmove_dwords) {
        page->has_dontmove_dwords = 1;
        bzero(dontmove_dwords(pageindex),
              sizeof(in_use_marker_t) * n_dwords_in_card);
    }

    int size = (sizetab[widetag_of(mark_which_pointer[0])])(mark_which_pointer);
    if (size == 1 &&
        (fixnump(*mark_which_pointer) ||
         is_lisp_pointer(*mark_which_pointer) ||
         lowtag_of(*mark_which_pointer) == 9 ||
         lowtag_of(*mark_which_pointer) == 2)) {
        size = 2;
    }
    if (size % 2 != 0) {
        fprintf(stderr, "WIPE ERROR !dword, size %d, lowtag %d, world 0x%lld\n",
                size,
                lowtag_of(*mark_which_pointer),
                (long long)*mark_which_pointer);
    }
    gc_assert(size % 2 == 0);
    lispobj *begin = page_address(pageindex);
    int begin_dword = (mark_which_pointer - begin) / 2;
    int dword;
    in_use_marker_t *marker = dontmove_dwords(pageindex);
    for (dword = begin_dword; dword < begin_dword + size / 2; dword++) {
        marker[dword] = 1;
    }
}
/* Take a possible pointer to a Lisp object and mark its page in the
 * page_table so that it will not be relocated during a GC.
 *
 * This involves locating the page it points to, then backing up to
 * the start of its region, then marking all pages dont_move from there
 * up to the first page that's not full or has a different generation.
 *
 * It is assumed that all the page static flags have been cleared at
 * the start of a GC.
 *
 * It is also assumed that the current gc_alloc() region has been
 * flushed and the tables updated. */

static void
preserve_pointer(void *addr)
{
    page_index_t addr_page_index = find_page_index(addr);
    page_index_t first_page;
    page_index_t i;
    unsigned int region_allocation;
    lispobj *begin_ptr = NULL;

    if (!valid_conservative_root_p(addr, addr_page_index, &begin_ptr))
        return;

    /* (Now that we know that addr_page_index is in range, it's
     * safe to index into page_table[] with it.) */
    region_allocation = page_table[addr_page_index].allocated;

    /* Find the beginning of the region.  Note that there may be
     * objects in the region preceding the one that we were passed a
     * pointer to: if this is the case, we will write-protect all the
     * previous objects' pages too. */

#if 0
    /* I think this'd work just as well, but without the assertions.
     * -dan 2004.01.01 */
    first_page = find_page_index(page_scan_start(addr_page_index))
#else
    first_page = addr_page_index;
    while (!page_starts_contiguous_block_p(first_page)) {
        --first_page;
        /* Do some checks. */
        gc_assert(page_table[first_page].bytes_used == GENCGC_CARD_BYTES);
        gc_assert(page_table[first_page].gen == from_space);
        gc_assert(page_table[first_page].allocated == region_allocation);
    }
#endif

    /* Adjust any large objects before promotion as they won't be
     * copied after promotion. */
    if (page_table[first_page].large_object) {
        maybe_adjust_large_object(page_address(first_page));
        /* It may have moved to unboxed pages. */
        region_allocation = page_table[first_page].allocated;
    }

    /* Now work forward until the end of this contiguous area is found,
     * marking all pages as dont_move. */
    for (i = first_page; ;i++) {
        gc_assert(page_table[i].allocated == region_allocation);

        /* Mark the page static. */
        page_table[i].dont_move = 1;

        /* It is essential that the pages are not write protected as
         * they may have pointers into the old-space which need
         * scavenging. They shouldn't be write protected at this
         * stage. */
        gc_assert(!page_table[i].write_protected);

        /* Check whether this is the last page in this contiguous block. */
        if (page_ends_contiguous_block_p(i, from_space))
            break;
    }

#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    /* Do not do this for multi-page objects.  Those pages do not need
     * object wipeout anyway. */
    if (i == first_page) {
        /* We need the pointer to the beginning of the object.
         * We might have gotten it above, but maybe not, so make sure. */
        if (begin_ptr == NULL) {
            possibly_valid_dynamic_space_pointer_s(addr, first_page,
                                                   &begin_ptr);
        }
        set_page_consi_bit(first_page, begin_ptr);
    }
#endif

    /* Check that the page is now static. */
    gc_assert(page_table[addr_page_index].dont_move != 0);
}
/* If the given page is not write-protected, then scan it for pointers
 * to younger generations or the top temp. generation, if no
 * suspicious pointers are found then the page is write-protected.
 *
 * Care is taken to check for pointers to the current gc_alloc()
 * region if it is a younger generation or the temp. generation. This
 * frees the caller from doing a gc_alloc_update_page_tables(). Actually
 * the gc_alloc_generation does not need to be checked as this is only
 * called from scavenge_generation() when the gc_alloc generation is
 * younger, so it just checks if there is a pointer to the current
 * region.
 *
 * We return 1 if the page was write-protected, else 0. */
static int
update_page_write_prot(page_index_t page)
{
    generation_index_t gen = page_table[page].gen;
    sword_t j;
    int wp_it = 1;
    void **page_addr = (void **)page_address(page);
    sword_t num_words = page_table[page].bytes_used / N_WORD_BYTES;

    /* Shouldn't be a free page. */
    gc_assert(page_allocated_p(page));
    gc_assert(page_table[page].bytes_used != 0);

    /* Skip if it's already write-protected, pinned, or unboxed */
    if (page_table[page].write_protected
        /* FIXME: What's the reason for not write-protecting pinned pages? */
        || page_table[page].dont_move
        || page_unboxed_p(page))
        return (0);

    /* Scan the page for pointers to younger generations or the
     * top temp. generation. */

    /* This is conservative: any word satisfying is_lisp_pointer() is
     * assumed to be a pointer despite that it might be machine code
     * or part of an unboxed array */
    for (j = 0; j < num_words; j++) {
        void *ptr = *(page_addr+j);
        page_index_t index;

        /* Check that it's in the dynamic space */
        if (is_lisp_pointer((lispobj)ptr) && (index = find_page_index(ptr)) != -1)
            if (/* Does it point to a younger or the temp. generation? */
                (page_allocated_p(index)
                 && (page_table[index].bytes_used != 0)
                 && ((page_table[index].gen < gen)
                     || (page_table[index].gen == SCRATCH_GENERATION)))

                /* Or does it point within a current gc_alloc() region? */
                || ((boxed_region.start_addr <= ptr)
                    && (ptr <= boxed_region.free_pointer))
                || ((unboxed_region.start_addr <= ptr)
                    && (ptr <= unboxed_region.free_pointer))) {
                wp_it = 0;
                break;
            }
    }

    if (wp_it == 1) {
        /* Write-protect the page. */
        /*FSHOW((stderr, "/write-protecting page %d gen %d\n", page, gen));*/

        os_protect((void *)page_addr,
                   GENCGC_CARD_BYTES,
                   OS_VM_PROT_READ|OS_VM_PROT_EXECUTE);

        /* Note the page as protected in the page tables. */
        page_table[page].write_protected = 1;
    }

    return (wp_it);
}
/* Scavenge all generations from FROM to TO, inclusive, except for
 * new_space which needs special handling, as new objects may be
 * added which are not checked here - use scavenge_newspace generation.
 *
 * Write-protected pages should not have any pointers to the
 * from_space so do not need scavenging; thus write-protected pages are
 * not always scavenged. There is some code to check that these pages
 * are not written; but to check fully the write-protected pages need
 * to be scavenged by disabling the code to skip them.
 *
 * Under the current scheme when a generation is GCed the younger
 * generations will be empty. So, when a generation is being GCed it
 * is only necessary to scavenge the older generations for pointers
 * not the younger. So a page that does not have pointers to younger
 * generations does not need to be scavenged.
 *
 * The write-protection can be used to note pages that don't have
 * pointers to younger pages. But pages can be written without having
 * pointers to younger generations. After the pages are scavenged here
 * they can be scanned for pointers to younger generations and if
 * there are none the page can be write-protected.
 *
 * One complication is when the newspace is the top temp. generation.
 *
 * Enabling SC_GEN_CK scavenges the write-protected pages and checks
 * that none were written, which they shouldn't be as they should have
 * no pointers to younger generations. This breaks down for weak
 * pointers as the objects contain a link to the next and are written
 * if a weak pointer is scavenged. Still it's a useful check. */
static void
scavenge_generations(generation_index_t from, generation_index_t to)
{
    page_index_t i;
    page_index_t num_wp = 0;

#if SC_GEN_CK
    /* Clear the write_protected_cleared flags on all pages. */
    for (i = 0; i < page_table_pages; i++)
        page_table[i].write_protected_cleared = 0;
#endif

    for (i = 0; i < last_free_page; i++) {
        generation_index_t generation = page_table[i].gen;
        if (page_boxed_p(i)
            && (page_table[i].bytes_used != 0)
            && (generation != new_space)
            && (generation >= from)
            && (generation <= to)) {
            page_index_t last_page,j;
            int write_protected=1;

            /* This should be the start of a region */
            gc_assert(page_starts_contiguous_block_p(i));

            /* Now work forward until the end of the region */
            for (last_page = i; ; last_page++) {
                write_protected =
                    write_protected && page_table[last_page].write_protected;
                if (page_ends_contiguous_block_p(last_page, generation))
                    break;
            }
            if (!write_protected) {
                scavenge(page_address(i),
                         ((uword_t)(page_table[last_page].bytes_used
                                    + npage_bytes(last_page-i)))
                         /N_WORD_BYTES);

                /* Now scan the pages and write protect those that
                 * don't have pointers to younger generations. */
                if (enable_page_protection) {
                    for (j = i; j <= last_page; j++) {
                        num_wp += update_page_write_prot(j);
                    }
                }
                if ((gencgc_verbose > 1) && (num_wp != 0)) {
                    FSHOW((stderr,
                           "/write protected %d pages within generation %d\n",
                           num_wp, generation));
                }
            }
            i = last_page;
        }
    }

#if SC_GEN_CK
    /* Check that none of the write_protected pages in this generation
     * have been written to. */
    for (i = 0; i < page_table_pages; i++) {
        if (page_allocated_p(i)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)
            && (page_table[i].write_protected_cleared != 0)) {
            FSHOW((stderr, "/scavenge_generation() %d\n", generation));
            FSHOW((stderr,
                   "/page bytes_used=%d scan_start_offset=%lu dont_move=%d\n",
                   page_table[i].bytes_used,
                   page_table[i].scan_start_offset,
                   page_table[i].dont_move));
            lose("write to protected page %d in scavenge_generation()\n", i);
        }
    }
#endif
}
/* Scavenge a newspace generation. As it is scavenged new objects may
 * be allocated to it; these will also need to be scavenged. This
 * repeats until there are no more objects unscavenged in the
 * newspace generation.
 *
 * To help improve the efficiency, areas written are recorded by
 * gc_alloc() and only these scavenged. Sometimes a little more will be
 * scavenged, but this causes no harm. An easy check is done that the
 * scavenged bytes equals the number allocated in the previous
 * scavenge.
 *
 * Write-protected pages are not scanned except if they are marked
 * dont_move in which case they may have been promoted and still have
 * pointers to the from space.
 *
 * Write-protected pages could potentially be written by alloc however
 * to avoid having to handle re-scavenging of write-protected pages
 * gc_alloc() does not write to write-protected pages.
 *
 * New areas of objects allocated are recorded alternatively in the two
 * new_areas arrays below. */
static struct new_area new_areas_1[NUM_NEW_AREAS];
static struct new_area new_areas_2[NUM_NEW_AREAS];

/* Do one full scan of the new space generation. This is not enough to
 * complete the job as new objects may be added to the generation in
 * the process which are not scavenged. */
static void
scavenge_newspace_generation_one_scan(generation_index_t generation)
{
    page_index_t i;

    FSHOW((stderr,
           "/starting one full scan of newspace generation %d\n",
           generation));
    for (i = 0; i < last_free_page; i++) {
        /* Note that this skips over open regions when it encounters them. */
        if (page_boxed_p(i)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)
            && ((page_table[i].write_protected == 0)
                /* (This may be redundant as write_protected is now
                 * cleared before promotion.) */
                || (page_table[i].dont_move == 1))) {
            page_index_t last_page;
            int all_wp=1;

            /* The scavenge will start at the scan_start_offset of
             * page i.
             *
             * We need to find the full extent of this contiguous
             * block in case objects span pages.
             *
             * Now work forward until the end of this contiguous area
             * is found. A small area is preferred as there is a
             * better chance of its pages being write-protected. */
            for (last_page = i; ;last_page++) {
                /* If all pages are write-protected and movable,
                 * then no need to scavenge */
                all_wp=all_wp && page_table[last_page].write_protected &&
                    !page_table[last_page].dont_move;

                /* Check whether this is the last page in this
                 * contiguous block */
                if (page_ends_contiguous_block_p(last_page, generation))
                    break;
            }

            /* Do a limited check for write-protected pages.  */
            if (!all_wp) {
                sword_t nwords = (((uword_t)
                               (page_table[last_page].bytes_used
                                + npage_bytes(last_page-i)
                                + page_table[i].scan_start_offset))
                               / N_WORD_BYTES);
                new_areas_ignore_page = last_page;

                scavenge(page_scan_start(i), nwords);

            }
            i = last_page;
        }
    }
    FSHOW((stderr,
           "/done with one full scan of newspace generation %d\n",
           generation));
}
/* Do a complete scavenge of the newspace generation. */
static void
scavenge_newspace_generation(generation_index_t generation)
{
    size_t i;

    /* the new_areas array currently being written to by gc_alloc() */
    struct new_area (*current_new_areas)[] = &new_areas_1;
    size_t current_new_areas_index;

    /* the new_areas created by the previous scavenge cycle */
    struct new_area (*previous_new_areas)[] = NULL;
    size_t previous_new_areas_index;

    /* Flush the current regions updating the tables. */
    gc_alloc_update_all_page_tables();

    /* Turn on the recording of new areas by gc_alloc(). */
    new_areas = current_new_areas;
    new_areas_index = 0;

    /* Don't need to record new areas that get scavenged anyway during
     * scavenge_newspace_generation_one_scan. */
    record_new_objects = 1;

    /* Start with a full scavenge. */
    scavenge_newspace_generation_one_scan(generation);

    /* Record all new areas now. */
    record_new_objects = 2;

    /* Give a chance to weak hash tables to make other objects live.
     * FIXME: The algorithm implemented here for weak hash table gcing
     * is O(W^2+N) as Bruno Haible warns in
     * http://www.haible.de/bruno/papers/cs/weak/WeakDatastructures-writeup.html
     * see "Implementation 2". */
    scav_weak_hash_tables();

    /* Flush the current regions updating the tables. */
    gc_alloc_update_all_page_tables();

    /* Grab new_areas_index. */
    current_new_areas_index = new_areas_index;

    /*FSHOW((stderr,
             "The first scan is finished; current_new_areas_index=%d.\n",
             current_new_areas_index));*/

    while (current_new_areas_index > 0) {
        /* Move the current to the previous new areas */
        previous_new_areas = current_new_areas;
        previous_new_areas_index = current_new_areas_index;

        /* Scavenge all the areas in previous new areas. Any new areas
         * allocated are saved in current_new_areas. */

        /* Allocate an array for current_new_areas; alternating between
         * new_areas_1 and 2 */
        if (previous_new_areas == &new_areas_1)
            current_new_areas = &new_areas_2;
        else
            current_new_areas = &new_areas_1;

        /* Set up for gc_alloc(). */
        new_areas = current_new_areas;
        new_areas_index = 0;

        /* Check whether previous_new_areas had overflowed. */
        if (previous_new_areas_index >= NUM_NEW_AREAS) {

            /* New areas of objects allocated have been lost so need to do a
             * full scan to be sure! If this becomes a problem try
             * increasing NUM_NEW_AREAS. */
            if (gencgc_verbose) {
                SHOW("new_areas overflow, doing full scavenge");
            }

            /* Don't need to record new areas that get scavenged
             * anyway during scavenge_newspace_generation_one_scan. */
            record_new_objects = 1;

            scavenge_newspace_generation_one_scan(generation);

            /* Record all new areas now. */
            record_new_objects = 2;

            scav_weak_hash_tables();

            /* Flush the current regions updating the tables. */
            gc_alloc_update_all_page_tables();

        } else {

            /* Work through previous_new_areas. */
            for (i = 0; i < previous_new_areas_index; i++) {
                page_index_t page = (*previous_new_areas)[i].page;
                size_t offset = (*previous_new_areas)[i].offset;
                size_t size = (*previous_new_areas)[i].size / N_WORD_BYTES;
                gc_assert((*previous_new_areas)[i].size % N_WORD_BYTES == 0);
                scavenge(page_address(page)+offset, size);
            }

            scav_weak_hash_tables();

            /* Flush the current regions updating the tables. */
            gc_alloc_update_all_page_tables();
        }

        current_new_areas_index = new_areas_index;

        /*FSHOW((stderr,
                 "The re-scan has finished; current_new_areas_index=%d.\n",
                 current_new_areas_index));*/
    }

    /* Turn off recording of areas allocated by gc_alloc(). */
    record_new_objects = 0;

#if SC_NS_GEN_CK
    {
        page_index_t i;
        /* Check that none of the write_protected pages in this generation
         * have been written to. */
        for (i = 0; i < page_table_pages; i++) {
            if (page_allocated_p(i)
                && (page_table[i].bytes_used != 0)
                && (page_table[i].gen == generation)
                && (page_table[i].write_protected_cleared != 0)
                && (page_table[i].dont_move == 0)) {
                lose("write protected page %d written to in scavenge_newspace_generation\ngeneration=%d dont_move=%d\n",
                     i, generation, page_table[i].dont_move);
            }
        }
    }
#endif
}
/* Un-write-protect all the pages in from_space. This is done at the
 * start of a GC else there may be many page faults while scavenging
 * the newspace (I've seen this drive the system time to 99%). These pages
 * would need to be unprotected anyway before unmapping in
 * free_oldspace; not sure what effect this has on paging.. */
static void
unprotect_oldspace(void)
{
    page_index_t i;
    void *region_addr = 0;
    void *page_addr = 0;
    uword_t region_bytes = 0;

    for (i = 0; i < last_free_page; i++) {
        if (page_allocated_p(i)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == from_space)) {

            /* Remove any write-protection. We should be able to rely
             * on the write-protect flag to avoid redundant calls. */
            if (page_table[i].write_protected) {
                page_table[i].write_protected = 0;
                page_addr = page_address(i);
                if (!region_addr) {
                    /* First region. */
                    region_addr = page_addr;
                    region_bytes = GENCGC_CARD_BYTES;
                } else if (region_addr + region_bytes == page_addr) {
                    /* Region continue. */
                    region_bytes += GENCGC_CARD_BYTES;
                } else {
                    /* Unprotect previous region. */
                    os_protect(region_addr, region_bytes, OS_VM_PROT_ALL);
                    /* First page in new region. */
                    region_addr = page_addr;
                    region_bytes = GENCGC_CARD_BYTES;
                }
            }
        }
    }
    if (region_addr) {
        /* Unprotect last region. */
        os_protect(region_addr, region_bytes, OS_VM_PROT_ALL);
    }
}
/* Work through all the pages and free any in from_space. This
 * assumes that all objects have been copied or promoted to an older
 * generation. Bytes_allocated and the generation bytes_allocated
 * counter are updated. The number of bytes freed is returned. */
static uword_t
free_oldspace(void)
{
    uword_t bytes_freed = 0;
    page_index_t first_page, last_page;

    first_page = 0;

    do {
        /* Find a first page for the next region of pages. */
        while ((first_page < last_free_page)
               && (page_free_p(first_page)
                   || (page_table[first_page].bytes_used == 0)
                   || (page_table[first_page].gen != from_space)))
            first_page++;

        if (first_page >= last_free_page)
            break;

        /* Find the last page of this region. */
        last_page = first_page;

        do {
            /* Free the page. */
            bytes_freed += page_table[last_page].bytes_used;
            generations[page_table[last_page].gen].bytes_allocated -=
                page_table[last_page].bytes_used;
            page_table[last_page].allocated = FREE_PAGE_FLAG;
            page_table[last_page].bytes_used = 0;
            /* Should already be unprotected by unprotect_oldspace(). */
            gc_assert(!page_table[last_page].write_protected);
            last_page++;
        }
        while ((last_page < last_free_page)
               && page_allocated_p(last_page)
               && (page_table[last_page].bytes_used != 0)
               && (page_table[last_page].gen == from_space));

#ifdef READ_PROTECT_FREE_PAGES
        os_protect(page_address(first_page),
                   npage_bytes(last_page-first_page),
                   OS_VM_PROT_NONE);
#endif
        first_page = last_page;
    } while (first_page < last_free_page);

    bytes_allocated -= bytes_freed;
    return bytes_freed;
}
/* Print some information about a pointer at the given address. */
static void
print_ptr(lispobj *addr)
{
    /* If addr is in the dynamic space then print the page information. */
    page_index_t pi1 = find_page_index((void*)addr);

    if (pi1 != -1)
        fprintf(stderr,"  %p: page %d  alloc %d  gen %d  bytes_used %d  offset %lu  dont_move %d\n",
                addr,
                pi1,
                page_table[pi1].allocated,
                page_table[pi1].gen,
                page_table[pi1].bytes_used,
                page_table[pi1].scan_start_offset,
                page_table[pi1].dont_move);
    fprintf(stderr,"  %x %x %x %x (%x) %x %x %x %x\n",
            *(addr-4),
            *(addr-3),
            *(addr-2),
            *(addr-1),
            *(addr-0),
            *(addr+1),
            *(addr+2),
            *(addr+3),
            *(addr+4));
}
static int
is_in_stack_space(lispobj ptr)
{
    /* For space verification: Pointers can be valid if they point
     * to a thread stack space.  This would be faster if the thread
     * structures had page-table entries as if they were part of
     * the heap space. */
    struct thread *th;
    for_each_thread(th) {
        if ((th->control_stack_start <= (lispobj *)ptr) &&
            (th->control_stack_end >= (lispobj *)ptr)) {
            return 1;
        }
    }
    return 0;
}
static void
verify_space(lispobj *start, size_t words)
{
    int is_in_dynamic_space = (find_page_index((void*)start) != -1);
    int is_in_readonly_space =
        (READ_ONLY_SPACE_START <= (uword_t)start &&
         (uword_t)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));

    while (words > 0) {
        size_t count = 1;
        lispobj thing = *(lispobj*)start;

        if (is_lisp_pointer(thing)) {
            page_index_t page_index = find_page_index((void*)thing);
            sword_t to_readonly_space =
                (READ_ONLY_SPACE_START <= thing &&
                 thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
            sword_t to_static_space =
                (STATIC_SPACE_START <= thing &&
                 thing < SymbolValue(STATIC_SPACE_FREE_POINTER,0));

            /* Does it point to the dynamic space? */
            if (page_index != -1) {
                /* If it's within the dynamic space it should point to a used
                 * page. XX Could check the offset too. */
                if (page_allocated_p(page_index)
                    && (page_table[page_index].bytes_used == 0))
                    lose ("Ptr %p @ %p sees free page.\n", thing, start);
                /* Check that it doesn't point to a forwarding pointer! */
                if (*((lispobj *)native_pointer(thing)) == 0x01) {
                    lose("Ptr %p @ %p sees forwarding ptr.\n", thing, start);
                }
                /* Check that it's not in the RO space as it would then be a
                 * pointer from the RO to the dynamic space. */
                if (is_in_readonly_space) {
                    lose("ptr to dynamic space %p from RO space %x\n",
                         thing, start);
                }
                /* Does it point to a plausible object? This check slows
                 * it down a lot (so it's commented out).
                 *
                 * "a lot" is serious: it ate 50 minutes cpu time on
                 * my duron 950 before I came back from lunch and
                 * killed it.
                 *
                 * FIXME: Add a variable to enable this
                 * dynamically. */
                /*
                if (!possibly_valid_dynamic_space_pointer_s((lispobj *)thing, page_index, NULL)) {
                    lose("ptr %p to invalid object %p\n", thing, start);
                }
                */
            } else {
                extern char funcallable_instance_tramp;
                /* Verify that it points to another valid space. */
                if (!to_readonly_space && !to_static_space
                    && (thing != (lispobj)&funcallable_instance_tramp)
                    && !is_in_stack_space(thing)) {
                    lose("Ptr %p @ %p sees junk.\n", thing, start);
                }
            }
        } else {
            if (!(fixnump(thing))) {
                /* skip fixnums */
                switch(widetag_of(*start)) {

                    /* boxed objects */
                case SIMPLE_VECTOR_WIDETAG:
                case RATIO_WIDETAG:
                case COMPLEX_WIDETAG:
                case SIMPLE_ARRAY_WIDETAG:
                case COMPLEX_BASE_STRING_WIDETAG:
#ifdef COMPLEX_CHARACTER_STRING_WIDETAG
                case COMPLEX_CHARACTER_STRING_WIDETAG:
#endif
                case COMPLEX_VECTOR_NIL_WIDETAG:
                case COMPLEX_BIT_VECTOR_WIDETAG:
                case COMPLEX_VECTOR_WIDETAG:
                case COMPLEX_ARRAY_WIDETAG:
                case CLOSURE_HEADER_WIDETAG:
                case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
                case VALUE_CELL_HEADER_WIDETAG:
                case SYMBOL_HEADER_WIDETAG:
                case CHARACTER_WIDETAG:
#if N_WORD_BITS == 64
                case SINGLE_FLOAT_WIDETAG:
#endif
                case UNBOUND_MARKER_WIDETAG:
                case FDEFN_WIDETAG:
                    count = 1;
                    break;

                case INSTANCE_HEADER_WIDETAG:
                    {
                        sword_t ntotal = instance_length(thing);
                        lispobj layout = instance_layout(start);
                        if (!layout) {
                            count = 1;
                            break;
                        }
                        instance_scan_interleaved(verify_space,
                                                  start, ntotal,
                                                  native_pointer(layout));
                        count = ntotal + 1;
                        break;
                    }
                case CODE_HEADER_WIDETAG:
                    {
                        lispobj object = *start;
                        struct code *code;
                        sword_t nheader_words, ncode_words, nwords;
                        lispobj fheaderl;
                        struct simple_fun *fheaderp;

                        code = (struct code *) start;

                        /* Check that it's not in the dynamic space.
                         * FIXME: Isn't it supposed to be OK for code
                         * objects to be in the dynamic space these days? */
                        /* It is for byte compiled code, but there's
                         * no byte compilation in SBCL anymore. */
                        if (is_in_dynamic_space
                            /* Only when enabled */
                            && verify_dynamic_code_check) {
                            FSHOW((stderr,
                                   "/code object at %p in the dynamic space\n",
                                   start));
                        }

                        ncode_words = code_instruction_words(code->code_size);
                        nheader_words = code_header_words(object);
                        nwords = ncode_words + nheader_words;
                        nwords = CEILING(nwords, 2);
                        /* Scavenge the boxed section of the code data block */
                        verify_space(start + 1, nheader_words - 1);

                        /* Scavenge the boxed section of each function
                         * object in the code data block. */
                        fheaderl = code->entry_points;
                        while (fheaderl != NIL) {
                            fheaderp =
                                (struct simple_fun *) native_pointer(fheaderl);
                            gc_assert(widetag_of(fheaderp->header) ==
                                      SIMPLE_FUN_HEADER_WIDETAG);
                            verify_space(SIMPLE_FUN_SCAV_START(fheaderp),
                                         SIMPLE_FUN_SCAV_NWORDS(fheaderp));
                            fheaderl = fheaderp->next;
                        }
                        count = nwords;
                        break;
                    }

                    /* unboxed objects */
                case BIGNUM_WIDETAG:
#if N_WORD_BITS != 64
                case SINGLE_FLOAT_WIDETAG:
#endif
                case DOUBLE_FLOAT_WIDETAG:
#ifdef COMPLEX_LONG_FLOAT_WIDETAG
                case LONG_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_SINGLE_FLOAT_WIDETAG
                case COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_DOUBLE_FLOAT_WIDETAG
                case COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_LONG_FLOAT_WIDETAG
                case COMPLEX_LONG_FLOAT_WIDETAG:
#endif
#ifdef SIMD_PACK_WIDETAG
                case SIMD_PACK_WIDETAG:
#endif
                case SIMPLE_BASE_STRING_WIDETAG:
#ifdef SIMPLE_CHARACTER_STRING_WIDETAG
                case SIMPLE_CHARACTER_STRING_WIDETAG:
#endif
                case SIMPLE_BIT_VECTOR_WIDETAG:
                case SIMPLE_ARRAY_NIL_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:

                case SIMPLE_ARRAY_UNSIGNED_FIXNUM_WIDETAG:

                case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
                case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
                case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
#endif

                case SIMPLE_ARRAY_FIXNUM_WIDETAG:

#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
#endif
                case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
                case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
                case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
                case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
                case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
                case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
#endif
                case SAP_WIDETAG:
                case WEAK_POINTER_WIDETAG:
#ifdef NO_TLS_VALUE_MARKER_WIDETAG
                case NO_TLS_VALUE_MARKER_WIDETAG:
#endif
                    count = (sizetab[widetag_of(*start)])(start);
                    break;

                default:
                    lose("Unhandled widetag %p at %p\n",
                         widetag_of(*start), start);
                }
            }
        }
        start += count;
        words -= count;
    }
}
static void
verify_gc(void)
{
    /* FIXME: It would be nice to make names consistent so that
     * foo_size meant size *in* *bytes* instead of size in some
     * arbitrary units. (Yes, this caused a bug, how did you guess?:-)
     * Some counts of lispobjs are called foo_count; it might be good
     * to grep for all foo_size and rename the appropriate ones to
     * foo_count. */
    sword_t read_only_space_size =
        (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0)
        - (lispobj*)READ_ONLY_SPACE_START;
    sword_t static_space_size =
        (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER,0)
        - (lispobj*)STATIC_SPACE_START;
    struct thread *th;
    for_each_thread(th) {
        sword_t binding_stack_size =
            (lispobj*)get_binding_stack_pointer(th)
            - (lispobj*)th->binding_stack_start;
        verify_space(th->binding_stack_start, binding_stack_size);
    }
    verify_space((lispobj*)READ_ONLY_SPACE_START, read_only_space_size);
    verify_space((lispobj*)STATIC_SPACE_START, static_space_size);
}
static void
verify_generation(generation_index_t generation)
{
    page_index_t i;

    for (i = 0; i < last_free_page; i++) {
        if (page_allocated_p(i)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)) {
            page_index_t last_page;

            /* This should be the start of a contiguous block */
            gc_assert(page_starts_contiguous_block_p(i));

            /* Need to find the full extent of this contiguous block in case
               objects span pages. */

            /* Now work forward until the end of this contiguous area is
               found. */
            for (last_page = i; ;last_page++)
                /* Check whether this is the last page in this contiguous
                 * block. */
                if (page_ends_contiguous_block_p(last_page, generation))
                    break;

            verify_space(page_address(i),
                         ((uword_t)
                          (page_table[last_page].bytes_used
                           + npage_bytes(last_page-i)))
                         / N_WORD_BYTES);
            i = last_page;
        }
    }
}
3384 verify_zero_fill(void)
3388 for (page
= 0; page
< last_free_page
; page
++) {
3389 if (page_free_p(page
)) {
3390 /* The whole page should be zero filled. */
3391 sword_t
*start_addr
= (sword_t
*)page_address(page
);
3392 sword_t size
= 1024;
3394 for (i
= 0; i
< size
; i
++) {
3395 if (start_addr
[i
] != 0) {
3396 lose("free page not zero at %x\n", start_addr
+ i
);
3400 sword_t free_bytes
= GENCGC_CARD_BYTES
- page_table
[page
].bytes_used
;
3401 if (free_bytes
> 0) {
3402 sword_t
*start_addr
= (sword_t
*)((uword_t
)page_address(page
)
3403 + page_table
[page
].bytes_used
);
3404 sword_t size
= free_bytes
/ N_WORD_BYTES
;
3406 for (i
= 0; i
< size
; i
++) {
3407 if (start_addr
[i
] != 0) {
3408 lose("free region not zero at %x\n", start_addr
+ i
);
/* External entry point for verify_zero_fill */
void
gencgc_verify_zero_fill(void)
{
    /* Flush the alloc regions updating the tables. */
    gc_alloc_update_all_page_tables();
    SHOW("verifying zero fill");
    verify_zero_fill();
}

static void
verify_dynamic_space(void)
{
    generation_index_t i;

    for (i = 0; i <= HIGHEST_NORMAL_GENERATION; i++)
        verify_generation(i);

    if (gencgc_enable_verify_zero_fill)
        verify_zero_fill();
}
/* Write-protect all the dynamic boxed pages in the given generation. */
static void
write_protect_generation_pages(generation_index_t generation)
{
    page_index_t start;

    gc_assert(generation < SCRATCH_GENERATION);

    for (start = 0; start < last_free_page; start++) {
        if (protect_page_p(start, generation)) {
            void *page_start;
            page_index_t last;

            /* Note the page as protected in the page tables. */
            page_table[start].write_protected = 1;

            for (last = start + 1; last < last_free_page; last++) {
                if (!protect_page_p(last, generation))
                  break;
                page_table[last].write_protected = 1;
            }

            page_start = (void *)page_address(start);

            os_protect(page_start,
                       npage_bytes(last - start),
                       OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);

            start = last;
        }
    }

    if (gencgc_verbose > 1) {
        FSHOW((stderr,
               "/write protected %d of %d pages in generation %d\n",
               count_write_protect_generation_pages(generation),
               count_generation_pages(generation),
               generation));
    }
}
#if defined(LISP_FEATURE_SB_THREAD) && (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
static void
preserve_context_registers (os_context_t *c)
{
    void **ptr;
    /* On Darwin the signal context isn't a contiguous block of memory,
     * so just preserve_pointer()ing its contents won't be sufficient.
     */
#if defined(LISP_FEATURE_DARWIN)||defined(LISP_FEATURE_WIN32)
#if defined LISP_FEATURE_X86
    preserve_pointer((void*)*os_context_register_addr(c,reg_EAX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_ECX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_EDX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_EBX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_ESI));
    preserve_pointer((void*)*os_context_register_addr(c,reg_EDI));
    preserve_pointer((void*)*os_context_pc_addr(c));
#elif defined LISP_FEATURE_X86_64
    preserve_pointer((void*)*os_context_register_addr(c,reg_RAX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_RCX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_RDX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_RBX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_RSI));
    preserve_pointer((void*)*os_context_register_addr(c,reg_RDI));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R8));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R9));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R10));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R11));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R12));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R13));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R14));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R15));
    preserve_pointer((void*)*os_context_pc_addr(c));
#else
    #error "preserve_context_registers needs to be tweaked for non-x86 Darwin"
#endif
#endif
#if !defined(LISP_FEATURE_WIN32)
    for(ptr = ((void **)(c+1))-1; ptr>=(void **)c; ptr--) {
        preserve_pointer(*ptr);
    }
#endif
}
#endif
static void
move_pinned_pages_to_newspace()
{
    page_index_t i;

    /* scavenge() will evacuate all oldspace pages, but no newspace
     * pages.  Pinned pages are precisely those pages which must not
     * be evacuated, so move them to newspace directly. */

    for (i = 0; i < last_free_page; i++) {
        if (page_table[i].dont_move &&
            /* dont_move is cleared lazily, so validate the space as well. */
            page_table[i].gen == from_space) {
            if (dontmove_dwords(i) && do_wipe_p) {
                // do not move to newspace after all, this will be word-wiped
                continue;
            }
            page_table[i].gen = new_space;
            /* And since we're moving the pages wholesale, also adjust
             * the generation allocation counters. */
            generations[new_space].bytes_allocated += page_table[i].bytes_used;
            generations[from_space].bytes_allocated -= page_table[i].bytes_used;
        }
    }
}
3550 /* Garbage collect a generation. If raise is 0 then the remains of the
3551 * generation are not raised to the next generation. */
3553 garbage_collect_generation(generation_index_t generation
, int raise
)
3556 uword_t static_space_size
;
3559 gc_assert(generation
<= HIGHEST_NORMAL_GENERATION
);
3561 /* The oldest generation can't be raised. */
3562 gc_assert((generation
!= HIGHEST_NORMAL_GENERATION
) || (raise
== 0));
3564 /* Check if weak hash tables were processed in the previous GC. */
3565 gc_assert(weak_hash_tables
== NULL
);
3567 /* Initialize the weak pointer list. */
3568 weak_pointers
= NULL
;
3570 /* When a generation is not being raised it is transported to a
3571 * temporary generation (NUM_GENERATIONS), and lowered when
3572 * done. Set up this new generation. There should be no pages
3573 * allocated to it yet. */
3575 gc_assert(generations
[SCRATCH_GENERATION
].bytes_allocated
== 0);
3578 /* Set the global src and dest. generations */
3579 from_space
= generation
;
3581 new_space
= generation
+1;
3583 new_space
= SCRATCH_GENERATION
;
3585 /* Change to a new space for allocation, resetting the alloc_start_page */
3586 gc_alloc_generation
= new_space
;
3587 generations
[new_space
].alloc_start_page
= 0;
3588 generations
[new_space
].alloc_unboxed_start_page
= 0;
3589 generations
[new_space
].alloc_large_start_page
= 0;
3590 generations
[new_space
].alloc_large_unboxed_start_page
= 0;
3592 /* Before any pointers are preserved, the dont_move flags on the
3593 * pages need to be cleared. */
3594 for (i
= 0; i
< last_free_page
; i
++)
3595 if(page_table
[i
].gen
==from_space
) {
3596 page_table
[i
].dont_move
= 0;
3597 gc_assert(dontmove_dwords(i
) == NULL
);
3600 /* Un-write-protect the old-space pages. This is essential for the
3601 * promoted pages as they may contain pointers into the old-space
3602 * which need to be scavenged. It also helps avoid unnecessary page
3603 * faults as forwarding pointers are written into them. They need to
3604 * be un-protected anyway before unmapping later. */
3605 unprotect_oldspace();
3607 /* Scavenge the stacks' conservative roots. */
3609 /* there are potentially two stacks for each thread: the main
3610 * stack, which may contain Lisp pointers, and the alternate stack.
3611 * We don't ever run Lisp code on the altstack, but it may
3612 * host a sigcontext with lisp objects in it */
3614 /* what we need to do: (1) find the stack pointer for the main
3615 * stack; scavenge it (2) find the interrupt context on the
3616 * alternate stack that might contain lisp values, and scavenge
3619 /* we assume that none of the preceding applies to the thread that
3620 * initiates GC. If you ever call GC from inside an altstack
3621 * handler, you will lose. */
3623 #if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
3624 /* And if we're saving a core, there's no point in being conservative. */
3625 if (conservative_stack
) {
3626 for_each_thread(th
) {
3628 void **esp
=(void **)-1;
3629 if (th
->state
== STATE_DEAD
)
# if defined(LISP_FEATURE_SB_SAFEPOINT)
            /* Conservative collect_garbage is always invoked with a
             * foreign C call or an interrupt handler on top of every
             * existing thread, so the stored SP in each thread
             * structure is valid, no matter which thread we are looking
             * at. For threads that were running Lisp code, the pitstop
             * and edge functions maintain this value within the
             * interrupt or exception handler. */
            esp = os_get_csp(th);
            assert_on_stack(th, esp);

            /* In addition to pointers on the stack, also preserve the
             * return PC, the only value from the context that we need
             * in addition to the SP. The return PC gets saved by the
             * foreign call wrapper, and removed from the control stack
             * into a register. */
            preserve_pointer(th->pc_around_foreign_call);

            /* And on platforms with interrupts: scavenge ctx registers. */

            /* Disabled on Windows, because it does not have an explicit
             * stack of `interrupt_contexts'. The reported CSP has been
             * chosen so that the current context on the stack is
             * covered by the stack scan. See also set_csp_from_context(). */
#  ifndef LISP_FEATURE_WIN32
            if (th != arch_os_get_current_thread()) {
                long k = fixnum_value(
                    SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
                while (k > 0)
                    preserve_context_registers(th->interrupt_contexts[--k]);
            }
#  endif
# elif defined(LISP_FEATURE_SB_THREAD)
            sword_t i, free;
            if (th == arch_os_get_current_thread()) {
                /* Somebody is going to burn in hell for this, but casting
                 * it in two steps shuts gcc up about strict aliasing. */
                esp = (void **)((void *)&raise);
            } else {
                void **esp1;
                free = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
                for (i = free-1; i >= 0; i--) {
                    os_context_t *c = th->interrupt_contexts[i];
                    esp1 = (void **) *os_context_register_addr(c,reg_SP);
                    if (esp1 >= (void **)th->control_stack_start &&
                        esp1 < (void **)th->control_stack_end) {
                        if (esp1 < esp) esp = esp1;
                        preserve_context_registers(c);
                    }
                }
            }
# else
            esp = (void **)((void *)&raise);
# endif
            if (!esp || esp == (void*) -1)
                lose("garbage_collect: no SP known for thread %x (OS %x)",
                     th, th->os_thread);
            for (ptr = ((void **)th->control_stack_end)-1; ptr >= esp; ptr--) {
                preserve_pointer(*ptr);
            }
        }
    }
#else
    /* Non-x86oid systems don't have "conservative roots" as such, but
     * the same mechanism is used for objects pinned for use by alien
     * code. */
    for_each_thread(th) {
        lispobj pin_list = SymbolTlValue(PINNED_OBJECTS,th);
        while (pin_list != NIL) {
            struct cons *list_entry =
                (struct cons *)native_pointer(pin_list);
            preserve_pointer((void*)list_entry->car);
            pin_list = list_entry->cdr;
        }
    }
#endif
    if (gencgc_verbose > 1) {
        sword_t num_dont_move_pages = count_dont_move_pages();
        fprintf(stderr,
                "/non-movable pages due to conservative pointers = %ld (%lu bytes)\n",
                num_dont_move_pages,
                npage_bytes(num_dont_move_pages));
    }
    /* Now that all of the pinned (dont_move) pages are known, and
     * before we start to scavenge (and thus relocate) objects,
     * relocate the pinned pages to newspace, so that the scavenger
     * will not attempt to relocate their contents. */
    move_pinned_pages_to_newspace();
    /* Scavenge all the rest of the roots. */

#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
    /*
     * If not x86, we need to scavenge the interrupt context(s) and the
     * control stack.
     */
    for_each_thread(th) {
        scavenge_interrupt_contexts(th);
        scavenge_control_stack(th);
    }

# ifdef LISP_FEATURE_SB_SAFEPOINT
    /* In this case, scrub all stacks right here from the GCing thread
     * instead of doing what the comment below says. Suboptimal, but
     * easier. */
    for_each_thread(th)
        scrub_thread_control_stack(th);
# else
    /* Scrub the unscavenged control stack space, so that we can't run
     * into any stale pointers in a later GC (this is done by the
     * stop-for-gc handler in the other threads). */
    scrub_control_stack();
# endif
#endif
    /* Scavenge the Lisp functions of the interrupt handlers, taking
     * care to avoid SIG_DFL and SIG_IGN. */
    for (i = 0; i < NSIG; i++) {
        union interrupt_handler handler = interrupt_handlers[i];
        if (!ARE_SAME_HANDLER(handler.c, SIG_IGN) &&
            !ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
            scavenge((lispobj *)(interrupt_handlers + i), 1);
        }
    }
    /* Scavenge the binding stacks. */
    for_each_thread(th) {
        sword_t len = (lispobj *)get_binding_stack_pointer(th) -
            th->binding_stack_start;
        scavenge((lispobj *) th->binding_stack_start, len);
#ifdef LISP_FEATURE_SB_THREAD
        /* do the TLS area as well */
        len = (SymbolValue(FREE_TLS_INDEX,0) >> WORD_SHIFT) -
            (sizeof (struct thread))/(sizeof (lispobj));
        scavenge((lispobj *) (th+1), len);
#endif
    }
    /* The original CMU CL code had scavenge-read-only-space code
     * controlled by the Lisp-level variable
     * *SCAVENGE-READ-ONLY-SPACE*. It was disabled by default, and it
     * wasn't documented under what circumstances it was useful or
     * safe to turn it on, so it's been turned off in SBCL. If you
     * want/need this functionality, and can test and document it,
     * please submit a patch. */
#if 0
    if (SymbolValue(SCAVENGE_READ_ONLY_SPACE) != NIL) {
        uword_t read_only_space_size =
            (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER) -
            (lispobj*)READ_ONLY_SPACE_START;
        FSHOW((stderr,
               "/scavenge read only space: %d bytes\n",
               read_only_space_size * sizeof(lispobj)));
        scavenge( (lispobj *) READ_ONLY_SPACE_START, read_only_space_size);
    }
#endif
    /* Scavenge static space. */
    static_space_size =
        (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0) -
        (lispobj *)STATIC_SPACE_START;
    if (gencgc_verbose > 1) {
        FSHOW((stderr,
               "/scavenge static space: %d bytes\n",
               static_space_size * sizeof(lispobj)));
    }
    scavenge( (lispobj *) STATIC_SPACE_START, static_space_size);
    /* All generations but the generation being GCed need to be
     * scavenged. The new_space generation needs special handling as
     * objects may be moved in - it is handled separately below. */
    scavenge_generations(generation+1, PSEUDO_STATIC_GENERATION);

    scavenge_pages_with_conservative_pointers_to_them_protected_objects_only();

    /* Finally scavenge the new_space generation. Keep going until no
     * more objects are moved into the new generation. */
    scavenge_newspace_generation(new_space);
    /* FIXME: I tried reenabling this check when debugging unrelated
     * GC weirdness ca. sbcl-0.6.12.45, and it failed immediately.
     * Since the current GC code seems to work well, I'm guessing that
     * this debugging code is just stale, but I haven't tried to
     * figure it out. It should be figured out and then either made to
     * work or just deleted. */

#define RESCAN_CHECK 0
#if RESCAN_CHECK
    /* As a check, re-scavenge the newspace once; no new objects should
     * be found. */
    {
        os_vm_size_t old_bytes_allocated = bytes_allocated;
        os_vm_size_t bytes_allocated;

        /* Start with a full scavenge. */
        scavenge_newspace_generation_one_scan(new_space);

        /* Flush the current regions, updating the tables. */
        gc_alloc_update_all_page_tables();

        bytes_allocated = bytes_allocated - old_bytes_allocated;

        if (bytes_allocated != 0) {
            lose("Rescan of new_space allocated %d more bytes.\n",
                 bytes_allocated);
        }
    }
#endif

    scan_weak_hash_tables();
    scan_weak_pointers();
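
    /* Illustration (not part of the collector): weak pointers are processed
     * only after all strong references have been scavenged; any weak
     * pointer whose referent did not survive is "broken".  A hypothetical
     * sketch, not the actual scan_weak_pointers() logic (which checks for
     * forwarding pointers on the real weak-pointer chain). */
#if 0
#include <stddef.h>

struct example_weak_pointer {
    void *value;                        /* weakly referenced object */
    struct example_weak_pointer *next;  /* chain collected during GC */
};

static void example_scan_weak_pointers(struct example_weak_pointer *chain,
                                       int (*survived)(void *obj),
                                       void *broken_marker)
{
    for (; chain != NULL; chain = chain->next)
        if (chain->value != NULL && !survived(chain->value))
            chain->value = broken_marker;
}
#endif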
    /* Flush the current regions, updating the tables. */
    gc_alloc_update_all_page_tables();
    /* Free the pages in oldspace, but not those marked dont_move. */
    free_oldspace();

    /* If the GC is not raising the age then lower the generation back
     * to its normal generation number. */
    if (!raise) {
        for (i = 0; i < last_free_page; i++)
            if ((page_table[i].bytes_used != 0)
                && (page_table[i].gen == SCRATCH_GENERATION))
                page_table[i].gen = generation;
        gc_assert(generations[generation].bytes_allocated == 0);
        generations[generation].bytes_allocated =
            generations[SCRATCH_GENERATION].bytes_allocated;
        generations[SCRATCH_GENERATION].bytes_allocated = 0;
    }
    /* Reset the alloc_start_page for generation. */
    generations[generation].alloc_start_page = 0;
    generations[generation].alloc_unboxed_start_page = 0;
    generations[generation].alloc_large_start_page = 0;
    generations[generation].alloc_large_unboxed_start_page = 0;

    if (generation >= verify_gens) {
        if (gencgc_verbose) {
            SHOW("verifying");
        }
        verify_gc();
        verify_dynamic_space();
    }

    /* Set the new gc trigger for the GCed generation. */
    generations[generation].gc_trigger =
        generations[generation].bytes_allocated
        + generations[generation].bytes_consed_between_gc;

    if (raise)
        generations[generation].num_gc = 0;
    else
        ++generations[generation].num_gc;
}
/* Update last_free_page, then SymbolValue(ALLOCATION_POINTER). */
sword_t
update_dynamic_space_free_pointer(void)
{
    page_index_t last_page = -1, i;

    for (i = 0; i < last_free_page; i++)
        if (page_allocated_p(i) && (page_table[i].bytes_used != 0))
            last_page = i;

    last_free_page = last_page+1;

    set_alloc_pointer((lispobj)(page_address(last_free_page)));
    return 0; /* dummy value: return something ... */
}
static void
remap_page_range (page_index_t from, page_index_t to)
{
    /* There's a mysterious Solaris/x86 problem with using mmap
     * tricks for memory zeroing. See sbcl-devel thread
     * "Re: patch: standalone executable redux". */
#if defined(LISP_FEATURE_SUNOS)
    zero_and_mark_pages(from, to);
#else
    const page_index_t
            release_granularity = gencgc_release_granularity/GENCGC_CARD_BYTES,
                   release_mask = release_granularity-1,
                            end = to+1,
                   aligned_from = (from+release_mask)&~release_mask,
                    aligned_end = (end&~release_mask);

    if (aligned_from < aligned_end) {
        zero_pages_with_mmap(aligned_from, aligned_end-1);
        if (aligned_from != from)
            zero_and_mark_pages(from, aligned_from-1);
        if (aligned_end != end)
            zero_and_mark_pages(aligned_end, end-1);
    } else {
        zero_and_mark_pages(from, to);
    }
#endif
}
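
/* Illustration of the mask arithmetic above (not part of the collector):
 * with a power-of-two release granularity, only the inner, fully aligned
 * part of [from, to] can be handed back wholesale; the ragged edges are
 * zeroed in place.  The numbers in the comment are only a worked example. */
#if 0
/* Worked example with a granularity of 8 cards (mask = 7):
 *   from = 13 -> aligned_from = (13 + 7) & ~7 = 16
 *   to   = 42 -> end = 43, aligned_end = 43 & ~7 = 40
 * so cards 16..39 can be released via mmap tricks, while 13..15 and
 * 40..42 are zeroed card by card. */
static void example_split_release_range(long from, long to, long granularity,
                                        long *aligned_from, long *aligned_end)
{
    long mask = granularity - 1;   /* granularity must be a power of two */
    long end = to + 1;
    *aligned_from = (from + mask) & ~mask;
    *aligned_end = end & ~mask;
}
#endif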
static void
remap_free_pages (page_index_t from, page_index_t to, int forcibly)
{
    page_index_t first_page, last_page;

    if (forcibly)
        return remap_page_range(from, to);

    for (first_page = from; first_page <= to; first_page++) {
        if (page_allocated_p(first_page) ||
            (page_table[first_page].need_to_zero == 0))
            continue;

        last_page = first_page + 1;
        while (page_free_p(last_page) &&
               (last_page <= to) &&
               (page_table[last_page].need_to_zero == 1))
            last_page++;

        remap_page_range(first_page, last_page-1);

        first_page = last_page;
    }
}
generation_index_t small_generation_limit = 1;

/* GC all generations newer than last_gen, raising the objects in each
 * to the next older generation - we finish when all generations below
 * last_gen are empty. Then if last_gen is due for a GC, or if
 * last_gen==NUM_GENERATIONS (the scratch generation? eh?) we GC that
 * too. The valid range for last_gen is: 0,1,...,NUM_GENERATIONS.
 *
 * We stop collecting at gencgc_oldest_gen_to_gc, even if this is less than
 * last_gen (oh, and note that by default it is NUM_GENERATIONS-1). */
void
collect_garbage(generation_index_t last_gen)
{
    generation_index_t gen = 0, i;
    int raise, more = 0;
    int gen_to_wp;
    /* The largest value of last_free_page seen since the time
     * remap_free_pages was called. */
    static page_index_t high_water_mark = 0;

    FSHOW((stderr, "/entering collect_garbage(%d)\n", last_gen));
    log_generation_stats(gc_logfile, "=== GC Start ===");

    if (last_gen > HIGHEST_NORMAL_GENERATION+1) {
        FSHOW((stderr,
               "/collect_garbage: last_gen = %d, doing a level 0 GC\n",
               last_gen));
        last_gen = 0;
    }

    /* Flush the alloc regions updating the tables. */
    gc_alloc_update_all_page_tables();

    /* Verify the new objects created by Lisp code. */
    if (pre_verify_gen_0) {
        FSHOW((stderr, "pre-checking generation 0\n"));
        verify_generation(0);
    }

    if (gencgc_verbose > 1)
        print_generation_stats();

    do {
        /* Collect the generation. */

        if (more || (gen >= gencgc_oldest_gen_to_gc)) {
            /* Never raise the oldest generation. Never raise the extra
             * generation collected due to the more-flag. */
            raise = 0;
            more = 0;
        } else {
            raise =
                (gen < last_gen)
                || (generations[gen].num_gc >= generations[gen].number_of_gcs_before_promotion);
            /* If we would not normally raise this one, but we're
             * running low on space in comparison to the object-sizes
             * we've been seeing, raise it and collect the next one
             * too. */
            if (!raise && gen == last_gen) {
                more = (2*large_allocation) >= (dynamic_space_size - bytes_allocated);
                raise = more;
            }
        }
        if (gencgc_verbose > 1) {
            FSHOW((stderr,
                   "starting GC of generation %d with raise=%d alloc=%d trig=%d GCs=%d\n",
                   gen,
                   raise,
                   generations[gen].bytes_allocated,
                   generations[gen].gc_trigger,
                   generations[gen].num_gc));
        }
        /* If an older generation is being filled, then update its
         * memory age. */
        if (raise == 1) {
            generations[gen+1].cum_sum_bytes_allocated +=
                generations[gen+1].bytes_allocated;
        }
        garbage_collect_generation(gen, raise);

        /* Reset the memory age cum_sum. */
        generations[gen].cum_sum_bytes_allocated = 0;

        if (gencgc_verbose > 1) {
            FSHOW((stderr, "GC of generation %d finished:\n", gen));
            print_generation_stats();
        }

        gen++;
    } while ((gen <= gencgc_oldest_gen_to_gc)
             && ((gen < last_gen)
                 || more
                 || (raise
                     && (generations[gen].bytes_allocated
                         > generations[gen].gc_trigger)
                     && (generation_average_age(gen)
                         > generations[gen].minimum_age_before_gc))));
    /* Now if gen-1 was raised all generations before gen are empty.
     * If it wasn't raised then all generations before gen-1 are empty.
     *
     * Now objects within this gen's pages cannot point to younger
     * generations unless they are written to. This can be exploited
     * by write-protecting the pages of gen; then when younger
     * generations are GCed only the pages which have been written
     * need scanning. */
    if (raise)
        gen_to_wp = gen;
    else
        gen_to_wp = gen - 1;
    /* There's not much point in WPing pages in generation 0 as it is
     * never scavenged (except promoted pages). */
    if ((gen_to_wp > 0) && enable_page_protection) {
        /* Check that they are all empty. */
        for (i = 0; i < gen_to_wp; i++) {
            if (generations[i].bytes_allocated)
                lose("trying to write-protect gen. %d when gen. %d nonempty\n",
                     gen_to_wp, i);
        }
        write_protect_generation_pages(gen_to_wp);
    }
    /* Set gc_alloc() back to generation 0. The current regions should
     * be flushed after the above GCs. */
    gc_assert((boxed_region.free_pointer - boxed_region.start_addr) == 0);
    gc_alloc_generation = 0;

    /* Save the high-water mark before updating last_free_page */
    if (last_free_page > high_water_mark)
        high_water_mark = last_free_page;

    update_dynamic_space_free_pointer();
    /* Update auto_gc_trigger. Make sure we trigger the next GC before
     * running out of heap! */
    if (bytes_consed_between_gcs <= (dynamic_space_size - bytes_allocated))
        auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
    else
        auto_gc_trigger = bytes_allocated + (dynamic_space_size - bytes_allocated)/2;

    if (gencgc_verbose)
        fprintf(stderr, "Next gc when %"OS_VM_SIZE_FMT" bytes have been consed\n",
                auto_gc_trigger);
    /* If we did a big GC (arbitrarily defined as gen > 1), release memory
     * back to the OS. */
    if (gen > small_generation_limit) {
        if (last_free_page > high_water_mark)
            high_water_mark = last_free_page;
        remap_free_pages(0, high_water_mark, 0);
        high_water_mark = 0;
    }

    large_allocation = 0;

    log_generation_stats(gc_logfile, "=== GC End ===");
    SHOW("returning from collect_garbage");
}
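
/* Illustration (not part of the collector): how collect_garbage() is
 * typically driven.  A nursery collection passes generation 0; a full
 * collection passes one past the highest normal generation, exactly as
 * gc_and_save() does below.  A hedged sketch only; the real call sites
 * are in the rest of the runtime and in the Lisp-side GC support. */
#if 0
static void example_request_gcs(void)
{
    collect_garbage(0);                              /* nursery-only GC */
    collect_garbage(HIGHEST_NORMAL_GENERATION + 1);  /* full GC */
}
#endif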
/* This is called by Lisp PURIFY when it is finished. All live objects
 * will have been moved to the RO and Static heaps. The dynamic space
 * will need a full re-initialization. We don't bother having Lisp
 * PURIFY flush the current gc_alloc() region, as the page_tables are
 * re-initialized, and every page is zeroed to be sure. */
void
gc_free_heap(void)
{
    page_index_t page, last_page;

    if (gencgc_verbose > 1) {
        SHOW("entering gc_free_heap");
    }
    for (page = 0; page < page_table_pages; page++) {
        /* Skip free pages which should already be zero filled. */
        if (page_allocated_p(page)) {
            void *page_start;
            for (last_page = page;
                 (last_page < page_table_pages) && page_allocated_p(last_page);
                 last_page++) {
                /* Mark the page free. The other slots are assumed invalid
                 * when it is a FREE_PAGE_FLAG and bytes_used is 0 and it
                 * should not be write-protected -- except that the
                 * generation is used for the current region but it sets
                 * that to 0. */
                page_table[last_page].allocated = FREE_PAGE_FLAG;
                page_table[last_page].bytes_used = 0;
                page_table[last_page].write_protected = 0;
            }

#ifndef LISP_FEATURE_WIN32 /* Pages already zeroed on win32? Not sure
                            * about this change. */
            page_start = (void *)page_address(page);
            os_protect(page_start, npage_bytes(last_page-page), OS_VM_PROT_ALL);
            remap_free_pages(page, last_page-1, 1);
            page = last_page-1;
#endif
        } else if (gencgc_zero_check_during_free_heap) {
            /* Double-check that the page is zero filled. */
            sword_t *page_start;
            page_index_t i;
            gc_assert(page_free_p(page));
            gc_assert(page_table[page].bytes_used == 0);
            page_start = (sword_t *)page_address(page);
            for (i = 0; i < (long)(GENCGC_CARD_BYTES/sizeof(sword_t)); i++) {
                if (page_start[i] != 0) {
                    lose("free region not zero at %x\n", page_start + i);
                }
            }
        }
    }
    bytes_allocated = 0;

    /* Initialize the generations. */
    for (page = 0; page < NUM_GENERATIONS; page++) {
        generations[page].alloc_start_page = 0;
        generations[page].alloc_unboxed_start_page = 0;
        generations[page].alloc_large_start_page = 0;
        generations[page].alloc_large_unboxed_start_page = 0;
        generations[page].bytes_allocated = 0;
        generations[page].gc_trigger = 2000000;
        generations[page].num_gc = 0;
        generations[page].cum_sum_bytes_allocated = 0;
    }

    if (gencgc_verbose > 1)
        print_generation_stats();
    /* Initialize gc_alloc(). */
    gc_alloc_generation = 0;

    gc_set_region_empty(&boxed_region);
    gc_set_region_empty(&unboxed_region);

    last_free_page = 0;
    set_alloc_pointer((lispobj)((char *)heap_base));

    if (verify_after_free_heap) {
        /* Check whether purify has left any bad pointers. */
        FSHOW((stderr, "checking after free_heap\n"));
        verify_gc();
    }
}
void
gc_init(void)
{
    page_index_t i;

#if defined(LISP_FEATURE_SB_SAFEPOINT)
    alloc_gc_page();
#endif

    /* Compute the number of pages needed for the dynamic space.
     * Dynamic space size should be aligned on page size. */
    page_table_pages = dynamic_space_size/GENCGC_CARD_BYTES;
    gc_assert(dynamic_space_size == npage_bytes(page_table_pages));
    /* Default nursery size to 5% of the total dynamic space size,
     * with a 1Mb minimum. */
    bytes_consed_between_gcs = dynamic_space_size/(os_vm_size_t)20;
    if (bytes_consed_between_gcs < (1024*1024))
        bytes_consed_between_gcs = 1024*1024;
    /* The page_table must be allocated using "calloc" to initialize
     * the page structures correctly. There used to be a separate
     * initialization loop (now commented out; see below) but that was
     * unnecessary and did hurt startup time. */
    page_table = calloc(page_table_pages, sizeof(struct page));
    gc_assert(page_table);

    size_t total_size = sizeof(in_use_marker_t) * n_dwords_in_card *
        page_table_pages;
    /* We use mmap directly here so that we can use a minimum of
     * system calls per page during GC.
     * All we need here now is a madvise(DONTNEED) at the end of GC. */
    page_table_dontmove_dwords = os_validate(NULL, total_size);
    /* We do not need to zero, in fact we shouldn't. Pages actually
     * used are zeroed before use. */

    gc_assert(page_table_dontmove_dwords);
    page_table_dontmove_dwords_size_in_bytes = total_size;
    scavtab[WEAK_POINTER_WIDETAG] = scav_weak_pointer;
    transother[SIMPLE_ARRAY_WIDETAG] = trans_boxed_large;

    heap_base = (void*)DYNAMIC_SPACE_START;
    /* The page structures are initialized implicitly when page_table
     * is allocated with "calloc" above. Formerly we had the following
     * explicit initialization here (comments converted to C99 style
     * for readability as C's block comments don't nest):
     *
     * // Initialize each page structure.
     * for (i = 0; i < page_table_pages; i++) {
     *     // Initialize all pages as free.
     *     page_table[i].allocated = FREE_PAGE_FLAG;
     *     page_table[i].bytes_used = 0;
     *
     *     // Pages are not write-protected at startup.
     *     page_table[i].write_protected = 0;
     * }
     *
     * Without this loop the image starts up much faster when dynamic
     * space is large -- which it is on 64-bit platforms already by
     * default -- and when "calloc" for large arrays is implemented
     * using copy-on-write of a page of zeroes -- which it is at least
     * on Linux. In this case the pages that page_table is stored
     * in are mapped and cleared only when the corresponding part of
     * dynamic space is used. For example, this saves clearing 16 MB of
     * memory at startup if the page size is 4 KB and the size of
     * dynamic space is 4 GB.
     * FREE_PAGE_FLAG must be 0 for this to work correctly, which is
     * asserted below: */

    {
        /* Compile time assertion: If triggered, declares an array
         * of dimension -1 forcing a syntax error. The intent of the
         * assignment is to avoid an "unused variable" warning. */
        char assert_free_page_flag_0[(FREE_PAGE_FLAG) ? -1 : 1];
        assert_free_page_flag_0[0] = assert_free_page_flag_0[0];
    }
    bytes_allocated = 0;

    /* Initialize the generations.
     *
     * FIXME: very similar to code in gc_free_heap(), should be shared */
    for (i = 0; i < NUM_GENERATIONS; i++) {
        generations[i].alloc_start_page = 0;
        generations[i].alloc_unboxed_start_page = 0;
        generations[i].alloc_large_start_page = 0;
        generations[i].alloc_large_unboxed_start_page = 0;
        generations[i].bytes_allocated = 0;
        generations[i].gc_trigger = 2000000;
        generations[i].num_gc = 0;
        generations[i].cum_sum_bytes_allocated = 0;
        /* the tune-able parameters */
        generations[i].bytes_consed_between_gc
            = bytes_consed_between_gcs/(os_vm_size_t)HIGHEST_NORMAL_GENERATION;
        generations[i].number_of_gcs_before_promotion = 1;
        generations[i].minimum_age_before_gc = 0.75;
    }
    /* Initialize gc_alloc. */
    gc_alloc_generation = 0;
    gc_set_region_empty(&boxed_region);
    gc_set_region_empty(&unboxed_region);

    last_free_page = 0;
}
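
/* Illustration (not part of the collector): why calloc() plus
 * FREE_PAGE_FLAG == 0 is sufficient.  An all-zero page entry already
 * means "free, nothing used, not write-protected", so no explicit
 * initialization loop is needed and untouched parts of a large table
 * stay on copy-on-write zero pages.  The struct below is a hypothetical
 * simplification of the real struct page. */
#if 0
#include <stdlib.h>

struct example_page {
    unsigned char allocated;        /* 0 plays the role of FREE_PAGE_FLAG */
    unsigned char write_protected;
    unsigned long bytes_used;
};

static struct example_page *example_make_page_table(size_t npages)
{
    /* Zero-filled entries == every page free, at no startup cost for
     * the untouched portion of the table. */
    return calloc(npages, sizeof(struct example_page));
}
#endif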
/* Pick up the dynamic space from after a core load.
 *
 * The ALLOCATION_POINTER points to the end of the dynamic space. */
static void
gencgc_pickup_dynamic(void)
{
    page_index_t page = 0;
    void *alloc_ptr = (void *)get_alloc_pointer();
    lispobj *prev = (lispobj *)page_address(page);
    generation_index_t gen = PSEUDO_STATIC_GENERATION;

    bytes_allocated = 0;
    do {
        lispobj *first, *ptr = (lispobj *)page_address(page);

        if (!gencgc_partial_pickup || page_allocated_p(page)) {
            /* It is possible, though rare, for the saved page table
             * to contain free pages below alloc_ptr. */
            page_table[page].gen = gen;
            page_table[page].bytes_used = GENCGC_CARD_BYTES;
            page_table[page].large_object = 0;
            page_table[page].write_protected = 0;
            page_table[page].write_protected_cleared = 0;
            page_table[page].dont_move = 0;
            page_table[page].need_to_zero = 1;

            bytes_allocated += GENCGC_CARD_BYTES;
        }

        if (!gencgc_partial_pickup) {
            page_table[page].allocated = BOXED_PAGE_FLAG;
            first = gc_search_space(prev, (ptr+2)-prev, ptr);
            if (ptr == first)
                prev = ptr;
            page_table[page].scan_start_offset =
                page_address(page) - (void *)prev;
        }
        page++;
    } while (page_address(page) < alloc_ptr);

    last_free_page = page;

    generations[gen].bytes_allocated = bytes_allocated;

    gc_alloc_update_all_page_tables();
    write_protect_generation_pages(gen);
}
void
gc_initialize_pointers(void)
{
    gencgc_pickup_dynamic();
}
/* alloc(..) is the external interface for memory allocation. It
 * allocates to generation 0. It is not called from within the garbage
 * collector as it is only external uses that need the check for heap
 * size (GC trigger) and to disable the interrupts (interrupts are
 * always disabled during a GC).
 *
 * The vops that call alloc(..) assume that the returned space is zero-filled.
 * (E.g. the most significant word of a 2-word bignum in MOVE-FROM-UNSIGNED.)
 *
 * The check for a GC trigger is only performed when the current
 * region is full, so in most cases it's not needed. */
static inline lispobj *
general_alloc_internal(sword_t nbytes, int page_type_flag, struct alloc_region *region,
                       struct thread *thread)
{
#ifndef LISP_FEATURE_WIN32
    lispobj alloc_signal;
#endif
    void *new_obj;
    void *new_free_pointer;
    os_vm_size_t trigger_bytes = 0;

    gc_assert(nbytes > 0);
    /* Check for alignment allocation problems. */
    gc_assert((((uword_t)region->free_pointer & LOWTAG_MASK) == 0)
              && ((nbytes & LOWTAG_MASK) == 0));

#if !(defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD))
    /* Must be inside a PA section. */
    gc_assert(get_pseudo_atomic_atomic(thread));
#endif

    if ((os_vm_size_t) nbytes > large_allocation)
        large_allocation = nbytes;
    /* maybe we can do this quickly ... */
    new_free_pointer = region->free_pointer + nbytes;
    if (new_free_pointer <= region->end_addr) {
        new_obj = (void*)(region->free_pointer);
        region->free_pointer = new_free_pointer;
        return(new_obj);        /* yup */
    }
    /* We don't want to count nbytes against auto_gc_trigger unless we
     * have to: it speeds up the tenuring of objects and slows down
     * allocation. However, unless we do so when allocating _very_
     * large objects we are in danger of exhausting the heap without
     * running sufficient GCs. */
    if ((os_vm_size_t) nbytes >= bytes_consed_between_gcs)
        trigger_bytes = nbytes;

    /* We have to go the long way around, it seems. Check whether we
     * should GC in the near future. */
    if (auto_gc_trigger && (bytes_allocated+trigger_bytes > auto_gc_trigger)) {
        /* Don't flood the system with interrupts if the need to gc is
         * already noted. This can happen for example when SUB-GC
         * allocates or after a gc triggered in a WITHOUT-GCING. */
        if (SymbolValue(GC_PENDING,thread) == NIL) {
            /* set things up so that GC happens when we finish the PA
             * section */
            SetSymbolValue(GC_PENDING,T,thread);
            if (SymbolValue(GC_INHIBIT,thread) == NIL) {
#ifdef LISP_FEATURE_SB_SAFEPOINT
                thread_register_gc_trigger();
#else
                set_pseudo_atomic_interrupted(thread);
#ifdef GENCGC_IS_PRECISE
                /* PPC calls alloc() from a trap handler; if that is the
                 * case, look up the most recent context and pass its
                 * sigmask along. */
                {
                    os_context_t *context =
                        thread->interrupt_data->allocation_trap_context;
                    maybe_save_gc_mask_and_block_deferrables
                        (context ? os_context_sigmask_addr(context) : NULL);
                }
#else
                maybe_save_gc_mask_and_block_deferrables(NULL);
#endif
#endif
            }
        }
    }
    new_obj = gc_alloc_with_region(nbytes, page_type_flag, region, 0);

#ifndef LISP_FEATURE_WIN32
    /* for sb-prof, and not supported on Windows yet */
    alloc_signal = SymbolValue(ALLOC_SIGNAL,thread);
    if ((alloc_signal & FIXNUM_TAG_MASK) == 0) {
        if ((sword_t) alloc_signal <= 0) {
            SetSymbolValue(ALLOC_SIGNAL, T, thread);
            raise(SIGPROF);
        } else {
            SetSymbolValue(ALLOC_SIGNAL,
                           alloc_signal - (1 << N_FIXNUM_TAG_BITS),
                           thread);
        }
    }
#endif

    return (new_obj);
}
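
/* Illustration (not part of the collector): the inline fast path above
 * is a plain pointer bump within the current allocation region; only
 * when the region is exhausted does the caller fall back to the slow
 * path, which may also arm the GC trigger.  Hypothetical names. */
#if 0
#include <stddef.h>

struct example_region { char *free_pointer; char *end_addr; };

/* Returns the new object's address, or NULL when the slow path
 * (refill the region / consider triggering a GC) must be taken. */
static void *example_try_bump_alloc(struct example_region *r, size_t nbytes)
{
    char *new_free_pointer = r->free_pointer + nbytes;
    if (new_free_pointer <= r->end_addr) {
        void *new_obj = r->free_pointer;
        r->free_pointer = new_free_pointer;
        return new_obj;
    }
    return NULL;
}
#endif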
lispobj *
general_alloc(sword_t nbytes, int page_type_flag)
{
    struct thread *thread = arch_os_get_current_thread();
    /* Select the correct region, and call general_alloc_internal with it.
     * For other than boxed allocation we must lock first, since the
     * region is shared. */
    if (BOXED_PAGE_FLAG & page_type_flag) {
#ifdef LISP_FEATURE_SB_THREAD
        struct alloc_region *region = (thread ? &(thread->alloc_region) : &boxed_region);
#else
        struct alloc_region *region = &boxed_region;
#endif
        return general_alloc_internal(nbytes, page_type_flag, region, thread);
    } else if (UNBOXED_PAGE_FLAG == page_type_flag) {
        lispobj *obj;
        gc_assert(0 == thread_mutex_lock(&allocation_lock));
        obj = general_alloc_internal(nbytes, page_type_flag, &unboxed_region, thread);
        gc_assert(0 == thread_mutex_unlock(&allocation_lock));
        return obj;
    } else {
        lose("bad page type flag: %d", page_type_flag);
    }
}
lispobj AMD64_SYSV_ABI *
alloc(sword_t nbytes)
{
#ifdef LISP_FEATURE_SB_SAFEPOINT_STRICTLY
    struct thread *self = arch_os_get_current_thread();
    int was_pseudo_atomic = get_pseudo_atomic_atomic(self);
    if (!was_pseudo_atomic)
        set_pseudo_atomic_atomic(self);
#else
    gc_assert(get_pseudo_atomic_atomic(arch_os_get_current_thread()));
#endif

    lispobj *result = general_alloc(nbytes, BOXED_PAGE_FLAG);

#ifdef LISP_FEATURE_SB_SAFEPOINT_STRICTLY
    if (!was_pseudo_atomic)
        clear_pseudo_atomic_atomic(self);
#endif

    return result;
}
/*
 * shared support for the OS-dependent signal handlers which
 * catch GENCGC-related write-protect violations
 */
void unhandled_sigmemoryfault(void* addr);

/* Depending on which OS we're running under, different signals might
 * be raised for a violation of write protection in the heap. This
 * function factors out the common generational GC magic which needs
 * to be invoked in this case, and should be called from whatever signal
 * handler is appropriate for the OS we're running under.
 *
 * Return true if this signal is a normal generational GC thing that
 * we were able to handle, or false if it was abnormal and control
 * should fall through to the general SIGSEGV/SIGBUS/whatever logic.
 *
 * We have two control flags for this: one causes us to ignore faults
 * on unprotected pages completely, and the second complains to stderr
 * but allows us to continue without losing. */
extern boolean ignore_memoryfaults_on_unprotected_pages;
boolean ignore_memoryfaults_on_unprotected_pages = 0;

extern boolean continue_after_memoryfault_on_unprotected_pages;
boolean continue_after_memoryfault_on_unprotected_pages = 0;
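
/* Illustration (not part of the collector): the calling convention
 * described above, as it might look in a POSIX SIGSEGV handler
 * installed with SA_SIGINFO.  The fault address is handed to
 * gencgc_handle_wp_violation(); only if the GC declines does control
 * fall through to the generic error path.  A hedged sketch; the real
 * per-OS handlers live elsewhere in the runtime. */
#if 0
#include <signal.h>
#include <stdlib.h>

static void example_sigsegv_handler(int sig, siginfo_t *info, void *context)
{
    (void)sig; (void)context;
    if (gencgc_handle_wp_violation(info->si_addr))
        return;   /* write-barrier fault: page unprotected, just retry */
    abort();      /* not the GC's fault: generic SIGSEGV handling here */
}
#endif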
int
gencgc_handle_wp_violation(void* fault_addr)
{
    page_index_t page_index = find_page_index(fault_addr);

    FSHOW((stderr,
           "heap WP violation? fault_addr=%p, page_index=%"PAGE_INDEX_FMT"\n",
           fault_addr, page_index));

    /* Check whether the fault is within the dynamic space. */
    if (page_index == (-1)) {

        /* It can be helpful to be able to put a breakpoint on this
         * case to help diagnose low-level problems. */
        unhandled_sigmemoryfault(fault_addr);

        /* not within the dynamic space -- not our responsibility */
        return 0;

    } else {
        int ret;
        ret = thread_mutex_lock(&free_pages_lock);
        gc_assert(ret == 0);
        if (page_table[page_index].write_protected) {
            /* Unprotect the page. */
            os_protect(page_address(page_index), GENCGC_CARD_BYTES, OS_VM_PROT_ALL);
            page_table[page_index].write_protected_cleared = 1;
            page_table[page_index].write_protected = 0;
        } else if (!ignore_memoryfaults_on_unprotected_pages) {
            /* The only acceptable reason for this signal on a heap
             * access is that GENCGC write-protected the page.
             * However, if two CPUs hit a wp page near-simultaneously,
             * we had better not have the second one lose here if it
             * does this test after the first one has already set wp=0. */
            if (page_table[page_index].write_protected_cleared != 1) {
                void lisp_backtrace(int frames);
                lisp_backtrace(10);
                fprintf(stderr,
                        "Fault @ %p, page %"PAGE_INDEX_FMT" not marked as write-protected:\n"
                        "  boxed_region.first_page: %"PAGE_INDEX_FMT","
                        "  boxed_region.last_page %"PAGE_INDEX_FMT"\n"
                        "  page.scan_start_offset: %"OS_VM_SIZE_FMT"\n"
                        "  page.bytes_used: %"PAGE_BYTES_FMT"\n"
                        "  page.allocated: %d\n"
                        "  page.write_protected: %d\n"
                        "  page.write_protected_cleared: %d\n"
                        "  page.generation: %d\n",
                        fault_addr,
                        page_index,
                        boxed_region.first_page,
                        boxed_region.last_page,
                        page_table[page_index].scan_start_offset,
                        page_table[page_index].bytes_used,
                        page_table[page_index].allocated,
                        page_table[page_index].write_protected,
                        page_table[page_index].write_protected_cleared,
                        page_table[page_index].gen);
                if (!continue_after_memoryfault_on_unprotected_pages)
                    lose("Feh.\n");
            }
        }
        ret = thread_mutex_unlock(&free_pages_lock);
        gc_assert(ret == 0);
        /* Don't worry, we can handle it. */
        return 1;
    }
}
/* This is to be called when we catch a SIGSEGV/SIGBUS, determine that
 * it's not just a case of the program hitting the write barrier, and
 * are about to let Lisp deal with it. It's basically just a
 * convenient place to set a gdb breakpoint. */
void
unhandled_sigmemoryfault(void *addr)
{}
void gc_alloc_update_all_page_tables(void)
{
    /* Flush the alloc regions updating the tables. */
    struct thread *th;
    for_each_thread(th) {
        gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->alloc_region);
#if defined(LISP_FEATURE_SB_SAFEPOINT_STRICTLY) && !defined(LISP_FEATURE_WIN32)
        gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->sprof_alloc_region);
#endif
    }
    gc_alloc_update_page_tables(UNBOXED_PAGE_FLAG, &unboxed_region);
    gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &boxed_region);
}
void
gc_set_region_empty(struct alloc_region *region)
{
    region->first_page = 0;
    region->last_page = -1;
    region->start_addr = page_address(0);
    region->free_pointer = page_address(0);
    region->end_addr = page_address(0);
}
static void
zero_all_free_pages()
{
    page_index_t i;

    for (i = 0; i < last_free_page; i++) {
        if (page_free_p(i)) {
#ifdef READ_PROTECT_FREE_PAGES
            os_protect(page_address(i),
                       GENCGC_CARD_BYTES,
                       OS_VM_PROT_ALL);
#endif
            zero_pages(i, i);
        }
    }
}
/* Things to do before doing a final GC before saving a core (without
 * purify).
 *
 * + Large-object pages aren't moved by the GC, so we need to
 *   unset that flag on all pages.
 * + The pseudo-static generation isn't normally collected, but it seems
 *   reasonable to collect it at least when saving a core. So move the
 *   pages to a normal generation. */
static void
prepare_for_final_gc ()
{
    page_index_t i;

    for (i = 0; i < last_free_page; i++) {
        page_table[i].large_object = 0;
        if (page_table[i].gen == PSEUDO_STATIC_GENERATION) {
            int used = page_table[i].bytes_used;
            page_table[i].gen = HIGHEST_NORMAL_GENERATION;
            generations[PSEUDO_STATIC_GENERATION].bytes_allocated -= used;
            generations[HIGHEST_NORMAL_GENERATION].bytes_allocated += used;
        }
    }
}
/* Do a non-conservative GC, and then save a core with the initial
 * function being set to the value of the static symbol
 * SB!VM:RESTART-LISP-FUNCTION */
void
gc_and_save(char *filename, boolean prepend_runtime,
            boolean save_runtime_options, boolean compressed,
            int compression_level, int application_type)
{
    FILE *file;
    void *runtime_bytes = NULL;
    size_t runtime_size;

    file = prepare_to_save(filename, prepend_runtime, &runtime_bytes,
                           &runtime_size);
    if (file == NULL)
        return;
= 0;
4730 /* The filename might come from Lisp, and be moved by the now
4731 * non-conservative GC. */
4732 filename
= strdup(filename
);
4734 /* Collect twice: once into relatively high memory, and then back
4735 * into low memory. This compacts the retained data into the lower
4736 * pages, minimizing the size of the core file.
4738 prepare_for_final_gc();
4739 gencgc_alloc_start_page
= last_free_page
;
4740 collect_garbage(HIGHEST_NORMAL_GENERATION
+1);
4742 prepare_for_final_gc();
4743 gencgc_alloc_start_page
= -1;
4744 collect_garbage(HIGHEST_NORMAL_GENERATION
+1);
    if (prepend_runtime)
        save_runtime_to_filehandle(file, runtime_bytes, runtime_size,
                                   application_type);

    /* The dumper doesn't know that pages need to be zeroed before use. */
    zero_all_free_pages();
    save_to_filehandle(file, filename, SymbolValue(RESTART_LISP_FUNCTION,0),
                       prepend_runtime, save_runtime_options,
                       compressed ? compression_level : COMPRESSION_LEVEL_NONE);
    /* Oops. Save still managed to fail. Since we've mangled the stack
     * beyond hope, there's not much we can do.
     * (Beyond FUNCALLing RESTART_LISP_FUNCTION, but I suspect that's
     * going to be rather unsatisfactory too...) */
    lose("Attempt to save core after non-conservative GC failed.\n");
}