 * GENerational Conservative Garbage Collector for SBCL
 *
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 *
 * For a review of garbage collection techniques (e.g. generational
 * GC) and terminology (e.g. "scavenging") see Paul R. Wilson,
 * "Uniprocessor Garbage Collection Techniques". As of 20000618, this
 * had been accepted for _ACM Computing Surveys_ and was available
 * as a PostScript preprint through
 *   <http://www.cs.utexas.edu/users/oops/papers.html>
 * as
 *   <ftp://ftp.cs.utexas.edu/pub/garbage/bigsurv.ps>.
 */
#if defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD)
#include "pthreads_win32.h"
#endif
#include "interrupt.h"
#include "gc-internal.h"
#include "pseudo-atomic.h"
#include "genesis/vector.h"
#include "genesis/weak-pointer.h"
#include "genesis/fdefn.h"
#include "genesis/simple-fun.h"
#include "genesis/hash-table.h"
#include "genesis/instance.h"
#include "genesis/layout.h"

#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
#include "genesis/cons.h"
#endif
/* forward declarations */
page_index_t gc_find_freeish_pages(page_index_t *restart_page_ptr, sword_t nbytes,
                                   int page_type_flag);
/* As usually configured, generations 0-5 are normal collected generations,
   6 is pseudo-static (the objects in which are never moved nor reclaimed),
   and 7 is scratch space used when collecting a generation without promotion,
   wherein it is moved to generation 7 and back again. */
enum {
    SCRATCH_GENERATION = PSEUDO_STATIC_GENERATION+1,
    NUM_GENERATIONS
};
/* Should we use page protection to help avoid the scavenging of pages
 * that don't have pointers to younger generations? */
boolean enable_page_protection = 1;

/* Largest allocation seen since last GC. */
os_vm_size_t large_allocation = 0;
/* the verbosity level. All non-error messages are disabled at level 0;
 * and only a few rare messages are printed at level 1. */
#if QSHOW == 2
boolean gencgc_verbose = 1;
#else
boolean gencgc_verbose = 0;
#endif
/* FIXME: At some point enable the various error-checking things below
 * and see what they say. */

/* We hunt for pointers to old-space, when GCing generations >= verify_gen.
 * Set verify_gens to HIGHEST_NORMAL_GENERATION + 1 to disable this kind of
 * check. */
generation_index_t verify_gens = HIGHEST_NORMAL_GENERATION + 1;
/* Should we do a pre-scan verify of generation 0 before it's GCed? */
boolean pre_verify_gen_0 = 0;

/* Should we check for bad pointers after gc_free_heap is called
 * from Lisp PURIFY? */
boolean verify_after_free_heap = 0;

/* Should we print a note when code objects are found in the dynamic space
 * during a heap verify? */
boolean verify_dynamic_code_check = 0;

#ifdef LISP_FEATURE_X86
/* Should we check code objects for fixup errors after they are transported? */
boolean check_code_fixups = 0;
#endif

/* Should we check that newly allocated regions are zero filled? */
boolean gencgc_zero_check = 0;

/* Should we check that the free space is zero filled? */
boolean gencgc_enable_verify_zero_fill = 0;

/* Should we check that free pages are zero filled during gc_free_heap
 * called after Lisp PURIFY? */
boolean gencgc_zero_check_during_free_heap = 0;

/* When loading a core, don't do a full scan of the memory for the
 * memory region boundaries. (Set to true by coreparse.c if the core
 * contained a pagetable entry). */
boolean gencgc_partial_pickup = 0;

/* If defined, free pages are read-protected to ensure that nothing
 * accesses them. */
/* #define READ_PROTECT_FREE_PAGES */
/*
 * GC structures and variables
 */

/* the total bytes allocated. These are seen by Lisp DYNAMIC-USAGE. */
os_vm_size_t bytes_allocated = 0;
os_vm_size_t auto_gc_trigger = 0;

/* the source and destination generations. These are set before a GC starts
 * scavenging. */
generation_index_t from_space;
generation_index_t new_space;

/* Set to 1 when in GC */
boolean gc_active_p = 0;

/* Should the GC be conservative on stack? If false (only right before
 * saving a core), don't scan the stack / mark pages dont_move. */
static boolean conservative_stack = 1;
/* An array of page structures is allocated on gc initialization.
 * This helps to quickly map between an address and its page structure.
 * page_table_pages is set from the size of the dynamic space. */
page_index_t page_table_pages;
struct page *page_table;

in_use_marker_t *page_table_pinned_dwords;
size_t pins_map_size_in_bytes;

/* In GC cards that have conservative pointers to them, should we wipe out
 * dwords in there that are not used, so that they do not act as false
 * roots to other things in the heap from then on? This is a new feature
 * but in testing it is both reliable and causes no noticeable slowdown. */
static inline boolean
page_allocated_p(page_index_t page) {
    return (page_table[page].allocated != FREE_PAGE_FLAG);
}

static inline boolean
page_no_region_p(page_index_t page) {
    return !(page_table[page].allocated & OPEN_REGION_PAGE_FLAG);
}

static inline boolean
page_allocated_no_region_p(page_index_t page) {
    return ((page_table[page].allocated & (UNBOXED_PAGE_FLAG | BOXED_PAGE_FLAG))
            && page_no_region_p(page));
}

static inline boolean
page_free_p(page_index_t page) {
    return (page_table[page].allocated == FREE_PAGE_FLAG);
}

static inline boolean
page_boxed_p(page_index_t page) {
    return (page_table[page].allocated & BOXED_PAGE_FLAG);
}

static inline boolean
page_boxed_no_region_p(page_index_t page) {
    return page_boxed_p(page) && page_no_region_p(page);
}

static inline boolean
page_unboxed_p(page_index_t page) {
    /* Both flags set == boxed code page */
    return ((page_table[page].allocated & UNBOXED_PAGE_FLAG)
            && !page_boxed_p(page));
}

static inline boolean
protect_page_p(page_index_t page, generation_index_t generation) {
    return (page_boxed_no_region_p(page)
            && (page_table[page].bytes_used != 0)
            && !page_table[page].dont_move
            && (page_table[page].gen == generation));
}
/* To map addresses to page structures the address of the first page
 * is needed. */
void *heap_base = NULL;

/* Calculate the start address for the given page number. */
inline void *
page_address(page_index_t page_num)
{
    return (heap_base + (page_num * GENCGC_CARD_BYTES));
}
/* Calculate the address where the allocation region associated with
 * the page starts. */
static inline void *
page_scan_start(page_index_t page_index)
{
    return page_address(page_index)-page_table[page_index].scan_start_offset;
}

/* True if the page starts a contiguous block. */
static inline boolean
page_starts_contiguous_block_p(page_index_t page_index)
{
    return page_table[page_index].scan_start_offset == 0;
}
/* True if the page is the last page in a contiguous block. */
static inline boolean
page_ends_contiguous_block_p(page_index_t page_index, generation_index_t gen)
{
    return (/* page doesn't fill block */
            (page_table[page_index].bytes_used < GENCGC_CARD_BYTES)
            /* page is last allocated page */
            || ((page_index + 1) >= last_free_page)
            /* next page is free */
            || page_free_p(page_index + 1)
            /* next page contains no data */
            || (page_table[page_index + 1].bytes_used == 0)
            /* next page is in different generation */
            || (page_table[page_index + 1].gen != gen)
            /* next page starts its own contiguous block */
            || (page_starts_contiguous_block_p(page_index + 1)));
}
/* Find the page index within the page_table for the given
 * address. Return -1 on failure. */
inline page_index_t
find_page_index(void *addr)
{
    if (addr >= heap_base) {
        page_index_t index = ((pointer_sized_uint_t)addr -
                              (pointer_sized_uint_t)heap_base) / GENCGC_CARD_BYTES;
        if (index < page_table_pages)
            return index;
    }
    return -1;
}
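
/* Illustrative sketch, not part of the original source: page_address() and
 * find_page_index() are inverses down to card granularity, so any address
 * inside the dynamic space maps to a page whose base address lies at most
 * GENCGC_CARD_BYTES below it.  The helper name is hypothetical. */
static inline boolean
example_address_page_roundtrip_p(void *addr)
{
    page_index_t index = find_page_index(addr);
    if (index < 0)
        return 0;                       /* not in the dynamic space */
    void *base = page_address(index);   /* start of the card holding addr */
    return (addr >= base) && (addr < base + GENCGC_CARD_BYTES);
}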
static os_vm_size_t
npage_bytes(page_index_t npages)
{
    gc_assert(npages>=0);
    return ((os_vm_size_t)npages)*GENCGC_CARD_BYTES;
}

/* Check that X is a higher address than Y and return offset from Y to
 * X. */
static inline os_vm_size_t
void_diff(void *x, void *y)
{
    gc_assert(x >= y);
    return (pointer_sized_uint_t)x - (pointer_sized_uint_t)y;
}
/* a structure to hold the state of a generation
 *
 * CAUTION: If you modify this, make sure to touch up the alien
 * definition in src/code/gc.lisp accordingly. ...or better yet,
 * deal with the FIXME there... */
struct generation {

    /* the first page that gc_alloc() checks on its next call */
    page_index_t alloc_start_page;

    /* the first page that gc_alloc_unboxed() checks on its next call */
    page_index_t alloc_unboxed_start_page;

    /* the first page that gc_alloc_large (boxed) considers on its next
     * call. (Although it always allocates after the boxed_region.) */
    page_index_t alloc_large_start_page;

    /* the first page that gc_alloc_large (unboxed) considers on its
     * next call. (Although it always allocates after the
     * current_unboxed_region.) */
    page_index_t alloc_large_unboxed_start_page;

    /* the bytes allocated to this generation */
    os_vm_size_t bytes_allocated;

    /* the number of bytes at which to trigger a GC */
    os_vm_size_t gc_trigger;

    /* to calculate a new level for gc_trigger */
    os_vm_size_t bytes_consed_between_gc;

    /* the number of GCs since the last raise */
    int num_gc;

    /* the number of GCs to run on the generations before raising objects
     * to the next generation */
    int number_of_gcs_before_promotion;

    /* the cumulative sum of the bytes allocated to this generation. It is
     * cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation. Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC. */
    os_vm_size_t cum_sum_bytes_allocated;

    /* a minimum average memory age before a GC will occur helps
     * prevent a GC when a large number of new live objects have been
     * added, in which case a GC could be a waste of time */
    double minimum_age_before_gc;
};
/* an array of generation structures. There needs to be one more
 * generation structure than actual generations as the oldest
 * generation is temporarily raised then lowered. */
struct generation generations[NUM_GENERATIONS];

/* the oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... HIGHEST_NORMAL_GENERATION
 *
 * The default of HIGHEST_NORMAL_GENERATION enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided. */
generation_index_t gencgc_oldest_gen_to_gc = HIGHEST_NORMAL_GENERATION;
/* META: Is nobody aside from me bothered by this especially misleading
 * use of the word "last"? It could mean either "ultimate" or "prior",
 * but in fact means neither. It is the *FIRST* page that should be grabbed
 * for more space, so it is min free page, or 1+ the max used page. */
/* The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the Lisp code. */
page_index_t last_free_page;
#ifdef LISP_FEATURE_SB_THREAD
/* This lock is to prevent multiple threads from simultaneously
 * allocating new regions which overlap each other. Note that the
 * majority of GC is single-threaded, but alloc() may be called from
 * >1 thread at a time and must be thread-safe. This lock must be
 * seized before all accesses to generations[] or to parts of
 * page_table[] that other threads may want to see */
static pthread_mutex_t free_pages_lock = PTHREAD_MUTEX_INITIALIZER;
/* This lock is used to protect non-thread-local allocation. */
static pthread_mutex_t allocation_lock = PTHREAD_MUTEX_INITIALIZER;
#endif
extern os_vm_size_t gencgc_release_granularity;
os_vm_size_t gencgc_release_granularity = GENCGC_RELEASE_GRANULARITY;

extern os_vm_size_t gencgc_alloc_granularity;
os_vm_size_t gencgc_alloc_granularity = GENCGC_ALLOC_GRANULARITY;
/*
 * miscellaneous heap functions
 */

/* Count the number of pages which are write-protected within the
 * given generation. */
static page_index_t
count_write_protect_generation_pages(generation_index_t generation)
{
    page_index_t i, count = 0;

    for (i = 0; i < last_free_page; i++)
        if (page_allocated_p(i)
            && (page_table[i].gen == generation)
            && (page_table[i].write_protected == 1))
            count++;
    return count;
}
/* Count the number of pages within the given generation. */
static page_index_t
count_generation_pages(generation_index_t generation)
{
    page_index_t i;
    page_index_t count = 0;

    for (i = 0; i < last_free_page; i++)
        if (page_allocated_p(i)
            && (page_table[i].gen == generation))
            count++;
    return count;
}
static page_index_t
count_dont_move_pages(void)
{
    page_index_t i;
    page_index_t count = 0;
    for (i = 0; i < last_free_page; i++) {
        if (page_allocated_p(i)
            && (page_table[i].dont_move != 0)) {
            ++count;
        }
    }
    return count;
}
/* Work through the pages and add up the number of bytes used for the
 * given generation. */
static os_vm_size_t
count_generation_bytes_allocated (generation_index_t gen)
{
    page_index_t i;
    os_vm_size_t result = 0;
    for (i = 0; i < last_free_page; i++) {
        if (page_allocated_p(i)
            && (page_table[i].gen == gen))
            result += page_table[i].bytes_used;
    }
    return result;
}
/* Return the average age of the memory in a generation. */
extern double
generation_average_age(generation_index_t gen)
{
    if (generations[gen].bytes_allocated == 0)
        return 0.0;

    return
        ((double)generations[gen].cum_sum_bytes_allocated)
        / ((double)generations[gen].bytes_allocated);
}
#ifdef LISP_FEATURE_X86
extern void fpu_save(void *);
extern void fpu_restore(void *);
#endif

extern void
write_generation_stats(FILE *file)
{
    generation_index_t i;

#ifdef LISP_FEATURE_X86
    int fpu_state[27];

    /* Can end up here after calling alloc_tramp which doesn't prepare
     * the x87 state, and the C ABI uses a different mode */
    fpu_save(fpu_state);
#endif

    /* Print the heap stats. */
    fprintf(file,
            " Gen StaPg UbSta LaSta LUbSt Boxed Unboxed LB LUB !move Alloc Waste Trig WP GCs Mem-age\n");
    for (i = 0; i < SCRATCH_GENERATION; i++) {
        page_index_t j;
        page_index_t boxed_cnt = 0;
        page_index_t unboxed_cnt = 0;
        page_index_t large_boxed_cnt = 0;
        page_index_t large_unboxed_cnt = 0;
        page_index_t pinned_cnt=0;

        for (j = 0; j < last_free_page; j++)
            if (page_table[j].gen == i) {
                /* Count the number of boxed pages within the given
                 * generation. */
                if (page_boxed_p(j)) {
                    if (page_table[j].large_object)
                        large_boxed_cnt++;
                    else
                        boxed_cnt++;
                }
                if(page_table[j].dont_move) pinned_cnt++;
                /* Count the number of unboxed pages within the given
                 * generation. */
                if (page_unboxed_p(j)) {
                    if (page_table[j].large_object)
                        large_unboxed_cnt++;
                    else
                        unboxed_cnt++;
                }
            }

        gc_assert(generations[i].bytes_allocated
                  == count_generation_bytes_allocated(i));
        fprintf(file,
                " %1d: %5ld %5ld %5ld %5ld",
                i,
                generations[i].alloc_start_page,
                generations[i].alloc_unboxed_start_page,
                generations[i].alloc_large_start_page,
                generations[i].alloc_large_unboxed_start_page);
        fprintf(file,
                " %5"PAGE_INDEX_FMT" %5"PAGE_INDEX_FMT" %5"PAGE_INDEX_FMT
                " %5"PAGE_INDEX_FMT" %5"PAGE_INDEX_FMT,
                boxed_cnt, unboxed_cnt, large_boxed_cnt,
                large_unboxed_cnt, pinned_cnt);
        fprintf(file,
                " %4"PAGE_INDEX_FMT" %3d %7.4f\n",
                generations[i].bytes_allocated,
                (npage_bytes(count_generation_pages(i)) - generations[i].bytes_allocated),
                generations[i].gc_trigger,
                count_write_protect_generation_pages(i),
                generations[i].num_gc,
                generation_average_age(i));
    }
    fprintf(file," Total bytes allocated = %"OS_VM_SIZE_FMT"\n", bytes_allocated);
    fprintf(file," Dynamic-space-size bytes = %"OS_VM_SIZE_FMT"\n", dynamic_space_size);

#ifdef LISP_FEATURE_X86
    fpu_restore(fpu_state);
#endif
}
extern void
write_heap_exhaustion_report(FILE *file, long available, long requested,
                             struct thread *thread)
{
    fprintf(file,
            "Heap exhausted during %s: %ld bytes available, %ld requested.\n",
            gc_active_p ? "garbage collection" : "allocation",
            available, requested);
    write_generation_stats(file);
    fprintf(file, "GC control variables:\n");
    fprintf(file, "   *GC-INHIBIT* = %s\n   *GC-PENDING* = %s\n",
            SymbolValue(GC_INHIBIT,thread)==NIL ? "false" : "true",
            (SymbolValue(GC_PENDING, thread) == T) ?
            "true" : ((SymbolValue(GC_PENDING, thread) == NIL) ?
                      "false" : "in progress"));
#ifdef LISP_FEATURE_SB_THREAD
    fprintf(file, "   *STOP-FOR-GC-PENDING* = %s\n",
            SymbolValue(STOP_FOR_GC_PENDING,thread)==NIL ? "false" : "true");
#endif
}
extern void
print_generation_stats(void)
{
    write_generation_stats(stderr);
}

extern char* gc_logfile;
char * gc_logfile = NULL;
void
log_generation_stats(char *logfile, char *header)
{
    if (logfile) {
        FILE * log = fopen(logfile, "a");
        if (log) {
            fprintf(log, "%s\n", header);
            write_generation_stats(log);
            fclose(log);
        } else {
            fprintf(stderr, "Could not open gc logfile: %s\n", logfile);
        }
    }
}
void
report_heap_exhaustion(long available, long requested, struct thread *th)
{
    if (gc_logfile) {
        FILE * log = fopen(gc_logfile, "a");
        if (log) {
            write_heap_exhaustion_report(log, available, requested, th);
            fclose(log);
        } else {
            fprintf(stderr, "Could not open gc logfile: %s\n", gc_logfile);
        }
    }
    /* Always to stderr as well. */
    write_heap_exhaustion_report(stderr, available, requested, th);
}
#if defined(LISP_FEATURE_X86)
void fast_bzero(void*, size_t); /* in <arch>-assem.S */
#endif
/* Zero the pages from START to END (inclusive), but use mmap/munmap instead
 * of zeroing it ourselves, i.e. in practice give the memory back to the
 * OS. Generally done after a large GC.
 */
void zero_pages_with_mmap(page_index_t start, page_index_t end) {
    page_index_t i;
    void *addr = page_address(start), *new_addr;
    os_vm_size_t length = npage_bytes(1+end-start);

    gc_assert(length >= gencgc_release_granularity);
    gc_assert((length % gencgc_release_granularity) == 0);

    os_invalidate(addr, length);
    new_addr = os_validate(addr, length);
    if (new_addr == NULL || new_addr != addr) {
        lose("remap_free_pages: page moved, 0x%08x ==> 0x%08x",
             start, new_addr);
    }

    for (i = start; i <= end; i++) {
        page_table[i].need_to_zero = 0;
    }
}
/* Zero the pages from START to END (inclusive). Generally done just after
 * a new region has been allocated.
 */
static void
zero_pages(page_index_t start, page_index_t end) {
#if defined(LISP_FEATURE_X86)
    fast_bzero(page_address(start), npage_bytes(1+end-start));
#else
    bzero(page_address(start), npage_bytes(1+end-start));
#endif
}

static void
zero_and_mark_pages(page_index_t start, page_index_t end) {
    page_index_t i;

    zero_pages(start, end);
    for (i = start; i <= end; i++)
        page_table[i].need_to_zero = 0;
}
/* Zero the pages from START to END (inclusive), except for those
 * pages that are known to be already zeroed. Mark all pages in the
 * ranges as non-zeroed.
 */
static void
zero_dirty_pages(page_index_t start, page_index_t end) {
    page_index_t i, j;

    for (i = start; i <= end; i++) {
        if (!page_table[i].need_to_zero) continue;
        for (j = i+1; (j <= end) && (page_table[j].need_to_zero); j++);
        zero_pages(i, j-1);
        i = j;
    }

    for (i = start; i <= end; i++) {
        page_table[i].need_to_zero = 1;
    }
}
/*
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties
 * e.g. boxed/unboxed, generation, ages; there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page. Many
 * features of memory use are noted on a page wise basis, e.g. the
 * generation; so if a region starts within an existing allocated page
 * it must be consistent with this page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region. It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list which jumps all over the place to cleanup the list. It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g to detect pointers to the oldspace.
 * Thus it's important that the allocation regions have the correct
 * properties set when allocated, and not just set when closed. The
 * region allocation routines return regions with the specified
 * properties, and grab all the pages, setting their properties
 * appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer. The actual space used by the region is not reflected
 * in the page tables until it is closed. It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used returning unused
 * space. Further it may be noted in the new regions which is
 * necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region, the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects and so
 * don't need scavenging. Further they can't contain pointers to
 * younger generations so WP is not needed. By allocating pages to
 * unboxed objects the whole page never needs scavenging or
 * write-protecting. */
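
/* Illustrative sketch, not part of the original source: the "free pointer
 * plus end-address check" allocation that the comment above describes boils
 * down to a pointer bump guarded by a bounds test.  This is a simplified
 * restatement of the fast path of gc_alloc_with_region() defined later in
 * this file; the helper name is hypothetical and the slow path is only
 * hinted at. */
static inline void *
example_region_inline_alloc(struct alloc_region *region, sword_t nbytes)
{
    void *new_free_pointer = region->free_pointer + nbytes;
    if (new_free_pointer <= region->end_addr) {
        /* Fast path: bump the free pointer within the open region. */
        void *new_obj = region->free_pointer;
        region->free_pointer = new_free_pointer;
        return new_obj;
    }
    /* Slow path (not shown): close this region via
     * gc_alloc_update_page_tables() and open a fresh one with
     * gc_alloc_new_region(). */
    return NULL;
}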
/* We are only using two regions at present. Both are for the current
 * newspace generation. */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

/* The generation currently being allocated to. */
static generation_index_t gc_alloc_generation;
static inline page_index_t
generation_alloc_start_page(generation_index_t generation, int page_type_flag, int large)
{
    if (large) {
        if (UNBOXED_PAGE_FLAG == page_type_flag) {
            return generations[generation].alloc_large_unboxed_start_page;
        } else if (BOXED_PAGE_FLAG & page_type_flag) {
            /* Both code and data. */
            return generations[generation].alloc_large_start_page;
        } else {
            lose("bad page type flag: %d", page_type_flag);
        }
    } else {
        if (UNBOXED_PAGE_FLAG == page_type_flag) {
            return generations[generation].alloc_unboxed_start_page;
        } else if (BOXED_PAGE_FLAG & page_type_flag) {
            /* Both code and data. */
            return generations[generation].alloc_start_page;
        } else {
            lose("bad page_type_flag: %d", page_type_flag);
        }
    }
}

static inline void
set_generation_alloc_start_page(generation_index_t generation, int page_type_flag, int large,
                                page_index_t page)
{
    if (large) {
        if (UNBOXED_PAGE_FLAG == page_type_flag) {
            generations[generation].alloc_large_unboxed_start_page = page;
        } else if (BOXED_PAGE_FLAG & page_type_flag) {
            /* Both code and data. */
            generations[generation].alloc_large_start_page = page;
        } else {
            lose("bad page type flag: %d", page_type_flag);
        }
    } else {
        if (UNBOXED_PAGE_FLAG == page_type_flag) {
            generations[generation].alloc_unboxed_start_page = page;
        } else if (BOXED_PAGE_FLAG & page_type_flag) {
            /* Both code and data. */
            generations[generation].alloc_start_page = page;
        } else {
            lose("bad page type flag: %d", page_type_flag);
        }
    }
}
const int n_dwords_in_card = GENCGC_CARD_BYTES / N_WORD_BYTES / 2;
static inline in_use_marker_t *
pinned_dwords(page_index_t page)
{
    if (page_table[page].has_pin_map)
        return &page_table_pinned_dwords[page * (n_dwords_in_card/N_WORD_BITS)];
    return NULL;
}
/* Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page. So
 * may pick up from the previous region if there is enough space. This
 * keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables(), and will thus be in an empty state.
 *
 * To assist the scavenging functions write-protected pages are not
 * used. Free pages should not be write-protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found to within the current
 * region and the page generation must be set so that pointers to the
 * from space can be recognized. Therefore the generation of pages in
 * the region are set to gc_alloc_generation. To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void
gc_alloc_new_region(sword_t nbytes, int page_type_flag, struct alloc_region *alloc_region)
{
    page_index_t first_page;
    page_index_t last_page;
    os_vm_size_t bytes_found;
    page_index_t i;
    int ret;

    FSHOW((stderr,
           "/alloc_new_region for %d bytes from gen %d\n",
           nbytes, gc_alloc_generation));

    /* Check that the region is in a reset state. */
    gc_assert((alloc_region->first_page == 0)
              && (alloc_region->last_page == -1)
              && (alloc_region->free_pointer == alloc_region->end_addr));
    ret = thread_mutex_lock(&free_pages_lock);
    gc_assert(ret == 0);
    first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0);
    last_page=gc_find_freeish_pages(&first_page, nbytes, page_type_flag);
    bytes_found=(GENCGC_CARD_BYTES - page_table[first_page].bytes_used)
            + npage_bytes(last_page-first_page);

    /* Set up the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
        + page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;
    /* Set up the pages. */

    /* The first page may have already been in use. */
    if (page_table[first_page].bytes_used == 0) {
        page_table[first_page].allocated = page_type_flag;
        page_table[first_page].gen = gc_alloc_generation;
        page_table[first_page].large_object = 0;
        page_table[first_page].scan_start_offset = 0;
        // wiping should have free()ed and :=NULL
        gc_assert(pinned_dwords(first_page) == NULL);
    }

    gc_assert(page_table[first_page].allocated == page_type_flag);
    page_table[first_page].allocated |= OPEN_REGION_PAGE_FLAG;

    gc_assert(page_table[first_page].gen == gc_alloc_generation);
    gc_assert(page_table[first_page].large_object == 0);

    for (i = first_page+1; i <= last_page; i++) {
        page_table[i].allocated = page_type_flag;
        page_table[i].gen = gc_alloc_generation;
        page_table[i].large_object = 0;
        /* This may not be necessary for unboxed regions (think it was
         * broken before!) */
        page_table[i].scan_start_offset =
            void_diff(page_address(i),alloc_region->start_addr);
        page_table[i].allocated |= OPEN_REGION_PAGE_FLAG;
    }
    /* Bump up last_free_page. */
    if (last_page+1 > last_free_page) {
        last_free_page = last_page+1;
        /* do we only want to call this on special occasions? like for
         * boxed_region? */
        set_alloc_pointer((lispobj)page_address(last_free_page));
    }
    ret = thread_mutex_unlock(&free_pages_lock);
    gc_assert(ret == 0);

#ifdef READ_PROTECT_FREE_PAGES
    os_protect(page_address(first_page),
               npage_bytes(1+last_page-first_page),
               OS_VM_PROT_ALL);
#endif

    /* If the first page was only partial, don't check whether it's
     * zeroed (it won't be) and don't zero it (since the parts that
     * we're interested in are guaranteed to be zeroed).
     */
    if (page_table[first_page].bytes_used) {
        first_page++;
    }

    zero_dirty_pages(first_page, last_page);
    /* we can do this after releasing free_pages_lock */
    if (gencgc_zero_check) {
        word_t *p;
        for (p = (word_t *)alloc_region->start_addr;
             p < (word_t *)alloc_region->end_addr; p++) {
            if (*p != 0)
                lose("The new region is not zero at %p (start=%p, end=%p).\n",
                     p, alloc_region->start_addr, alloc_region->end_addr);
        }
    }
}
/* If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then it is only recorded if the first page of the
 * current region is <= new_areas_ignore_page. This helps avoid
 * unnecessary recording when doing a full scavenge pass.
 *
 * The new_object structure holds the page, byte offset, and size of
 * new regions of objects. Each new area is placed in the array of
 * these structures pointed to by new_areas. new_areas_index holds the
 * offset into new_areas.
 *
 * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation. */
#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static page_index_t new_areas_ignore_page;
struct new_area {
    page_index_t page;
    size_t offset;
    size_t size;
};
static struct new_area (*new_areas)[];
static size_t new_areas_index;
size_t max_new_areas;
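
/* Illustrative sketch, not part of the original source: a new_area entry
 * names a byte range of the dynamic space by (page, offset, size), so its
 * absolute start address can be recovered the same way add_new_area()
 * computes new_area_start below.  The helper name is hypothetical. */
static inline void *
example_new_area_start_address(struct new_area *area)
{
    return page_address(area->page) + area->offset;
}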
/* Add a new area to new_areas. */
static void
add_new_area(page_index_t first_page, size_t offset, size_t size)
{
    size_t new_area_start, c;
    ssize_t i;

    /* Ignore if full. */
    if (new_areas_index >= NUM_NEW_AREAS)
        return;

    switch (record_new_objects) {
    case 0:
        return;
    case 1:
        if (first_page > new_areas_ignore_page)
            return;
        break;
    case 2:
        break;
    default:
        gc_abort();
    }

    new_area_start = npage_bytes(first_page) + offset;

    /* Search backwards for a prior area that this follows from. If
       found this will save adding a new area. */
    for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) {
        size_t area_end =
            npage_bytes((*new_areas)[i].page)
            + (*new_areas)[i].offset
            + (*new_areas)[i].size;
        /*FSHOW((stderr,
               "/add_new_area S1 %d %d %d %d\n",
               i, c, new_area_start, area_end));*/
        if (new_area_start == area_end) {
            /*FSHOW((stderr,
                   "/adding to [%d] %d %d %d with %d %d %d:\n",
                   i,
                   (*new_areas)[i].page,
                   (*new_areas)[i].offset,
                   (*new_areas)[i].size,
                   first_page, offset, size));*/
            (*new_areas)[i].size += size;
            return;
        }
    }

    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
    /*FSHOW((stderr,
           "/new_area %d page %d offset %d size %d\n",
           new_areas_index, first_page, offset, size));*/
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
        max_new_areas = new_areas_index;
}
/* Update the tables for the alloc_region. The region may be added to
 * the new_areas.
 *
 * When done the alloc_region is set up so that the next quick alloc
 * will fail safely and thus a new region will be allocated. Further
 * it is safe to try to re-update the page table of this reset
 * alloc_region. */
void
gc_alloc_update_page_tables(int page_type_flag, struct alloc_region *alloc_region)
{
    boolean more;
    page_index_t first_page;
    page_index_t next_page;
    os_vm_size_t bytes_used;
    os_vm_size_t region_size;
    os_vm_size_t byte_cnt;
    page_bytes_t orig_first_page_bytes_used;
    int ret;

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if ((first_page == 0) && (alloc_region->last_page == -1))
        return;

    next_page = first_page+1;

    ret = thread_mutex_lock(&free_pages_lock);
    gc_assert(ret == 0);
    if (alloc_region->free_pointer != alloc_region->start_addr) {
        /* some bytes were allocated in the region */
        orig_first_page_bytes_used = page_table[first_page].bytes_used;

        gc_assert(alloc_region->start_addr ==
                  (page_address(first_page)
                   + page_table[first_page].bytes_used));

        /* All the pages used need to be updated */

        /* Update the first page. */

        /* If the page was free then set up the gen, and
         * scan_start_offset. */
        if (page_table[first_page].bytes_used == 0)
            gc_assert(page_starts_contiguous_block_p(first_page));
        page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);

        gc_assert(page_table[first_page].allocated & page_type_flag);
        gc_assert(page_table[first_page].gen == gc_alloc_generation);
        gc_assert(page_table[first_page].large_object == 0);

        byte_cnt = 0;

        /* Calculate the number of bytes used in this page. This is not
         * always the number of new bytes, unless it was free. */
        more = 0;
        if ((bytes_used = void_diff(alloc_region->free_pointer,
                                    page_address(first_page)))
            >GENCGC_CARD_BYTES) {
            bytes_used = GENCGC_CARD_BYTES;
            more = 1;
        }
        page_table[first_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;
        /* All the rest of the pages should be free. We need to set
         * their scan_start_offset pointer to the start of the
         * region, and set the bytes_used. */
        while (more) {
            page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
            gc_assert(page_table[next_page].allocated & page_type_flag);
            gc_assert(page_table[next_page].bytes_used == 0);
            gc_assert(page_table[next_page].gen == gc_alloc_generation);
            gc_assert(page_table[next_page].large_object == 0);

            gc_assert(page_table[next_page].scan_start_offset ==
                      void_diff(page_address(next_page),
                                alloc_region->start_addr));

            /* Calculate the number of bytes used in this page. */
            more = 0;
            if ((bytes_used = void_diff(alloc_region->free_pointer,
                                        page_address(next_page)))>GENCGC_CARD_BYTES) {
                bytes_used = GENCGC_CARD_BYTES;
                more = 1;
            }
            page_table[next_page].bytes_used = bytes_used;
            byte_cnt += bytes_used;

            next_page++;
        }
        region_size = void_diff(alloc_region->free_pointer,
                                alloc_region->start_addr);
        bytes_allocated += region_size;
        generations[gc_alloc_generation].bytes_allocated += region_size;

        gc_assert((byte_cnt - orig_first_page_bytes_used) == region_size);

        /* Set the generations alloc restart page to the last page of
         * the region. */
        set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0, next_page-1);

        /* Add the region to the new_areas if requested. */
        if (BOXED_PAGE_FLAG & page_type_flag)
            add_new_area(first_page,orig_first_page_bytes_used, region_size);

        /*
        FSHOW((stderr,
               "/gc_alloc_update_page_tables update %d bytes to gen %d\n",
               region_size,
               gc_alloc_generation));
        */
    } else {
        /* There are no bytes allocated. Unallocate the first_page if
         * there are 0 bytes_used. */
        page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
        if (page_table[first_page].bytes_used == 0)
            page_table[first_page].allocated = FREE_PAGE_FLAG;
    }

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
        gc_assert(page_table[next_page].bytes_used == 0);
        page_table[next_page].allocated = FREE_PAGE_FLAG;
        next_page++;
    }
    ret = thread_mutex_unlock(&free_pages_lock);
    gc_assert(ret == 0);

    /* alloc_region is per-thread, we're ok to do this unlocked */
    gc_set_region_empty(alloc_region);
}
/* Allocate a possibly large object. */
void *
gc_alloc_large(sword_t nbytes, int page_type_flag, struct alloc_region *alloc_region)
{
    boolean more;
    page_index_t first_page, next_page, last_page;
    page_bytes_t orig_first_page_bytes_used;
    os_vm_size_t byte_cnt;
    os_vm_size_t bytes_used;
    int ret;

    ret = thread_mutex_lock(&free_pages_lock);
    gc_assert(ret == 0);

    first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 1);
    if (first_page <= alloc_region->last_page) {
        first_page = alloc_region->last_page+1;
    }

    last_page=gc_find_freeish_pages(&first_page,nbytes, page_type_flag);

    gc_assert(first_page > alloc_region->last_page);

    set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 1, last_page);

    /* Set up the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /* If the first page was free then set up the gen, and
     * scan_start_offset. */
    if (page_table[first_page].bytes_used == 0) {
        page_table[first_page].allocated = page_type_flag;
        page_table[first_page].gen = gc_alloc_generation;
        page_table[first_page].scan_start_offset = 0;
        page_table[first_page].large_object = 1;
    }

    gc_assert(page_table[first_page].allocated == page_type_flag);
    gc_assert(page_table[first_page].gen == gc_alloc_generation);
    gc_assert(page_table[first_page].large_object == 1);

    byte_cnt = 0;

    /* Calc. the number of bytes used in this page. This is not
     * always the number of new bytes, unless it was free. */
    more = 0;
    if ((bytes_used = nbytes+orig_first_page_bytes_used) > GENCGC_CARD_BYTES) {
        bytes_used = GENCGC_CARD_BYTES;
        more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page+1;

    /* All the rest of the pages should be free. We need to set their
     * scan_start_offset pointer to the start of the region, and set
     * the bytes_used. */
    while (more) {
        gc_assert(page_free_p(next_page));
        gc_assert(page_table[next_page].bytes_used == 0);
        page_table[next_page].allocated = page_type_flag;
        page_table[next_page].gen = gc_alloc_generation;
        page_table[next_page].large_object = 1;

        page_table[next_page].scan_start_offset =
            npage_bytes(next_page-first_page) - orig_first_page_bytes_used;

        /* Calculate the number of bytes used in this page. */
        more = 0;
        bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt;
        if (bytes_used > GENCGC_CARD_BYTES) {
            bytes_used = GENCGC_CARD_BYTES;
            more = 1;
        }
        page_table[next_page].bytes_used = bytes_used;
        page_table[next_page].write_protected=0;
        page_table[next_page].dont_move=0;
        byte_cnt += bytes_used;
        next_page++;
    }

    gc_assert((byte_cnt-orig_first_page_bytes_used) == (size_t)nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (BOXED_PAGE_FLAG & page_type_flag)
        add_new_area(first_page,orig_first_page_bytes_used,nbytes);

    /* Bump up last_free_page */
    if (last_page+1 > last_free_page) {
        last_free_page = last_page+1;
        set_alloc_pointer((lispobj)(page_address(last_free_page)));
    }
    ret = thread_mutex_unlock(&free_pages_lock);
    gc_assert(ret == 0);

#ifdef READ_PROTECT_FREE_PAGES
    os_protect(page_address(first_page),
               npage_bytes(1+last_page-first_page),
               OS_VM_PROT_ALL);
#endif

    zero_dirty_pages(first_page, last_page);

    return page_address(first_page);
}
static page_index_t gencgc_alloc_start_page = -1;
void
gc_heap_exhausted_error_or_lose (sword_t available, sword_t requested)
{
    struct thread *thread = arch_os_get_current_thread();
    /* Write basic information before doing anything else: if we don't
     * call to lisp this is a must, and even if we do there is always
     * the danger that we bounce back here before the error has been
     * handled, or indeed even printed.
     */
    report_heap_exhaustion(available, requested, thread);
    if (gc_active_p || (available == 0)) {
        /* If we are in GC, or totally out of memory there is no way
         * to sanely transfer control to the lisp-side of things.
         */
        lose("Heap exhausted, game over.");
    }
    else {
        /* FIXME: assert free_pages_lock held */
        (void)thread_mutex_unlock(&free_pages_lock);
#if !(defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD))
        gc_assert(get_pseudo_atomic_atomic(thread));
        clear_pseudo_atomic_atomic(thread);
        if (get_pseudo_atomic_interrupted(thread))
            do_pending_interrupt();
#endif
        /* Another issue is that signalling HEAP-EXHAUSTED error leads
         * to running user code at arbitrary places, even in a
         * WITHOUT-INTERRUPTS which may lead to a deadlock without
         * running out of the heap. So at this point all bets are
         * off. */
        if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL)
            corruption_warning_and_maybe_lose
                ("Signalling HEAP-EXHAUSTED in a WITHOUT-INTERRUPTS.");
        /* available and requested should be double word aligned, thus
           they can be passed as fixnums and shifted later. */
        funcall2(StaticSymbolFunction(HEAP_EXHAUSTED_ERROR), available, requested);
        lose("HEAP-EXHAUSTED-ERROR fell through");
    }
}
page_index_t
gc_find_freeish_pages(page_index_t *restart_page_ptr, sword_t bytes,
                      int page_type_flag)
{
    page_index_t most_bytes_found_from = 0, most_bytes_found_to = 0;
    page_index_t first_page, last_page, restart_page = *restart_page_ptr;
    os_vm_size_t nbytes = bytes;
    os_vm_size_t nbytes_goal = nbytes;
    os_vm_size_t bytes_found = 0;
    os_vm_size_t most_bytes_found = 0;
    boolean small_object = nbytes < GENCGC_CARD_BYTES;
    /* FIXME: assert(free_pages_lock is held); */

    if (nbytes_goal < gencgc_alloc_granularity)
        nbytes_goal = gencgc_alloc_granularity;

    /* Toggled by gc_and_save for heap compaction, normally -1. */
    if (gencgc_alloc_start_page != -1) {
        restart_page = gencgc_alloc_start_page;
    }

    /* FIXME: This is on bytes instead of nbytes pending cleanup of
     * long from the interface. */
    gc_assert(bytes>=0);
    /* Search for a page with at least nbytes of space. We prefer
     * not to split small objects on multiple pages, to reduce the
     * number of contiguous allocation regions spanning multiple
     * pages: this helps avoid excessive conservatism.
     *
     * For other objects, we guarantee that they start on their own
     * page. */
    first_page = restart_page;
    while (first_page < page_table_pages) {
        bytes_found = 0;
        if (page_free_p(first_page)) {
            gc_assert(0 == page_table[first_page].bytes_used);
            bytes_found = GENCGC_CARD_BYTES;
        } else if (small_object &&
                   (page_table[first_page].allocated == page_type_flag) &&
                   (page_table[first_page].large_object == 0) &&
                   (page_table[first_page].gen == gc_alloc_generation) &&
                   (page_table[first_page].write_protected == 0) &&
                   (page_table[first_page].dont_move == 0)) {
            bytes_found = GENCGC_CARD_BYTES - page_table[first_page].bytes_used;
            if (bytes_found < nbytes) {
                if (bytes_found > most_bytes_found)
                    most_bytes_found = bytes_found;
                first_page++;
                continue;
            }
        } else {
            first_page++;
            continue;
        }

        gc_assert(page_table[first_page].write_protected == 0);
        for (last_page = first_page+1;
             ((last_page < page_table_pages) &&
              page_free_p(last_page) &&
              (bytes_found < nbytes_goal));
             last_page++) {
            bytes_found += GENCGC_CARD_BYTES;
            gc_assert(0 == page_table[last_page].bytes_used);
            gc_assert(0 == page_table[last_page].write_protected);
        }

        if (bytes_found > most_bytes_found) {
            most_bytes_found = bytes_found;
            most_bytes_found_from = first_page;
            most_bytes_found_to = last_page;
        }
        if (bytes_found >= nbytes_goal)
            break;

        first_page = last_page;
    }

    bytes_found = most_bytes_found;
    restart_page = first_page + 1;

    /* Check for a failure */
    if (bytes_found < nbytes) {
        gc_assert(restart_page >= page_table_pages);
        gc_heap_exhausted_error_or_lose(most_bytes_found, nbytes);
    }

    gc_assert(most_bytes_found_to);
    *restart_page_ptr = most_bytes_found_from;
    return most_bytes_found_to-1;
}
/* Allocate bytes. All the rest of the special-purpose allocation
 * functions will eventually call this */

void *
gc_alloc_with_region(sword_t nbytes,int page_type_flag, struct alloc_region *my_region,
                     int quick_p)
{
    void *new_free_pointer;

    if (nbytes>=LARGE_OBJECT_SIZE)
        return gc_alloc_large(nbytes, page_type_flag, my_region);

    /* Check whether there is room in the current alloc region. */
    new_free_pointer = my_region->free_pointer + nbytes;

    /* fprintf(stderr, "alloc %d bytes from %p to %p\n", nbytes,
       my_region->free_pointer, new_free_pointer); */

    if (new_free_pointer <= my_region->end_addr) {
        /* If so then allocate from the current alloc region. */
        void *new_obj = my_region->free_pointer;
        my_region->free_pointer = new_free_pointer;

        /* Unless a `quick' alloc was requested, check whether the
           alloc region is almost empty. */
        if (!quick_p &&
            void_diff(my_region->end_addr,my_region->free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(page_type_flag, my_region);
            /* Set up a new region. */
            gc_alloc_new_region(32 /*bytes*/, page_type_flag, my_region);
        }

        return((void *)new_obj);
    }

    /* Else not enough free space in the current region: retry with a
     * new region. */

    gc_alloc_update_page_tables(page_type_flag, my_region);
    gc_alloc_new_region(nbytes, page_type_flag, my_region);
    return gc_alloc_with_region(nbytes, page_type_flag, my_region,0);
}
/* Copy a large object. If the object is in a large object region then
 * it is simply promoted, else it is copied. If it's large enough then
 * it's copied to a large object region.
 *
 * Bignums and vectors may have shrunk. If the object is not copied
 * the space needs to be reclaimed, and the page_tables corrected. */
static lispobj
general_copy_large_object(lispobj object, word_t nwords, boolean boxedp)
{
    int tag;
    lispobj *new;
    page_index_t first_page;

    gc_assert(is_lisp_pointer(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    if ((nwords > 1024*1024) && gencgc_verbose) {
        FSHOW((stderr, "/general_copy_large_object: %d bytes\n",
               nwords*N_WORD_BYTES));
    }

    /* Check whether it's a large object. */
    first_page = find_page_index((void *)object);
    gc_assert(first_page >= 0);

    if (page_table[first_page].large_object) {
        /* Promote the object. Note: Unboxed objects may have been
         * allocated to a BOXED region so it may be necessary to
         * change the region to UNBOXED. */
        os_vm_size_t remaining_bytes;
        os_vm_size_t bytes_freed;
        page_index_t next_page;
        page_bytes_t old_bytes_used;

        /* FIXME: This comment is somewhat stale.
         *
         * Note: Any page write-protection must be removed, else a
         * later scavenge_newspace may incorrectly not scavenge these
         * pages. This would not be necessary if they are added to the
         * new areas, but let's do it for them all (they'll probably
         * be written anyway?). */

        gc_assert(page_starts_contiguous_block_p(first_page));
        next_page = first_page;
        remaining_bytes = nwords*N_WORD_BYTES;

        while (remaining_bytes > GENCGC_CARD_BYTES) {
            gc_assert(page_table[next_page].gen == from_space);
            gc_assert(page_table[next_page].large_object);
            gc_assert(page_table[next_page].scan_start_offset ==
                      npage_bytes(next_page-first_page));
            gc_assert(page_table[next_page].bytes_used == GENCGC_CARD_BYTES);
            /* Should have been unprotected by unprotect_oldspace()
             * for boxed objects, and after promotion unboxed ones
             * should not be on protected pages at all. */
            gc_assert(!page_table[next_page].write_protected);

            if (boxedp)
                gc_assert(page_boxed_p(next_page));
            else {
                gc_assert(page_allocated_no_region_p(next_page));
                page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
            }
            page_table[next_page].gen = new_space;

            remaining_bytes -= GENCGC_CARD_BYTES;
            next_page++;
        }
        /* Now only one page remains, but the object may have shrunk so
         * there may be more unused pages which will be freed. */

        /* Object may have shrunk but shouldn't have grown - check. */
        gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

        page_table[next_page].gen = new_space;

        if (boxedp)
            gc_assert(page_boxed_p(next_page));
        else
            page_table[next_page].allocated = UNBOXED_PAGE_FLAG;

        /* Adjust the bytes_used. */
        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].bytes_used = remaining_bytes;

        bytes_freed = old_bytes_used - remaining_bytes;

        /* Free any remaining pages; needs care. */
        next_page++;
        while ((old_bytes_used == GENCGC_CARD_BYTES) &&
               (page_table[next_page].gen == from_space) &&
               /* FIXME: It is not obvious to me why this is necessary
                * as a loop condition: it seems to me that the
                * scan_start_offset test should be sufficient, but
                * experimentally that is not the case. --NS */
               (boxedp ?
                page_boxed_p(next_page) :
                page_allocated_no_region_p(next_page)) &&
               page_table[next_page].large_object &&
               (page_table[next_page].scan_start_offset ==
                npage_bytes(next_page - first_page))) {
            /* Checks out OK, free the page. Don't need to bother zeroing
             * pages as this should have been done before shrinking the
             * object. These pages shouldn't be write-protected, even if
             * boxed they should be zero filled. */
            gc_assert(page_table[next_page].write_protected == 0);

            old_bytes_used = page_table[next_page].bytes_used;
            page_table[next_page].allocated = FREE_PAGE_FLAG;
            page_table[next_page].bytes_used = 0;
            bytes_freed += old_bytes_used;
            next_page++;
        }

        if ((bytes_freed > 0) && gencgc_verbose) {
            FSHOW((stderr,
                   "/general_copy_large_object bytes_freed=%"OS_VM_SIZE_FMT"\n",
                   bytes_freed));
        }

        generations[from_space].bytes_allocated -= nwords*N_WORD_BYTES
            + bytes_freed;
        generations[new_space].bytes_allocated += nwords*N_WORD_BYTES;
        bytes_allocated -= bytes_freed;

        /* Add the region to the new_areas if requested. */
        if (boxedp)
            add_new_area(first_page,0,nwords*N_WORD_BYTES);

        return(object);

    } else {
        /* Get tag of object. */
        tag = lowtag_of(object);

        /* Allocate space. */
        new = gc_general_alloc(nwords*N_WORD_BYTES,
                               (boxedp ? BOXED_PAGE_FLAG : UNBOXED_PAGE_FLAG),
                               ALLOC_QUICK);

        /* Copy the object. */
        memcpy(new,native_pointer(object),nwords*N_WORD_BYTES);

        /* Return Lisp pointer of new object. */
        return ((lispobj) new) | tag;
    }
}
lispobj
copy_large_object(lispobj object, sword_t nwords)
{
    return general_copy_large_object(object, nwords, 1);
}

lispobj
copy_large_unboxed_object(lispobj object, sword_t nwords)
{
    return general_copy_large_object(object, nwords, 0);
}

/* to copy unboxed objects */
lispobj
copy_unboxed_object(lispobj object, sword_t nwords)
{
    return gc_general_copy_object(object, nwords, UNBOXED_PAGE_FLAG);
}

/*
 * code and code-related objects
 */

static lispobj trans_fun_header(lispobj object);
static lispobj trans_boxed(lispobj object);
/* Scan an x86 compiled code object, looking for possible fixups that
 * have been missed after a move.
 *
 * Two types of fixups are needed:
 * 1. Absolute fixups to within the code object.
 * 2. Relative fixups to outside the code object.
 *
 * Currently only absolute fixups to the constant vector, or to the
 * code area are checked. */
#ifdef LISP_FEATURE_X86
void
sniff_code_object(struct code *code, os_vm_size_t displacement)
{
    sword_t nheader_words, ncode_words, nwords;
    os_vm_address_t constants_start_addr = NULL, constants_end_addr, p;
    os_vm_address_t code_start_addr, code_end_addr;
    os_vm_address_t code_addr = (os_vm_address_t)code;
    int fixup_found = 0;

    if (!check_code_fixups)
        return;

    FSHOW((stderr, "/sniffing code: %p, %lu\n", code, displacement));

    ncode_words = code_instruction_words(code->code_size);
    nheader_words = code_header_words(*(lispobj *)code);
    nwords = ncode_words + nheader_words;

    constants_start_addr = code_addr + 5*N_WORD_BYTES;
    constants_end_addr = code_addr + nheader_words*N_WORD_BYTES;
    code_start_addr = code_addr + nheader_words*N_WORD_BYTES;
    code_end_addr = code_addr + nwords*N_WORD_BYTES;
    /* Work through the unboxed code. */
    for (p = code_start_addr; p < code_end_addr; p++) {
        void *data = *(void **)p;
        unsigned d1 = *((unsigned char *)p - 1);
        unsigned d2 = *((unsigned char *)p - 2);
        unsigned d3 = *((unsigned char *)p - 3);
        unsigned d4 = *((unsigned char *)p - 4);
        unsigned d5 = *((unsigned char *)p - 5);
        unsigned d6 = *((unsigned char *)p - 6);

        /* Check for code references. */
        /* Check for a 32 bit word that looks like an absolute
           reference to within the code area of the code object. */
        if ((data >= (void*)(code_start_addr-displacement))
            && (data < (void*)(code_end_addr-displacement))) {
            /* function header */
            if ((d4 == 0x5e)
                && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) ==
                    (unsigned)code)) {
                /* Skip the function header */
                p += 6*4 - 4 - 1;
                continue;
            }
            /* the case of PUSH imm32 */
            if (d1 == 0x68) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/PUSH $0x%.8x\n", data));
            }
            /* the case of MOV [reg-8],imm32 */
            if ((d3 == 0xc7)
                && (d2==0x40 || d2==0x41 || d2==0x42 || d2==0x43
                    || d2==0x45 || d2==0x46 || d2==0x47)
                && (d1 == 0xf8)) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/MOV [reg-8],$0x%.8x\n", data));
            }
            /* the case of LEA reg,[disp32] */
            if ((d2 == 0x8d) && ((d1 & 0xc7) == 5)) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr,"/LEA reg,[$0x%.8x]\n", data));
            }
        }
        /* Check for constant references. */
        /* Check for a 32 bit word that looks like an absolute
           reference to within the constant vector. Constant references
           will be aligned. */
        if ((data >= (void*)(constants_start_addr-displacement))
            && (data < (void*)(constants_end_addr-displacement))
            && (((unsigned)data & 0x3) == 0)) {
            /* the case of MOV eax,m32 */
            if (d1 == 0xa1) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr,"/MOV eax,0x%.8x\n", data));
            }

            /* the case of MOV m32,EAX */
            if (d1 == 0xa3) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/MOV 0x%.8x,eax\n", data));
            }

            /* the case of CMP m32,imm32 */
            if ((d1 == 0x3d) && (d2 == 0x81)) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/CMP 0x%.8x,immed32\n", data));
            }

            /* Check for a mod=00, r/m=101 byte. */
            if ((d1 & 0xc7) == 5) {
                /* the case of CMP m32,reg */
                if (d2 == 0x39) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr,"/CMP 0x%.8x,reg\n", data));
                }
                /* the case of CMP reg32,m32 */
                if (d2 == 0x3b) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/CMP reg32,0x%.8x\n", data));
                }
                /* the case of MOV m32,reg32 */
                if (d2 == 0x89) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/MOV 0x%.8x,reg32\n", data));
                }
                /* the case of MOV reg32,m32 */
                if (d2 == 0x8b) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/MOV reg32,0x%.8x\n", data));
                }
                /* the case of LEA reg32,m32 */
                if (d2 == 0x8d) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/LEA reg32,0x%.8x\n", data));
                }
            }
        }
    }
    /* If anything was found, print some information on the code
     * object. */
    if (fixup_found) {
        FSHOW((stderr,
               "/compiled code object at %x: header words = %d, code words = %d\n",
               code, nheader_words, ncode_words));
        FSHOW((stderr,
               "/const start = %x, end = %x\n",
               constants_start_addr, constants_end_addr));
        FSHOW((stderr,
               "/code start = %x, end = %x\n",
               code_start_addr, code_end_addr));
    }
}
#endif
#ifdef LISP_FEATURE_X86
static void
gencgc_apply_code_fixups(struct code *old_code, struct code *new_code)
{
    sword_t nheader_words, ncode_words, nwords;
    os_vm_address_t constants_start_addr, constants_end_addr;
    os_vm_address_t code_start_addr, code_end_addr;
    os_vm_address_t code_addr = (os_vm_address_t)new_code;
    os_vm_address_t old_addr = (os_vm_address_t)old_code;
    os_vm_size_t displacement = code_addr - old_addr;
    lispobj fixups = NIL;
    struct vector *fixups_vector;

    ncode_words = code_instruction_words(new_code->code_size);
    nheader_words = code_header_words(*(lispobj *)new_code);
    nwords = ncode_words + nheader_words;
    /* FSHOW((stderr,
             "/compiled code object at %x: header words = %d, code words = %d\n",
             new_code, nheader_words, ncode_words)); */
    constants_start_addr = code_addr + 5*N_WORD_BYTES;
    constants_end_addr = code_addr + nheader_words*N_WORD_BYTES;
    code_start_addr = code_addr + nheader_words*N_WORD_BYTES;
    code_end_addr = code_addr + nwords*N_WORD_BYTES;
    /*
    FSHOW((stderr,
           "/const start = %x, end = %x\n",
           constants_start_addr,constants_end_addr));
    FSHOW((stderr,
           "/code start = %x; end = %x\n",
           code_start_addr,code_end_addr));
    */
    /* The first constant should be a pointer to the fixups for this
       code object. Check. */
    fixups = new_code->constants[0];

    /* It will be 0 or the unbound-marker if there are no fixups (as
     * will be the case if the code object has been purified, for
     * example) and will be an other pointer if it is valid. */
    if ((fixups == 0) || (fixups == UNBOUND_MARKER_WIDETAG) ||
        !is_lisp_pointer(fixups)) {
        /* Check for possible errors. */
        if (check_code_fixups)
            sniff_code_object(new_code, displacement);
        return;
    }

    fixups_vector = (struct vector *)native_pointer(fixups);

    /* Could be pointing to a forwarding pointer. */
    /* FIXME is this always in from_space? if so, could replace this code with
     * forwarding_pointer_p/forwarding_pointer_value */
    if (is_lisp_pointer(fixups) &&
        (find_page_index((void*)fixups_vector) != -1) &&
        (fixups_vector->header == 0x01)) {
        /* If so, then follow it. */
        /*SHOW("following pointer to a forwarding pointer");*/
        fixups_vector =
            (struct vector *)native_pointer((lispobj)fixups_vector->length);
    }

    /*SHOW("got fixups");*/

    if (widetag_of(fixups_vector->header) == SIMPLE_ARRAY_WORD_WIDETAG) {
        /* Got the fixups for the code block. Now work through the vector,
           and apply a fixup at each address. */
        sword_t length = fixnum_value(fixups_vector->length);
        sword_t i;
        for (i = 0; i < length; i++) {
            long offset = fixups_vector->data[i];
            /* Now check the current value of offset. */
            os_vm_address_t old_value = *(os_vm_address_t *)(code_start_addr + offset);

            /* If it's within the old_code object then it must be an
             * absolute fixup (relative ones are not saved) */
            if ((old_value >= old_addr)
                && (old_value < (old_addr + nwords*N_WORD_BYTES)))
                /* So add the displacement. */
                *(os_vm_address_t *)(code_start_addr + offset) =
                    old_value + displacement;
            else
                /* It is outside the old code object so it must be a
                 * relative fixup (absolute fixups are not saved). So
                 * subtract the displacement. */
                *(os_vm_address_t *)(code_start_addr + offset) =
                    old_value - displacement;
        }
    } else {
        /* This used to just print a note to stderr, but a bogus fixup seems to
         * indicate real heap corruption, so a hard failure is in order. */
        lose("fixup vector %p has a bad widetag: %d\n",
             fixups_vector, widetag_of(fixups_vector->header));
    }

    /* Check for possible errors. */
    if (check_code_fixups) {
        sniff_code_object(new_code,displacement);
    }
}
#endif
static lispobj
trans_boxed_large(lispobj object)
{
    lispobj header;
    uword_t length;

    gc_assert(is_lisp_pointer(object));

    header = *((lispobj *) native_pointer(object));
    length = HeaderValue(header) + 1;
    length = CEILING(length, 2);

    return copy_large_object(object, length);
}
/* XX This is a hack adapted from cgc.c. These don't work too
 * efficiently with the gencgc as a list of the weak pointers is
 * maintained within the objects which causes writes to the pages. A
 * limited attempt is made to avoid unnecessary writes, but this needs
 * a re-think. */
#define WEAK_POINTER_NWORDS \
    CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)

static sword_t
scav_weak_pointer(lispobj *where, lispobj object)
{
    /* Since we overwrite the 'next' field, we have to make
     * sure not to do so for pointers already in the list.
     * Instead of searching the list of weak_pointers each
     * time, we ensure that next is always NULL when the weak
     * pointer isn't in the list, and not NULL otherwise.
     * Since we can't use NULL to denote end of list, we
     * use a pointer back to the same weak_pointer.
     */
    struct weak_pointer * wp = (struct weak_pointer*)where;

    if (NULL == wp->next) {
        wp->next = weak_pointers;
        weak_pointers = wp;
        if (NULL == wp->next)
            wp->next = wp;
    }

    /* Do not let GC scavenge the value slot of the weak pointer.
     * (That is why it is a weak pointer.) */

    return WEAK_POINTER_NWORDS;
}
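
/* A minimal standalone sketch of the list discipline described above:
 * next == NULL means "not on the list", and the tail is marked by a
 * pointer back to the entry itself rather than by NULL.  The demo_*
 * names and types below are hypothetical stand-ins, not SBCL's. */
#if 0
#include <stddef.h>

struct demo_weak_pointer {
    struct demo_weak_pointer *next;   /* NULL = not on the list; self = tail */
    void *value;
};

static struct demo_weak_pointer *demo_weak_pointers = NULL;  /* list head */

static void demo_note_weak_pointer(struct demo_weak_pointer *wp)
{
    /* Only link the entry in the first time it is seen. */
    if (wp->next == NULL) {
        wp->next = demo_weak_pointers;
        demo_weak_pointers = wp;
        /* Pushing onto an empty list would leave next == NULL, which
         * would read as "not on the list", so terminate with a
         * self-pointer instead. */
        if (wp->next == NULL)
            wp->next = wp;
    }
}
#endif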
lispobj *
search_read_only_space(void *pointer)
{
    lispobj *start = (lispobj *) READ_ONLY_SPACE_START;
    lispobj *end = (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0);
    if ((pointer < (void *)start) || (pointer >= (void *)end))
        return NULL;
    return (gc_search_space(start,
                            (((lispobj *)pointer)+2)-start,
                            (lispobj *) pointer));
}

lispobj *
search_static_space(void *pointer)
{
    lispobj *start = (lispobj *)STATIC_SPACE_START;
    lispobj *end = (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0);
    if ((pointer < (void *)start) || (pointer >= (void *)end))
        return NULL;
    return (gc_search_space(start,
                            (((lispobj *)pointer)+2)-start,
                            (lispobj *) pointer));
}

/* a faster version for searching the dynamic space. This will work even
 * if the object is in a current allocation region. */
lispobj *
search_dynamic_space(void *pointer)
{
    page_index_t page_index = find_page_index(pointer);
    lispobj *start;

    /* The address may be invalid, so do some checks. */
    if ((page_index == -1) || page_free_p(page_index))
        return NULL;
    start = (lispobj *)page_scan_start(page_index);
    return (gc_search_space(start,
                            (((lispobj *)pointer)+2)-start,
                            (lispobj *)pointer));
}
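
/* The searches above all end up in gc_search_space(), which walks a run
 * of headered objects until it reaches the word the pointer falls in.
 * A rough standalone sketch of that kind of scan follows; it assumes,
 * purely for the demo, that an object's first word holds its size in
 * words (demo_object_nwords() and the other demo_* names are hypothetical). */
#if 0
#include <stddef.h>
#include <stdint.h>

typedef uintptr_t demo_lispobj;

/* Demo sizing rule: the first word of an object is its length in words
 * (never less than 1). */
static size_t demo_object_nwords(const demo_lispobj *obj)
{
    return *obj ? (size_t)*obj : 1;
}

/* Walk [start, start + nwords) object by object; return the start of the
 * object containing 'pointer', or NULL if the scan runs past it. */
static demo_lispobj *
demo_search_space(demo_lispobj *start, size_t nwords, demo_lispobj *pointer)
{
    while (nwords > 0) {
        size_t count = demo_object_nwords(start);
        demo_lispobj *next = start + count;
        if (pointer >= start && pointer < next)
            return start;
        if (count >= nwords)
            break;                    /* reached the end of the region */
        nwords -= count;
        start = next;
    }
    return NULL;
}
#endif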
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)

/* Is there any possibility that pointer is a valid Lisp object
 * reference, and/or something else (e.g. subroutine call return
 * address) which should prevent us from moving the referred-to thing?
 * This is called from preserve_pointer() */
static int
possibly_valid_dynamic_space_pointer_s(lispobj *pointer,
                                       page_index_t addr_page_index,
                                       lispobj **store_here)
{
    lispobj *start_addr;

    /* Find the object start address. */
    start_addr = search_dynamic_space(pointer);

    if (start_addr == NULL) {
        return 0;
    }
    if (store_here) {
        *store_here = start_addr;
    }

    /* If the containing object is a code object, presume that the
     * pointer is valid, simply because it could be an unboxed return
     * address. */
    if (widetag_of(*start_addr) == CODE_HEADER_WIDETAG)
        return 1;

    /* Large object pages only contain ONE object, and it will never
     * be a CONS.  However, arrays and bignums can be allocated larger
     * than necessary and then shrunk to fit, leaving what look like
     * (0 . 0) CONSes at the end.  These appear valid to
     * looks_like_valid_lisp_pointer_p(), so pick them off here. */
    if (page_table[addr_page_index].large_object &&
        (lowtag_of((lispobj)pointer) == LIST_POINTER_LOWTAG))
        return 0;

    return looks_like_valid_lisp_pointer_p((lispobj)pointer, start_addr);
}

#endif // defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
static boolean
valid_conservative_root_p(void *addr, page_index_t addr_page_index,
                          lispobj **begin_ptr)
{
#ifdef GENCGC_IS_PRECISE
    /* If we're in precise gencgc (non-x86oid as of this writing) then
     * we are only called on valid object pointers in the first place,
     * so we just have to do a bounds-check against the heap, a
     * generation check, and the already-pinned check. */
    if ((addr_page_index == -1)
        || (page_table[addr_page_index].gen != from_space)
        || (page_table[addr_page_index].dont_move != 0))
        return 0;
#else
    /* quick check 1: Address is quite likely to have been invalid. */
    if ((addr_page_index == -1)
        || page_free_p(addr_page_index)
        || (page_table[addr_page_index].bytes_used == 0)
        || (page_table[addr_page_index].gen != from_space))
        return 0;
    gc_assert(!(page_table[addr_page_index].allocated&OPEN_REGION_PAGE_FLAG));

    /* quick check 2: Check the offset within the page. */
    if (((uword_t)addr & (GENCGC_CARD_BYTES - 1)) >
        page_table[addr_page_index].bytes_used)
        return 0;

    /* Filter out anything which can't be a pointer to a Lisp object
     * (or, as a special case which also requires dont_move, a return
     * address referring to something in a CodeObject). This is
     * expensive but important, since it vastly reduces the
     * probability that random garbage will be bogusly interpreted as
     * a pointer which prevents a page from moving. */
    if (!possibly_valid_dynamic_space_pointer_s(addr, addr_page_index,
                                                begin_ptr))
        return 0;
#endif

    return 1;
}
boolean
in_dontmove_nativeptr_p(page_index_t page_index, lispobj *native_ptr)
{
    in_use_marker_t *markers = pinned_dwords(page_index);
    if (markers) {
        lispobj *begin = page_address(page_index);
        int dword_in_page = (native_ptr - begin) / 2;
        return (markers[dword_in_page / N_WORD_BITS] >> (dword_in_page % N_WORD_BITS)) & 1;
    } else {
        return 0;
    }
}
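
/* The pin map is one bit per double-word of the page, so the test above
 * is just "compute the dword index, then test that bit".  A standalone
 * sketch of the same index arithmetic; DEMO_BITS_PER_WORD stands in for
 * N_WORD_BITS and the demo_* names are hypothetical. */
#if 0
#include <stddef.h>
#include <stdint.h>

#define DEMO_BITS_PER_WORD 64

/* Bit i of 'map' covers words 2*i and 2*i+1 of the page. */
static int demo_dword_pinned_p(const uint64_t *map,
                               const uintptr_t *page_base,
                               const uintptr_t *ptr)
{
    size_t dword = (size_t)(ptr - page_base) / 2;
    return (map[dword / DEMO_BITS_PER_WORD] >> (dword % DEMO_BITS_PER_WORD)) & 1;
}
#endif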
2111 /* Adjust large bignum and vector objects. This will adjust the
2112 * allocated region if the size has shrunk, and move unboxed objects
2113 * into unboxed pages. The pages are not promoted here, and the
2114 * promoted region is not added to the new_regions; this is really
2115 * only designed to be called from preserve_pointer(). Shouldn't fail
2116 * if this is missed, just may delay the moving of objects to unboxed
2117 * pages, and the freeing of pages. */
2119 maybe_adjust_large_object(lispobj
*where
)
2121 page_index_t first_page
;
2122 page_index_t next_page
;
2125 uword_t remaining_bytes
;
2126 uword_t bytes_freed
;
2127 uword_t old_bytes_used
;
2131 /* Check whether it's a vector or bignum object. */
2132 switch (widetag_of(where
[0])) {
2133 case SIMPLE_VECTOR_WIDETAG
:
2134 boxed
= BOXED_PAGE_FLAG
;
2136 case BIGNUM_WIDETAG
:
2137 case SIMPLE_BASE_STRING_WIDETAG
:
2138 #ifdef SIMPLE_CHARACTER_STRING_WIDETAG
2139 case SIMPLE_CHARACTER_STRING_WIDETAG
:
2141 case SIMPLE_BIT_VECTOR_WIDETAG
:
2142 case SIMPLE_ARRAY_NIL_WIDETAG
:
2143 case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG
:
2144 case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG
:
2145 case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG
:
2146 case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG
:
2147 case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG
:
2148 case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG
:
2150 case SIMPLE_ARRAY_UNSIGNED_FIXNUM_WIDETAG
:
2152 case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG
:
2153 case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG
:
2154 #ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
2155 case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
:
2157 #ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
2158 case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
:
2160 #ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
2161 case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
:
2163 #ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
2164 case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
:
2167 case SIMPLE_ARRAY_FIXNUM_WIDETAG
:
2169 #ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
2170 case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
:
2172 #ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
2173 case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
:
2175 case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG
:
2176 case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG
:
2177 #ifdef SIMPLE_ARRAY_LONG_FLOAT_WIDETAG
2178 case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG
:
2180 #ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
2181 case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
:
2183 #ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
2184 case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
:
2186 #ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
2187 case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
:
2189 boxed
= UNBOXED_PAGE_FLAG
;
2195 /* Find its current size. */
2196 nwords
= (sizetab
[widetag_of(where
[0])])(where
);
2198 first_page
= find_page_index((void *)where
);
2199 gc_assert(first_page
>= 0);
2201 /* Note: Any page write-protection must be removed, else a later
2202 * scavenge_newspace may incorrectly not scavenge these pages.
2203 * This would not be necessary if they are added to the new areas,
2204 * but let's do it for them all (they'll probably be written anyway). */
2207 gc_assert(page_starts_contiguous_block_p(first_page
));
2209 next_page
= first_page
;
2210 remaining_bytes
= nwords
*N_WORD_BYTES
;
2211 while (remaining_bytes
> GENCGC_CARD_BYTES
) {
2212 gc_assert(page_table
[next_page
].gen
== from_space
);
2213 gc_assert(page_allocated_no_region_p(next_page
));
2214 gc_assert(page_table
[next_page
].large_object
);
2215 gc_assert(page_table
[next_page
].scan_start_offset
==
2216 npage_bytes(next_page
-first_page
));
2217 gc_assert(page_table
[next_page
].bytes_used
== GENCGC_CARD_BYTES
);
2219 page_table
[next_page
].allocated
= boxed
;
2221 /* Shouldn't be write-protected at this stage. Essential that the
2223 gc_assert(!page_table
[next_page
].write_protected
);
2224 remaining_bytes
-= GENCGC_CARD_BYTES
;
2228 /* Now only one page remains, but the object may have shrunk so
2229 * there may be more unused pages which will be freed. */
2231 /* Object may have shrunk but shouldn't have grown - check. */
2232 gc_assert(page_table
[next_page
].bytes_used
>= remaining_bytes
);
2234 page_table
[next_page
].allocated
= boxed
;
2235 gc_assert(page_table
[next_page
].allocated
==
2236 page_table
[first_page
].allocated
);
2238 /* Adjust the bytes_used. */
2239 old_bytes_used
= page_table
[next_page
].bytes_used
;
2240 page_table
[next_page
].bytes_used
= remaining_bytes
;
2242 bytes_freed
= old_bytes_used
- remaining_bytes
;
2244 /* Free any remaining pages; needs care. */
2246 while ((old_bytes_used
== GENCGC_CARD_BYTES
) &&
2247 (page_table
[next_page
].gen
== from_space
) &&
2248 page_allocated_no_region_p(next_page
) &&
2249 page_table
[next_page
].large_object
&&
2250 (page_table
[next_page
].scan_start_offset
==
2251 npage_bytes(next_page
- first_page
))) {
2252 /* It checks out OK, free the page. We don't need to bother zeroing
2253 * pages as this should have been done before shrinking the
2254 * object. These pages shouldn't be write protected as they
2255 * should be zero filled. */
2256 gc_assert(page_table
[next_page
].write_protected
== 0);
2258 old_bytes_used
= page_table
[next_page
].bytes_used
;
2259 page_table
[next_page
].allocated
= FREE_PAGE_FLAG
;
2260 page_table
[next_page
].bytes_used
= 0;
2261 bytes_freed
+= old_bytes_used
;
2265 if ((bytes_freed
> 0) && gencgc_verbose
) {
2267 "/maybe_adjust_large_object() freed %d\n",
2271 generations
[from_space
].bytes_allocated
-= bytes_freed
;
2272 bytes_allocated
-= bytes_freed
;
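
/* The accounting above shrinks a large object in place: every fully-used
 * page keeps GENCGC_CARD_BYTES in bytes_used, the last partial page keeps
 * the remainder, and wholly unused trailing pages go back to the free
 * list.  A standalone sketch of that page arithmetic over a hypothetical,
 * much-simplified page table (all demo_* names are made up for the demo). */
#if 0
#include <stddef.h>

#define DEMO_CARD_BYTES 32768

struct demo_page { size_t bytes_used; int free; };

/* Shrink an object starting at page 'first' from old_bytes to new_bytes
 * (new_bytes <= old_bytes); returns the number of bytes freed. */
static size_t demo_shrink_large_object(struct demo_page *pages, size_t first,
                                       size_t old_bytes, size_t new_bytes)
{
    size_t freed = 0, page = first, remaining = new_bytes;

    while (remaining > DEMO_CARD_BYTES) {          /* pages that stay full */
        pages[page].bytes_used = DEMO_CARD_BYTES;
        remaining -= DEMO_CARD_BYTES;
        page++;
    }
    freed += pages[page].bytes_used - remaining;   /* last kept page */
    pages[page].bytes_used = remaining;
    page++;

    /* Free whatever pages of the old allocation remain. */
    while ((page - first) * DEMO_CARD_BYTES < old_bytes) {
        freed += pages[page].bytes_used;
        pages[page].bytes_used = 0;
        pages[page].free = 1;
        page++;
    }
    return freed;
}
#endif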
/*
 * Why is this restricted to protected objects only?
 * Because the rest of the page has been scavenged already,
 * and since that leaves forwarding pointers in the unprotected
 * areas you cannot scavenge it again until those are gone.
 */
static void
scavenge_pinned_range(void* page_base, int start, int count)
{
    // 'start' and 'count' are expressed in units of dwords
    scavenge((lispobj*)page_base + 2*start, 2*count);
}

static void
scavenge_pinned_ranges()
{
    page_index_t page;
    for (page = 0; page < last_free_page; page++) {
        in_use_marker_t* bitmap = pinned_dwords(page);
        if (bitmap)
            bitmap_scan(bitmap,
                        GENCGC_CARD_BYTES / (2*N_WORD_BYTES) / N_WORD_BITS,
                        0, scavenge_pinned_range, page_address(page));
    }
}

static void wipe_range(void* page_base, int start, int count)
{
    bzero((lispobj*)page_base + 2*start, count*2*N_WORD_BYTES);
}
void
wipe_nonpinned_words()
{
    page_index_t i;
    in_use_marker_t* bitmap;

    for (i = 0; i < last_free_page; i++) {
        if (page_table[i].dont_move && (bitmap = pinned_dwords(i)) != 0) {
            bitmap_scan(bitmap,
                        GENCGC_CARD_BYTES / (2*N_WORD_BYTES) / N_WORD_BITS,
                        BIT_SCAN_INVERT | BIT_SCAN_CLEAR,
                        wipe_range, page_address(i));
            page_table[i].has_pin_map = 0;
            // move the page to newspace
            generations[new_space].bytes_allocated += page_table[i].bytes_used;
            generations[page_table[i].gen].bytes_allocated -= page_table[i].bytes_used;
            page_table[i].gen = new_space;
        }
    }
#ifndef LISP_FEATURE_WIN32
    madvise(page_table_pinned_dwords, pins_map_size_in_bytes, MADV_DONTNEED);
#endif
}
2333 pin_words(page_index_t pageindex
, lispobj
*mark_which_pointer
)
2335 struct page
*page
= &page_table
[pageindex
];
2340 gc_assert(mark_which_pointer
);
2341 if (!page
->has_pin_map
) {
2342 page
->has_pin_map
= 1;
2346 in_use_marker_t
* map
= pinned_dwords(pageindex
);
2347 for (i
=0; i
<n_dwords_in_card
/N_WORD_BITS
; ++i
)
2348 gc_assert(map
[i
] == 0);
2352 lispobj header
= *mark_which_pointer
;
2354 // Don't bother calling a sizing function for fixnums or pointers.
2355 // The object pointed to must be a cons.
2356 if (!fixnump(header
) && !is_lisp_pointer(header
)) {
2357 size
= (sizetab
[widetag_of(header
)])(mark_which_pointer
);
2358 if (size
== 1 && (lowtag_of(header
) == 9 || lowtag_of(header
) == 2))
2361 gc_assert(size
% 2 == 0);
2362 lispobj
*page_base
= page_address(pageindex
);
2363 unsigned int begin_dword_index
= (mark_which_pointer
- page_base
) / 2;
2364 unsigned int end_dword_index
= begin_dword_index
+ size
/ 2;
2366 in_use_marker_t
*bitmap
= pinned_dwords(pageindex
);
2367 for (index
= begin_dword_index
; index
< end_dword_index
; index
++)
2368 bitmap
[index
/N_WORD_BITS
] |= 1LU << (index
% N_WORD_BITS
);
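
/* Recording a pinned object is just setting the run of bits that covers
 * its double-words, mirroring the loop above.  A standalone sketch, with
 * DEMO_BITS_PER_WORD standing in for N_WORD_BITS and the demo_* names
 * being hypothetical. */
#if 0
#include <stddef.h>
#include <stdint.h>

#define DEMO_BITS_PER_WORD 64

/* Set bits [begin, end) in 'map'. */
static void demo_mark_dword_range(uint64_t *map, size_t begin, size_t end)
{
    size_t index;
    for (index = begin; index < end; index++)
        map[index / DEMO_BITS_PER_WORD] |= (uint64_t)1 << (index % DEMO_BITS_PER_WORD);
}
#endif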
2371 /* Take a possible pointer to a Lisp object and mark its page in the
2372 * page_table so that it will not be relocated during a GC.
2374 * This involves locating the page it points to, then backing up to
2375 * the start of its region, then marking all pages dont_move from there
2376 * up to the first page that's not full or has a different generation
2378 * It is assumed that all the page static flags have been cleared at
2379 * the start of a GC.
2381 * It is also assumed that the current gc_alloc() region has been
2382 * flushed and the tables updated. */
2385 preserve_pointer(void *addr
)
2387 page_index_t addr_page_index
= find_page_index(addr
);
2388 page_index_t first_page
;
2390 unsigned int region_allocation
;
2391 lispobj
*begin_ptr
= NULL
;
2393 if (!valid_conservative_root_p(addr
, addr_page_index
, &begin_ptr
))
2396 /* (Now that we know that addr_page_index is in range, it's
2397 * safe to index into page_table[] with it.) */
2398 region_allocation
= page_table
[addr_page_index
].allocated
;
2400 /* Find the beginning of the region. Note that there may be
2401 * objects in the region preceding the one that we were passed a
2402 * pointer to: if this is the case, we will write-protect all the
2403 * previous objects' pages too. */
2406 /* I think this'd work just as well, but without the assertions.
2407 * -dan 2004.01.01 */
2408 first_page
= find_page_index(page_scan_start(addr_page_index
))
2410 first_page
= addr_page_index
;
2411 while (!page_starts_contiguous_block_p(first_page
)) {
2413 /* Do some checks. */
2414 gc_assert(page_table
[first_page
].bytes_used
== GENCGC_CARD_BYTES
);
2415 gc_assert(page_table
[first_page
].gen
== from_space
);
2416 gc_assert(page_table
[first_page
].allocated
== region_allocation
);
2420 /* Adjust any large objects before promotion as they won't be
2421 * copied after promotion. */
2422 if (page_table
[first_page
].large_object
) {
2423 maybe_adjust_large_object(page_address(first_page
));
2424 /* It may have moved to unboxed pages. */
2425 region_allocation
= page_table
[first_page
].allocated
;
2428 /* Now work forward until the end of this contiguous area is found,
2429 * marking all pages as dont_move. */
2430 for (i
= first_page
; ;i
++) {
2431 gc_assert(page_table
[i
].allocated
== region_allocation
);
2433 /* Mark the page static. */
2434 page_table
[i
].dont_move
= 1;
2436 /* It is essential that the pages are not write protected as
2437 * they may have pointers into the old-space which need
2438 * scavenging. They shouldn't be write protected at this
2440 gc_assert(!page_table
[i
].write_protected
);
2442 /* Check whether this is the last page in this contiguous block.. */
2443 if (page_ends_contiguous_block_p(i
, from_space
))
2447 #if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
2448 /* Do not do this for multi-page objects. Those pages do not need
2449 * object wipeout anyway.
2451 if (i
== first_page
) {
2452 /* We need the pointer to the beginning of the object
2453 * We might have gotten it above but maybe not, so make sure
2455 if (begin_ptr
== NULL
) {
2456 possibly_valid_dynamic_space_pointer_s(addr
, first_page
,
2459 pin_words(first_page
, begin_ptr
);
2463 /* Check that the page is now static. */
2464 gc_assert(page_table
[addr_page_index
].dont_move
!= 0);
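
/* Pinning is page-granular: starting from the page the ambiguous word
 * hits, back up to the first page of its contiguous block and mark every
 * page through the block's end as immovable.  A standalone sketch of that
 * walk over a hypothetical simplified page table (demo_* names are made up). */
#if 0
#include <stddef.h>

struct demo_page {
    int first_in_block;   /* 1 if this page starts a contiguous block */
    int last_in_block;    /* 1 if this page ends a contiguous block */
    int dont_move;        /* set once the page is pinned */
};

/* Pin the whole contiguous block containing 'page'. */
static void demo_pin_block(struct demo_page *pages, size_t page)
{
    while (!pages[page].first_in_block)
        page--;
    for (;;) {
        pages[page].dont_move = 1;
        if (pages[page].last_in_block)
            break;
        page++;
    }
}
#endif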
2467 /* If the given page is not write-protected, then scan it for pointers
2468 * to younger generations or the top temp. generation, if no
2469 * suspicious pointers are found then the page is write-protected.
2471 * Care is taken to check for pointers to the current gc_alloc()
2472 * region if it is a younger generation or the temp. generation. This
2473 * frees the caller from doing a gc_alloc_update_page_tables(). Actually
2474 * the gc_alloc_generation does not need to be checked as this is only
2475 * called from scavenge_generation() when the gc_alloc generation is
2476 * younger, so it just checks if there is a pointer to the current
2479 * We return 1 if the page was write-protected, else 0. */
2481 update_page_write_prot(page_index_t page
)
2483 generation_index_t gen
= page_table
[page
].gen
;
2486 void **page_addr
= (void **)page_address(page
);
2487 sword_t num_words
= page_table
[page
].bytes_used
/ N_WORD_BYTES
;
2489 /* Shouldn't be a free page. */
2490 gc_assert(page_allocated_p(page
));
2491 gc_assert(page_table
[page
].bytes_used
!= 0);
2493 /* Skip if it's already write-protected, pinned, or unboxed */
2494 if (page_table
[page
].write_protected
2495 /* FIXME: What's the reason for not write-protecting pinned pages? */
2496 || page_table
[page
].dont_move
2497 || page_unboxed_p(page
))
2500 /* Scan the page for pointers to younger generations or the
2501 * top temp. generation. */
2503 /* This is conservative: any word satisfying is_lisp_pointer() is
2504 * assumed to be a pointer despite that it might be machine code
2505 * or part of an unboxed array */
2506 for (j
= 0; j
< num_words
; j
++) {
2507 void *ptr
= *(page_addr
+j
);
2510 /* Check that it's in the dynamic space */
2511 if (is_lisp_pointer((lispobj
)ptr
) && (index
= find_page_index(ptr
)) != -1)
2512 if (/* Does it point to a younger or the temp. generation? */
2513 (page_allocated_p(index
)
2514 && (page_table
[index
].bytes_used
!= 0)
2515 && ((page_table
[index
].gen
< gen
)
2516 || (page_table
[index
].gen
== SCRATCH_GENERATION
)))
2518 /* Or does it point within a current gc_alloc() region? */
2519 || ((boxed_region
.start_addr
<= ptr
)
2520 && (ptr
<= boxed_region
.free_pointer
))
2521 || ((unboxed_region
.start_addr
<= ptr
)
2522 && (ptr
<= unboxed_region
.free_pointer
))) {
2529 /* Write-protect the page. */
2530 /*FSHOW((stderr, "/write-protecting page %d gen %d\n", page, gen));*/
2532 os_protect((void *)page_addr
,
2534 OS_VM_PROT_READ
|OS_VM_PROT_EXECUTE
);
2536 /* Note the page as protected in the page tables. */
2537 page_table
[page
].write_protected
= 1;
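
/* The write barrier here is mprotect-based: a boxed page with no pointers
 * to younger generations is remapped read-only, so a later mutation faults
 * and clears the protection.  A rough standalone sketch of that idea using
 * POSIX mprotect(); the younger-pointer test is a trivial stub here, where
 * the collector would consult its page table (demo_* names are hypothetical). */
#if 0
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

#define DEMO_PAGE_BYTES 4096

/* Stub for the demo: does 'word' look like a pointer to a younger object? */
static int demo_points_at_younger_p(uintptr_t word)
{
    (void)word;
    return 0;
}

/* Scan one page; if nothing in it can point at younger objects,
 * write-protect it.  Returns 1 if the page was protected. */
static int demo_maybe_write_protect(void *page_addr, size_t used_bytes)
{
    uintptr_t *words = page_addr;
    size_t i, nwords = used_bytes / sizeof(uintptr_t);

    for (i = 0; i < nwords; i++)
        if (demo_points_at_younger_p(words[i]))
            return 0;

    mprotect(page_addr, DEMO_PAGE_BYTES, PROT_READ | PROT_EXEC);
    return 1;
}
#endif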
2543 /* Scavenge all generations from FROM to TO, inclusive, except for
2544 * new_space which needs special handling, as new objects may be
2545 * added which are not checked here - use scavenge_newspace generation.
2547 * Write-protected pages should not have any pointers to the
2548 * from_space so do not need scavenging; thus write-protected pages are
2549 * not always scavenged. There is some code to check that these pages
2550 * are not written; but to check fully the write-protected pages need
2551 * to be scavenged by disabling the code to skip them.
2553 * Under the current scheme when a generation is GCed the younger
2554 * generations will be empty. So, when a generation is being GCed it
2555 * is only necessary to scavenge the older generations for pointers
2556 * not the younger. So a page that does not have pointers to younger
2557 * generations does not need to be scavenged.
2559 * The write-protection can be used to note pages that don't have
2560 * pointers to younger pages. But pages can be written without having
2561 * pointers to younger generations. After the pages are scavenged here
2562 * they can be scanned for pointers to younger generations and if
2563 * there are none the page can be write-protected.
2565 * One complication is when the newspace is the top temp. generation.
2567 * Enabling SC_GEN_CK scavenges the write-protected pages and checks
2568 * that none were written, which they shouldn't be as they should have
2569 * no pointers to younger generations. This breaks down for weak
2570 * pointers as the objects contain a link to the next and are written
2571 * if a weak pointer is scavenged. Still it's a useful check. */
2573 scavenge_generations(generation_index_t from
, generation_index_t to
)
2576 page_index_t num_wp
= 0;
2580 /* Clear the write_protected_cleared flags on all pages. */
2581 for (i
= 0; i
< page_table_pages
; i
++)
2582 page_table
[i
].write_protected_cleared
= 0;
2585 for (i
= 0; i
< last_free_page
; i
++) {
2586 generation_index_t generation
= page_table
[i
].gen
;
2588 && (page_table
[i
].bytes_used
!= 0)
2589 && (generation
!= new_space
)
2590 && (generation
>= from
)
2591 && (generation
<= to
)) {
2592 page_index_t last_page
,j
;
2593 int write_protected
=1;
2595 /* This should be the start of a region */
2596 gc_assert(page_starts_contiguous_block_p(i
));
2598 /* Now work forward until the end of the region */
2599 for (last_page
= i
; ; last_page
++) {
2601 write_protected
&& page_table
[last_page
].write_protected
;
2602 if (page_ends_contiguous_block_p(last_page
, generation
))
2605 if (!write_protected
) {
2606 scavenge(page_address(i
),
2607 ((uword_t
)(page_table
[last_page
].bytes_used
2608 + npage_bytes(last_page
-i
)))
2611 /* Now scan the pages and write protect those that
2612 * don't have pointers to younger generations. */
2613 if (enable_page_protection
) {
2614 for (j
= i
; j
<= last_page
; j
++) {
2615 num_wp
+= update_page_write_prot(j
);
2618 if ((gencgc_verbose
> 1) && (num_wp
!= 0)) {
2620 "/write protected %d pages within generation %d\n",
2621 num_wp
, generation
));
2629 /* Check that none of the write_protected pages in this generation
2630 * have been written to. */
2631 for (i
= 0; i
< page_table_pages
; i
++) {
2632 if (page_allocated_p(i
)
2633 && (page_table
[i
].bytes_used
!= 0)
2634 && (page_table
[i
].gen
== generation
)
2635 && (page_table
[i
].write_protected_cleared
!= 0)) {
2636 FSHOW((stderr
, "/scavenge_generation() %d\n", generation
));
2638 "/page bytes_used=%d scan_start_offset=%lu dont_move=%d\n",
2639 page_table
[i
].bytes_used
,
2640 page_table
[i
].scan_start_offset
,
2641 page_table
[i
].dont_move
));
2642 lose("write to protected page %d in scavenge_generation()\n", i
);
2649 /* Scavenge a newspace generation. As it is scavenged new objects may
2650 * be allocated to it; these will also need to be scavenged. This
2651 * repeats until there are no more objects unscavenged in the
2652 * newspace generation.
2654 * To help improve the efficiency, areas written are recorded by
2655 * gc_alloc() and only these scavenged. Sometimes a little more will be
2656 * scavenged, but this causes no harm. An easy check is done that the
2657 * scavenged bytes equal the number allocated in the previous scavenge.
2660 * Write-protected pages are not scanned except if they are marked
2661 * dont_move in which case they may have been promoted and still have
2662 * pointers to the from space.
2664 * Write-protected pages could potentially be written by alloc however
2665 * to avoid having to handle re-scavenging of write-protected pages
2666 * gc_alloc() does not write to write-protected pages.
2668 * New areas of objects allocated are recorded alternatively in the two
2669 * new_areas arrays below. */
2670 static struct new_area new_areas_1
[NUM_NEW_AREAS
];
2671 static struct new_area new_areas_2
[NUM_NEW_AREAS
];
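
/* The newspace scan double-buffers its records: while one array is being
 * walked, gc_alloc() appends freshly allocated areas to the other, and the
 * loop repeats until a pass records nothing new.  A standalone sketch of
 * that driver; demo_scan_area() is a trivial stub standing in for the real
 * scavenger, and all demo_* names are hypothetical. */
#if 0
#include <stddef.h>

#define DEMO_NUM_NEW_AREAS 512

struct demo_area { void *start; size_t nbytes; };

/* Stub: scavenge one area, appending any areas it allocates into 'out'
 * (bounded by DEMO_NUM_NEW_AREAS) and returning the updated count. */
static size_t demo_scan_area(struct demo_area a,
                             struct demo_area *out, size_t out_count)
{
    (void)a; (void)out;
    return out_count;
}

static void demo_scavenge_until_quiescent(struct demo_area *initial, size_t n)
{
    static struct demo_area buf_a[DEMO_NUM_NEW_AREAS], buf_b[DEMO_NUM_NEW_AREAS];
    struct demo_area *previous = initial, *current = buf_a;
    size_t previous_count = n, current_count, i;

    while (previous_count > 0) {
        current_count = 0;
        for (i = 0; i < previous_count; i++)
            current_count = demo_scan_area(previous[i], current, current_count);
        /* What was just recorded becomes the next pass's input. */
        previous = current;
        previous_count = current_count;
        current = (current == buf_a) ? buf_b : buf_a;
    }
}
#endif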
2673 /* Do one full scan of the new space generation. This is not enough to
2674 * complete the job as new objects may be added to the generation in
2675 * the process which are not scavenged. */
2677 scavenge_newspace_generation_one_scan(generation_index_t generation
)
2682 "/starting one full scan of newspace generation %d\n",
2684 for (i
= 0; i
< last_free_page
; i
++) {
2685 /* Note that this skips over open regions when it encounters them. */
2687 && (page_table
[i
].bytes_used
!= 0)
2688 && (page_table
[i
].gen
== generation
)
2689 && ((page_table
[i
].write_protected
== 0)
2690 /* (This may be redundant as write_protected is now
2691 * cleared before promotion.) */
2692 || (page_table
[i
].dont_move
== 1))) {
2693 page_index_t last_page
;
2696 /* The scavenge will start at the scan_start_offset of
2699 * We need to find the full extent of this contiguous
2700 * block in case objects span pages.
2702 * Now work forward until the end of this contiguous area
2703 * is found. A small area is preferred as there is a
2704 * better chance of its pages being write-protected. */
2705 for (last_page
= i
; ;last_page
++) {
2706 /* If all pages are write-protected and movable,
2707 * then no need to scavenge */
2708 all_wp
=all_wp
&& page_table
[last_page
].write_protected
&&
2709 !page_table
[last_page
].dont_move
;
2711 /* Check whether this is the last page in this
2712 * contiguous block */
2713 if (page_ends_contiguous_block_p(last_page
, generation
))
2717 /* Do a limited check for write-protected pages. */
2719 sword_t nwords
= (((uword_t
)
2720 (page_table
[last_page
].bytes_used
2721 + npage_bytes(last_page
-i
)
2722 + page_table
[i
].scan_start_offset
))
2724 new_areas_ignore_page
= last_page
;
2726 scavenge(page_scan_start(i
), nwords
);
2733 "/done with one full scan of newspace generation %d\n",
2737 /* Do a complete scavenge of the newspace generation. */
2739 scavenge_newspace_generation(generation_index_t generation
)
2743 /* the new_areas array currently being written to by gc_alloc() */
2744 struct new_area (*current_new_areas
)[] = &new_areas_1
;
2745 size_t current_new_areas_index
;
2747 /* the new_areas created by the previous scavenge cycle */
2748 struct new_area (*previous_new_areas
)[] = NULL
;
2749 size_t previous_new_areas_index
;
2751 /* Flush the current regions updating the tables. */
2752 gc_alloc_update_all_page_tables();
2754 /* Turn on the recording of new areas by gc_alloc(). */
2755 new_areas
= current_new_areas
;
2756 new_areas_index
= 0;
2758 /* Don't need to record new areas that get scavenged anyway during
2759 * scavenge_newspace_generation_one_scan. */
2760 record_new_objects
= 1;
2762 /* Start with a full scavenge. */
2763 scavenge_newspace_generation_one_scan(generation
);
2765 /* Record all new areas now. */
2766 record_new_objects
= 2;
2768 /* Give a chance to weak hash tables to make other objects live.
2769 * FIXME: The algorithm implemented here for weak hash table gcing
2770 * is O(W^2+N) as Bruno Haible warns in
2771 * http://www.haible.de/bruno/papers/cs/weak/WeakDatastructures-writeup.html
2772 * see "Implementation 2". */
2773 scav_weak_hash_tables();
2775 /* Flush the current regions updating the tables. */
2776 gc_alloc_update_all_page_tables();
2778 /* Grab new_areas_index. */
2779 current_new_areas_index
= new_areas_index
;
2782 "The first scan is finished; current_new_areas_index=%d.\n",
2783 current_new_areas_index));*/
2785 while (current_new_areas_index
> 0) {
2786 /* Move the current to the previous new areas */
2787 previous_new_areas
= current_new_areas
;
2788 previous_new_areas_index
= current_new_areas_index
;
2790 /* Scavenge all the areas in previous new areas. Any new areas
2791 * allocated are saved in current_new_areas. */
2793 /* Allocate an array for current_new_areas; alternating between
2794 * new_areas_1 and 2 */
2795 if (previous_new_areas
== &new_areas_1
)
2796 current_new_areas
= &new_areas_2
;
2798 current_new_areas
= &new_areas_1
;
2800 /* Set up for gc_alloc(). */
2801 new_areas
= current_new_areas
;
2802 new_areas_index
= 0;
2804 /* Check whether previous_new_areas had overflowed. */
2805 if (previous_new_areas_index
>= NUM_NEW_AREAS
) {
2807 /* New areas of objects allocated have been lost so need to do a
2808 * full scan to be sure! If this becomes a problem try
2809 * increasing NUM_NEW_AREAS. */
2810 if (gencgc_verbose
) {
2811 SHOW("new_areas overflow, doing full scavenge");
2814 /* Don't need to record new areas that get scavenged
2815 * anyway during scavenge_newspace_generation_one_scan. */
2816 record_new_objects
= 1;
2818 scavenge_newspace_generation_one_scan(generation
);
2820 /* Record all new areas now. */
2821 record_new_objects
= 2;
2823 scav_weak_hash_tables();
2825 /* Flush the current regions updating the tables. */
2826 gc_alloc_update_all_page_tables();
2830 /* Work through previous_new_areas. */
2831 for (i
= 0; i
< previous_new_areas_index
; i
++) {
2832 page_index_t page
= (*previous_new_areas
)[i
].page
;
2833 size_t offset
= (*previous_new_areas
)[i
].offset
;
2834 size_t size
= (*previous_new_areas
)[i
].size
/ N_WORD_BYTES
;
2835 gc_assert((*previous_new_areas
)[i
].size
% N_WORD_BYTES
== 0);
2836 scavenge(page_address(page
)+offset
, size
);
2839 scav_weak_hash_tables();
2841 /* Flush the current regions updating the tables. */
2842 gc_alloc_update_all_page_tables();
2845 current_new_areas_index
= new_areas_index
;
2848 "The re-scan has finished; current_new_areas_index=%d.\n",
2849 current_new_areas_index));*/
2852 /* Turn off recording of areas allocated by gc_alloc(). */
2853 record_new_objects
= 0;
2858 /* Check that none of the write_protected pages in this generation
2859 * have been written to. */
2860 for (i
= 0; i
< page_table_pages
; i
++) {
2861 if (page_allocated_p(i
)
2862 && (page_table
[i
].bytes_used
!= 0)
2863 && (page_table
[i
].gen
== generation
)
2864 && (page_table
[i
].write_protected_cleared
!= 0)
2865 && (page_table
[i
].dont_move
== 0)) {
2866 lose("write protected page %d written to in scavenge_newspace_generation\ngeneration=%d dont_move=%d\n",
2867 i
, generation
, page_table
[i
].dont_move
);
2874 /* Un-write-protect all the pages in from_space. This is done at the
2875 * start of a GC else there may be many page faults while scavenging
2876 * the newspace (I've seen it drive the system time to 99%). These pages
2877 * would need to be unprotected anyway before unmapping in
2878 * free_oldspace; not sure what effect this has on paging.. */
2880 unprotect_oldspace(void)
2883 void *region_addr
= 0;
2884 void *page_addr
= 0;
2885 uword_t region_bytes
= 0;
2887 for (i
= 0; i
< last_free_page
; i
++) {
2888 if (page_allocated_p(i
)
2889 && (page_table
[i
].bytes_used
!= 0)
2890 && (page_table
[i
].gen
== from_space
)) {
2892 /* Remove any write-protection. We should be able to rely
2893 * on the write-protect flag to avoid redundant calls. */
2894 if (page_table
[i
].write_protected
) {
2895 page_table
[i
].write_protected
= 0;
2896 page_addr
= page_address(i
);
2899 region_addr
= page_addr
;
2900 region_bytes
= GENCGC_CARD_BYTES
;
2901 } else if (region_addr
+ region_bytes
== page_addr
) {
2902 /* Region continue. */
2903 region_bytes
+= GENCGC_CARD_BYTES
;
2905 /* Unprotect previous region. */
2906 os_protect(region_addr
, region_bytes
, OS_VM_PROT_ALL
);
2907 /* First page in new region. */
2908 region_addr
= page_addr
;
2909 region_bytes
= GENCGC_CARD_BYTES
;
2915 /* Unprotect last region. */
2916 os_protect(region_addr
, region_bytes
, OS_VM_PROT_ALL
);
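
/* Rather than one os_protect() per page, the loop above coalesces runs of
 * adjacent write-protected pages and unprotects each run with a single
 * call.  A standalone sketch of that batching using POSIX mprotect() over
 * a hypothetical flag array (demo_* names are made up for the demo). */
#if 0
#include <stddef.h>
#include <sys/mman.h>

#define DEMO_PAGE_BYTES 4096

/* Unprotect every flagged page in [base, base + npages pages), merging
 * adjacent flagged pages into one mprotect() call. */
static void demo_unprotect_runs(char *base, const int *flagged, size_t npages)
{
    size_t i = 0;
    while (i < npages) {
        if (!flagged[i]) { i++; continue; }
        size_t run_start = i;
        while (i < npages && flagged[i])
            i++;
        mprotect(base + run_start * DEMO_PAGE_BYTES,
                 (i - run_start) * DEMO_PAGE_BYTES,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
}
#endif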
2920 /* Work through all the pages and free any in from_space. This
2921 * assumes that all objects have been copied or promoted to an older
2922 * generation. Bytes_allocated and the generation bytes_allocated
2923 * counter are updated. The number of bytes freed is returned. */
2927 uword_t bytes_freed
= 0;
2928 page_index_t first_page
, last_page
;
2933 /* Find a first page for the next region of pages. */
2934 while ((first_page
< last_free_page
)
2935 && (page_free_p(first_page
)
2936 || (page_table
[first_page
].bytes_used
== 0)
2937 || (page_table
[first_page
].gen
!= from_space
)))
2940 if (first_page
>= last_free_page
)
2943 /* Find the last page of this region. */
2944 last_page
= first_page
;
2947 /* Free the page. */
2948 bytes_freed
+= page_table
[last_page
].bytes_used
;
2949 generations
[page_table
[last_page
].gen
].bytes_allocated
-=
2950 page_table
[last_page
].bytes_used
;
2951 page_table
[last_page
].allocated
= FREE_PAGE_FLAG
;
2952 page_table
[last_page
].bytes_used
= 0;
2953 /* Should already be unprotected by unprotect_oldspace(). */
2954 gc_assert(!page_table
[last_page
].write_protected
);
2957 while ((last_page
< last_free_page
)
2958 && page_allocated_p(last_page
)
2959 && (page_table
[last_page
].bytes_used
!= 0)
2960 && (page_table
[last_page
].gen
== from_space
));
2962 #ifdef READ_PROTECT_FREE_PAGES
2963 os_protect(page_address(first_page
),
2964 npage_bytes(last_page
-first_page
),
2967 first_page
= last_page
;
2968 } while (first_page
< last_free_page
);
2970 bytes_allocated
-= bytes_freed
;
2975 /* Print some information about a pointer at the given address. */
2977 print_ptr(lispobj
*addr
)
2979 /* If addr is in the dynamic space then print out the page information. */
2980 page_index_t pi1
= find_page_index((void*)addr
);
2983 fprintf(stderr
," %p: page %d alloc %d gen %d bytes_used %d offset %lu dont_move %d\n",
2986 page_table
[pi1
].allocated
,
2987 page_table
[pi1
].gen
,
2988 page_table
[pi1
].bytes_used
,
2989 page_table
[pi1
].scan_start_offset
,
2990 page_table
[pi1
].dont_move
);
2991 fprintf(stderr
," %x %x %x %x (%x) %x %x %x %x\n",
static boolean
is_in_stack_space(lispobj ptr)
{
    /* For space verification: Pointers can be valid if they point
     * to a thread stack space.  This would be faster if the thread
     * structures had page-table entries as if they were part of
     * the heap space. */
    struct thread *th;
    for_each_thread(th) {
        if ((th->control_stack_start <= (lispobj *)ptr) &&
            (th->control_stack_end >= (lispobj *)ptr)) {
            return 1;
        }
    }
    return 0;
}
3022 verify_space(lispobj
*start
, size_t words
)
3024 int is_in_dynamic_space
= (find_page_index((void*)start
) != -1);
3025 int is_in_readonly_space
=
3026 (READ_ONLY_SPACE_START
<= (uword_t
)start
&&
3027 (uword_t
)start
< SymbolValue(READ_ONLY_SPACE_FREE_POINTER
,0));
3031 lispobj thing
= *(lispobj
*)start
;
3033 if (is_lisp_pointer(thing
)) {
3034 page_index_t page_index
= find_page_index((void*)thing
);
3035 sword_t to_readonly_space
=
3036 (READ_ONLY_SPACE_START
<= thing
&&
3037 thing
< SymbolValue(READ_ONLY_SPACE_FREE_POINTER
,0));
3038 sword_t to_static_space
=
3039 (STATIC_SPACE_START
<= thing
&&
3040 thing
< SymbolValue(STATIC_SPACE_FREE_POINTER
,0));
3042 /* Does it point to the dynamic space? */
3043 if (page_index
!= -1) {
3044 /* If it's within the dynamic space it should point to a used
3045 * page. XX Could check the offset too. */
3046 if (page_allocated_p(page_index
)
3047 && (page_table
[page_index
].bytes_used
== 0))
3048 lose ("Ptr %p @ %p sees free page.\n", thing
, start
);
3049 /* Check that it doesn't point to a forwarding pointer! */
3050 if (*((lispobj
*)native_pointer(thing
)) == 0x01) {
3051 lose("Ptr %p @ %p sees forwarding ptr.\n", thing
, start
);
3053 /* Check that its not in the RO space as it would then be a
3054 * pointer from the RO to the dynamic space. */
3055 if (is_in_readonly_space
) {
3056 lose("ptr to dynamic space %p from RO space %x\n",
3059 /* Does it point to a plausible object? This check slows
3060 * it down a lot (so it's commented out).
3062 * "a lot" is serious: it ate 50 minutes cpu time on
3063 * my duron 950 before I came back from lunch and
3066 * FIXME: Add a variable to enable this
3069 if (!possibly_valid_dynamic_space_pointer_s((lispobj *)thing, page_index, NULL)) {
3070 lose("ptr %p to invalid object %p\n", thing, start);
3074 extern char __attribute__((unused
)) funcallable_instance_tramp
;
3075 /* Verify that it points to another valid space. */
3076 if (!to_readonly_space
&& !to_static_space
3077 #ifndef LISP_FEATURE_READ_ONLY_TRAMPS
3078 && (thing
!= (lispobj
)&funcallable_instance_tramp
)
3080 && !is_in_stack_space(thing
)) {
3081 lose("Ptr %p @ %p sees junk.\n", thing
, start
);
3085 if (!(fixnump(thing
))) {
3087 switch(widetag_of(*start
)) {
3090 case SIMPLE_VECTOR_WIDETAG
:
3092 case COMPLEX_WIDETAG
:
3093 case SIMPLE_ARRAY_WIDETAG
:
3094 case COMPLEX_BASE_STRING_WIDETAG
:
3095 #ifdef COMPLEX_CHARACTER_STRING_WIDETAG
3096 case COMPLEX_CHARACTER_STRING_WIDETAG
:
3098 case COMPLEX_VECTOR_NIL_WIDETAG
:
3099 case COMPLEX_BIT_VECTOR_WIDETAG
:
3100 case COMPLEX_VECTOR_WIDETAG
:
3101 case COMPLEX_ARRAY_WIDETAG
:
3102 case CLOSURE_HEADER_WIDETAG
:
3103 case FUNCALLABLE_INSTANCE_HEADER_WIDETAG
:
3104 case VALUE_CELL_HEADER_WIDETAG
:
3105 case SYMBOL_HEADER_WIDETAG
:
3106 case CHARACTER_WIDETAG
:
3107 #if N_WORD_BITS == 64
3108 case SINGLE_FLOAT_WIDETAG
:
3110 case UNBOUND_MARKER_WIDETAG
:
3115 case INSTANCE_HEADER_WIDETAG
:
3117 sword_t ntotal
= instance_length(thing
);
3118 lispobj layout
= instance_layout(start
);
3123 instance_scan_interleaved(verify_space
,
3125 native_pointer(layout
));
3129 case CODE_HEADER_WIDETAG
:
3131 lispobj object
= *start
;
3133 sword_t nheader_words
, ncode_words
, nwords
;
3135 struct simple_fun
*fheaderp
;
3137 code
= (struct code
*) start
;
3139 /* Check that it's not in the dynamic space.
3140 * FIXME: Isn't it supposed to be OK for code
3141 * objects to be in the dynamic space these days? */
3142 /* It is for byte compiled code, but there's
3143 * no byte compilation in SBCL anymore. */
3144 if (is_in_dynamic_space
3145 /* Only when enabled */
3146 && verify_dynamic_code_check
) {
3148 "/code object at %p in the dynamic space\n",
3152 ncode_words
= code_instruction_words(code
->code_size
);
3153 nheader_words
= code_header_words(object
);
3154 nwords
= ncode_words
+ nheader_words
;
3155 nwords
= CEILING(nwords
, 2);
3156 /* Scavenge the boxed section of the code data block */
3157 verify_space(start
+ 1, nheader_words
- 1);
3159 /* Scavenge the boxed section of each function
3160 * object in the code data block. */
3161 fheaderl
= code
->entry_points
;
3162 while (fheaderl
!= NIL
) {
3164 (struct simple_fun
*) native_pointer(fheaderl
);
3165 gc_assert(widetag_of(fheaderp
->header
) ==
3166 SIMPLE_FUN_HEADER_WIDETAG
);
3167 verify_space(SIMPLE_FUN_SCAV_START(fheaderp
),
3168 SIMPLE_FUN_SCAV_NWORDS(fheaderp
));
3169 fheaderl
= fheaderp
->next
;
3175 /* unboxed objects */
3176 case BIGNUM_WIDETAG
:
3177 #if N_WORD_BITS != 64
3178 case SINGLE_FLOAT_WIDETAG
:
3180 case DOUBLE_FLOAT_WIDETAG
:
3181 #ifdef COMPLEX_LONG_FLOAT_WIDETAG
3182 case LONG_FLOAT_WIDETAG
:
3184 #ifdef COMPLEX_SINGLE_FLOAT_WIDETAG
3185 case COMPLEX_SINGLE_FLOAT_WIDETAG
:
3187 #ifdef COMPLEX_DOUBLE_FLOAT_WIDETAG
3188 case COMPLEX_DOUBLE_FLOAT_WIDETAG
:
3190 #ifdef COMPLEX_LONG_FLOAT_WIDETAG
3191 case COMPLEX_LONG_FLOAT_WIDETAG
:
3193 #ifdef SIMD_PACK_WIDETAG
3194 case SIMD_PACK_WIDETAG
:
3196 case SIMPLE_BASE_STRING_WIDETAG
:
3197 #ifdef SIMPLE_CHARACTER_STRING_WIDETAG
3198 case SIMPLE_CHARACTER_STRING_WIDETAG
:
3200 case SIMPLE_BIT_VECTOR_WIDETAG
:
3201 case SIMPLE_ARRAY_NIL_WIDETAG
:
3202 case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG
:
3203 case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG
:
3204 case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG
:
3205 case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG
:
3206 case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG
:
3207 case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG
:
3209 case SIMPLE_ARRAY_UNSIGNED_FIXNUM_WIDETAG
:
3211 case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG
:
3212 case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG
:
3213 #ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
3214 case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
:
3216 #ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
3217 case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
:
3219 #ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
3220 case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
:
3222 #ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
3223 case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
:
3226 case SIMPLE_ARRAY_FIXNUM_WIDETAG
:
3228 #ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
3229 case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
:
3231 #ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
3232 case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
:
3234 case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG
:
3235 case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG
:
3236 #ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
3237 case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG
:
3239 #ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
3240 case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
:
3242 #ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
3243 case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
:
3245 #ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
3246 case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
:
3249 case WEAK_POINTER_WIDETAG
:
3250 #ifdef NO_TLS_VALUE_MARKER_WIDETAG
3251 case NO_TLS_VALUE_MARKER_WIDETAG
:
3253 count
= (sizetab
[widetag_of(*start
)])(start
);
3257 lose("Unhandled widetag %p at %p\n",
3258 widetag_of(*start
), start
);
3270 /* FIXME: It would be nice to make names consistent so that
3271 * foo_size meant size *in* *bytes* instead of size in some
3272 * arbitrary units. (Yes, this caused a bug, how did you guess?:-)
3273 * Some counts of lispobjs are called foo_count; it might be good
3274 * to grep for all foo_size and rename the appropriate ones to
3276 sword_t read_only_space_size
=
3277 (lispobj
*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER
,0)
3278 - (lispobj
*)READ_ONLY_SPACE_START
;
3279 sword_t static_space_size
=
3280 (lispobj
*)SymbolValue(STATIC_SPACE_FREE_POINTER
,0)
3281 - (lispobj
*)STATIC_SPACE_START
;
3283 for_each_thread(th
) {
3284 sword_t binding_stack_size
=
3285 (lispobj
*)get_binding_stack_pointer(th
)
3286 - (lispobj
*)th
->binding_stack_start
;
3287 verify_space(th
->binding_stack_start
, binding_stack_size
);
3289 verify_space((lispobj
*)READ_ONLY_SPACE_START
, read_only_space_size
);
3290 verify_space((lispobj
*)STATIC_SPACE_START
, static_space_size
);
3294 verify_generation(generation_index_t generation
)
3298 for (i
= 0; i
< last_free_page
; i
++) {
3299 if (page_allocated_p(i
)
3300 && (page_table
[i
].bytes_used
!= 0)
3301 && (page_table
[i
].gen
== generation
)) {
3302 page_index_t last_page
;
3304 /* This should be the start of a contiguous block */
3305 gc_assert(page_starts_contiguous_block_p(i
));
3307 /* Need to find the full extent of this contiguous block in case
3308 objects span pages. */
3310 /* Now work forward until the end of this contiguous area is
3312 for (last_page
= i
; ;last_page
++)
3313 /* Check whether this is the last page in this contiguous
3315 if (page_ends_contiguous_block_p(last_page
, generation
))
3318 verify_space(page_address(i
),
3320 (page_table
[last_page
].bytes_used
3321 + npage_bytes(last_page
-i
)))
3328 /* Check that all the free space is zero filled. */
3330 verify_zero_fill(void)
3334 for (page
= 0; page
< last_free_page
; page
++) {
3335 if (page_free_p(page
)) {
3336 /* The whole page should be zero filled. */
3337 sword_t
*start_addr
= (sword_t
*)page_address(page
);
3338 sword_t size
= 1024;
3340 for (i
= 0; i
< size
; i
++) {
3341 if (start_addr
[i
] != 0) {
3342 lose("free page not zero at %x\n", start_addr
+ i
);
3346 sword_t free_bytes
= GENCGC_CARD_BYTES
- page_table
[page
].bytes_used
;
3347 if (free_bytes
> 0) {
3348 sword_t
*start_addr
= (sword_t
*)((uword_t
)page_address(page
)
3349 + page_table
[page
].bytes_used
);
3350 sword_t size
= free_bytes
/ N_WORD_BYTES
;
3352 for (i
= 0; i
< size
; i
++) {
3353 if (start_addr
[i
] != 0) {
3354 lose("free region not zero at %x\n", start_addr
+ i
);
3362 /* External entry point for verify_zero_fill */
3364 gencgc_verify_zero_fill(void)
3366 /* Flush the alloc regions updating the tables. */
3367 gc_alloc_update_all_page_tables();
3368 SHOW("verifying zero fill");
3373 verify_dynamic_space(void)
3375 generation_index_t i
;
3377 for (i
= 0; i
<= HIGHEST_NORMAL_GENERATION
; i
++)
3378 verify_generation(i
);
3380 if (gencgc_enable_verify_zero_fill
)
3384 /* Write-protect all the dynamic boxed pages in the given generation. */
3386 write_protect_generation_pages(generation_index_t generation
)
3390 gc_assert(generation
< SCRATCH_GENERATION
);
3392 for (start
= 0; start
< last_free_page
; start
++) {
3393 if (protect_page_p(start
, generation
)) {
3397 /* Note the page as protected in the page tables. */
3398 page_table
[start
].write_protected
= 1;
3400 for (last
= start
+ 1; last
< last_free_page
; last
++) {
3401 if (!protect_page_p(last
, generation
))
3403 page_table
[last
].write_protected
= 1;
3406 page_start
= (void *)page_address(start
);
3408 os_protect(page_start
,
3409 npage_bytes(last
- start
),
3410 OS_VM_PROT_READ
| OS_VM_PROT_EXECUTE
);
3416 if (gencgc_verbose
> 1) {
3418 "/write protected %d of %d pages in generation %d\n",
3419 count_write_protect_generation_pages(generation
),
3420 count_generation_pages(generation
),
3425 #if defined(LISP_FEATURE_SB_THREAD) && (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
3427 preserve_context_registers (os_context_t
*c
)
3430 /* On Darwin the signal context isn't a contiguous block of memory,
3431 * so just calling preserve_pointer() on its contents won't be sufficient.
3433 #if defined(LISP_FEATURE_DARWIN)||defined(LISP_FEATURE_WIN32)
3434 #if defined LISP_FEATURE_X86
3435 preserve_pointer((void*)*os_context_register_addr(c
,reg_EAX
));
3436 preserve_pointer((void*)*os_context_register_addr(c
,reg_ECX
));
3437 preserve_pointer((void*)*os_context_register_addr(c
,reg_EDX
));
3438 preserve_pointer((void*)*os_context_register_addr(c
,reg_EBX
));
3439 preserve_pointer((void*)*os_context_register_addr(c
,reg_ESI
));
3440 preserve_pointer((void*)*os_context_register_addr(c
,reg_EDI
));
3441 preserve_pointer((void*)*os_context_pc_addr(c
));
3442 #elif defined LISP_FEATURE_X86_64
3443 preserve_pointer((void*)*os_context_register_addr(c
,reg_RAX
));
3444 preserve_pointer((void*)*os_context_register_addr(c
,reg_RCX
));
3445 preserve_pointer((void*)*os_context_register_addr(c
,reg_RDX
));
3446 preserve_pointer((void*)*os_context_register_addr(c
,reg_RBX
));
3447 preserve_pointer((void*)*os_context_register_addr(c
,reg_RSI
));
3448 preserve_pointer((void*)*os_context_register_addr(c
,reg_RDI
));
3449 preserve_pointer((void*)*os_context_register_addr(c
,reg_R8
));
3450 preserve_pointer((void*)*os_context_register_addr(c
,reg_R9
));
3451 preserve_pointer((void*)*os_context_register_addr(c
,reg_R10
));
3452 preserve_pointer((void*)*os_context_register_addr(c
,reg_R11
));
3453 preserve_pointer((void*)*os_context_register_addr(c
,reg_R12
));
3454 preserve_pointer((void*)*os_context_register_addr(c
,reg_R13
));
3455 preserve_pointer((void*)*os_context_register_addr(c
,reg_R14
));
3456 preserve_pointer((void*)*os_context_register_addr(c
,reg_R15
));
3457 preserve_pointer((void*)*os_context_pc_addr(c
));
3459 #error "preserve_context_registers needs to be tweaked for non-x86 Darwin"
3462 #if !defined(LISP_FEATURE_WIN32)
3463 for(ptr
= ((void **)(c
+1))-1; ptr
>=(void **)c
; ptr
--) {
3464 preserve_pointer(*ptr
);
static void
move_pinned_pages_to_newspace()
{
    page_index_t i;

    /* scavenge() will evacuate all oldspace pages, but no newspace
     * pages.  Pinned pages are precisely those pages which must not
     * be evacuated, so move them to newspace directly. */

    for (i = 0; i < last_free_page; i++) {
        if (page_table[i].dont_move &&
            /* dont_move is cleared lazily, so validate the space as well. */
            page_table[i].gen == from_space) {
            if (pinned_dwords(i) && do_wipe_p) {
                // do not move to newspace after all, this will be word-wiped
                continue;
            }
            page_table[i].gen = new_space;
            /* And since we're moving the pages wholesale, also adjust
             * the generation allocation counters. */
            generations[new_space].bytes_allocated += page_table[i].bytes_used;
            generations[from_space].bytes_allocated -= page_table[i].bytes_used;
        }
    }
}
3496 /* Garbage collect a generation. If raise is 0 then the remains of the
3497 * generation are not raised to the next generation. */
3499 garbage_collect_generation(generation_index_t generation
, int raise
)
3502 uword_t static_space_size
;
3505 gc_assert(generation
<= HIGHEST_NORMAL_GENERATION
);
3507 /* The oldest generation can't be raised. */
3508 gc_assert((generation
!= HIGHEST_NORMAL_GENERATION
) || (raise
== 0));
3510 /* Check if weak hash tables were processed in the previous GC. */
3511 gc_assert(weak_hash_tables
== NULL
);
3513 /* Initialize the weak pointer list. */
3514 weak_pointers
= NULL
;
3516 /* When a generation is not being raised it is transported to a
3517 * temporary generation (NUM_GENERATIONS), and lowered when
3518 * done. Set up this new generation. There should be no pages
3519 * allocated to it yet. */
3521 gc_assert(generations
[SCRATCH_GENERATION
].bytes_allocated
== 0);
3524 /* Set the global src and dest. generations */
3525 from_space
= generation
;
3527 new_space
= generation
+1;
3529 new_space
= SCRATCH_GENERATION
;
3531 /* Change to a new space for allocation, resetting the alloc_start_page */
3532 gc_alloc_generation
= new_space
;
3533 generations
[new_space
].alloc_start_page
= 0;
3534 generations
[new_space
].alloc_unboxed_start_page
= 0;
3535 generations
[new_space
].alloc_large_start_page
= 0;
3536 generations
[new_space
].alloc_large_unboxed_start_page
= 0;
3538 /* Before any pointers are preserved, the dont_move flags on the
3539 * pages need to be cleared. */
3540 for (i
= 0; i
< last_free_page
; i
++)
3541 if(page_table
[i
].gen
==from_space
) {
3542 page_table
[i
].dont_move
= 0;
3543 gc_assert(pinned_dwords(i
) == NULL
);
3546 /* Un-write-protect the old-space pages. This is essential for the
3547 * promoted pages as they may contain pointers into the old-space
3548 * which need to be scavenged. It also helps avoid unnecessary page
3549 * faults as forwarding pointers are written into them. They need to
3550 * be un-protected anyway before unmapping later. */
3551 unprotect_oldspace();
3553 /* Scavenge the stacks' conservative roots. */
3555 /* there are potentially two stacks for each thread: the main
3556 * stack, which may contain Lisp pointers, and the alternate stack.
3557 * We don't ever run Lisp code on the altstack, but it may
3558 * host a sigcontext with lisp objects in it */
3560 /* what we need to do: (1) find the stack pointer for the main
3561 * stack; scavenge it (2) find the interrupt context on the
3562 * alternate stack that might contain lisp values, and scavenge
3565 /* we assume that none of the preceding applies to the thread that
3566 * initiates GC. If you ever call GC from inside an altstack
3567 * handler, you will lose. */
3569 #if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
3570 /* And if we're saving a core, there's no point in being conservative. */
3571 if (conservative_stack
) {
3572 for_each_thread(th
) {
3574 void **esp
=(void **)-1;
3575 if (th
->state
== STATE_DEAD
)
3577 # if defined(LISP_FEATURE_SB_SAFEPOINT)
3578 /* Conservative collect_garbage is always invoked with a
3579 * foreign C call or an interrupt handler on top of every
3580 * existing thread, so the stored SP in each thread
3581 * structure is valid, no matter which thread we are looking
3582 * at. For threads that were running Lisp code, the pitstop
3583 * and edge functions maintain this value within the
3584 * interrupt or exception handler. */
3585 esp
= os_get_csp(th
);
3586 assert_on_stack(th
, esp
);
3588 /* In addition to pointers on the stack, also preserve the
3589 * return PC, the only value from the context that we need
3590 * in addition to the SP. The return PC gets saved by the
3591 * foreign call wrapper, and removed from the control stack
3592 * into a register. */
3593 preserve_pointer(th
->pc_around_foreign_call
);
3595 /* And on platforms with interrupts: scavenge ctx registers. */
3597 /* Disabled on Windows, because it does not have an explicit
3598 * stack of `interrupt_contexts'. The reported CSP has been
3599 * chosen so that the current context on the stack is
3600 * covered by the stack scan. See also set_csp_from_context(). */
3601 # ifndef LISP_FEATURE_WIN32
3602 if (th
!= arch_os_get_current_thread()) {
3603 long k
= fixnum_value(
3604 SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX
,th
));
3606 preserve_context_registers(th
->interrupt_contexts
[--k
]);
3609 # elif defined(LISP_FEATURE_SB_THREAD)
3611 if(th
==arch_os_get_current_thread()) {
3612 /* Somebody is going to burn in hell for this, but casting
3613 * it in two steps shuts gcc up about strict aliasing. */
3614 esp
= (void **)((void *)&raise
);
3617 free
=fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX
,th
));
3618 for(i
=free
-1;i
>=0;i
--) {
3619 os_context_t
*c
=th
->interrupt_contexts
[i
];
3620 esp1
= (void **) *os_context_register_addr(c
,reg_SP
);
3621 if (esp1
>=(void **)th
->control_stack_start
&&
3622 esp1
<(void **)th
->control_stack_end
) {
3623 if(esp1
<esp
) esp
=esp1
;
3624 preserve_context_registers(c
);
3629 esp
= (void **)((void *)&raise
);
3631 if (!esp
|| esp
== (void*) -1)
3632 lose("garbage_collect: no SP known for thread %x (OS %x)",
3634 for (ptr
= ((void **)th
->control_stack_end
)-1; ptr
>= esp
; ptr
--) {
3635 preserve_pointer(*ptr
);
3640 /* Non-x86oid systems don't have "conservative roots" as such, but
3641 * the same mechanism is used for objects pinned for use by alien
3643 for_each_thread(th
) {
3644 lispobj pin_list
= SymbolTlValue(PINNED_OBJECTS
,th
);
3645 while (pin_list
!= NIL
) {
3646 struct cons
*list_entry
=
3647 (struct cons
*)native_pointer(pin_list
);
3648 preserve_pointer(list_entry
->car
);
3649 pin_list
= list_entry
->cdr
;
    if (gencgc_verbose > 1) {
        sword_t num_dont_move_pages = count_dont_move_pages();
        fprintf(stderr,
                "/non-movable pages due to conservative pointers = %ld (%lu bytes)\n",
                num_dont_move_pages,
                npage_bytes(num_dont_move_pages));
    }
    /* Now that all of the pinned (dont_move) pages are known, and
     * before we start to scavenge (and thus relocate) objects,
     * relocate the pinned pages to newspace, so that the scavenger
     * will not attempt to relocate their contents. */
    move_pinned_pages_to_newspace();
    /* Scavenge all the rest of the roots. */

#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
    /*
     * If not x86, we need to scavenge the interrupt context(s) and the
     * control stack.
     */
    {
        struct thread *th;
        for_each_thread(th) {
            scavenge_interrupt_contexts(th);
            scavenge_control_stack(th);
        }

# ifdef LISP_FEATURE_SB_SAFEPOINT
        /* In this case, scrub all stacks right here from the GCing thread
         * instead of doing what the comment below says. Suboptimal, but
         * easier. */
        for_each_thread(th)
            scrub_thread_control_stack(th);
# else
        /* Scrub the unscavenged control stack space, so that we can't run
         * into any stale pointers in a later GC (this is done by the
         * stop-for-gc handler in the other threads). */
        scrub_control_stack();
# endif
    }
#endif
    /* Scavenge the Lisp functions of the interrupt handlers, taking
     * care to avoid SIG_DFL and SIG_IGN. */
    for (i = 0; i < NSIG; i++) {
        union interrupt_handler handler = interrupt_handlers[i];
        if (!ARE_SAME_HANDLER(handler.c, SIG_IGN) &&
            !ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
            scavenge((lispobj *)(interrupt_handlers + i), 1);
        }
    }
    /* Scavenge the binding stacks. */
    {
        struct thread *th;
        for_each_thread(th) {
            sword_t len = (lispobj *)get_binding_stack_pointer(th) -
                th->binding_stack_start;
            scavenge((lispobj *) th->binding_stack_start,len);
#ifdef LISP_FEATURE_SB_THREAD
            /* do the tls as well */
            len = (SymbolValue(FREE_TLS_INDEX,0) >> WORD_SHIFT) -
                (sizeof (struct thread))/(sizeof (lispobj));
            scavenge((lispobj *) (th+1),len);
#endif
        }
    }
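    /* Note on the TLS length calculation above: FREE_TLS_INDEX, shifted
     * down to a word count, covers the thread structure plus the TLS
     * slots in use; subtracting the size of struct thread (in words)
     * leaves just the dynamically bound TLS area, which begins right
     * after the structure -- hence the scavenge starting at (th+1). */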
    /* The original CMU CL code had scavenge-read-only-space code
     * controlled by the Lisp-level variable
     * *SCAVENGE-READ-ONLY-SPACE*. It was disabled by default, and it
     * wasn't documented under what circumstances it was useful or
     * safe to turn it on, so it's been turned off in SBCL. If you
     * want/need this functionality, and can test and document it,
     * please submit a patch. */
#if 0
    if (SymbolValue(SCAVENGE_READ_ONLY_SPACE) != NIL) {
        uword_t read_only_space_size =
            (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER) -
            (lispobj*)READ_ONLY_SPACE_START;
        FSHOW((stderr,
               "/scavenge read only space: %d bytes\n",
               read_only_space_size * sizeof(lispobj)));
        scavenge( (lispobj *) READ_ONLY_SPACE_START, read_only_space_size);
    }
#endif
    /* Scavenge static space. */
    static_space_size =
        (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0) -
        (lispobj *)STATIC_SPACE_START;
    if (gencgc_verbose > 1) {
        FSHOW((stderr,
               "/scavenge static space: %d bytes\n",
               static_space_size * sizeof(lispobj)));
    }
    scavenge( (lispobj *) STATIC_SPACE_START, static_space_size);
    /* All generations but the generation being GCed need to be
     * scavenged. The new_space generation needs special handling as
     * objects may be moved in - it is handled separately below. */
    scavenge_generations(generation+1, PSEUDO_STATIC_GENERATION);

    scavenge_pinned_ranges();

    /* Finally scavenge the new_space generation. Keep going until no
     * more objects are moved into the new generation */
    scavenge_newspace_generation(new_space);
    /* FIXME: I tried reenabling this check when debugging unrelated
     * GC weirdness ca. sbcl-0.6.12.45, and it failed immediately.
     * Since the current GC code seems to work well, I'm guessing that
     * this debugging code is just stale, but I haven't tried to
     * figure it out. It should be figured out and then either made to
     * work or just deleted. */

#define RESCAN_CHECK 0
#if RESCAN_CHECK
    /* As a check re-scavenge the newspace once; no new objects should
     * be found. */
    {
        os_vm_size_t old_bytes_allocated = bytes_allocated;
        os_vm_size_t bytes_allocated;

        /* Start with a full scavenge. */
        scavenge_newspace_generation_one_scan(new_space);

        /* Flush the current regions, updating the tables. */
        gc_alloc_update_all_page_tables();

        bytes_allocated = bytes_allocated - old_bytes_allocated;

        if (bytes_allocated != 0) {
            lose("Rescan of new_space allocated %d more bytes.\n",
                 bytes_allocated);
        }
    }
#endif
    scan_weak_hash_tables();
    scan_weak_pointers();
    wipe_nonpinned_words();

    /* Flush the current regions, updating the tables. */
    gc_alloc_update_all_page_tables();
    /* Free the pages in oldspace, but not those marked dont_move. */
    free_oldspace();

    /* If the GC is not raising the age then lower the generation back
     * to its normal generation number */
    if (!raise) {
        for (i = 0; i < last_free_page; i++)
            if ((page_table[i].bytes_used != 0)
                && (page_table[i].gen == SCRATCH_GENERATION))
                page_table[i].gen = generation;
        gc_assert(generations[generation].bytes_allocated == 0);
        generations[generation].bytes_allocated =
            generations[SCRATCH_GENERATION].bytes_allocated;
        generations[SCRATCH_GENERATION].bytes_allocated = 0;
    }
    /* Reset the alloc_start_page for generation. */
    generations[generation].alloc_start_page = 0;
    generations[generation].alloc_unboxed_start_page = 0;
    generations[generation].alloc_large_start_page = 0;
    generations[generation].alloc_large_unboxed_start_page = 0;

    if (generation >= verify_gens) {
        if (gencgc_verbose) {
            SHOW("verifying");
        }
        verify_gc();
        verify_dynamic_space();
    }
    /* Set the new gc trigger for the GCed generation. */
    generations[generation].gc_trigger =
        generations[generation].bytes_allocated
        + generations[generation].bytes_consed_between_gc;

    if (raise)
        generations[generation].num_gc = 0;
    else
        ++generations[generation].num_gc;
}
/* Update last_free_page, then SymbolValue(ALLOCATION_POINTER). */
sword_t
update_dynamic_space_free_pointer(void)
{
    page_index_t last_page = -1, i;

    for (i = 0; i < last_free_page; i++)
        if (page_allocated_p(i) && (page_table[i].bytes_used != 0))
            last_page = i;

    last_free_page = last_page+1;

    set_alloc_pointer((lispobj)(page_address(last_free_page)));
    return 0; /* dummy value: return something ... */
}
static void
remap_page_range (page_index_t from, page_index_t to)
{
    /* There's a mysterious Solaris/x86 problem with using mmap
     * tricks for memory zeroing. See sbcl-devel thread
     * "Re: patch: standalone executable redux".
     */
#if defined(LISP_FEATURE_SUNOS)
    zero_and_mark_pages(from, to);
#else
    const page_index_t
            release_granularity = gencgc_release_granularity/GENCGC_CARD_BYTES,
                   release_mask = release_granularity-1,
                            end = to+1,
                   aligned_from = (from+release_mask)&~release_mask,
                    aligned_end = (end&~release_mask);

    if (aligned_from < aligned_end) {
        zero_pages_with_mmap(aligned_from, aligned_end-1);
        if (aligned_from != from)
            zero_and_mark_pages(from, aligned_from-1);
        if (aligned_end != end)
            zero_and_mark_pages(aligned_end, end-1);
    } else {
        zero_and_mark_pages(from, to);
    }
#endif
}
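/* Illustrative arithmetic for the alignment above (values assumed, not
 * taken from any particular build): with GENCGC_CARD_BYTES == 4096 and
 * gencgc_release_granularity == 65536, release_granularity is 16 pages
 * and release_mask is 15. For from = 5, to = 40 we get end = 41,
 * aligned_from = 16 and aligned_end = 32, so pages 16..31 are returned
 * to the OS via zero_pages_with_mmap() while the unaligned head 5..15
 * and tail 32..40 fall back to zero_and_mark_pages(). */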
static void
remap_free_pages (page_index_t from, page_index_t to, int forcibly)
{
    page_index_t first_page, last_page;

    if (forcibly)
        return remap_page_range(from, to);

    for (first_page = from; first_page <= to; first_page++) {
        if (page_allocated_p(first_page) ||
            (page_table[first_page].need_to_zero == 0))
            continue;

        last_page = first_page + 1;
        while (page_free_p(last_page) &&
               (last_page <= to) &&
               (page_table[last_page].need_to_zero == 1))
            last_page++;

        remap_page_range(first_page, last_page-1);

        first_page = last_page;
    }
}
generation_index_t small_generation_limit = 1;

/* GC all generations newer than last_gen, raising the objects in each
 * to the next older generation - we finish when all generations below
 * last_gen are empty. Then if last_gen is due for a GC, or if
 * last_gen==NUM_GENERATIONS (the scratch generation? eh?) we GC that
 * too. The valid range for last_gen is: 0,1,...,NUM_GENERATIONS.
 *
 * We stop collecting at gencgc_oldest_gen_to_gc, even if this is less than
 * last_gen (oh, and note that by default it is NUM_GENERATIONS-1) */
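/* For example (based only on the behavior visible in this file):
 * collect_garbage(0) asks for just the nursery (older generations are
 * still collected if their own triggers fire), while gc_and_save()
 * below calls collect_garbage(HIGHEST_NORMAL_GENERATION+1) to force a
 * full collection; anything larger falls back to a level 0 GC via the
 * check at the top of the function. */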
void
collect_garbage(generation_index_t last_gen)
{
    generation_index_t gen = 0, i;
    int raise, more = 0;
    int gen_to_wp;
    /* The largest value of last_free_page seen since the time
     * remap_free_pages was called. */
    static page_index_t high_water_mark = 0;

    FSHOW((stderr, "/entering collect_garbage(%d)\n", last_gen));
    log_generation_stats(gc_logfile, "=== GC Start ===");

    if (last_gen > HIGHEST_NORMAL_GENERATION+1) {
        FSHOW((stderr,
               "/collect_garbage: last_gen = %d, doing a level 0 GC\n",
               last_gen));
        last_gen = 0;
    }

    /* Flush the alloc regions updating the tables. */
    gc_alloc_update_all_page_tables();

    /* Verify the new objects created by Lisp code. */
    if (pre_verify_gen_0) {
        FSHOW((stderr, "pre-checking generation 0\n"));
        verify_generation(0);
    }

    if (gencgc_verbose > 1)
        print_generation_stats();
    do {
        /* Collect the generation. */

        if (more || (gen >= gencgc_oldest_gen_to_gc)) {
            /* Never raise the oldest generation. Never raise the extra generation
             * collected due to more-flag. */
            raise = 0;
            more = 0;
        } else {
            raise =
                (gen < last_gen)
                || (generations[gen].num_gc >= generations[gen].number_of_gcs_before_promotion);
            /* If we would not normally raise this one, but we're
             * running low on space in comparison to the object-sizes
             * we've been seeing, raise it and collect the next one
             * too. */
            if (!raise && gen == last_gen) {
                more = (2*large_allocation) >= (dynamic_space_size - bytes_allocated);
                raise = more;
            }
        }

        if (gencgc_verbose > 1) {
            FSHOW((stderr,
                   "starting GC of generation %d with raise=%d alloc=%d trig=%d GCs=%d\n",
                   gen,
                   raise,
                   generations[gen].bytes_allocated,
                   generations[gen].gc_trigger,
                   generations[gen].num_gc));
        }
        /* If an older generation is being filled, then update its
         * memory age. */
        if (raise == 1) {
            generations[gen+1].cum_sum_bytes_allocated +=
                generations[gen+1].bytes_allocated;
        }

        garbage_collect_generation(gen, raise);

        /* Reset the memory age cum_sum. */
        generations[gen].cum_sum_bytes_allocated = 0;

        if (gencgc_verbose > 1) {
            FSHOW((stderr, "GC of generation %d finished:\n", gen));
            print_generation_stats();
        }

        gen++;
    } while ((gen <= gencgc_oldest_gen_to_gc)
             && ((gen < last_gen)
                 || more
                 || (raise
                     && (generations[gen].bytes_allocated
                         > generations[gen].gc_trigger)
                     && (generation_average_age(gen)
                         > generations[gen].minimum_age_before_gc))));
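    /* Informally: after collecting gen-1, the loop goes on to collect gen
     * itself only if gen was explicitly requested, if the "more" flag
     * forced an extra collection, or if gen was raised into, has exceeded
     * its gc_trigger, and its surviving data is old enough
     * (generation_average_age() above minimum_age_before_gc, 0.75 by
     * default per gc_init() below). */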
    /* Now if gen-1 was raised all generations before gen are empty.
     * If it wasn't raised then all generations before gen-1 are empty.
     *
     * Now objects within this gen's pages cannot point to younger
     * generations unless they are written to. This can be exploited
     * by write-protecting the pages of gen; then when younger
     * generations are GCed only the pages which have been written
     * need scanning. */
    if (raise)
        gen_to_wp = gen;
    else
        gen_to_wp = gen - 1;

    /* There's not much point in WPing pages in generation 0 as it is
     * never scavenged (except promoted pages). */
    if ((gen_to_wp > 0) && enable_page_protection) {
        /* Check that they are all empty. */
        for (i = 0; i < gen_to_wp; i++) {
            if (generations[i].bytes_allocated)
                lose("trying to write-protect gen. %d when gen. %d nonempty\n",
                     gen_to_wp, i);
        }
        write_protect_generation_pages(gen_to_wp);
    }
    /* Set gc_alloc() back to generation 0. The current regions should
     * be flushed after the above GCs. */
    gc_assert((boxed_region.free_pointer - boxed_region.start_addr) == 0);
    gc_alloc_generation = 0;
    /* Save the high-water mark before updating last_free_page */
    if (last_free_page > high_water_mark)
        high_water_mark = last_free_page;

    update_dynamic_space_free_pointer();

    /* Update auto_gc_trigger. Make sure we trigger the next GC before
     * running out of heap! */
    if (bytes_consed_between_gcs <= (dynamic_space_size - bytes_allocated))
        auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
    else
        auto_gc_trigger = bytes_allocated + (dynamic_space_size - bytes_allocated)/2;

    if (gencgc_verbose)
        fprintf(stderr,"Next gc when %"OS_VM_SIZE_FMT" bytes have been consed\n",
                auto_gc_trigger);
    /* If we did a big GC (arbitrarily defined as gen > 1), release memory
     * to the OS.
     */
    if (gen > small_generation_limit) {
        if (last_free_page > high_water_mark)
            high_water_mark = last_free_page;
        remap_free_pages(0, high_water_mark, 0);
        high_water_mark = 0;
    }

    large_allocation = 0;

    log_generation_stats(gc_logfile, "=== GC End ===");
    SHOW("returning from collect_garbage");
}
/* This is called by Lisp PURIFY when it is finished. All live objects
 * will have been moved to the RO and Static heaps. The dynamic space
 * will need a full re-initialization. We don't bother having Lisp
 * PURIFY flush the current gc_alloc() region, as the page_tables are
 * re-initialized, and every page is zeroed to be sure. */
void
gc_free_heap(void)
{
    page_index_t page, last_page;
    if (gencgc_verbose > 1) {
        SHOW("entering gc_free_heap");
    }

    for (page = 0; page < page_table_pages; page++) {
        /* Skip free pages which should already be zero filled. */
        if (page_allocated_p(page)) {
            void *page_start;
            for (last_page = page;
                 (last_page < page_table_pages) && page_allocated_p(last_page);
                 last_page++) {
                /* Mark the page free. The other slots are assumed invalid
                 * when it is a FREE_PAGE_FLAG and bytes_used is 0 and it
                 * should not be write-protected -- except that the
                 * generation is used for the current region but it sets
                 * that to 0. */
                page_table[page].allocated = FREE_PAGE_FLAG;
                page_table[page].bytes_used = 0;
                page_table[page].write_protected = 0;
            }

#ifndef LISP_FEATURE_WIN32 /* Pages already zeroed on win32? Not sure
                            * about this change. */
            page_start = (void *)page_address(page);
            os_protect(page_start, npage_bytes(last_page-page), OS_VM_PROT_ALL);
            remap_free_pages(page, last_page-1, 1);
            page = last_page-1;
#endif
        } else if (gencgc_zero_check_during_free_heap) {
            /* Double-check that the page is zero filled. */
            sword_t *page_start;
            page_index_t i;
            gc_assert(page_free_p(page));
            gc_assert(page_table[page].bytes_used == 0);
            page_start = (sword_t *)page_address(page);
            for (i=0; i<(long)(GENCGC_CARD_BYTES/sizeof(sword_t)); i++) {
                if (page_start[i] != 0) {
                    lose("free region not zero at %x\n", page_start + i);
                }
            }
        }
    }
    bytes_allocated = 0;

    /* Initialize the generations. */
    for (page = 0; page < NUM_GENERATIONS; page++) {
        generations[page].alloc_start_page = 0;
        generations[page].alloc_unboxed_start_page = 0;
        generations[page].alloc_large_start_page = 0;
        generations[page].alloc_large_unboxed_start_page = 0;
        generations[page].bytes_allocated = 0;
        generations[page].gc_trigger = 2000000;
        generations[page].num_gc = 0;
        generations[page].cum_sum_bytes_allocated = 0;
    }

    if (gencgc_verbose > 1)
        print_generation_stats();
    /* Initialize gc_alloc(). */
    gc_alloc_generation = 0;

    gc_set_region_empty(&boxed_region);
    gc_set_region_empty(&unboxed_region);

    set_alloc_pointer((lispobj)((char *)heap_base));

    if (verify_after_free_heap) {
        /* Check whether purify has left any bad pointers. */
        FSHOW((stderr, "checking after free_heap\n"));
        verify_gc();
    }
}
void
gc_init(void)
{
    page_index_t i;

#if defined(LISP_FEATURE_SB_SAFEPOINT)
    alloc_gc_page();
#endif

    /* Compute the number of pages needed for the dynamic space.
     * Dynamic space size should be aligned on page size. */
    page_table_pages = dynamic_space_size/GENCGC_CARD_BYTES;
    gc_assert(dynamic_space_size == npage_bytes(page_table_pages));

    /* Default nursery size to 5% of the total dynamic space size,
     * min 1Mb. */
    bytes_consed_between_gcs = dynamic_space_size/(os_vm_size_t)20;
    if (bytes_consed_between_gcs < (1024*1024))
        bytes_consed_between_gcs = 1024*1024;
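    /* E.g. a 512MB dynamic space gives a default nursery of ~25MB
     * (512/20), while anything that computes to less than 1MB is clamped
     * to the 1MB floor above. */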
    /* The page_table must be allocated using "calloc" to initialize
     * the page structures correctly. There used to be a separate
     * initialization loop (now commented out; see below) but that was
     * unnecessary and did hurt startup time. */
    page_table = calloc(page_table_pages, sizeof(struct page));
    gc_assert(page_table);
    size_t pins_map_size_in_bytes =
        (n_dwords_in_card / N_WORD_BITS) * sizeof (uword_t) * page_table_pages;
    /* We use mmap directly here so that we can use a minimum of
       system calls per page during GC.
       All we need here now is a madvise(DONTNEED) at the end of GC. */
    page_table_pinned_dwords
        = (in_use_marker_t*)os_validate(NULL, pins_map_size_in_bytes);
    /* We do not need to zero */
    gc_assert(page_table_pinned_dwords);

    gc_init_tables();
    scavtab[WEAK_POINTER_WIDETAG] = scav_weak_pointer;
    transother[SIMPLE_ARRAY_WIDETAG] = trans_boxed_large;

    heap_base = (void*)DYNAMIC_SPACE_START;
    /* The page structures are initialized implicitly when page_table
     * is allocated with "calloc" above. Formerly we had the following
     * explicit initialization here (comments converted to C99 style
     * for readability as C's block comments don't nest):
     *
     * // Initialize each page structure.
     * for (i = 0; i < page_table_pages; i++) {
     *     // Initialize all pages as free.
     *     page_table[i].allocated = FREE_PAGE_FLAG;
     *     page_table[i].bytes_used = 0;
     *
     *     // Pages are not write-protected at startup.
     *     page_table[i].write_protected = 0;
     * }
     *
     * Without this loop the image starts up much faster when dynamic
     * space is large -- which it is on 64-bit platforms already by
     * default -- and when "calloc" for large arrays is implemented
     * using copy-on-write of a page of zeroes -- which it is at least
     * on Linux. In this case the pages that page_table is stored in
     * are mapped and cleared only once the corresponding part of
     * dynamic space is used. For example, this saves clearing 16 MB of
     * memory at startup if the page size is 4 KB and the size of
     * dynamic space is 4 GB.
     * FREE_PAGE_FLAG must be 0 for this to work correctly which is
     * asserted below: */
    {
        /* Compile time assertion: If triggered, declares an array
         * of dimension -1 forcing a syntax error. The intent of the
         * assignment is to avoid an "unused variable" warning. */
        char assert_free_page_flag_0[(FREE_PAGE_FLAG) ? -1 : 1];
        assert_free_page_flag_0[0] = assert_free_page_flag_0[0];
    }
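    /* The 16 MB figure in the comment above follows from the arithmetic:
     * a 4 GB dynamic space at 4 KB per card needs 2^20 page structures,
     * and at roughly 16 bytes per struct page (the exact size depends on
     * the build) that is about 16 MB of page_table left untouched as
     * copy-on-write zero pages until first use. */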
    bytes_allocated = 0;

    /* Initialize the generations.
     *
     * FIXME: very similar to code in gc_free_heap(), should be shared */
    for (i = 0; i < NUM_GENERATIONS; i++) {
        generations[i].alloc_start_page = 0;
        generations[i].alloc_unboxed_start_page = 0;
        generations[i].alloc_large_start_page = 0;
        generations[i].alloc_large_unboxed_start_page = 0;
        generations[i].bytes_allocated = 0;
        generations[i].gc_trigger = 2000000;
        generations[i].num_gc = 0;
        generations[i].cum_sum_bytes_allocated = 0;
        /* the tune-able parameters */
        generations[i].bytes_consed_between_gc
            = bytes_consed_between_gcs/(os_vm_size_t)HIGHEST_NORMAL_GENERATION;
        generations[i].number_of_gcs_before_promotion = 1;
        generations[i].minimum_age_before_gc = 0.75;
    }
    /* Initialize gc_alloc. */
    gc_alloc_generation = 0;
    gc_set_region_empty(&boxed_region);
    gc_set_region_empty(&unboxed_region);
}
/* Pick up the dynamic space from after a core load.
 *
 * The ALLOCATION_POINTER points to the end of the dynamic space.
 */
static void
gencgc_pickup_dynamic(void)
{
    page_index_t page = 0;
    void *alloc_ptr = (void *)get_alloc_pointer();
    lispobj *prev = (lispobj *)page_address(page);
    generation_index_t gen = PSEUDO_STATIC_GENERATION;

    bytes_allocated = 0;

    do {
        lispobj *first, *ptr = (lispobj *)page_address(page);

        if (!gencgc_partial_pickup || page_allocated_p(page)) {
            /* It is possible, though rare, for the saved page table
             * to contain free pages below alloc_ptr. */
            page_table[page].gen = gen;
            page_table[page].bytes_used = GENCGC_CARD_BYTES;
            page_table[page].large_object = 0;
            page_table[page].write_protected = 0;
            page_table[page].write_protected_cleared = 0;
            page_table[page].dont_move = 0;
            page_table[page].need_to_zero = 1;

            bytes_allocated += GENCGC_CARD_BYTES;
        }

        if (!gencgc_partial_pickup) {
            page_table[page].allocated = BOXED_PAGE_FLAG;
            first = gc_search_space(prev,(ptr+2)-prev,ptr);
            if (ptr == first)
                prev = ptr;
            page_table[page].scan_start_offset =
                page_address(page) - (void *)prev;
        }
        page++;
    } while (page_address(page) < alloc_ptr);

    last_free_page = page;

    generations[gen].bytes_allocated = bytes_allocated;

    gc_alloc_update_all_page_tables();
    write_protect_generation_pages(gen);
}

void
gc_initialize_pointers(void)
{
    gencgc_pickup_dynamic();
}
/* alloc(..) is the external interface for memory allocation. It
 * allocates to generation 0. It is not called from within the garbage
 * collector as it is only external uses that need the check for heap
 * size (GC trigger) and to disable the interrupts (interrupts are
 * always disabled during a GC).
 *
 * The vops that call alloc(..) assume that the returned space is zero-filled.
 * (E.g. the most significant word of a 2-word bignum in MOVE-FROM-UNSIGNED.)
 *
 * The check for a GC trigger is only performed when the current
 * region is full, so in most cases it's not needed. */
static inline lispobj *
general_alloc_internal(sword_t nbytes, int page_type_flag, struct alloc_region *region,
                       struct thread *thread)
{
#ifndef LISP_FEATURE_WIN32
    lispobj alloc_signal;
#endif
    void *new_obj;
    void *new_free_pointer;
    os_vm_size_t trigger_bytes = 0;

    gc_assert(nbytes > 0);

    /* Check for alignment allocation problems. */
    gc_assert((((uword_t)region->free_pointer & LOWTAG_MASK) == 0)
              && ((nbytes & LOWTAG_MASK) == 0));

#if !(defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD))
    /* Must be inside a PA section. */
    gc_assert(get_pseudo_atomic_atomic(thread));
#endif

    if ((os_vm_size_t) nbytes > large_allocation)
        large_allocation = nbytes;

    /* maybe we can do this quickly ... */
    new_free_pointer = region->free_pointer + nbytes;
    if (new_free_pointer <= region->end_addr) {
        new_obj = (void*)(region->free_pointer);
        region->free_pointer = new_free_pointer;
        return(new_obj);        /* yup */
    }

    /* We don't want to count nbytes against auto_gc_trigger unless we
     * have to: it speeds up the tenuring of objects and slows down
     * allocation. However, unless we do so when allocating _very_
     * large objects we are in danger of exhausting the heap without
     * running sufficient GCs.
     */
    if ((os_vm_size_t) nbytes >= bytes_consed_between_gcs)
        trigger_bytes = nbytes;

    /* we have to go the long way around, it seems. Check whether we
     * should GC in the near future
     */
    if (auto_gc_trigger && (bytes_allocated+trigger_bytes > auto_gc_trigger)) {
        /* Don't flood the system with interrupts if the need to gc is
         * already noted. This can happen for example when SUB-GC
         * allocates or after a gc triggered in a WITHOUT-GCING. */
        if (SymbolValue(GC_PENDING,thread) == NIL) {
            /* set things up so that GC happens when we finish the PA
             * section */
            SetSymbolValue(GC_PENDING,T,thread);
            if (SymbolValue(GC_INHIBIT,thread) == NIL) {
#ifdef LISP_FEATURE_SB_SAFEPOINT
                thread_register_gc_trigger();
#else
                set_pseudo_atomic_interrupted(thread);
#ifdef GENCGC_IS_PRECISE
                /* PPC calls alloc() from a trap
                 * look up the most context if it's from a trap. */
                {
                    os_context_t *context =
                        thread->interrupt_data->allocation_trap_context;
                    maybe_save_gc_mask_and_block_deferrables
                        (context ? os_context_sigmask_addr(context) : NULL);
                }
#else
                maybe_save_gc_mask_and_block_deferrables(NULL);
#endif
#endif
            }
        }
    }
    new_obj = gc_alloc_with_region(nbytes, page_type_flag, region, 0);

#ifndef LISP_FEATURE_WIN32
    /* for sb-prof, and not supported on Windows yet */
    alloc_signal = SymbolValue(ALLOC_SIGNAL,thread);
    if ((alloc_signal & FIXNUM_TAG_MASK) == 0) {
        if ((sword_t) alloc_signal <= 0) {
            SetSymbolValue(ALLOC_SIGNAL, T, thread);
            raise(SIGPROF);
        } else {
            SetSymbolValue(ALLOC_SIGNAL,
                           alloc_signal - (1 << N_FIXNUM_TAG_BITS),
                           thread);
        }
    }
#endif

    return (new_obj);
}
lispobj *
general_alloc(sword_t nbytes, int page_type_flag)
{
    struct thread *thread = arch_os_get_current_thread();
    /* Select correct region, and call general_alloc_internal with it.
     * For other than boxed allocation we must lock first, since the
     * region is shared. */
    if (BOXED_PAGE_FLAG & page_type_flag) {
#ifdef LISP_FEATURE_SB_THREAD
        struct alloc_region *region = (thread ? &(thread->alloc_region) : &boxed_region);
#else
        struct alloc_region *region = &boxed_region;
#endif
        return general_alloc_internal(nbytes, page_type_flag, region, thread);
    } else if (UNBOXED_PAGE_FLAG == page_type_flag) {
        lispobj *obj;
        gc_assert(0 == thread_mutex_lock(&allocation_lock));
        obj = general_alloc_internal(nbytes, page_type_flag, &unboxed_region, thread);
        gc_assert(0 == thread_mutex_unlock(&allocation_lock));
        return obj;
    } else {
        lose("bad page type flag: %d", page_type_flag);
    }
}
lispobj AMD64_SYSV_ABI *
alloc(sword_t nbytes)
{
#ifdef LISP_FEATURE_SB_SAFEPOINT_STRICTLY
    struct thread *self = arch_os_get_current_thread();
    int was_pseudo_atomic = get_pseudo_atomic_atomic(self);
    if (!was_pseudo_atomic)
        set_pseudo_atomic_atomic(self);
#else
    gc_assert(get_pseudo_atomic_atomic(arch_os_get_current_thread()));
#endif

    lispobj *result = general_alloc(nbytes, BOXED_PAGE_FLAG);

#ifdef LISP_FEATURE_SB_SAFEPOINT_STRICTLY
    if (!was_pseudo_atomic)
        clear_pseudo_atomic_atomic(self);
#endif

    return result;
}
/*
 * shared support for the OS-dependent signal handlers which
 * catch GENCGC-related write-protect violations
 */

void unhandled_sigmemoryfault(void* addr);

/* Depending on which OS we're running under, different signals might
 * be raised for a violation of write protection in the heap. This
 * function factors out the common generational GC magic which needs
 * to be invoked in this case, and should be called from whatever signal
 * handler is appropriate for the OS we're running under.
 *
 * Return true if this signal is a normal generational GC thing that
 * we were able to handle, or false if it was abnormal and control
 * should fall through to the general SIGSEGV/SIGBUS/whatever logic.
 *
 * We have two control flags for this: one causes us to ignore faults
 * on unprotected pages completely, and the second complains to stderr
 * but allows us to continue without losing.
 */
extern boolean ignore_memoryfaults_on_unprotected_pages;
boolean ignore_memoryfaults_on_unprotected_pages = 0;

extern boolean continue_after_memoryfault_on_unprotected_pages;
boolean continue_after_memoryfault_on_unprotected_pages = 0;
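/* A minimal sketch of the intended call pattern from an OS-specific
 * SIGSEGV/SIGBUS handler (the handler name and the way the faulting
 * address is obtained are assumptions for illustration, not part of
 * this file):
 *
 *   static void memory_fault_handler(int sig, siginfo_t *info, void *ctx)
 *   {
 *       if (gencgc_handle_wp_violation(info->si_addr))
 *           return;   // just the GC write barrier; page was unprotected
 *       // otherwise fall through to the general SIGSEGV/SIGBUS logic
 *   }
 */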
int
gencgc_handle_wp_violation(void* fault_addr)
{
    page_index_t page_index = find_page_index(fault_addr);

#if QSHOW_SIGNALS
    FSHOW((stderr,
           "heap WP violation? fault_addr=%p, page_index=%"PAGE_INDEX_FMT"\n",
           fault_addr, page_index));
#endif

    /* Check whether the fault is within the dynamic space. */
    if (page_index == (-1)) {

        /* It can be helpful to be able to put a breakpoint on this
         * case to help diagnose low-level problems. */
        unhandled_sigmemoryfault(fault_addr);

        /* not within the dynamic space -- not our responsibility */
        return 0;

    } else {
        int ret;
        ret = thread_mutex_lock(&free_pages_lock);
        gc_assert(ret == 0);
        if (page_table[page_index].write_protected) {
            /* Unprotect the page. */
            os_protect(page_address(page_index), GENCGC_CARD_BYTES, OS_VM_PROT_ALL);
            page_table[page_index].write_protected_cleared = 1;
            page_table[page_index].write_protected = 0;
        } else if (!ignore_memoryfaults_on_unprotected_pages) {
            /* The only acceptable reason for this signal on a heap
             * access is that GENCGC write-protected the page.
             * However, if two CPUs hit a wp page near-simultaneously,
             * we had better not have the second one lose here if it
             * does this test after the first one has already set wp=0
             */
            if (page_table[page_index].write_protected_cleared != 1) {
                void lisp_backtrace(int frames);
                lisp_backtrace(10);
                fprintf(stderr,
                        "Fault @ %p, page %"PAGE_INDEX_FMT" not marked as write-protected:\n"
                        " boxed_region.first_page: %"PAGE_INDEX_FMT","
                        " boxed_region.last_page %"PAGE_INDEX_FMT"\n"
                        " page.scan_start_offset: %"OS_VM_SIZE_FMT"\n"
                        " page.bytes_used: %"PAGE_BYTES_FMT"\n"
                        " page.allocated: %d\n"
                        " page.write_protected: %d\n"
                        " page.write_protected_cleared: %d\n"
                        " page.generation: %d\n",
                        fault_addr,
                        page_index,
                        boxed_region.first_page,
                        boxed_region.last_page,
                        page_table[page_index].scan_start_offset,
                        page_table[page_index].bytes_used,
                        page_table[page_index].allocated,
                        page_table[page_index].write_protected,
                        page_table[page_index].write_protected_cleared,
                        page_table[page_index].gen);
                if (!continue_after_memoryfault_on_unprotected_pages)
                    lose("Feh.\n");
            }
        }
        ret = thread_mutex_unlock(&free_pages_lock);
        gc_assert(ret == 0);
        /* Don't worry, we can handle it. */
        return 1;
    }
}
/* This is to be called when we catch a SIGSEGV/SIGBUS, determine that
 * it's not just a case of the program hitting the write barrier, and
 * are about to let Lisp deal with it. It's basically just a
 * convenient place to set a gdb breakpoint. */
void
unhandled_sigmemoryfault(void *addr)
{}
void gc_alloc_update_all_page_tables(void)
{
    /* Flush the alloc regions updating the tables. */
    struct thread *th;
    for_each_thread(th) {
        gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->alloc_region);
#if defined(LISP_FEATURE_SB_SAFEPOINT_STRICTLY) && !defined(LISP_FEATURE_WIN32)
        gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->sprof_alloc_region);
#endif
    }
    gc_alloc_update_page_tables(UNBOXED_PAGE_FLAG, &unboxed_region);
    gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &boxed_region);
}
void
gc_set_region_empty(struct alloc_region *region)
{
    region->first_page = 0;
    region->last_page = -1;
    region->start_addr = page_address(0);
    region->free_pointer = page_address(0);
    region->end_addr = page_address(0);
}
static void
zero_all_free_pages()
{
    page_index_t i;

    for (i = 0; i < last_free_page; i++) {
        if (page_free_p(i)) {
#ifdef READ_PROTECT_FREE_PAGES
            os_protect(page_address(i),
                       GENCGC_CARD_BYTES,
                       OS_VM_PROT_ALL);
#endif
            zero_pages(i, i);
        }
    }
}
/* Things to do before doing a final GC before saving a core (without
 * purify).
 *
 * + Pages in large_object pages aren't moved by the GC, so we need to
 *   unset that flag from all pages.
 * + The pseudo-static generation isn't normally collected, but it seems
 *   reasonable to collect it at least when saving a core. So move the
 *   pages to a normal generation.
 */
static void
prepare_for_final_gc ()
{
    page_index_t i;

    for (i = 0; i < last_free_page; i++) {
        page_table[i].large_object = 0;
        if (page_table[i].gen == PSEUDO_STATIC_GENERATION) {
            int used = page_table[i].bytes_used;
            page_table[i].gen = HIGHEST_NORMAL_GENERATION;
            generations[PSEUDO_STATIC_GENERATION].bytes_allocated -= used;
            generations[HIGHEST_NORMAL_GENERATION].bytes_allocated += used;
        }
    }
}
/* Do a non-conservative GC, and then save a core with the initial
 * function being set to the value of the static symbol
 * SB!VM:RESTART-LISP-FUNCTION */
void
gc_and_save(char *filename, boolean prepend_runtime,
            boolean save_runtime_options, boolean compressed,
            int compression_level, int application_type)
{
    FILE *file;
    void *runtime_bytes = NULL;
    size_t runtime_size;

    file = prepare_to_save(filename, prepend_runtime, &runtime_bytes,
                           &runtime_size);
    if (file == NULL)
        return;

    conservative_stack = 0;

    /* The filename might come from Lisp, and be moved by the now
     * non-conservative GC. */
    filename = strdup(filename);

    /* Collect twice: once into relatively high memory, and then back
     * into low memory. This compacts the retained data into the lower
     * pages, minimizing the size of the core file.
     */
    prepare_for_final_gc();
    gencgc_alloc_start_page = last_free_page;
    collect_garbage(HIGHEST_NORMAL_GENERATION+1);

    prepare_for_final_gc();
    gencgc_alloc_start_page = -1;
    collect_garbage(HIGHEST_NORMAL_GENERATION+1);

    if (prepend_runtime)
        save_runtime_to_filehandle(file, runtime_bytes, runtime_size,
                                   application_type);

    /* The dumper doesn't know that pages need to be zeroed before use. */
    zero_all_free_pages();
    save_to_filehandle(file, filename, SymbolValue(RESTART_LISP_FUNCTION,0),
                       prepend_runtime, save_runtime_options,
                       compressed ? compression_level : COMPRESSION_LEVEL_NONE);
    /* Oops. Save still managed to fail. Since we've mangled the stack
     * beyond hope, there's not much we can do.
     * (beyond FUNCALLing RESTART_LISP_FUNCTION, but I suspect that's
     * going to be rather unsatisfactory too... */
    lose("Attempt to save core after non-conservative GC failed.\n");
}