/*
 * GENerational Conservative Garbage Collector for SBCL
 */

/*
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 */

/*
 * For a review of garbage collection techniques (e.g. generational
 * GC) and terminology (e.g. "scavenging") see Paul R. Wilson,
 * "Uniprocessor Garbage Collection Techniques". As of 20000618, this
 * had been accepted for _ACM Computing Surveys_ and was available
 * as a PostScript preprint through
 *   <http://www.cs.utexas.edu/users/oops/papers.html>
 * as
 *   <ftp://ftp.cs.utexas.edu/pub/garbage/bigsurv.ps>.
 */
#if defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD)
#include "pthreads_win32.h"
#endif
#include "interrupt.h"
#include "gc-internal.h"
#include "pseudo-atomic.h"
#include "genesis/vector.h"
#include "genesis/weak-pointer.h"
#include "genesis/fdefn.h"
#include "genesis/simple-fun.h"
#include "genesis/hash-table.h"
#include "genesis/instance.h"
#include "genesis/layout.h"
#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
#include "genesis/cons.h"
#endif
/* forward declarations */
page_index_t gc_find_freeish_pages(page_index_t *restart_page_ptr, sword_t nbytes,
                                   int page_type_flag);
/* As usually configured, generations 0-5 are normal collected generations,
   6 is pseudo-static (the objects in which are never moved nor reclaimed),
   and 7 is scratch space used when collecting a generation without promotion,
   wherein it is moved to generation 7 and back again.
 */
enum {
    SCRATCH_GENERATION = PSEUDO_STATIC_GENERATION+1,
    NUM_GENERATIONS
};
/* Should we use page protection to help avoid the scavenging of pages
 * that don't have pointers to younger generations? */
boolean enable_page_protection = 1;
/* the minimum size (in bytes) for a large object */
/* NB this logic is unfortunately copied in 'compiler/x86-64/macros.lisp' */
#if (GENCGC_ALLOC_GRANULARITY >= PAGE_BYTES) && (GENCGC_ALLOC_GRANULARITY >= GENCGC_CARD_BYTES)
os_vm_size_t large_object_size = 4 * GENCGC_ALLOC_GRANULARITY;
#elif (GENCGC_CARD_BYTES >= PAGE_BYTES) && (GENCGC_CARD_BYTES >= GENCGC_ALLOC_GRANULARITY)
os_vm_size_t large_object_size = 4 * GENCGC_CARD_BYTES;
#else
os_vm_size_t large_object_size = 4 * PAGE_BYTES;
#endif
/* Largest allocation seen since last GC. */
os_vm_size_t large_allocation = 0;
/* the verbosity level. All non-error messages are disabled at level 0;
 * and only a few rare messages are printed at level 1. */
#if QSHOW == 2
boolean gencgc_verbose = 1;
#else
boolean gencgc_verbose = 0;
#endif
/* FIXME: At some point enable the various error-checking things below
 * and see what they say. */

/* We hunt for pointers to old-space, when GCing generations >= verify_gens.
 * Set verify_gens to HIGHEST_NORMAL_GENERATION + 1 to disable this kind of
 * check. */
generation_index_t verify_gens = HIGHEST_NORMAL_GENERATION + 1;
/* Should we do a pre-scan verify of generation 0 before it's GCed? */
boolean pre_verify_gen_0 = 0;

/* Should we check for bad pointers after gc_free_heap is called
 * from Lisp PURIFY? */
boolean verify_after_free_heap = 0;

/* Should we print a note when code objects are found in the dynamic space
 * during a heap verify? */
boolean verify_dynamic_code_check = 0;

#ifdef LISP_FEATURE_X86
/* Should we check code objects for fixup errors after they are transported? */
boolean check_code_fixups = 0;
#endif

/* Should we check that newly allocated regions are zero filled? */
boolean gencgc_zero_check = 0;

/* Should we check that the free space is zero filled? */
boolean gencgc_enable_verify_zero_fill = 0;

/* Should we check that free pages are zero filled during gc_free_heap
 * called after Lisp PURIFY? */
boolean gencgc_zero_check_during_free_heap = 0;

/* When loading a core, don't do a full scan of the memory for the
 * memory region boundaries. (Set to true by coreparse.c if the core
 * contained a pagetable entry). */
boolean gencgc_partial_pickup = 0;

/* If defined, free pages are read-protected to ensure that nothing
 * accesses them.
 */
/* #define READ_PROTECT_FREE_PAGES */
/*
 * GC structures and variables
 */

/* the total bytes allocated. These are seen by Lisp DYNAMIC-USAGE. */
os_vm_size_t bytes_allocated = 0;
os_vm_size_t auto_gc_trigger = 0;

/* the source and destination generations. These are set before a GC starts
 * scavenging. */
generation_index_t from_space;
generation_index_t new_space;

/* Set to 1 when in GC */
boolean gc_active_p = 0;

/* should the GC be conservative on stack. If false (only right before
 * saving a core), don't scan the stack / mark pages dont_move. */
static boolean conservative_stack = 1;
/* An array of page structures is allocated on gc initialization.
 * This helps to quickly map between an address and its page structure.
 * page_table_pages is set from the size of the dynamic space. */
page_index_t page_table_pages;
struct page *page_table;

/* In GC cards that have conservative pointers to them, should we wipe out
 * dwords in there that are not used, so that they do not act as false
 * roots to other things in the heap from then on? This is a new feature
 * but in testing it is both reliable and no noticeable slowdown. */

/* a value that we use to wipe out unused words in GC cards that
 * live alongside conservatively pointed words. */
const lispobj wipe_with = 0;
static inline boolean
page_allocated_p(page_index_t page) {
    return (page_table[page].allocated != FREE_PAGE_FLAG);
}

static inline boolean
page_no_region_p(page_index_t page) {
    return !(page_table[page].allocated & OPEN_REGION_PAGE_FLAG);
}

static inline boolean
page_allocated_no_region_p(page_index_t page) {
    return ((page_table[page].allocated & (UNBOXED_PAGE_FLAG | BOXED_PAGE_FLAG))
            && page_no_region_p(page));
}

static inline boolean
page_free_p(page_index_t page) {
    return (page_table[page].allocated == FREE_PAGE_FLAG);
}

static inline boolean
page_boxed_p(page_index_t page) {
    return (page_table[page].allocated & BOXED_PAGE_FLAG);
}

static inline boolean
page_boxed_no_region_p(page_index_t page) {
    return page_boxed_p(page) && page_no_region_p(page);
}

static inline boolean
page_unboxed_p(page_index_t page) {
    /* Both flags set == boxed code page */
    return ((page_table[page].allocated & UNBOXED_PAGE_FLAG)
            && !page_boxed_p(page));
}

static inline boolean
protect_page_p(page_index_t page, generation_index_t generation) {
    return (page_boxed_no_region_p(page)
            && (page_table[page].bytes_used != 0)
            && !page_table[page].dont_move
            && (page_table[page].gen == generation));
}
/* To map addresses to page structures the address of the first page
 * is needed. */
void *heap_base = NULL;

/* Calculate the start address for the given page number. */
inline void *
page_address(page_index_t page_num)
{
    return (heap_base + (page_num * GENCGC_CARD_BYTES));
}

/* Calculate the address where the allocation region associated with
 * the page starts. */
static inline void *
page_scan_start(page_index_t page_index)
{
    return page_address(page_index)-page_table[page_index].scan_start_offset;
}

/* True if the page starts a contiguous block. */
static inline boolean
page_starts_contiguous_block_p(page_index_t page_index)
{
    return page_table[page_index].scan_start_offset == 0;
}

/* True if the page is the last page in a contiguous block. */
static inline boolean
page_ends_contiguous_block_p(page_index_t page_index, generation_index_t gen)
{
    return (/* page doesn't fill block */
            (page_table[page_index].bytes_used < GENCGC_CARD_BYTES)
            /* page is last allocated page */
            || ((page_index + 1) >= last_free_page)
            /* next page is free */
            || page_free_p(page_index + 1)
            /* next page contains no data */
            || (page_table[page_index + 1].bytes_used == 0)
            /* next page is in different generation */
            || (page_table[page_index + 1].gen != gen)
            /* next page starts its own contiguous block */
            || (page_starts_contiguous_block_p(page_index + 1)));
}

/* Find the page index within the page_table for the given
 * address. Return -1 on failure. */
page_index_t
find_page_index(void *addr)
{
    if (addr >= heap_base) {
        page_index_t index = ((pointer_sized_uint_t)addr -
                              (pointer_sized_uint_t)heap_base) / GENCGC_CARD_BYTES;
        if (index < page_table_pages)
            return index;
    }
    return -1;
}

static os_vm_size_t
npage_bytes(page_index_t npages)
{
    gc_assert(npages>=0);
    return ((os_vm_size_t)npages)*GENCGC_CARD_BYTES;
}
/* Check that X is a higher address than Y and return offset from Y to
 * X. */
static inline os_vm_size_t
void_diff(void *x, void *y)
{
    gc_assert(x >= y);
    return (pointer_sized_uint_t)x - (pointer_sized_uint_t)y;
}
/* a structure to hold the state of a generation
 *
 * CAUTION: If you modify this, make sure to touch up the alien
 * definition in src/code/gc.lisp accordingly. ...or better yet,
 * deal with the FIXME there...
 */
struct generation {

    /* the first page that gc_alloc() checks on its next call */
    page_index_t alloc_start_page;

    /* the first page that gc_alloc_unboxed() checks on its next call */
    page_index_t alloc_unboxed_start_page;

    /* the first page that gc_alloc_large (boxed) considers on its next
     * call. (Although it always allocates after the boxed_region.) */
    page_index_t alloc_large_start_page;

    /* the first page that gc_alloc_large (unboxed) considers on its
     * next call. (Although it always allocates after the
     * current_unboxed_region.) */
    page_index_t alloc_large_unboxed_start_page;

    /* the bytes allocated to this generation */
    os_vm_size_t bytes_allocated;

    /* the number of bytes at which to trigger a GC */
    os_vm_size_t gc_trigger;

    /* to calculate a new level for gc_trigger */
    os_vm_size_t bytes_consed_between_gc;

    /* the number of GCs since the last raise */
    int num_gc;

    /* the number of GCs to run on the generations before raising objects to
     * the next generation */
    int number_of_gcs_before_promotion;

    /* the cumulative sum of the bytes allocated to this generation. It is
     * cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation. Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC. */
    os_vm_size_t cum_sum_bytes_allocated;

    /* a minimum average memory age before a GC will occur helps
     * prevent a GC when a large number of new live objects have been
     * added, in which case a GC could be a waste of time */
    double minimum_age_before_gc;
};
/* an array of generation structures. There needs to be one more
 * generation structure than actual generations as the oldest
 * generation is temporarily raised then lowered. */
struct generation generations[NUM_GENERATIONS];

/* the oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... HIGHEST_NORMAL_GENERATION
 *
 * The default of HIGHEST_NORMAL_GENERATION enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided. */
generation_index_t gencgc_oldest_gen_to_gc = HIGHEST_NORMAL_GENERATION;

/* The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the Lisp code. */
page_index_t last_free_page;
#ifdef LISP_FEATURE_SB_THREAD
/* This lock is to prevent multiple threads from simultaneously
 * allocating new regions which overlap each other. Note that the
 * majority of GC is single-threaded, but alloc() may be called from
 * >1 thread at a time and must be thread-safe. This lock must be
 * seized before all accesses to generations[] or to parts of
 * page_table[] that other threads may want to see */
static pthread_mutex_t free_pages_lock = PTHREAD_MUTEX_INITIALIZER;
/* This lock is used to protect non-thread-local allocation. */
static pthread_mutex_t allocation_lock = PTHREAD_MUTEX_INITIALIZER;
#endif

extern os_vm_size_t gencgc_release_granularity;
os_vm_size_t gencgc_release_granularity = GENCGC_RELEASE_GRANULARITY;

extern os_vm_size_t gencgc_alloc_granularity;
os_vm_size_t gencgc_alloc_granularity = GENCGC_ALLOC_GRANULARITY;
/*
 * miscellaneous heap functions
 */

/* Count the number of pages which are write-protected within the
 * given generation. */
static page_index_t
count_write_protect_generation_pages(generation_index_t generation)
{
    page_index_t i, count = 0;

    for (i = 0; i < last_free_page; i++)
        if (page_allocated_p(i)
            && (page_table[i].gen == generation)
            && (page_table[i].write_protected == 1))
            count++;
    return count;
}

/* Count the number of pages within the given generation. */
static page_index_t
count_generation_pages(generation_index_t generation)
{
    page_index_t i;
    page_index_t count = 0;

    for (i = 0; i < last_free_page; i++)
        if (page_allocated_p(i)
            && (page_table[i].gen == generation))
            count++;
    return count;
}

static page_index_t
count_dont_move_pages(void)
{
    page_index_t i;
    page_index_t count = 0;
    for (i = 0; i < last_free_page; i++) {
        if (page_allocated_p(i)
            && (page_table[i].dont_move != 0)) {
            ++count;
        }
    }
    return count;
}

/* Work through the pages and add up the number of bytes used for the
 * given generation. */
static os_vm_size_t
count_generation_bytes_allocated (generation_index_t gen)
{
    page_index_t i;
    os_vm_size_t result = 0;
    for (i = 0; i < last_free_page; i++) {
        if (page_allocated_p(i)
            && (page_table[i].gen == gen))
            result += page_table[i].bytes_used;
    }
    return result;
}

/* Return the average age of the memory in a generation. */
extern double
generation_average_age(generation_index_t gen)
{
    if (generations[gen].bytes_allocated == 0)
        return 0.0;

    return
        ((double)generations[gen].cum_sum_bytes_allocated)
        / ((double)generations[gen].bytes_allocated);
}
extern void
write_generation_stats(FILE *file)
{
    generation_index_t i;

#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
#define FPU_STATE_SIZE 27
    int fpu_state[FPU_STATE_SIZE];
#elif defined(LISP_FEATURE_PPC)
#define FPU_STATE_SIZE 32
    long long fpu_state[FPU_STATE_SIZE];
#elif defined(LISP_FEATURE_SPARC)
    /*
     * 32 (single-precision) FP registers, and the FP state register.
     * But Sparc V9 has 32 double-precision registers (equivalent to 64
     * single-precision, but can't be accessed), so we leave enough room
     * for that.
     */
#define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
    long long fpu_state[FPU_STATE_SIZE];
#elif defined(LISP_FEATURE_ARM)
#define FPU_STATE_SIZE 8
    long long fpu_state[FPU_STATE_SIZE];
#endif

    /* This code uses the FP instructions which may be set up for Lisp
     * so they need to be saved and reset for C. */
    fpu_save(fpu_state);

    /* Print the heap stats. */
    fprintf(file,
            " Gen StaPg UbSta LaSta LUbSt Boxed Unboxed LB LUB !move Alloc Waste Trig WP GCs Mem-age\n");

    for (i = 0; i < SCRATCH_GENERATION; i++) {
        page_index_t j;
        page_index_t boxed_cnt = 0;
        page_index_t unboxed_cnt = 0;
        page_index_t large_boxed_cnt = 0;
        page_index_t large_unboxed_cnt = 0;
        page_index_t pinned_cnt=0;

        for (j = 0; j < last_free_page; j++)
            if (page_table[j].gen == i) {

                /* Count the number of boxed pages within the given
                 * generation. */
                if (page_boxed_p(j)) {
                    if (page_table[j].large_object)
                        large_boxed_cnt++;
                    else
                        boxed_cnt++;
                }
                if(page_table[j].dont_move) pinned_cnt++;
                /* Count the number of unboxed pages within the given
                 * generation. */
                if (page_unboxed_p(j)) {
                    if (page_table[j].large_object)
                        large_unboxed_cnt++;
                    else
                        unboxed_cnt++;
                }
            }

        gc_assert(generations[i].bytes_allocated
                  == count_generation_bytes_allocated(i));
        fprintf(file,
                " %1d: %5ld %5ld %5ld %5ld",
                i,
                generations[i].alloc_start_page,
                generations[i].alloc_unboxed_start_page,
                generations[i].alloc_large_start_page,
                generations[i].alloc_large_unboxed_start_page);
        fprintf(file,
                " %5"PAGE_INDEX_FMT" %5"PAGE_INDEX_FMT" %5"PAGE_INDEX_FMT
                " %5"PAGE_INDEX_FMT" %5"PAGE_INDEX_FMT,
                boxed_cnt, unboxed_cnt, large_boxed_cnt,
                large_unboxed_cnt, pinned_cnt);
        fprintf(file,
                " %8"OS_VM_SIZE_FMT" %5"OS_VM_SIZE_FMT" %8"OS_VM_SIZE_FMT
                " %4"PAGE_INDEX_FMT" %3d %7.4f\n",
                generations[i].bytes_allocated,
                (npage_bytes(count_generation_pages(i)) - generations[i].bytes_allocated),
                generations[i].gc_trigger,
                count_write_protect_generation_pages(i),
                generations[i].num_gc,
                generation_average_age(i));
    }
    fprintf(file,"   Total bytes allocated    = %"OS_VM_SIZE_FMT"\n", bytes_allocated);
    fprintf(file,"   Dynamic-space-size bytes = %"OS_VM_SIZE_FMT"\n", dynamic_space_size);

    fpu_restore(fpu_state);
}
extern void
write_heap_exhaustion_report(FILE *file, long available, long requested,
                             struct thread *thread)
{
    fprintf(file,
            "Heap exhausted during %s: %ld bytes available, %ld requested.\n",
            gc_active_p ? "garbage collection" : "allocation",
            available, requested);
    write_generation_stats(file);
    fprintf(file, "GC control variables:\n");
    fprintf(file, "   *GC-INHIBIT* = %s\n   *GC-PENDING* = %s\n",
            SymbolValue(GC_INHIBIT,thread)==NIL ? "false" : "true",
            (SymbolValue(GC_PENDING, thread) == T) ?
            "true" : ((SymbolValue(GC_PENDING, thread) == NIL) ?
                      "false" : "in progress"));
#ifdef LISP_FEATURE_SB_THREAD
    fprintf(file, "   *STOP-FOR-GC-PENDING* = %s\n",
            SymbolValue(STOP_FOR_GC_PENDING,thread)==NIL ? "false" : "true");
#endif
}
extern void
print_generation_stats(void)
{
    write_generation_stats(stderr);
}
extern char* gc_logfile;
char * gc_logfile = NULL;

extern void
log_generation_stats(char *logfile, char *header)
{
    if (logfile) {
        FILE * log = fopen(logfile, "a");
        if (log) {
            fprintf(log, "%s\n", header);
            write_generation_stats(log);
            fclose(log);
        } else {
            fprintf(stderr, "Could not open gc logfile: %s\n", logfile);
            fflush(stderr);
        }
    }
}
extern void
report_heap_exhaustion(long available, long requested, struct thread *th)
{
    if (gc_logfile) {
        FILE * log = fopen(gc_logfile, "a");
        if (log) {
            write_heap_exhaustion_report(log, available, requested, th);
            fclose(log);
        } else {
            fprintf(stderr, "Could not open gc logfile: %s\n", gc_logfile);
            fflush(stderr);
        }
    }
    /* Always to stderr as well. */
    write_heap_exhaustion_report(stderr, available, requested, th);
}
#if defined(LISP_FEATURE_X86)
void fast_bzero(void*, size_t); /* in <arch>-assem.S */
#endif
/* Zero the pages from START to END (inclusive), but use mmap/munmap instead
 * of zeroing it ourselves, i.e. in practice give the memory back to the
 * OS. Generally done after a large GC.
 */
void zero_pages_with_mmap(page_index_t start, page_index_t end) {
    page_index_t i;
    void *addr = page_address(start), *new_addr;
    os_vm_size_t length = npage_bytes(1+end-start);

    if (start > end)
        return;

    gc_assert(length >= gencgc_release_granularity);
    gc_assert((length % gencgc_release_granularity) == 0);

    os_invalidate(addr, length);
    new_addr = os_validate(addr, length);
    if (new_addr == NULL || new_addr != addr) {
        lose("remap_free_pages: page moved, 0x%08x ==> 0x%08x",
             start, new_addr);
    }

    for (i = start; i <= end; i++) {
        page_table[i].need_to_zero = 0;
    }
}
/* Zero the pages from START to END (inclusive). Generally done just after
 * a new region has been allocated.
 */
static void
zero_pages(page_index_t start, page_index_t end) {
    if (start > end)
        return;

#if defined(LISP_FEATURE_X86)
    fast_bzero(page_address(start), npage_bytes(1+end-start));
#else
    bzero(page_address(start), npage_bytes(1+end-start));
#endif
}

static void
zero_and_mark_pages(page_index_t start, page_index_t end) {
    page_index_t i;

    zero_pages(start, end);
    for (i = start; i <= end; i++)
        page_table[i].need_to_zero = 0;
}

/* Zero the pages from START to END (inclusive), except for those
 * pages that are known to be already zeroed. Mark all pages in the
 * ranges as non-zeroed.
 */
static void
zero_dirty_pages(page_index_t start, page_index_t end) {
    page_index_t i, j;

    for (i = start; i <= end; i++) {
        if (!page_table[i].need_to_zero) continue;
        for (j = i+1; (j <= end) && (page_table[j].need_to_zero); j++);
        zero_pages(i, j-1);
        i = j;
    }

    for (i = start; i <= end; i++) {
        page_table[i].need_to_zero = 1;
    }
}
/*
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties
 * e.g. boxed/unboxed, generation, ages; there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page. Many
 * features of memory use are noted on a page wise basis, e.g. the
 * generation; so if a region starts within an existing allocated page
 * it must be consistent with this page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region. It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list which jumps all over the place to cleanup the list. It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g to detect pointers to the oldspace.
 * Thus it's important that the allocation regions have the correct
 * properties set when allocated, and not just set when closed. The
 * region allocation routines return regions with the specified
 * properties, and grab all the pages, setting their properties
 * appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer. The actual space used by the region is not reflected
 * in the page tables until it is closed. It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used returning unused
 * space. Further it may be noted in the new regions which is
 * necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region, the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects and so
 * don't need scavenging. Further they can't contain pointers to
 * younger generations so WP is not needed. By allocating pages to
 * unboxed objects the whole page never needs scavenging or
 * write-protecting. */
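/* Illustrative sketch only: the inline fast path described above amounts to
 * a pointer bump plus an end-address check. The real versions are
 * gc_alloc_with_region() below and the allocation sequences emitted by the
 * compiler; this hypothetical helper only shows the shape of the check. */
static inline void *
example_inline_alloc(struct alloc_region *region, sword_t nbytes)
{
    void *new_free_pointer = region->free_pointer + nbytes;
    if (new_free_pointer <= region->end_addr) {
        void *new_obj = region->free_pointer;   /* it fits: bump and return */
        region->free_pointer = new_free_pointer;
        return new_obj;
    }
    /* Region exhausted: the caller must close it and open a new one. */
    return NULL;
}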
/* We are only using two regions at present. Both are for the current
 * newspace generation. */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

/* The generation currently being allocated to. */
static generation_index_t gc_alloc_generation;
static inline page_index_t
generation_alloc_start_page(generation_index_t generation, int page_type_flag, int large)
{
    if (large) {
        if (UNBOXED_PAGE_FLAG == page_type_flag) {
            return generations[generation].alloc_large_unboxed_start_page;
        } else if (BOXED_PAGE_FLAG & page_type_flag) {
            /* Both code and data. */
            return generations[generation].alloc_large_start_page;
        } else {
            lose("bad page type flag: %d", page_type_flag);
        }
    } else {
        if (UNBOXED_PAGE_FLAG == page_type_flag) {
            return generations[generation].alloc_unboxed_start_page;
        } else if (BOXED_PAGE_FLAG & page_type_flag) {
            /* Both code and data. */
            return generations[generation].alloc_start_page;
        } else {
            lose("bad page_type_flag: %d", page_type_flag);
        }
    }
}

static inline void
set_generation_alloc_start_page(generation_index_t generation, int page_type_flag, int large,
                                page_index_t page)
{
    if (large) {
        if (UNBOXED_PAGE_FLAG == page_type_flag) {
            generations[generation].alloc_large_unboxed_start_page = page;
        } else if (BOXED_PAGE_FLAG & page_type_flag) {
            /* Both code and data. */
            generations[generation].alloc_large_start_page = page;
        } else {
            lose("bad page type flag: %d", page_type_flag);
        }
    } else {
        if (UNBOXED_PAGE_FLAG == page_type_flag) {
            generations[generation].alloc_unboxed_start_page = page;
        } else if (BOXED_PAGE_FLAG & page_type_flag) {
            /* Both code and data. */
            generations[generation].alloc_start_page = page;
        } else {
            lose("bad page type flag: %d", page_type_flag);
        }
    }
}
/* Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page. So
 * may pick up from the previous region if there is enough space. This
 * keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables(), and will thus be in an empty state.
 *
 * To assist the scavenging functions write-protected pages are not
 * used. Free pages should not be write-protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found to within the current
 * region and the page generation must be set so that pointers to the
 * from space can be recognized. Therefore the generation of pages in
 * the region are set to gc_alloc_generation. To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void
gc_alloc_new_region(sword_t nbytes, int page_type_flag, struct alloc_region *alloc_region)
{
    page_index_t first_page;
    page_index_t last_page;
    os_vm_size_t bytes_found;
    page_index_t i;
    int ret;

    /*
    FSHOW((stderr,
           "/alloc_new_region for %d bytes from gen %d\n",
           nbytes, gc_alloc_generation));
    */

    /* Check that the region is in a reset state. */
    gc_assert((alloc_region->first_page == 0)
              && (alloc_region->last_page == -1)
              && (alloc_region->free_pointer == alloc_region->end_addr));
    ret = thread_mutex_lock(&free_pages_lock);
    gc_assert(ret == 0);
    first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0);
    last_page = gc_find_freeish_pages(&first_page, nbytes, page_type_flag);
    bytes_found = (GENCGC_CARD_BYTES - page_table[first_page].bytes_used)
        + npage_bytes(last_page-first_page);

    /* Set up the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
        + page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;

    /* Set up the pages. */

    /* The first page may have already been in use. */
    if (page_table[first_page].bytes_used == 0) {
        page_table[first_page].allocated = page_type_flag;
        page_table[first_page].gen = gc_alloc_generation;
        page_table[first_page].large_object = 0;
        page_table[first_page].scan_start_offset = 0;
        // wiping should have free()ed and :=NULL
        gc_assert(page_table[first_page].dontmove_dwords == NULL);
    } else {
        gc_assert(page_table[first_page].allocated == page_type_flag);
        page_table[first_page].allocated |= OPEN_REGION_PAGE_FLAG;

        gc_assert(page_table[first_page].gen == gc_alloc_generation);
        gc_assert(page_table[first_page].large_object == 0);
    }

    for (i = first_page+1; i <= last_page; i++) {
        page_table[i].allocated = page_type_flag;
        page_table[i].gen = gc_alloc_generation;
        page_table[i].large_object = 0;
        /* This may not be necessary for unboxed regions (think it was
         * broken before!) */
        page_table[i].scan_start_offset =
            void_diff(page_address(i),alloc_region->start_addr);
        page_table[i].allocated |= OPEN_REGION_PAGE_FLAG;
    }
    /* Bump up last_free_page. */
    if (last_page+1 > last_free_page) {
        last_free_page = last_page+1;
        /* do we only want to call this on special occasions? like for
         * boxed_region? */
        set_alloc_pointer((lispobj)page_address(last_free_page));
    }
    ret = thread_mutex_unlock(&free_pages_lock);
    gc_assert(ret == 0);

#ifdef READ_PROTECT_FREE_PAGES
    os_protect(page_address(first_page),
               npage_bytes(1+last_page-first_page),
               OS_VM_PROT_ALL);
#endif

    /* If the first page was only partial, don't check whether it's
     * zeroed (it won't be) and don't zero it (since the parts that
     * we're interested in are guaranteed to be zeroed).
     */
    if (page_table[first_page].bytes_used) {
        first_page++;
    }

    zero_dirty_pages(first_page, last_page);

    /* we can do this after releasing free_pages_lock */
    if (gencgc_zero_check) {
        word_t *p;
        for (p = (word_t *)alloc_region->start_addr;
             p < (word_t *)alloc_region->end_addr; p++) {
            if (*p != 0) {
                lose("The new region is not zero at %p (start=%p, end=%p).\n",
                     p, alloc_region->start_addr, alloc_region->end_addr);
            }
        }
    }
}
/* If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then it is only recorded if the first page of the
 * current region is <= new_areas_ignore_page. This helps avoid
 * unnecessary recording when doing full scavenge pass.
 *
 * The new_area structure holds the page, byte offset, and size of
 * new regions of objects. Each new area is placed in the array of
 * these structures pointed to by new_areas. new_areas_index holds the
 * offset into new_areas.
 *
 * If new_areas overflows NUM_NEW_AREAS then it stops adding them. The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation. */
#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static page_index_t new_areas_ignore_page;
struct new_area {
    page_index_t page;
    size_t offset;
    size_t size;
};
static struct new_area (*new_areas)[];
static size_t new_areas_index;
size_t max_new_areas;
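/* Illustrative sketch only: a recorded new_area denotes the byte range
 * [page_address(page)+offset, page_address(page)+offset+size) that must be
 * rescanned. This hypothetical helper mirrors the arithmetic used by
 * add_new_area() below when coalescing adjacent areas. */
static inline void
example_new_area_bounds(struct new_area *area, char **startp, char **endp)
{
    *startp = (char*)page_address(area->page) + area->offset;
    *endp   = *startp + area->size;
}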
/* Add a new area to new_areas. */
static void
add_new_area(page_index_t first_page, size_t offset, size_t size)
{
    size_t new_area_start, c;
    ssize_t i;

    /* Ignore if full. */
    if (new_areas_index >= NUM_NEW_AREAS)
        return;

    switch (record_new_objects) {
    case 0:
        return;
    case 1:
        if (first_page > new_areas_ignore_page)
            return;
        break;
    case 2:
        break;
    default:
        gc_abort();
    }

    new_area_start = npage_bytes(first_page) + offset;

    /* Search backwards for a prior area that this follows from. If
       found this will save adding a new area. */
    for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) {
        size_t area_end =
            npage_bytes((*new_areas)[i].page)
            + (*new_areas)[i].offset
            + (*new_areas)[i].size;
        /*FSHOW((stderr,
               "/add_new_area S1 %d %d %d %d\n",
               i, c, new_area_start, area_end));*/
        if (new_area_start == area_end) {
            /*FSHOW((stderr,
                   "/adding to [%d] %d %d %d with %d %d %d:\n",
                   i,
                   (*new_areas)[i].page,
                   (*new_areas)[i].offset,
                   (*new_areas)[i].size,
                   first_page, offset, size));*/
            (*new_areas)[i].size += size;
            return;
        }
    }

    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
    /*FSHOW((stderr,
           "/new_area %d page %d offset %d size %d\n",
           new_areas_index, first_page, offset, size));*/
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
        max_new_areas = new_areas_index;
}
/* Update the tables for the alloc_region. The region may be added to
 * the new_areas.
 *
 * When done the alloc_region is set up so that the next quick alloc
 * will fail safely and thus a new region will be allocated. Further
 * it is safe to try to re-update the page table of this reset
 * alloc_region. */
void
gc_alloc_update_page_tables(int page_type_flag, struct alloc_region *alloc_region)
{
    boolean more;
    page_index_t first_page;
    page_index_t next_page;
    os_vm_size_t bytes_used;
    os_vm_size_t region_size;
    os_vm_size_t byte_cnt;
    page_bytes_t orig_first_page_bytes_used;
    int ret;

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if ((first_page == 0) && (alloc_region->last_page == -1))
        return;

    next_page = first_page+1;

    ret = thread_mutex_lock(&free_pages_lock);
    gc_assert(ret == 0);
    if (alloc_region->free_pointer != alloc_region->start_addr) {
        /* some bytes were allocated in the region */
        orig_first_page_bytes_used = page_table[first_page].bytes_used;

        gc_assert(alloc_region->start_addr ==
                  (page_address(first_page)
                   + page_table[first_page].bytes_used));

        /* All the pages used need to be updated */

        /* Update the first page. */

        /* If the page was free then set up the gen, and
         * scan_start_offset. */
        if (page_table[first_page].bytes_used == 0)
            gc_assert(page_starts_contiguous_block_p(first_page));
        page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);

        gc_assert(page_table[first_page].allocated & page_type_flag);
        gc_assert(page_table[first_page].gen == gc_alloc_generation);
        gc_assert(page_table[first_page].large_object == 0);

        byte_cnt = 0;

        /* Calculate the number of bytes used in this page. This is not
         * always the number of new bytes, unless it was free. */
        more = 0;
        if ((bytes_used = void_diff(alloc_region->free_pointer,
                                    page_address(first_page)))
            >GENCGC_CARD_BYTES) {
            bytes_used = GENCGC_CARD_BYTES;
            more = 1;
        }
        page_table[first_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        /* All the rest of the pages should be free. We need to set
         * their scan_start_offset pointer to the start of the
         * region, and set the bytes_used. */
        while (more) {
            page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
            gc_assert(page_table[next_page].allocated & page_type_flag);
            gc_assert(page_table[next_page].bytes_used == 0);
            gc_assert(page_table[next_page].gen == gc_alloc_generation);
            gc_assert(page_table[next_page].large_object == 0);

            gc_assert(page_table[next_page].scan_start_offset ==
                      void_diff(page_address(next_page),
                                alloc_region->start_addr));

            /* Calculate the number of bytes used in this page. */
            more = 0;
            if ((bytes_used = void_diff(alloc_region->free_pointer,
                                        page_address(next_page)))>GENCGC_CARD_BYTES) {
                bytes_used = GENCGC_CARD_BYTES;
                more = 1;
            }
            page_table[next_page].bytes_used = bytes_used;
            byte_cnt += bytes_used;

            next_page++;
        }

        region_size = void_diff(alloc_region->free_pointer,
                                alloc_region->start_addr);
        bytes_allocated += region_size;
        generations[gc_alloc_generation].bytes_allocated += region_size;

        gc_assert((byte_cnt - orig_first_page_bytes_used) == region_size);

        /* Set the generations alloc restart page to the last page of
         * the region. */
        set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0, next_page-1);

        /* Add the region to the new_areas if requested. */
        if (BOXED_PAGE_FLAG & page_type_flag)
            add_new_area(first_page,orig_first_page_bytes_used, region_size);

        /*
        FSHOW((stderr,
               "/gc_alloc_update_page_tables update %d bytes to gen %d\n",
               region_size,
               gc_alloc_generation));
        */
    } else {
        /* There are no bytes allocated. Unallocate the first_page if
         * there are 0 bytes_used. */
        page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
        if (page_table[first_page].bytes_used == 0)
            page_table[first_page].allocated = FREE_PAGE_FLAG;
    }

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
        gc_assert(page_table[next_page].bytes_used == 0);
        page_table[next_page].allocated = FREE_PAGE_FLAG;
        next_page++;
    }
    ret = thread_mutex_unlock(&free_pages_lock);
    gc_assert(ret == 0);

    /* alloc_region is per-thread, we're ok to do this unlocked */
    gc_set_region_empty(alloc_region);
}
static inline void *gc_quick_alloc(word_t nbytes);
/* Allocate a possibly large object. */
void *
gc_alloc_large(sword_t nbytes, int page_type_flag, struct alloc_region *alloc_region)
{
    boolean more;
    page_index_t first_page, next_page, last_page;
    page_bytes_t orig_first_page_bytes_used;
    os_vm_size_t byte_cnt;
    os_vm_size_t bytes_used;
    int ret;

    ret = thread_mutex_lock(&free_pages_lock);
    gc_assert(ret == 0);

    first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 1);
    if (first_page <= alloc_region->last_page) {
        first_page = alloc_region->last_page+1;
    }

    last_page = gc_find_freeish_pages(&first_page,nbytes, page_type_flag);

    gc_assert(first_page > alloc_region->last_page);

    set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 1, last_page);

    /* Set up the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /* If the first page was free then set up the gen, and
     * scan_start_offset. */
    if (page_table[first_page].bytes_used == 0) {
        page_table[first_page].allocated = page_type_flag;
        page_table[first_page].gen = gc_alloc_generation;
        page_table[first_page].scan_start_offset = 0;
        page_table[first_page].large_object = 1;
    }

    gc_assert(page_table[first_page].allocated == page_type_flag);
    gc_assert(page_table[first_page].gen == gc_alloc_generation);
    gc_assert(page_table[first_page].large_object == 1);

    byte_cnt = 0;

    /* Calc. the number of bytes used in this page. This is not
     * always the number of new bytes, unless it was free. */
    more = 0;
    if ((bytes_used = nbytes+orig_first_page_bytes_used) > GENCGC_CARD_BYTES) {
        bytes_used = GENCGC_CARD_BYTES;
        more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page+1;

    /* All the rest of the pages should be free. We need to set their
     * scan_start_offset pointer to the start of the region, and set
     * the bytes_used. */
    while (more) {
        gc_assert(page_free_p(next_page));
        gc_assert(page_table[next_page].bytes_used == 0);
        page_table[next_page].allocated = page_type_flag;
        page_table[next_page].gen = gc_alloc_generation;
        page_table[next_page].large_object = 1;

        page_table[next_page].scan_start_offset =
            npage_bytes(next_page-first_page) - orig_first_page_bytes_used;

        /* Calculate the number of bytes used in this page. */
        more = 0;
        bytes_used = (nbytes+orig_first_page_bytes_used)-byte_cnt;
        if (bytes_used > GENCGC_CARD_BYTES) {
            bytes_used = GENCGC_CARD_BYTES;
            more = 1;
        }
        page_table[next_page].bytes_used = bytes_used;
        page_table[next_page].write_protected=0;
        page_table[next_page].dont_move=0;
        byte_cnt += bytes_used;
        next_page++;
    }

    gc_assert((byte_cnt-orig_first_page_bytes_used) == (size_t)nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (BOXED_PAGE_FLAG & page_type_flag)
        add_new_area(first_page,orig_first_page_bytes_used,nbytes);

    /* Bump up last_free_page */
    if (last_page+1 > last_free_page) {
        last_free_page = last_page+1;
        set_alloc_pointer((lispobj)(page_address(last_free_page)));
    }
    ret = thread_mutex_unlock(&free_pages_lock);
    gc_assert(ret == 0);

#ifdef READ_PROTECT_FREE_PAGES
    os_protect(page_address(first_page),
               npage_bytes(1+last_page-first_page),
               OS_VM_PROT_ALL);
#endif

    zero_dirty_pages(first_page, last_page);

    return page_address(first_page);
}
static page_index_t gencgc_alloc_start_page = -1;

void
gc_heap_exhausted_error_or_lose (sword_t available, sword_t requested)
{
    struct thread *thread = arch_os_get_current_thread();
    /* Write basic information before doing anything else: if we don't
     * call to lisp this is a must, and even if we do there is always
     * the danger that we bounce back here before the error has been
     * handled, or indeed even printed.
     */
    report_heap_exhaustion(available, requested, thread);
    if (gc_active_p || (available == 0)) {
        /* If we are in GC, or totally out of memory there is no way
         * to sanely transfer control to the lisp-side of things.
         */
        lose("Heap exhausted, game over.");
    }
    else {
        /* FIXME: assert free_pages_lock held */
        (void)thread_mutex_unlock(&free_pages_lock);
#if !(defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD))
        gc_assert(get_pseudo_atomic_atomic(thread));
        clear_pseudo_atomic_atomic(thread);
        if (get_pseudo_atomic_interrupted(thread))
            do_pending_interrupt();
#endif
        /* Another issue is that signalling HEAP-EXHAUSTED error leads
         * to running user code at arbitrary places, even in a
         * WITHOUT-INTERRUPTS which may lead to a deadlock without
         * running out of the heap. So at this point all bets are
         * off. */
        if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL)
            corruption_warning_and_maybe_lose
                ("Signalling HEAP-EXHAUSTED in a WITHOUT-INTERRUPTS.");
        funcall2(StaticSymbolFunction(HEAP_EXHAUSTED_ERROR),
                 alloc_number(available), alloc_number(requested));
        lose("HEAP-EXHAUSTED-ERROR fell through");
    }
}
page_index_t
gc_find_freeish_pages(page_index_t *restart_page_ptr, sword_t bytes,
                      int page_type_flag)
{
    page_index_t most_bytes_found_from = 0, most_bytes_found_to = 0;
    page_index_t first_page, last_page, restart_page = *restart_page_ptr;
    os_vm_size_t nbytes = bytes;
    os_vm_size_t nbytes_goal = nbytes;
    os_vm_size_t bytes_found = 0;
    os_vm_size_t most_bytes_found = 0;
    boolean small_object = nbytes < GENCGC_CARD_BYTES;
    /* FIXME: assert(free_pages_lock is held); */

    if (nbytes_goal < gencgc_alloc_granularity)
        nbytes_goal = gencgc_alloc_granularity;

    /* Toggled by gc_and_save for heap compaction, normally -1. */
    if (gencgc_alloc_start_page != -1) {
        restart_page = gencgc_alloc_start_page;
    }

    /* FIXME: This is on bytes instead of nbytes pending cleanup of
     * long from the interface. */
    gc_assert(bytes>=0);
    /* Search for a page with at least nbytes of space. We prefer
     * not to split small objects on multiple pages, to reduce the
     * number of contiguous allocation regions spanning multiple
     * pages: this helps avoid excessive conservatism.
     *
     * For other objects, we guarantee that they start on their own
     * page boundary.
     */
    first_page = restart_page;
    while (first_page < page_table_pages) {
        bytes_found = 0;
        if (page_free_p(first_page)) {
            gc_assert(0 == page_table[first_page].bytes_used);
            bytes_found = GENCGC_CARD_BYTES;
        } else if (small_object &&
                   (page_table[first_page].allocated == page_type_flag) &&
                   (page_table[first_page].large_object == 0) &&
                   (page_table[first_page].gen == gc_alloc_generation) &&
                   (page_table[first_page].write_protected == 0) &&
                   (page_table[first_page].dont_move == 0)) {
            bytes_found = GENCGC_CARD_BYTES - page_table[first_page].bytes_used;
            if (bytes_found < nbytes) {
                if (bytes_found > most_bytes_found)
                    most_bytes_found = bytes_found;
                first_page++;
                continue;
            }
        } else {
            first_page++;
            continue;
        }

        gc_assert(page_table[first_page].write_protected == 0);
        for (last_page = first_page+1;
             ((last_page < page_table_pages) &&
              page_free_p(last_page) &&
              (bytes_found < nbytes_goal));
             last_page++) {
            bytes_found += GENCGC_CARD_BYTES;
            gc_assert(0 == page_table[last_page].bytes_used);
            gc_assert(0 == page_table[last_page].write_protected);
        }

        if (bytes_found > most_bytes_found) {
            most_bytes_found = bytes_found;
            most_bytes_found_from = first_page;
            most_bytes_found_to = last_page;
        }
        if (bytes_found >= nbytes_goal)
            break;

        first_page = last_page;
    }

    bytes_found = most_bytes_found;
    restart_page = first_page + 1;

    /* Check for a failure */
    if (bytes_found < nbytes) {
        gc_assert(restart_page >= page_table_pages);
        gc_heap_exhausted_error_or_lose(most_bytes_found, nbytes);
    }

    gc_assert(most_bytes_found_to);
    *restart_page_ptr = most_bytes_found_from;
    return most_bytes_found_to-1;
}
/* Allocate bytes. All the rest of the special-purpose allocation
 * functions will eventually call this */

void *
gc_alloc_with_region(sword_t nbytes,int page_type_flag, struct alloc_region *my_region,
                     int quick_p)
{
    void *new_free_pointer;

    if ((size_t)nbytes>=large_object_size)
        return gc_alloc_large(nbytes, page_type_flag, my_region);

    /* Check whether there is room in the current alloc region. */
    new_free_pointer = my_region->free_pointer + nbytes;

    /* fprintf(stderr, "alloc %d bytes from %p to %p\n", nbytes,
       my_region->free_pointer, new_free_pointer); */

    if (new_free_pointer <= my_region->end_addr) {
        /* If so then allocate from the current alloc region. */
        void *new_obj = my_region->free_pointer;
        my_region->free_pointer = new_free_pointer;

        /* Unless a `quick' alloc was requested, check whether the
           alloc region is almost empty. */
        if (!quick_p &&
            void_diff(my_region->end_addr,my_region->free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(page_type_flag, my_region);
            /* Set up a new region. */
            gc_alloc_new_region(32 /*bytes*/, page_type_flag, my_region);
        }

        return((void *)new_obj);
    }

    /* Else not enough free space in the current region: retry with a
     * new region. */

    gc_alloc_update_page_tables(page_type_flag, my_region);
    gc_alloc_new_region(nbytes, page_type_flag, my_region);
    return gc_alloc_with_region(nbytes, page_type_flag, my_region,0);
}
/* these are only used during GC: all allocation from the mutator calls
 * alloc() -> gc_alloc_with_region() with the appropriate per-thread
 * alloc_region. */

static inline void *
gc_quick_alloc(word_t nbytes)
{
    return gc_general_alloc(nbytes, BOXED_PAGE_FLAG, ALLOC_QUICK);
}

static inline void *
gc_alloc_unboxed(word_t nbytes)
{
    return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, 0);
}

static inline void *
gc_quick_alloc_unboxed(word_t nbytes)
{
    return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, ALLOC_QUICK);
}
/* Copy a large object. If the object is in a large object region then
 * it is simply promoted, else it is copied. If it's large enough then
 * it's copied to a large object region.
 *
 * Bignums and vectors may have shrunk. If the object is not copied
 * the space needs to be reclaimed, and the page_tables corrected. */
static lispobj
general_copy_large_object(lispobj object, word_t nwords, boolean boxedp)
{
    int tag;
    lispobj *new;
    page_index_t first_page;

    gc_assert(is_lisp_pointer(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    if ((nwords > 1024*1024) && gencgc_verbose) {
        FSHOW((stderr, "/general_copy_large_object: %d bytes\n",
               nwords*N_WORD_BYTES));
    }

    /* Check whether it's a large object. */
    first_page = find_page_index((void *)object);
    gc_assert(first_page >= 0);

    if (page_table[first_page].large_object) {
        /* Promote the object. Note: Unboxed objects may have been
         * allocated to a BOXED region so it may be necessary to
         * change the region to UNBOXED. */
        os_vm_size_t remaining_bytes;
        os_vm_size_t bytes_freed;
        page_index_t next_page;
        page_bytes_t old_bytes_used;

        /* FIXME: This comment is somewhat stale.
         *
         * Note: Any page write-protection must be removed, else a
         * later scavenge_newspace may incorrectly not scavenge these
         * pages. This would not be necessary if they are added to the
         * new areas, but let's do it for them all (they'll probably
         * be written anyway?). */

        gc_assert(page_starts_contiguous_block_p(first_page));
        next_page = first_page;
        remaining_bytes = nwords*N_WORD_BYTES;

        while (remaining_bytes > GENCGC_CARD_BYTES) {
            gc_assert(page_table[next_page].gen == from_space);
            gc_assert(page_table[next_page].large_object);
            gc_assert(page_table[next_page].scan_start_offset ==
                      npage_bytes(next_page-first_page));
            gc_assert(page_table[next_page].bytes_used == GENCGC_CARD_BYTES);
            /* Should have been unprotected by unprotect_oldspace()
             * for boxed objects, and after promotion unboxed ones
             * should not be on protected pages at all. */
            gc_assert(!page_table[next_page].write_protected);

            if (boxedp)
                gc_assert(page_boxed_p(next_page));
            else {
                gc_assert(page_allocated_no_region_p(next_page));
                page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
            }
            page_table[next_page].gen = new_space;

            remaining_bytes -= GENCGC_CARD_BYTES;
            next_page++;
        }

        /* Now only one page remains, but the object may have shrunk so
         * there may be more unused pages which will be freed. */

        /* Object may have shrunk but shouldn't have grown - check. */
        gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

        page_table[next_page].gen = new_space;

        if (boxedp)
            gc_assert(page_boxed_p(next_page));
        else
            page_table[next_page].allocated = UNBOXED_PAGE_FLAG;

        /* Adjust the bytes_used. */
        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].bytes_used = remaining_bytes;

        bytes_freed = old_bytes_used - remaining_bytes;

        /* Free any remaining pages; needs care. */
        next_page++;
        while ((old_bytes_used == GENCGC_CARD_BYTES) &&
               (page_table[next_page].gen == from_space) &&
               /* FIXME: It is not obvious to me why this is necessary
                * as a loop condition: it seems to me that the
                * scan_start_offset test should be sufficient, but
                * experimentally that is not the case. --NS */
               (boxedp ?
                page_boxed_p(next_page) :
                page_allocated_no_region_p(next_page)) &&
               page_table[next_page].large_object &&
               (page_table[next_page].scan_start_offset ==
                npage_bytes(next_page - first_page))) {
            /* Checks out OK, free the page. Don't need to bother zeroing
             * pages as this should have been done before shrinking the
             * object. These pages shouldn't be write-protected, even if
             * boxed they should be zero filled. */
            gc_assert(page_table[next_page].write_protected == 0);

            old_bytes_used = page_table[next_page].bytes_used;
            page_table[next_page].allocated = FREE_PAGE_FLAG;
            page_table[next_page].bytes_used = 0;
            bytes_freed += old_bytes_used;
            next_page++;
        }

        if ((bytes_freed > 0) && gencgc_verbose) {
            FSHOW((stderr,
                   "/general_copy_large_object bytes_freed=%"OS_VM_SIZE_FMT"\n",
                   bytes_freed));
        }

        generations[from_space].bytes_allocated -= nwords*N_WORD_BYTES
            + bytes_freed;
        generations[new_space].bytes_allocated += nwords*N_WORD_BYTES;
        bytes_allocated -= bytes_freed;

        /* Add the region to the new_areas if requested. */
        if (boxedp)
            add_new_area(first_page,0,nwords*N_WORD_BYTES);

        return(object);

    } else {
        /* Get tag of object. */
        tag = lowtag_of(object);

        /* Allocate space. */
        new = gc_general_alloc(nwords*N_WORD_BYTES,
                               (boxedp ? BOXED_PAGE_FLAG : UNBOXED_PAGE_FLAG),
                               ALLOC_QUICK);

        /* Copy the object. */
        memcpy(new,native_pointer(object),nwords*N_WORD_BYTES);

        /* Return Lisp pointer of new object. */
        return ((lispobj) new) | tag;
    }
}

lispobj
copy_large_object(lispobj object, sword_t nwords)
{
    return general_copy_large_object(object, nwords, 1);
}

lispobj
copy_large_unboxed_object(lispobj object, sword_t nwords)
{
    return general_copy_large_object(object, nwords, 0);
}

/* to copy unboxed objects */
lispobj
copy_unboxed_object(lispobj object, sword_t nwords)
{
    return gc_general_copy_object(object, nwords, UNBOXED_PAGE_FLAG);
}
/*
 * code and code-related objects
 */

static lispobj trans_fun_header(lispobj object);
static lispobj trans_boxed(lispobj object);

/* Scan an x86 compiled code object, looking for possible fixups that
 * have been missed after a move.
 *
 * Two types of fixups are needed:
 * 1. Absolute fixups to within the code object.
 * 2. Relative fixups to outside the code object.
 *
 * Currently only absolute fixups to the constant vector, or to the
 * code area are checked. */
#ifdef LISP_FEATURE_X86
void
sniff_code_object(struct code *code, os_vm_size_t displacement)
{
    sword_t nheader_words, ncode_words, nwords;
    os_vm_address_t constants_start_addr = NULL, constants_end_addr, p;
    os_vm_address_t code_start_addr, code_end_addr;
    os_vm_address_t code_addr = (os_vm_address_t)code;
    int fixup_found = 0;

    if (!check_code_fixups)
        return;

    FSHOW((stderr, "/sniffing code: %p, %lu\n", code, displacement));

    ncode_words = fixnum_word_value(code->code_size);
    nheader_words = HeaderValue(*(lispobj *)code);
    nwords = ncode_words + nheader_words;

    constants_start_addr = code_addr + 5*N_WORD_BYTES;
    constants_end_addr = code_addr + nheader_words*N_WORD_BYTES;
    code_start_addr = code_addr + nheader_words*N_WORD_BYTES;
    code_end_addr = code_addr + nwords*N_WORD_BYTES;

    /* Work through the unboxed code. */
    for (p = code_start_addr; p < code_end_addr; p++) {
        void *data = *(void **)p;
        unsigned d1 = *((unsigned char *)p - 1);
        unsigned d2 = *((unsigned char *)p - 2);
        unsigned d3 = *((unsigned char *)p - 3);
        unsigned d4 = *((unsigned char *)p - 4);
#if QSHOW
        unsigned d5 = *((unsigned char *)p - 5);
        unsigned d6 = *((unsigned char *)p - 6);
#endif

        /* Check for code references. */
        /* Check for a 32 bit word that looks like an absolute
           reference to within the code area of the code object. */
        if ((data >= (void*)(code_start_addr-displacement))
            && (data < (void*)(code_end_addr-displacement))) {
            /* function header */
            if ((d4 == 0x5e)
                && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) ==
                    (unsigned)code)) {
                /* Skip the function header */
                p += 6*4 - 4 - 1;
                continue;
            }
            /* the case of PUSH imm32 */
            if (d1 == 0x68) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/PUSH $0x%.8x\n", data));
            }
            /* the case of MOV [reg-8],imm32 */
            if ((d3 == 0xc7)
                && (d2==0x40 || d2==0x41 || d2==0x42 || d2==0x43
                    || d2==0x45 || d2==0x46 || d2==0x47)
                && (d1 == 0xf8)) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/MOV [reg-8],$0x%.8x\n", data));
            }
            /* the case of LEA reg,[disp32] */
            if ((d2 == 0x8d) && ((d1 & 0xc7) == 5)) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr,"/LEA reg,[$0x%.8x]\n", data));
            }
        }

        /* Check for constant references. */
        /* Check for a 32 bit word that looks like an absolute
           reference to within the constant vector. Constant references
           will be aligned. */
        if ((data >= (void*)(constants_start_addr-displacement))
            && (data < (void*)(constants_end_addr-displacement))
            && (((unsigned)data & 0x3) == 0)) {
            /* the case of MOV eax,m32 */
            if (d1 == 0xa1) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr,"/MOV eax,0x%.8x\n", data));
            }

            /* the case of MOV m32,EAX */
            if (d1 == 0xa3) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/MOV 0x%.8x,eax\n", data));
            }

            /* the case of CMP m32,imm32 */
            if ((d1 == 0x3d) && (d2 == 0x81)) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/CMP 0x%.8x,immed32\n", data));
            }

            /* Check for a mod=00, r/m=101 byte. */
            if ((d1 & 0xc7) == 5) {
                /* the case of CMP m32,reg */
                if (d2 == 0x39) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr,"/CMP 0x%.8x,reg\n", data));
                }
                /* the case of CMP reg32,m32 */
                if (d2 == 0x3b) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/CMP reg32,0x%.8x\n", data));
                }
                /* the case of MOV m32,reg32 */
                if (d2 == 0x89) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/MOV 0x%.8x,reg32\n", data));
                }
                /* the case of MOV reg32,m32 */
                if (d2 == 0x8b) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/MOV reg32,0x%.8x\n", data));
                }
                /* the case of LEA reg32,m32 */
                if (d2 == 0x8d) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/LEA reg32,0x%.8x\n", data));
                }
            }
        }
    }

    /* If anything was found, print some information on the code
     * object. */
    if (fixup_found) {
        FSHOW((stderr,
               "/compiled code object at %x: header words = %d, code words = %d\n",
               code, nheader_words, ncode_words));
        FSHOW((stderr,
               "/const start = %x, end = %x\n",
               constants_start_addr, constants_end_addr));
        FSHOW((stderr,
               "/code start = %x, end = %x\n",
               code_start_addr, code_end_addr));
    }
}
#endif
#ifdef LISP_FEATURE_X86
static void
gencgc_apply_code_fixups(struct code *old_code, struct code *new_code)
{
    sword_t nheader_words, ncode_words, nwords;
    os_vm_address_t constants_start_addr, constants_end_addr;
    os_vm_address_t code_start_addr, code_end_addr;
    os_vm_address_t code_addr = (os_vm_address_t)new_code;
    os_vm_address_t old_addr = (os_vm_address_t)old_code;
    os_vm_size_t displacement = code_addr - old_addr;
    lispobj fixups = NIL;
    struct vector *fixups_vector;

    ncode_words = fixnum_word_value(new_code->code_size);
    nheader_words = HeaderValue(*(lispobj *)new_code);
    nwords = ncode_words + nheader_words;
    /* FSHOW((stderr,
             "/compiled code object at %x: header words = %d, code words = %d\n",
             new_code, nheader_words, ncode_words)); */
    constants_start_addr = code_addr + 5*N_WORD_BYTES;
    constants_end_addr = code_addr + nheader_words*N_WORD_BYTES;
    code_start_addr = code_addr + nheader_words*N_WORD_BYTES;
    code_end_addr = code_addr + nwords*N_WORD_BYTES;
    /*
    FSHOW((stderr,
           "/const start = %x, end = %x\n",
           constants_start_addr,constants_end_addr));
    FSHOW((stderr,
           "/code start = %x; end = %x\n",
           code_start_addr,code_end_addr));
    */

    /* The first constant should be a pointer to the fixups for this
       code object. Check. */
    fixups = new_code->constants[0];

    /* It will be 0 or the unbound-marker if there are no fixups (as
     * will be the case if the code object has been purified, for
     * example) and will be an other pointer if it is valid. */
    if ((fixups == 0) || (fixups == UNBOUND_MARKER_WIDETAG) ||
        !is_lisp_pointer(fixups)) {
        /* Check for possible errors. */
        if (check_code_fixups)
            sniff_code_object(new_code, displacement);

        return;
    }

    fixups_vector = (struct vector *)native_pointer(fixups);

    /* Could be pointing to a forwarding pointer. */
    /* FIXME is this always in from_space? if so, could replace this code with
     * forwarding_pointer_p/forwarding_pointer_value */
    if (is_lisp_pointer(fixups) &&
        (find_page_index((void*)fixups_vector) != -1) &&
        (fixups_vector->header == 0x01)) {
        /* If so, then follow it. */
        /*SHOW("following pointer to a forwarding pointer");*/
        fixups_vector =
            (struct vector *)native_pointer((lispobj)fixups_vector->length);
    }

    /*SHOW("got fixups");*/

    if (widetag_of(fixups_vector->header) == SIMPLE_ARRAY_WORD_WIDETAG) {
        /* Got the fixups for the code block. Now work through the vector,
           and apply a fixup at each address. */
        sword_t length = fixnum_value(fixups_vector->length);
        sword_t i;
        for (i = 0; i < length; i++) {
            long offset = fixups_vector->data[i];
            /* Now check the current value of offset. */
            os_vm_address_t old_value = *(os_vm_address_t *)(code_start_addr + offset);

            /* If it's within the old_code object then it must be an
             * absolute fixup (relative ones are not saved) */
            if ((old_value >= old_addr)
                && (old_value < (old_addr + nwords*N_WORD_BYTES)))
                /* So add the displacement. */
                *(os_vm_address_t *)(code_start_addr + offset) =
                    old_value + displacement;
            else
                /* It is outside the old code object so it must be a
                 * relative fixup (absolute fixups are not saved). So
                 * subtract the displacement. */
                *(os_vm_address_t *)(code_start_addr + offset) =
                    old_value - displacement;
        }
    } else {
        /* This used to just print a note to stderr, but a bogus fixup seems to
         * indicate real heap corruption, so a hard failure is in order. */
        lose("fixup vector %p has a bad widetag: %d\n",
             fixups_vector, widetag_of(fixups_vector->header));
    }

    /* Check for possible errors. */
    if (check_code_fixups) {
        sniff_code_object(new_code,displacement);
    }
}
#endif
trans_boxed_large(lispobj object)
    gc_assert(is_lisp_pointer(object));

    header = *((lispobj *) native_pointer(object));
    length = HeaderValue(header) + 1;
    length = CEILING(length, 2);

    return copy_large_object(object, length);

/* Doesn't seem to be used, delete it after the grace period. */
trans_unboxed_large(lispobj object)
    gc_assert(is_lisp_pointer(object));

    header = *((lispobj *) native_pointer(object));
    length = HeaderValue(header) + 1;
    length = CEILING(length, 2);

    return copy_large_unboxed_object(object, length);

/* XX This is a hack adapted from cgc.c. These don't work too
 * efficiently with the gencgc as a list of the weak pointers is
 * maintained within the objects which causes writes to the pages. A
 * limited attempt is made to avoid unnecessary writes, but this needs
#define WEAK_POINTER_NWORDS \
    CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)

scav_weak_pointer(lispobj *where, lispobj object)
    /* Since we overwrite the 'next' field, we have to make
     * sure not to do so for pointers already in the list.
     * Instead of searching the list of weak_pointers each
     * time, we ensure that next is always NULL when the weak
     * pointer isn't in the list, and not NULL otherwise.
     * Since we can't use NULL to denote end of list, we
     * use a pointer back to the same weak_pointer.
    struct weak_pointer * wp = (struct weak_pointer*)where;

    if (NULL == wp->next) {
        wp->next = weak_pointers;
        if (NULL == wp->next)

    /* Do not let GC scavenge the value slot of the weak pointer.
     * (That is why it is a weak pointer.) */

    return WEAK_POINTER_NWORDS;
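
/* A minimal stand-alone sketch of the "next points back to itself"
 * end-of-list convention described above, assuming a toy struct with
 * only a 'next' field (toy_weak_pointer and toy_push are hypothetical
 * names, for illustration only):
 *
 *     struct toy_weak_pointer { struct toy_weak_pointer *next; };
 *     static struct toy_weak_pointer *toy_list = NULL;
 *
 *     static void toy_push(struct toy_weak_pointer *wp) {
 *         if (wp->next == NULL) {       // not yet on the list
 *             wp->next = toy_list;
 *             if (wp->next == NULL)     // list was empty, so...
 *                 wp->next = wp;        // ...point back at itself
 *             toy_list = wp;
 *         }
 *     }
 *
 * Membership can then be tested with a single NULL check on 'next',
 * without walking the list.
 */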
search_read_only_space(void *pointer)
    lispobj *start = (lispobj *) READ_ONLY_SPACE_START;
    lispobj *end = (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0);
    if ((pointer < (void *)start) || (pointer >= (void *)end))
    return (gc_search_space(start,
                            (((lispobj *)pointer)+2)-start,
                            (lispobj *) pointer));

search_static_space(void *pointer)
    lispobj *start = (lispobj *)STATIC_SPACE_START;
    lispobj *end = (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0);
    if ((pointer < (void *)start) || (pointer >= (void *)end))
    return (gc_search_space(start,
                            (((lispobj *)pointer)+2)-start,
                            (lispobj *) pointer));

/* a faster version for searching the dynamic space. This will work even
 * if the object is in a current allocation region. */
search_dynamic_space(void *pointer)
    page_index_t page_index = find_page_index(pointer);

    /* The address may be invalid, so do some checks. */
    if ((page_index == -1) || page_free_p(page_index))
    start = (lispobj *)page_scan_start(page_index);
    return (gc_search_space(start,
                            (((lispobj *)pointer)+2)-start,
                            (lispobj *)pointer));
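
/* The searches above rely on gc_search_space()-style forward scanning:
 * starting from a known object boundary (the page's scan_start), step
 * object by object until the one covering the queried address is
 * reached. A simplified sketch of that idea, assuming a size_of()
 * helper that returns an object's length in words (a hypothetical
 * name, standing in for the sizetab[] dispatch the real code uses):
 *
 *     static lispobj *find_containing_object(lispobj *scan_start,
 *                                            lispobj *pointer)
 *     {
 *         lispobj *obj = scan_start;
 *         while (obj <= pointer) {
 *             sword_t nwords = size_of(obj);   // always >= 1
 *             if (pointer < obj + nwords)
 *                 return obj;                  // 'pointer' falls inside obj
 *             obj += nwords;
 *         }
 *         return NULL;                         // scanned past the address
 *     }
 */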
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)

/* Is there any possibility that pointer is a valid Lisp object
 * reference, and/or something else (e.g. subroutine call return
 * address) which should prevent us from moving the referred-to thing?
 * This is called from preserve_pointer(). */
possibly_valid_dynamic_space_pointer_s(lispobj *pointer,
                                       page_index_t addr_page_index,
                                       lispobj **store_here)
    lispobj *start_addr;

    /* Find the object start address. */
    start_addr = search_dynamic_space(pointer);

    if (start_addr == NULL) {

    *store_here = start_addr;

    /* If the containing object is a code object, presume that the
     * pointer is valid, simply because it could be an unboxed return
    if (widetag_of(*start_addr) == CODE_HEADER_WIDETAG)

    /* Large object pages only contain ONE object, and it will never
     * be a CONS. However, arrays and bignums can be allocated larger
     * than necessary and then shrunk to fit, leaving what look like
     * (0 . 0) CONSes at the end. These appear valid to
     * looks_like_valid_lisp_pointer_p(), so pick them off here. */
    if (page_table[addr_page_index].large_object &&
        (lowtag_of((lispobj)pointer) == LIST_POINTER_LOWTAG))

    return looks_like_valid_lisp_pointer_p((lispobj)pointer, start_addr);

#endif  // defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
valid_conservative_root_p(void *addr, page_index_t addr_page_index,
                          lispobj **begin_ptr)
#ifdef GENCGC_IS_PRECISE
    /* If we're in precise gencgc (non-x86oid as of this writing) then
     * we are only called on valid object pointers in the first place,
     * so we just have to do a bounds-check against the heap, a
     * generation check, and the already-pinned check. */
    if ((addr_page_index == -1)
        || (page_table[addr_page_index].gen != from_space)
        || (page_table[addr_page_index].dont_move != 0))
    /* quick check 1: Address is quite likely to have been invalid. */
    if ((addr_page_index == -1)
        || page_free_p(addr_page_index)
        || (page_table[addr_page_index].bytes_used == 0)
        || (page_table[addr_page_index].gen != from_space))

    gc_assert(!(page_table[addr_page_index].allocated&OPEN_REGION_PAGE_FLAG));

    /* quick check 2: Check the offset within the page.
    if (((uword_t)addr & (GENCGC_CARD_BYTES - 1)) >
        page_table[addr_page_index].bytes_used)

    /* Filter out anything which can't be a pointer to a Lisp object
     * (or, as a special case which also requires dont_move, a return
     * address referring to something in a CodeObject). This is
     * expensive but important, since it vastly reduces the
     * probability that random garbage will be bogusly interpreted as
     * a pointer which prevents a page from moving. */
    if (!possibly_valid_dynamic_space_pointer_s(addr, addr_page_index,

in_dontmove_dwordindex_p(page_index_t page_index, int dword_in_page)
    if (page_table[page_index].dontmove_dwords) {
        return page_table[page_index].dontmove_dwords[dword_in_page];

in_dontmove_nativeptr_p(page_index_t page_index, lispobj *native_ptr)
    if (page_table[page_index].dontmove_dwords) {
        lispobj *begin = page_address(page_index);
        int dword_in_page = (native_ptr - begin) / 2;
        return in_dontmove_dwordindex_p(page_index, dword_in_page);
/* Adjust large bignum and vector objects. This will adjust the
 * allocated region if the size has shrunk, and move unboxed objects
 * into unboxed pages. The pages are not promoted here, and the
 * promoted region is not added to the new_regions; this is really
 * only designed to be called from preserve_pointer(). Shouldn't fail
 * if this is missed, just may delay the moving of objects to unboxed
 * pages, and the freeing of pages. */
maybe_adjust_large_object(lispobj *where)
    page_index_t first_page;
    page_index_t next_page;
    uword_t remaining_bytes;
    uword_t bytes_freed;
    uword_t old_bytes_used;

    /* Check whether it's a vector or bignum object. */
    switch (widetag_of(where[0])) {
    case SIMPLE_VECTOR_WIDETAG:
        boxed = BOXED_PAGE_FLAG;
    case BIGNUM_WIDETAG:
    case SIMPLE_BASE_STRING_WIDETAG:
#ifdef SIMPLE_CHARACTER_STRING_WIDETAG
    case SIMPLE_CHARACTER_STRING_WIDETAG:
    case SIMPLE_BIT_VECTOR_WIDETAG:
    case SIMPLE_ARRAY_NIL_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_FIXNUM_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
    case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
    case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG:
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
    case SIMPLE_ARRAY_FIXNUM_WIDETAG:
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
    case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
    case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
#ifdef SIMPLE_ARRAY_LONG_FLOAT_WIDETAG
    case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG:
#ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
    case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG:
#ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
    case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG:
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
    case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
        boxed = UNBOXED_PAGE_FLAG;

    /* Find its current size. */
    nwords = (sizetab[widetag_of(where[0])])(where);

    first_page = find_page_index((void *)where);
    gc_assert(first_page >= 0);

    /* Note: Any page write-protection must be removed, else a later
     * scavenge_newspace may incorrectly not scavenge these pages.
     * This would not be necessary if they are added to the new areas,
     * but let's do it for them all (they'll probably be written
    gc_assert(page_starts_contiguous_block_p(first_page));

    next_page = first_page;
    remaining_bytes = nwords*N_WORD_BYTES;
    while (remaining_bytes > GENCGC_CARD_BYTES) {
        gc_assert(page_table[next_page].gen == from_space);
        gc_assert(page_allocated_no_region_p(next_page));
        gc_assert(page_table[next_page].large_object);
        gc_assert(page_table[next_page].scan_start_offset ==
                  npage_bytes(next_page-first_page));
        gc_assert(page_table[next_page].bytes_used == GENCGC_CARD_BYTES);

        page_table[next_page].allocated = boxed;

        /* Shouldn't be write-protected at this stage. Essential that the
        gc_assert(!page_table[next_page].write_protected);
        remaining_bytes -= GENCGC_CARD_BYTES;

    /* Now only one page remains, but the object may have shrunk so
     * there may be more unused pages which will be freed. */

    /* Object may have shrunk but shouldn't have grown - check. */
    gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

    page_table[next_page].allocated = boxed;
    gc_assert(page_table[next_page].allocated ==
              page_table[first_page].allocated);

    /* Adjust the bytes_used. */
    old_bytes_used = page_table[next_page].bytes_used;
    page_table[next_page].bytes_used = remaining_bytes;

    bytes_freed = old_bytes_used - remaining_bytes;

    /* Free any remaining pages; needs care. */
    while ((old_bytes_used == GENCGC_CARD_BYTES) &&
           (page_table[next_page].gen == from_space) &&
           page_allocated_no_region_p(next_page) &&
           page_table[next_page].large_object &&
           (page_table[next_page].scan_start_offset ==
            npage_bytes(next_page - first_page))) {
        /* It checks out OK, free the page. We don't need to bother zeroing
         * pages as this should have been done before shrinking the
         * object. These pages shouldn't be write protected as they
         * should be zero filled. */
        gc_assert(page_table[next_page].write_protected == 0);

        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].allocated = FREE_PAGE_FLAG;
        page_table[next_page].bytes_used = 0;
        bytes_freed += old_bytes_used;

    if ((bytes_freed > 0) && gencgc_verbose) {
               "/maybe_adjust_large_object() freed %d\n",
    generations[from_space].bytes_allocated -= bytes_freed;
    bytes_allocated -= bytes_freed;
 * Why is this restricted to protected objects only?
 * Because the rest of the page has been scavenged already,
 * and since that leaves forwarding pointers in the unprotected
 * areas you cannot scavenge it again until those are gone.
scavenge_pages_with_conservative_pointers_to_them_protected_objects_only()
    for (i = 0; i < last_free_page; i++) {
        if (!page_table[i].dontmove_dwords) {
        lispobj *begin = page_address(i);

        lispobj *scavme_begin = NULL;
        for (dword = 0; dword < GENCGC_CARD_BYTES / N_WORD_BYTES / 2; dword++) {
            if (in_dontmove_dwordindex_p(i, dword)) {
                if (!scavme_begin) {
                    scavme_begin = begin + dword * 2;
                // contiguous area stopped
                scavenge(scavme_begin, (begin + dword * 2) - scavme_begin);
                scavme_begin = NULL;
        scavenge(scavme_begin, (begin + dword * 2) - scavme_begin);

int verbosefixes = 0;
    int words_wiped = 0;
    int lisp_pointers_wiped = 0;
    int pages_considered = 0;
    int n_pages_cannot_wipe = 0;

    for (i = 0; i < last_free_page; i++) {
        if (!page_table[i].dont_move) {
        if (!page_table[i].dontmove_dwords) {
            n_pages_cannot_wipe++;
        begin = page_address(i);
        for (dword = 0; dword < GENCGC_CARD_BYTES / N_WORD_BYTES / 2; dword++) {
            if (!in_dontmove_dwordindex_p(i, dword)) {
                if (is_lisp_pointer(*(begin + dword * 2))) {
                    lisp_pointers_wiped++;
                if (is_lisp_pointer(*(begin + dword * 2 + 1))) {
                    lisp_pointers_wiped++;
                *(begin + dword * 2) = wipe_with;
                *(begin + dword * 2 + 1) = wipe_with;
        free(page_table[i].dontmove_dwords);
        page_table[i].dontmove_dwords = NULL;

        // move the page to newspace
        generations[new_space].bytes_allocated += page_table[i].bytes_used;
        generations[page_table[i].gen].bytes_allocated -= page_table[i].bytes_used;
        page_table[i].gen = new_space;
    if ((verbosefixes >= 1 && lisp_pointers_wiped > 0) || verbosefixes >= 2) {
        fprintf(stderr, "Cra25a: wiped %d words (%d lisp_pointers) in %d pages, cannot wipe %d pages \n",
                words_wiped, lisp_pointers_wiped, pages_considered, n_pages_cannot_wipe);

set_page_consi_bit(page_index_t pageindex, lispobj *mark_which_pointer)
    struct page *page = &page_table[pageindex];

    gc_assert(mark_which_pointer);
    if (page->dontmove_dwords == NULL) {
        const int n_dwords_in_card = GENCGC_CARD_BYTES / N_WORD_BYTES / 2;
        const int malloc_size = sizeof(in_use_marker_t) * n_dwords_in_card;
        page->dontmove_dwords = malloc(malloc_size);
        gc_assert(page->dontmove_dwords);
        bzero(page->dontmove_dwords, malloc_size);

    int size = (sizetab[widetag_of(mark_which_pointer[0])])(mark_which_pointer);
        (fixnump(*mark_which_pointer) ||
         is_lisp_pointer(*mark_which_pointer) ||
         lowtag_of(*mark_which_pointer) == 9 ||
         lowtag_of(*mark_which_pointer) == 2)) {
        // print additional debug info for now.
        if (size % 2 != 0) {
            fprintf(stderr, "WIPE ERROR !dword, size %d, lowtag %d, world 0x%lld\n",
                    lowtag_of(*mark_which_pointer),
                    (long long)*mark_which_pointer);
    gc_assert(size % 2 == 0);
    lispobj *begin = page_address(pageindex);
    int begin_dword = (mark_which_pointer - begin) / 2;
    for (dword = begin_dword; dword < begin_dword + size / 2; dword++) {
        page->dontmove_dwords[dword] = 1;
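
/* The pin bookkeeping above works at two-word ("dword") granularity:
 * each pinned page carries an array with one flag per dword, and a
 * pointer within the page maps to its flag index by halving its word
 * offset from the page start. A simplified sketch of that mapping,
 * assuming a 32k card and 8-byte words (the toy_* names are
 * hypothetical, for illustration only):
 *
 *     #define TOY_CARD_BYTES  32768
 *     #define TOY_WORD_BYTES  8
 *     // one flag per dword => TOY_CARD_BYTES / TOY_WORD_BYTES / 2 flags
 *
 *     static int toy_dword_index(char *page_start, char *ptr) {
 *         return (int)((ptr - page_start) / TOY_WORD_BYTES / 2);
 *     }
 *
 * Marking an object of N words then sets flags [i, i + N/2), which is
 * why the code above asserts that object sizes are an even number of
 * words.
 */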
/* Take a possible pointer to a Lisp object and mark its page in the
 * page_table so that it will not be relocated during a GC.
 *
 * This involves locating the page it points to, then backing up to
 * the start of its region, then marking all pages dont_move from there
 * up to the first page that's not full or has a different generation
 *
 * It is assumed that all the page static flags have been cleared at
 * the start of a GC.
 *
 * It is also assumed that the current gc_alloc() region has been
 * flushed and the tables updated. */
preserve_pointer(void *addr)
    page_index_t addr_page_index = find_page_index(addr);
    page_index_t first_page;
    unsigned int region_allocation;
    lispobj *begin_ptr = NULL;

    if (!valid_conservative_root_p(addr, addr_page_index, &begin_ptr))

    /* (Now that we know that addr_page_index is in range, it's
     * safe to index into page_table[] with it.) */
    region_allocation = page_table[addr_page_index].allocated;

    /* Find the beginning of the region. Note that there may be
     * objects in the region preceding the one that we were passed a
     * pointer to: if this is the case, we will write-protect all the
     * previous objects' pages too. */

    /* I think this'd work just as well, but without the assertions.
     * -dan 2004.01.01 */
    first_page = find_page_index(page_scan_start(addr_page_index))
    first_page = addr_page_index;
    while (!page_starts_contiguous_block_p(first_page)) {
        /* Do some checks. */
        gc_assert(page_table[first_page].bytes_used == GENCGC_CARD_BYTES);
        gc_assert(page_table[first_page].gen == from_space);
        gc_assert(page_table[first_page].allocated == region_allocation);

    /* Adjust any large objects before promotion as they won't be
     * copied after promotion. */
    if (page_table[first_page].large_object) {
        maybe_adjust_large_object(page_address(first_page));
        /* It may have moved to unboxed pages. */
        region_allocation = page_table[first_page].allocated;

    /* Now work forward until the end of this contiguous area is found,
     * marking all pages as dont_move. */
    for (i = first_page; ;i++) {
        gc_assert(page_table[i].allocated == region_allocation);

        /* Mark the page static. */
        page_table[i].dont_move = 1;

        /* It is essential that the pages are not write protected as
         * they may have pointers into the old-space which need
         * scavenging. They shouldn't be write protected at this
        gc_assert(!page_table[i].write_protected);

        /* Check whether this is the last page in this contiguous block. */
        if (page_ends_contiguous_block_p(i, from_space))

#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    /* Do not do this for multi-page objects. Those pages do not need
     * object wipeout anyway.
    if (i == first_page) {
        /* We need the pointer to the beginning of the object.
         * We might have gotten it above but maybe not, so make sure
        if (begin_ptr == NULL) {
            possibly_valid_dynamic_space_pointer_s(addr, first_page,
        set_page_consi_bit(first_page, begin_ptr);

    /* Check that the page is now static. */
    gc_assert(page_table[addr_page_index].dont_move != 0);
/* If the given page is not write-protected, then scan it for pointers
 * to younger generations or the top temp. generation. If no
 * suspicious pointers are found then the page is write-protected.
 *
 * Care is taken to check for pointers to the current gc_alloc()
 * region if it is a younger generation or the temp. generation. This
 * frees the caller from doing a gc_alloc_update_page_tables(). Actually
 * the gc_alloc_generation does not need to be checked as this is only
 * called from scavenge_generation() when the gc_alloc generation is
 * younger, so it just checks if there is a pointer to the current
 *
 * We return 1 if the page was write-protected, else 0. */
update_page_write_prot(page_index_t page)
    generation_index_t gen = page_table[page].gen;
    void **page_addr = (void **)page_address(page);
    sword_t num_words = page_table[page].bytes_used / N_WORD_BYTES;

    /* Shouldn't be a free page. */
    gc_assert(page_allocated_p(page));
    gc_assert(page_table[page].bytes_used != 0);

    /* Skip if it's already write-protected, pinned, or unboxed */
    if (page_table[page].write_protected
        /* FIXME: What's the reason for not write-protecting pinned pages? */
        || page_table[page].dont_move
        || page_unboxed_p(page))

    /* Scan the page for pointers to younger generations or the
     * top temp. generation. */
    for (j = 0; j < num_words; j++) {
        void *ptr = *(page_addr+j);
        page_index_t index = find_page_index(ptr);

        /* Check that it's in the dynamic space */
        if (/* Does it point to a younger or the temp. generation? */
            (page_allocated_p(index)
             && (page_table[index].bytes_used != 0)
             && ((page_table[index].gen < gen)
                 || (page_table[index].gen == SCRATCH_GENERATION)))

            /* Or does it point within a current gc_alloc() region? */
            || ((boxed_region.start_addr <= ptr)
                && (ptr <= boxed_region.free_pointer))
            || ((unboxed_region.start_addr <= ptr)
                && (ptr <= unboxed_region.free_pointer))) {

        /* Write-protect the page. */
        /*FSHOW((stderr, "/write-protecting page %d gen %d\n", page, gen));*/
        os_protect((void *)page_addr,
                   OS_VM_PROT_READ|OS_VM_PROT_EXECUTE);

        /* Note the page as protected in the page tables. */
        page_table[page].write_protected = 1;
/* Scavenge all generations from FROM to TO, inclusive, except for
 * new_space which needs special handling, as new objects may be
 * added which are not checked here - use scavenge_newspace_generation.
 *
 * Write-protected pages should not have any pointers to the
 * from_space so do not need scavenging; thus write-protected pages are
 * not always scavenged. There is some code to check that these pages
 * are not written; but to check fully the write-protected pages need
 * to be scavenged by disabling the code to skip them.
 *
 * Under the current scheme when a generation is GCed the younger
 * generations will be empty. So, when a generation is being GCed it
 * is only necessary to scavenge the older generations for pointers
 * not the younger. So a page that does not have pointers to younger
 * generations does not need to be scavenged.
 *
 * The write-protection can be used to note pages that don't have
 * pointers to younger pages. But pages can be written without having
 * pointers to younger generations. After the pages are scavenged here
 * they can be scanned for pointers to younger generations and if
 * there are none the page can be write-protected.
 *
 * One complication is when the newspace is the top temp. generation.
 *
 * Enabling SC_GEN_CK scavenges the write-protected pages and checks
 * that none were written, which they shouldn't be as they should have
 * no pointers to younger generations. This breaks down for weak
 * pointers as the objects contain a link to the next and are written
 * if a weak pointer is scavenged. Still it's a useful check. */
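
/* The write-protection scheme described above acts, in effect, as a
 * page-granularity remembered set: a write-protected page is known to
 * hold no pointers into younger generations, so root scanning can skip
 * it. A compressed sketch of that scan-or-skip decision, assuming a
 * toy page descriptor with only 'write_protected' and 'gen' fields
 * (toy_page and toy_needs_root_scan are hypothetical names):
 *
 *     struct toy_page { int write_protected; int gen; };
 *
 *     // Does page 'p' of an older generation need to be scanned for
 *     // roots while collecting generation 'collected'?
 *     static int toy_needs_root_scan(struct toy_page *p, int collected) {
 *         if (p->gen <= collected) return 0;  // not an older-gen page
 *         return !p->write_protected;         // protected => no young ptrs
 *     }
 *
 * A page is re-protected only after a scan proves it again contains no
 * pointers to younger generations (update_page_write_prot above).
 */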
scavenge_generations(generation_index_t from, generation_index_t to)
    page_index_t num_wp = 0;

    /* Clear the write_protected_cleared flags on all pages. */
    for (i = 0; i < page_table_pages; i++)
        page_table[i].write_protected_cleared = 0;

    for (i = 0; i < last_free_page; i++) {
        generation_index_t generation = page_table[i].gen;
            && (page_table[i].bytes_used != 0)
            && (generation != new_space)
            && (generation >= from)
            && (generation <= to)) {
            page_index_t last_page,j;
            int write_protected=1;

            /* This should be the start of a region */
            gc_assert(page_starts_contiguous_block_p(i));

            /* Now work forward until the end of the region */
            for (last_page = i; ; last_page++) {
                    write_protected && page_table[last_page].write_protected;
                if (page_ends_contiguous_block_p(last_page, generation))
            if (!write_protected) {
                scavenge(page_address(i),
                         ((uword_t)(page_table[last_page].bytes_used
                                    + npage_bytes(last_page-i)))

                /* Now scan the pages and write protect those that
                 * don't have pointers to younger generations. */
                if (enable_page_protection) {
                    for (j = i; j <= last_page; j++) {
                        num_wp += update_page_write_prot(j);
                if ((gencgc_verbose > 1) && (num_wp != 0)) {
                           "/write protected %d pages within generation %d\n",
                           num_wp, generation));

    /* Check that none of the write_protected pages in this generation
     * have been written to. */
    for (i = 0; i < page_table_pages; i++) {
        if (page_allocated_p(i)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)
            && (page_table[i].write_protected_cleared != 0)) {
            FSHOW((stderr, "/scavenge_generation() %d\n", generation));
                   "/page bytes_used=%d scan_start_offset=%lu dont_move=%d\n",
                   page_table[i].bytes_used,
                   page_table[i].scan_start_offset,
                   page_table[i].dont_move));
            lose("write to protected page %d in scavenge_generation()\n", i);
/* Scavenge a newspace generation. As it is scavenged new objects may
 * be allocated to it; these will also need to be scavenged. This
 * repeats until there are no more objects unscavenged in the
 * newspace generation.
 *
 * To help improve the efficiency, areas written are recorded by
 * gc_alloc() and only these scavenged. Sometimes a little more will be
 * scavenged, but this causes no harm. An easy check is done that the
 * scavenged bytes equals the number allocated in the previous
 *
 * Write-protected pages are not scanned except if they are marked
 * dont_move in which case they may have been promoted and still have
 * pointers to the from space.
 *
 * Write-protected pages could potentially be written by alloc; however,
 * to avoid having to handle re-scavenging of write-protected pages
 * gc_alloc() does not write to write-protected pages.
 *
 * New areas of objects allocated are recorded alternately in the two
 * new_areas arrays below. */
static struct new_area new_areas_1[NUM_NEW_AREAS];
static struct new_area new_areas_2[NUM_NEW_AREAS];
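
/* A compressed sketch of the double-buffering that the loop further
 * below implements: one array collects areas dirtied while the other is
 * being scavenged, and the two swap roles each pass until a pass
 * records nothing new. Names prefixed with toy_ are hypothetical and
 * stand in for new_areas_1/new_areas_2 and the real scavenger:
 *
 *     struct toy_area { char *start; long nbytes; };
 *     #define TOY_MAX 64
 *     static struct toy_area toy_buf_a[TOY_MAX], toy_buf_b[TOY_MAX];
 *
 *     static void toy_scavenge_until_quiescent(long n_initial) {
 *         struct toy_area *cur = toy_buf_a;    // filled by the first scan
 *         long ncur = n_initial;               // areas recorded so far
 *         while (ncur > 0) {
 *             struct toy_area *prev = cur;     // what the last pass found
 *             long nprev = ncur;
 *             cur = (prev == toy_buf_a) ? toy_buf_b : toy_buf_a;
 *             ncur = 0;                        // this pass records into cur
 *             for (long i = 0; i < nprev; i++) {
 *                 // scavenge prev[i]; any areas dirtied while doing so
 *                 // are appended to cur[ncur++] by the allocator
 *             }
 *         }
 *     }
 *
 * The real code additionally falls back to a full rescan of the
 * generation whenever a pass overflows NUM_NEW_AREAS.
 */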
/* Do one full scan of the new space generation. This is not enough to
 * complete the job as new objects may be added to the generation in
 * the process which are not scavenged. */
scavenge_newspace_generation_one_scan(generation_index_t generation)
           "/starting one full scan of newspace generation %d\n",
    for (i = 0; i < last_free_page; i++) {
        /* Note that this skips over open regions when it encounters them. */
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)
            && ((page_table[i].write_protected == 0)
                /* (This may be redundant as write_protected is now
                 * cleared before promotion.) */
                || (page_table[i].dont_move == 1))) {
            page_index_t last_page;

            /* The scavenge will start at the scan_start_offset of
             *
             * We need to find the full extent of this contiguous
             * block in case objects span pages.
             *
             * Now work forward until the end of this contiguous area
             * is found. A small area is preferred as there is a
             * better chance of its pages being write-protected. */
            for (last_page = i; ;last_page++) {
                /* If all pages are write-protected and movable,
                 * then no need to scavenge */
                all_wp=all_wp && page_table[last_page].write_protected &&
                    !page_table[last_page].dont_move;

                /* Check whether this is the last page in this
                 * contiguous block */
                if (page_ends_contiguous_block_p(last_page, generation))

            /* Do a limited check for write-protected pages. */
                sword_t nwords = (((uword_t)
                                   (page_table[last_page].bytes_used
                                    + npage_bytes(last_page-i)
                                    + page_table[i].scan_start_offset))
                new_areas_ignore_page = last_page;

                scavenge(page_scan_start(i), nwords);
           "/done with one full scan of newspace generation %d\n",

/* Do a complete scavenge of the newspace generation. */
scavenge_newspace_generation(generation_index_t generation)
    /* the new_areas array currently being written to by gc_alloc() */
    struct new_area (*current_new_areas)[] = &new_areas_1;
    size_t current_new_areas_index;

    /* the new_areas created by the previous scavenge cycle */
    struct new_area (*previous_new_areas)[] = NULL;
    size_t previous_new_areas_index;

    /* Flush the current regions updating the tables. */
    gc_alloc_update_all_page_tables();

    /* Turn on the recording of new areas by gc_alloc(). */
    new_areas = current_new_areas;
    new_areas_index = 0;

    /* Don't need to record new areas that get scavenged anyway during
     * scavenge_newspace_generation_one_scan. */
    record_new_objects = 1;

    /* Start with a full scavenge. */
    scavenge_newspace_generation_one_scan(generation);

    /* Record all new areas now. */
    record_new_objects = 2;

    /* Give a chance to weak hash tables to make other objects live.
     * FIXME: The algorithm implemented here for weak hash table gcing
     * is O(W^2+N) as Bruno Haible warns in
     * http://www.haible.de/bruno/papers/cs/weak/WeakDatastructures-writeup.html
     * see "Implementation 2". */
    scav_weak_hash_tables();

    /* Flush the current regions updating the tables. */
    gc_alloc_update_all_page_tables();

    /* Grab new_areas_index. */
    current_new_areas_index = new_areas_index;

           "The first scan is finished; current_new_areas_index=%d.\n",
           current_new_areas_index));*/

    while (current_new_areas_index > 0) {
        /* Move the current to the previous new areas */
        previous_new_areas = current_new_areas;
        previous_new_areas_index = current_new_areas_index;

        /* Scavenge all the areas in previous new areas. Any new areas
         * allocated are saved in current_new_areas. */

        /* Allocate an array for current_new_areas; alternating between
         * new_areas_1 and 2 */
        if (previous_new_areas == &new_areas_1)
            current_new_areas = &new_areas_2;
            current_new_areas = &new_areas_1;

        /* Set up for gc_alloc(). */
        new_areas = current_new_areas;
        new_areas_index = 0;

        /* Check whether previous_new_areas had overflowed. */
        if (previous_new_areas_index >= NUM_NEW_AREAS) {

            /* New areas of objects allocated have been lost so need to do a
             * full scan to be sure! If this becomes a problem try
             * increasing NUM_NEW_AREAS. */
            if (gencgc_verbose) {
                SHOW("new_areas overflow, doing full scavenge");

            /* Don't need to record new areas that get scavenged
             * anyway during scavenge_newspace_generation_one_scan. */
            record_new_objects = 1;

            scavenge_newspace_generation_one_scan(generation);

            /* Record all new areas now. */
            record_new_objects = 2;

            scav_weak_hash_tables();

            /* Flush the current regions updating the tables. */
            gc_alloc_update_all_page_tables();

            /* Work through previous_new_areas. */
            for (i = 0; i < previous_new_areas_index; i++) {
                page_index_t page = (*previous_new_areas)[i].page;
                size_t offset = (*previous_new_areas)[i].offset;
                size_t size = (*previous_new_areas)[i].size / N_WORD_BYTES;
                gc_assert((*previous_new_areas)[i].size % N_WORD_BYTES == 0);
                scavenge(page_address(page)+offset, size);

            scav_weak_hash_tables();

            /* Flush the current regions updating the tables. */
            gc_alloc_update_all_page_tables();

        current_new_areas_index = new_areas_index;

           "The re-scan has finished; current_new_areas_index=%d.\n",
           current_new_areas_index));*/

    /* Turn off recording of areas allocated by gc_alloc(). */
    record_new_objects = 0;

    /* Check that none of the write_protected pages in this generation
     * have been written to. */
    for (i = 0; i < page_table_pages; i++) {
        if (page_allocated_p(i)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)
            && (page_table[i].write_protected_cleared != 0)
            && (page_table[i].dont_move == 0)) {
            lose("write protected page %d written to in scavenge_newspace_generation\ngeneration=%d dont_move=%d\n",
                 i, generation, page_table[i].dont_move);
/* Un-write-protect all the pages in from_space. This is done at the
 * start of a GC else there may be many page faults while scavenging
 * the newspace (I've seen it drive the system time to 99%). These pages
 * would need to be unprotected anyway before unmapping in
 * free_oldspace; not sure what effect this has on paging.. */
unprotect_oldspace(void)
    void *region_addr = 0;
    void *page_addr = 0;
    uword_t region_bytes = 0;

    for (i = 0; i < last_free_page; i++) {
        if (page_allocated_p(i)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == from_space)) {

            /* Remove any write-protection. We should be able to rely
             * on the write-protect flag to avoid redundant calls. */
            if (page_table[i].write_protected) {
                page_table[i].write_protected = 0;
                page_addr = page_address(i);
                    region_addr = page_addr;
                    region_bytes = GENCGC_CARD_BYTES;
                } else if (region_addr + region_bytes == page_addr) {
                    /* Region continues. */
                    region_bytes += GENCGC_CARD_BYTES;
                    /* Unprotect previous region. */
                    os_protect(region_addr, region_bytes, OS_VM_PROT_ALL);
                    /* First page in new region. */
                    region_addr = page_addr;
                    region_bytes = GENCGC_CARD_BYTES;

    /* Unprotect last region. */
    os_protect(region_addr, region_bytes, OS_VM_PROT_ALL);
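
/* The loop above coalesces runs of adjacent protected pages so that a
 * single protection call covers a whole run instead of one call per
 * page. A minimal sketch of that batching pattern, assuming a
 * toy_unprotect(addr, len) primitive (hypothetical name, standing in
 * for os_protect with OS_VM_PROT_ALL):
 *
 *     static void toy_unprotect_runs(char **pages, int npages, long card) {
 *         char *run_start = NULL;
 *         long run_bytes = 0;
 *         for (int i = 0; i < npages; i++) {
 *             if (run_start == NULL) {                 // first page of a run
 *                 run_start = pages[i];
 *                 run_bytes = card;
 *             } else if (run_start + run_bytes == pages[i]) {
 *                 run_bytes += card;                   // page extends the run
 *             } else {
 *                 toy_unprotect(run_start, run_bytes); // flush finished run
 *                 run_start = pages[i];
 *                 run_bytes = card;
 *             }
 *         }
 *         if (run_start)
 *             toy_unprotect(run_start, run_bytes);     // flush the last run
 *     }
 */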
/* Work through all the pages and free any in from_space. This
 * assumes that all objects have been copied or promoted to an older
 * generation. Bytes_allocated and the generation bytes_allocated
 * counter are updated. The number of bytes freed is returned. */
    uword_t bytes_freed = 0;
    page_index_t first_page, last_page;

        /* Find a first page for the next region of pages. */
        while ((first_page < last_free_page)
               && (page_free_p(first_page)
                   || (page_table[first_page].bytes_used == 0)
                   || (page_table[first_page].gen != from_space)))

        if (first_page >= last_free_page)

        /* Find the last page of this region. */
        last_page = first_page;

            /* Free the page. */
            bytes_freed += page_table[last_page].bytes_used;
            generations[page_table[last_page].gen].bytes_allocated -=
                page_table[last_page].bytes_used;
            page_table[last_page].allocated = FREE_PAGE_FLAG;
            page_table[last_page].bytes_used = 0;
            /* Should already be unprotected by unprotect_oldspace(). */
            gc_assert(!page_table[last_page].write_protected);

        while ((last_page < last_free_page)
               && page_allocated_p(last_page)
               && (page_table[last_page].bytes_used != 0)
               && (page_table[last_page].gen == from_space));

#ifdef READ_PROTECT_FREE_PAGES
        os_protect(page_address(first_page),
                   npage_bytes(last_page-first_page),
        first_page = last_page;
    } while (first_page < last_free_page);

    bytes_allocated -= bytes_freed;

/* Print some information about a pointer at the given address. */
print_ptr(lispobj *addr)
    /* If addr is in the dynamic space then out the page information. */
    page_index_t pi1 = find_page_index((void*)addr);

        fprintf(stderr," %p: page %d alloc %d gen %d bytes_used %d offset %lu dont_move %d\n",
                page_table[pi1].allocated,
                page_table[pi1].gen,
                page_table[pi1].bytes_used,
                page_table[pi1].scan_start_offset,
                page_table[pi1].dont_move);
    fprintf(stderr," %x %x %x %x (%x) %x %x %x %x\n",

is_in_stack_space(lispobj ptr)
    /* For space verification: Pointers can be valid if they point
     * to a thread stack space. This would be faster if the thread
     * structures had page-table entries as if they were part of
     * the heap space. */
    for_each_thread(th) {
        if ((th->control_stack_start <= (lispobj *)ptr) &&
            (th->control_stack_end >= (lispobj *)ptr)) {
verify_space(lispobj *start, size_t words)
    int is_in_dynamic_space = (find_page_index((void*)start) != -1);
    int is_in_readonly_space =
        (READ_ONLY_SPACE_START <= (uword_t)start &&
         (uword_t)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));

        lispobj thing = *(lispobj *)start;

        if (is_lisp_pointer(thing)) {
            page_index_t page_index = find_page_index((void*)thing);
            sword_t to_readonly_space =
                (READ_ONLY_SPACE_START <= thing &&
                 thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
            sword_t to_static_space =
                (STATIC_SPACE_START <= thing &&
                 thing < SymbolValue(STATIC_SPACE_FREE_POINTER,0));

            /* Does it point to the dynamic space? */
            if (page_index != -1) {
                /* If it's within the dynamic space it should point to a used
                 * page. XX Could check the offset too. */
                if (page_allocated_p(page_index)
                    && (page_table[page_index].bytes_used == 0))
                    lose ("Ptr %p @ %p sees free page.\n", thing, start);
                /* Check that it doesn't point to a forwarding pointer! */
                if (*((lispobj *)native_pointer(thing)) == 0x01) {
                    lose("Ptr %p @ %p sees forwarding ptr.\n", thing, start);
                /* Check that it's not in the RO space as it would then be a
                 * pointer from the RO to the dynamic space. */
                if (is_in_readonly_space) {
                    lose("ptr to dynamic space %p from RO space %x\n",
                /* Does it point to a plausible object? This check slows
                 * it down a lot (so it's commented out).
                 *
                 * "a lot" is serious: it ate 50 minutes cpu time on
                 * my duron 950 before I came back from lunch and
                 *
                 * FIXME: Add a variable to enable this
                if (!possibly_valid_dynamic_space_pointer_s((lispobj *)thing, page_index, NULL)) {
                    lose("ptr %p to invalid object %p\n", thing, start);
                extern void funcallable_instance_tramp;
                /* Verify that it points to another valid space. */
                if (!to_readonly_space && !to_static_space
                    && (thing != (lispobj)&funcallable_instance_tramp)
                    && !is_in_stack_space(thing)) {
                    lose("Ptr %p @ %p sees junk.\n", thing, start);
            if (!(fixnump(thing))) {
                switch(widetag_of(*start)) {
                case SIMPLE_VECTOR_WIDETAG:
                case COMPLEX_WIDETAG:
                case SIMPLE_ARRAY_WIDETAG:
                case COMPLEX_BASE_STRING_WIDETAG:
#ifdef COMPLEX_CHARACTER_STRING_WIDETAG
                case COMPLEX_CHARACTER_STRING_WIDETAG:
                case COMPLEX_VECTOR_NIL_WIDETAG:
                case COMPLEX_BIT_VECTOR_WIDETAG:
                case COMPLEX_VECTOR_WIDETAG:
                case COMPLEX_ARRAY_WIDETAG:
                case CLOSURE_HEADER_WIDETAG:
                case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
                case VALUE_CELL_HEADER_WIDETAG:
                case SYMBOL_HEADER_WIDETAG:
                case CHARACTER_WIDETAG:
#if N_WORD_BITS == 64
                case SINGLE_FLOAT_WIDETAG:
                case UNBOUND_MARKER_WIDETAG:
                case INSTANCE_HEADER_WIDETAG:
                        sword_t ntotal = HeaderValue(thing);
                        lispobj layout = ((struct instance *)start)->slots[0];
                        nuntagged = ((struct layout *)
                                     native_pointer(layout))->n_untagged_slots;
                        verify_space(start + 1,
                                     ntotal - fixnum_value(nuntagged));
                case CODE_HEADER_WIDETAG:
                        lispobj object = *start;
                        sword_t nheader_words, ncode_words, nwords;
                        struct simple_fun *fheaderp;

                        code = (struct code *) start;

                        /* Check that it's not in the dynamic space.
                         * FIXME: Isn't it supposed to be OK for code
                         * objects to be in the dynamic space these days? */
                        /* It is for byte compiled code, but there's
                         * no byte compilation in SBCL anymore. */
                        if (is_in_dynamic_space
                            /* Only when enabled */
                            && verify_dynamic_code_check) {
                                   "/code object at %p in the dynamic space\n",

                        ncode_words = fixnum_word_value(code->code_size);
                        nheader_words = HeaderValue(object);
                        nwords = ncode_words + nheader_words;
                        nwords = CEILING(nwords, 2);
                        /* Scavenge the boxed section of the code data block */
                        verify_space(start + 1, nheader_words - 1);

                        /* Scavenge the boxed section of each function
                         * object in the code data block. */
                        fheaderl = code->entry_points;
                        while (fheaderl != NIL) {
                                (struct simple_fun *) native_pointer(fheaderl);
                            gc_assert(widetag_of(fheaderp->header) ==
                                      SIMPLE_FUN_HEADER_WIDETAG);
                            verify_space(&fheaderp->name, 1);
                            verify_space(&fheaderp->arglist, 1);
                            verify_space(&fheaderp->type, 1);
                            fheaderl = fheaderp->next;

                /* unboxed objects */
                case BIGNUM_WIDETAG:
#if N_WORD_BITS != 64
                case SINGLE_FLOAT_WIDETAG:
                case DOUBLE_FLOAT_WIDETAG:
#ifdef COMPLEX_LONG_FLOAT_WIDETAG
                case LONG_FLOAT_WIDETAG:
#ifdef COMPLEX_SINGLE_FLOAT_WIDETAG
                case COMPLEX_SINGLE_FLOAT_WIDETAG:
#ifdef COMPLEX_DOUBLE_FLOAT_WIDETAG
                case COMPLEX_DOUBLE_FLOAT_WIDETAG:
#ifdef COMPLEX_LONG_FLOAT_WIDETAG
                case COMPLEX_LONG_FLOAT_WIDETAG:
#ifdef SIMD_PACK_WIDETAG
                case SIMD_PACK_WIDETAG:
                case SIMPLE_BASE_STRING_WIDETAG:
#ifdef SIMPLE_CHARACTER_STRING_WIDETAG
                case SIMPLE_CHARACTER_STRING_WIDETAG:
                case SIMPLE_BIT_VECTOR_WIDETAG:
                case SIMPLE_ARRAY_NIL_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_FIXNUM_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
                case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
                case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG:
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
                case SIMPLE_ARRAY_FIXNUM_WIDETAG:
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
                case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
                case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
                case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG:
#ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
                case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG:
#ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
                case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG:
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
                case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
                case WEAK_POINTER_WIDETAG:
#ifdef NO_TLS_VALUE_MARKER_WIDETAG
                case NO_TLS_VALUE_MARKER_WIDETAG:
                    count = (sizetab[widetag_of(*start)])(start);
                    lose("Unhandled widetag %p at %p\n",
                         widetag_of(*start), start);
/* FIXME: It would be nice to make names consistent so that
 * foo_size meant size *in* *bytes* instead of size in some
 * arbitrary units. (Yes, this caused a bug, how did you guess?:-)
 * Some counts of lispobjs are called foo_count; it might be good
 * to grep for all foo_size and rename the appropriate ones to
    sword_t read_only_space_size =
        (lispobj *)SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0)
        - (lispobj *)READ_ONLY_SPACE_START;
    sword_t static_space_size =
        (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0)
        - (lispobj *)STATIC_SPACE_START;
    for_each_thread(th) {
        sword_t binding_stack_size =
            (lispobj *)get_binding_stack_pointer(th)
            - (lispobj *)th->binding_stack_start;
        verify_space(th->binding_stack_start, binding_stack_size);
    verify_space((lispobj *)READ_ONLY_SPACE_START, read_only_space_size);
    verify_space((lispobj *)STATIC_SPACE_START, static_space_size);

verify_generation(generation_index_t generation)
    for (i = 0; i < last_free_page; i++) {
        if (page_allocated_p(i)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)) {
            page_index_t last_page;

            /* This should be the start of a contiguous block */
            gc_assert(page_starts_contiguous_block_p(i));

            /* Need to find the full extent of this contiguous block in case
               objects span pages. */

            /* Now work forward until the end of this contiguous area is
            for (last_page = i; ;last_page++)
                /* Check whether this is the last page in this contiguous
                if (page_ends_contiguous_block_p(last_page, generation))

            verify_space(page_address(i),
                         (page_table[last_page].bytes_used
                          + npage_bytes(last_page-i)))

/* Check that all the free space is zero filled. */
verify_zero_fill(void)
    for (page = 0; page < last_free_page; page++) {
        if (page_free_p(page)) {
            /* The whole page should be zero filled. */
            sword_t *start_addr = (sword_t *)page_address(page);
            sword_t size = 1024;
            for (i = 0; i < size; i++) {
                if (start_addr[i] != 0) {
                    lose("free page not zero at %x\n", start_addr + i);
            sword_t free_bytes = GENCGC_CARD_BYTES - page_table[page].bytes_used;
            if (free_bytes > 0) {
                sword_t *start_addr = (sword_t *)((uword_t)page_address(page)
                                                  + page_table[page].bytes_used);
                sword_t size = free_bytes / N_WORD_BYTES;
                for (i = 0; i < size; i++) {
                    if (start_addr[i] != 0) {
                        lose("free region not zero at %x\n", start_addr + i);

/* External entry point for verify_zero_fill */
gencgc_verify_zero_fill(void)
    /* Flush the alloc regions updating the tables. */
    gc_alloc_update_all_page_tables();
    SHOW("verifying zero fill");

verify_dynamic_space(void)
    generation_index_t i;

    for (i = 0; i <= HIGHEST_NORMAL_GENERATION; i++)
        verify_generation(i);

    if (gencgc_enable_verify_zero_fill)

/* Write-protect all the dynamic boxed pages in the given generation. */
write_protect_generation_pages(generation_index_t generation)
    gc_assert(generation < SCRATCH_GENERATION);

    for (start = 0; start < last_free_page; start++) {
        if (protect_page_p(start, generation)) {
            /* Note the page as protected in the page tables. */
            page_table[start].write_protected = 1;

            for (last = start + 1; last < last_free_page; last++) {
                if (!protect_page_p(last, generation))
                page_table[last].write_protected = 1;

            page_start = (void *)page_address(start);

            os_protect(page_start,
                       npage_bytes(last - start),
                       OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);

    if (gencgc_verbose > 1) {
               "/write protected %d of %d pages in generation %d\n",
               count_write_protect_generation_pages(generation),
               count_generation_pages(generation),
#if defined(LISP_FEATURE_SB_THREAD) && (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
preserve_context_registers (os_context_t *c)
    /* On Darwin the signal context isn't a contiguous block of memory,
     * so just preserve_pointering its contents won't be sufficient.
#if defined(LISP_FEATURE_DARWIN)||defined(LISP_FEATURE_WIN32)
#if defined LISP_FEATURE_X86
    preserve_pointer((void*)*os_context_register_addr(c,reg_EAX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_ECX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_EDX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_EBX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_ESI));
    preserve_pointer((void*)*os_context_register_addr(c,reg_EDI));
    preserve_pointer((void*)*os_context_pc_addr(c));
#elif defined LISP_FEATURE_X86_64
    preserve_pointer((void*)*os_context_register_addr(c,reg_RAX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_RCX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_RDX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_RBX));
    preserve_pointer((void*)*os_context_register_addr(c,reg_RSI));
    preserve_pointer((void*)*os_context_register_addr(c,reg_RDI));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R8));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R9));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R10));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R11));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R12));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R13));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R14));
    preserve_pointer((void*)*os_context_register_addr(c,reg_R15));
    preserve_pointer((void*)*os_context_pc_addr(c));
#error "preserve_context_registers needs to be tweaked for non-x86 Darwin"
#if !defined(LISP_FEATURE_WIN32)
    for(ptr = ((void **)(c+1))-1; ptr>=(void **)c; ptr--) {
        preserve_pointer(*ptr);

move_pinned_pages_to_newspace()
    /* scavenge() will evacuate all oldspace pages, but no newspace
     * pages. Pinned pages are precisely those pages which must not
     * be evacuated, so move them to newspace directly. */

    for (i = 0; i < last_free_page; i++) {
        if (page_table[i].dont_move &&
            /* dont_move is cleared lazily, so validate the space as well. */
            page_table[i].gen == from_space) {
            if (page_table[i].dontmove_dwords && do_wipe_p) {
                // do not move to newspace after all, this will be word-wiped
            page_table[i].gen = new_space;
            /* And since we're moving the pages wholesale, also adjust
             * the generation allocation counters. */
            generations[new_space].bytes_allocated += page_table[i].bytes_used;
            generations[from_space].bytes_allocated -= page_table[i].bytes_used;
3582 /* Garbage collect a generation. If raise is 0 then the remains of the
3583 * generation are not raised to the next generation. */
3585 garbage_collect_generation(generation_index_t generation
, int raise
)
3587 uword_t bytes_freed
;
3589 uword_t static_space_size
;
3592 gc_assert(generation
<= HIGHEST_NORMAL_GENERATION
);
3594 /* The oldest generation can't be raised. */
3595 gc_assert((generation
!= HIGHEST_NORMAL_GENERATION
) || (raise
== 0));
3597 /* Check if weak hash tables were processed in the previous GC. */
3598 gc_assert(weak_hash_tables
== NULL
);
3600 /* Initialize the weak pointer list. */
3601 weak_pointers
= NULL
;
3603 /* When a generation is not being raised it is transported to a
3604 * temporary generation (NUM_GENERATIONS), and lowered when
3605 * done. Set up this new generation. There should be no pages
3606 * allocated to it yet. */
3608 gc_assert(generations
[SCRATCH_GENERATION
].bytes_allocated
== 0);
3611 /* Set the global src and dest. generations */
3612 from_space
= generation
;
3614 new_space
= generation
+1;
3616 new_space
= SCRATCH_GENERATION
;
3618 /* Change to a new space for allocation, resetting the alloc_start_page */
3619 gc_alloc_generation
= new_space
;
3620 generations
[new_space
].alloc_start_page
= 0;
3621 generations
[new_space
].alloc_unboxed_start_page
= 0;
3622 generations
[new_space
].alloc_large_start_page
= 0;
3623 generations
[new_space
].alloc_large_unboxed_start_page
= 0;
3625 /* Before any pointers are preserved, the dont_move flags on the
3626 * pages need to be cleared. */
3627 for (i
= 0; i
< last_free_page
; i
++)
3628 if(page_table
[i
].gen
==from_space
) {
3629 page_table
[i
].dont_move
= 0;
3630 gc_assert(page_table
[i
].dontmove_dwords
== NULL
);
3633 /* Un-write-protect the old-space pages. This is essential for the
3634 * promoted pages as they may contain pointers into the old-space
3635 * which need to be scavenged. It also helps avoid unnecessary page
3636 * faults as forwarding pointers are written into them. They need to
3637 * be un-protected anyway before unmapping later. */
3638 unprotect_oldspace();
    /* Scavenge the stacks' conservative roots. */

    /* there are potentially two stacks for each thread: the main
     * stack, which may contain Lisp pointers, and the alternate stack.
     * We don't ever run Lisp code on the altstack, but it may
     * host a sigcontext with lisp objects in it */

    /* what we need to do: (1) find the stack pointer for the main
     * stack; scavenge it (2) find the interrupt context on the
     * alternate stack that might contain lisp values, and scavenge
     * that */

    /* we assume that none of the preceding applies to the thread that
     * initiates GC. If you ever call GC from inside an altstack
     * handler, you will lose. */

#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    /* And if we're saving a core, there's no point in being conservative. */
    if (conservative_stack) {
        for_each_thread(th) {
            void **ptr;
            void **esp=(void **)-1;
            if (th->state == STATE_DEAD)
                continue;
# if defined(LISP_FEATURE_SB_SAFEPOINT)
            /* Conservative collect_garbage is always invoked with a
             * foreign C call or an interrupt handler on top of every
             * existing thread, so the stored SP in each thread
             * structure is valid, no matter which thread we are looking
             * at. For threads that were running Lisp code, the pitstop
             * and edge functions maintain this value within the
             * interrupt or exception handler. */
            esp = os_get_csp(th);
            assert_on_stack(th, esp);

            /* In addition to pointers on the stack, also preserve the
             * return PC, the only value from the context that we need
             * in addition to the SP. The return PC gets saved by the
             * foreign call wrapper, and removed from the control stack
             * into a register. */
            preserve_pointer(th->pc_around_foreign_call);

            /* And on platforms with interrupts: scavenge ctx registers. */

            /* Disabled on Windows, because it does not have an explicit
             * stack of `interrupt_contexts'. The reported CSP has been
             * chosen so that the current context on the stack is
             * covered by the stack scan. See also set_csp_from_context(). */
# ifndef LISP_FEATURE_WIN32
            if (th != arch_os_get_current_thread()) {
                long k = fixnum_value(
                    SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
                while (k > 0)
                    preserve_context_registers(th->interrupt_contexts[--k]);
            }
# endif
# elif defined(LISP_FEATURE_SB_THREAD)
            sword_t i,free;
            if(th==arch_os_get_current_thread()) {
                /* Somebody is going to burn in hell for this, but casting
                 * it in two steps shuts gcc up about strict aliasing. */
                esp = (void **)((void *)&raise);
            } else {
                void **esp1;
                free=fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
                for(i=free-1;i>=0;i--) {
                    os_context_t *c=th->interrupt_contexts[i];
                    esp1 = (void **) *os_context_register_addr(c,reg_SP);
                    if (esp1>=(void **)th->control_stack_start &&
                        esp1<(void **)th->control_stack_end) {
                        if(esp1<esp) esp=esp1;
                        preserve_context_registers(c);
                    }
                }
            }
# else
            esp = (void **)((void *)&raise);
# endif
            if (!esp || esp == (void*) -1)
                lose("garbage_collect: no SP known for thread %x (OS %x)",
                     th, th->os_thread);
            for (ptr = ((void **)th->control_stack_end)-1; ptr >= esp; ptr--) {
                preserve_pointer(*ptr);
            }
        }
    }
#else
    /* Non-x86oid systems don't have "conservative roots" as such, but
     * the same mechanism is used for objects pinned for use by alien
     * code. */
    for_each_thread(th) {
        lispobj pin_list = SymbolTlValue(PINNED_OBJECTS,th);
        while (pin_list != NIL) {
            struct cons *list_entry =
                (struct cons *)native_pointer(pin_list);
            preserve_pointer(list_entry->car);
            pin_list = list_entry->cdr;
        }
    }
#endif

    if (gencgc_verbose > 1) {
        sword_t num_dont_move_pages = count_dont_move_pages();
        FSHOW((stderr,
               "/non-movable pages due to conservative pointers = %d (%d bytes)\n",
               num_dont_move_pages,
               npage_bytes(num_dont_move_pages)));
    }

    /* Now that all of the pinned (dont_move) pages are known, and
     * before we start to scavenge (and thus relocate) objects,
     * relocate the pinned pages to newspace, so that the scavenger
     * will not attempt to relocate their contents. */
    move_pinned_pages_to_newspace();
    /* Scavenge all the rest of the roots. */

#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
    /*
     * If not x86, we need to scavenge the interrupt context(s) and the
     * control stack.
     */
    for_each_thread(th) {
        scavenge_interrupt_contexts(th);
        scavenge_control_stack(th);
    }

# ifdef LISP_FEATURE_SB_SAFEPOINT
    /* In this case, scrub all stacks right here from the GCing thread
     * instead of doing what the comment below says. Suboptimal, but
     * easier. */
    for_each_thread(th)
        scrub_thread_control_stack(th);
# else
    /* Scrub the unscavenged control stack space, so that we can't run
     * into any stale pointers in a later GC (this is done by the
     * stop-for-gc handler in the other threads). */
    scrub_control_stack();
# endif
#endif

    /* Scavenge the Lisp functions of the interrupt handlers, taking
     * care to avoid SIG_DFL and SIG_IGN. */
    for (i = 0; i < NSIG; i++) {
        union interrupt_handler handler = interrupt_handlers[i];
        if (!ARE_SAME_HANDLER(handler.c, SIG_IGN) &&
            !ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
            scavenge((lispobj *)(interrupt_handlers + i), 1);
        }
    }
    /* Scavenge the binding stacks. */
    for_each_thread(th) {
        sword_t len = (lispobj *)get_binding_stack_pointer(th) -
            th->binding_stack_start;
        scavenge((lispobj *) th->binding_stack_start,len);
#ifdef LISP_FEATURE_SB_THREAD
        /* do the tls as well */
        len=(SymbolValue(FREE_TLS_INDEX,0) >> WORD_SHIFT) -
            (sizeof (struct thread))/(sizeof (lispobj));
        scavenge((lispobj *) (th+1),len);
#endif
    }

    /* The original CMU CL code had scavenge-read-only-space code
     * controlled by the Lisp-level variable
     * *SCAVENGE-READ-ONLY-SPACE*. It was disabled by default, and it
     * wasn't documented under what circumstances it was useful or
     * safe to turn it on, so it's been turned off in SBCL. If you
     * want/need this functionality, and can test and document it,
     * please submit a patch. */
#if 0
    if (SymbolValue(SCAVENGE_READ_ONLY_SPACE) != NIL) {
        uword_t read_only_space_size =
            (lispobj *)SymbolValue(READ_ONLY_SPACE_FREE_POINTER) -
            (lispobj *)READ_ONLY_SPACE_START;
        FSHOW((stderr,
               "/scavenge read only space: %d bytes\n",
               read_only_space_size * sizeof(lispobj)));
        scavenge( (lispobj *) READ_ONLY_SPACE_START, read_only_space_size);
    }
#endif

    /* Scavenge static space. */
    static_space_size =
        (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0) -
        (lispobj *)STATIC_SPACE_START;
    if (gencgc_verbose > 1) {
        FSHOW((stderr,
               "/scavenge static space: %d bytes\n",
               static_space_size * sizeof(lispobj)));
    }
    scavenge( (lispobj *) STATIC_SPACE_START, static_space_size);

    /* All generations but the generation being GCed need to be
     * scavenged. The new_space generation needs special handling as
     * objects may be moved in - it is handled separately below. */
    scavenge_generations(generation+1, PSEUDO_STATIC_GENERATION);
    scavenge_pages_with_conservative_pointers_to_them_protected_objects_only();

    /* Finally scavenge the new_space generation. Keep going until no
     * more objects are moved into the new generation */
    scavenge_newspace_generation(new_space);

    /* FIXME: I tried reenabling this check when debugging unrelated
     * GC weirdness ca. sbcl-0.6.12.45, and it failed immediately.
     * Since the current GC code seems to work well, I'm guessing that
     * this debugging code is just stale, but I haven't tried to
     * figure it out. It should be figured out and then either made to
     * work or just deleted. */

#define RESCAN_CHECK 0
#if RESCAN_CHECK
    /* As a check re-scavenge the newspace once; no new objects should
     * be found. */
    {
        os_vm_size_t old_bytes_allocated = bytes_allocated;
        os_vm_size_t bytes_allocated;

        /* Start with a full scavenge. */
        scavenge_newspace_generation_one_scan(new_space);

        /* Flush the current regions, updating the tables. */
        gc_alloc_update_all_page_tables();

        bytes_allocated = bytes_allocated - old_bytes_allocated;

        if (bytes_allocated != 0) {
            lose("Rescan of new_space allocated %d more bytes.\n",
                 bytes_allocated);
        }
    }
#endif

    scan_weak_hash_tables();
    scan_weak_pointers();

    /* Flush the current regions, updating the tables. */
    gc_alloc_update_all_page_tables();

    /* Free the pages in oldspace, but not those marked dont_move. */
    bytes_freed = free_oldspace();

    /* If the GC is not raising the age then lower the generation back
     * to its normal generation number */
    if (!raise) {
        for (i = 0; i < last_free_page; i++)
            if ((page_table[i].bytes_used != 0)
                && (page_table[i].gen == SCRATCH_GENERATION))
                page_table[i].gen = generation;
        gc_assert(generations[generation].bytes_allocated == 0);
        generations[generation].bytes_allocated =
            generations[SCRATCH_GENERATION].bytes_allocated;
        generations[SCRATCH_GENERATION].bytes_allocated = 0;
    }

    /* Reset the alloc_start_page for generation. */
    generations[generation].alloc_start_page = 0;
    generations[generation].alloc_unboxed_start_page = 0;
    generations[generation].alloc_large_start_page = 0;
    generations[generation].alloc_large_unboxed_start_page = 0;

    if (generation >= verify_gens) {
        if (gencgc_verbose) {
            SHOW("verifying");
        }
        verify_gc();
        verify_dynamic_space();
    }

    /* Set the new gc trigger for the GCed generation. */
    generations[generation].gc_trigger =
        generations[generation].bytes_allocated
        + generations[generation].bytes_consed_between_gc;

    if (raise)
        generations[generation].num_gc = 0;
    else
        ++generations[generation].num_gc;
}
/* Update last_free_page, then SymbolValue(ALLOCATION_POINTER). */
sword_t
update_dynamic_space_free_pointer(void)
{
    page_index_t last_page = -1, i;

    for (i = 0; i < last_free_page; i++)
        if (page_allocated_p(i) && (page_table[i].bytes_used != 0))
            last_page = i;

    last_free_page = last_page+1;

    set_alloc_pointer((lispobj)(page_address(last_free_page)));
    return 0; /* dummy value: return something ... */
}
static void
remap_page_range (page_index_t from, page_index_t to)
{
    /* There's a mysterious Solaris/x86 problem with using mmap
     * tricks for memory zeroing. See sbcl-devel thread
     * "Re: patch: standalone executable redux".
     */
#if defined(LISP_FEATURE_SUNOS)
    zero_and_mark_pages(from, to);
#else
    const page_index_t
            release_granularity = gencgc_release_granularity/GENCGC_CARD_BYTES,
                   release_mask = release_granularity-1,
                            end = to+1,
                   aligned_from = (from+release_mask)&~release_mask,
                    aligned_end = (end&~release_mask);

    if (aligned_from < aligned_end) {
        zero_pages_with_mmap(aligned_from, aligned_end-1);
        if (aligned_from != from)
            zero_and_mark_pages(from, aligned_from-1);
        if (aligned_end != end)
            zero_and_mark_pages(aligned_end, end-1);
    } else {
        zero_and_mark_pages(from, to);
    }
#endif
}
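
/* Illustrative arithmetic (added note, not from the original source),
 * assuming GENCGC_CARD_BYTES = 4096 and gencgc_release_granularity = 32768:
 * release_granularity is then 8 pages and release_mask is 7, so a request
 * to remap pages 3..20 (end = 21) computes aligned_from = 8 and
 * aligned_end = 16. Pages 8..15 are handed back to the OS via
 * zero_pages_with_mmap(), while the unaligned remainders 3..7 and 16..20
 * are zeroed in place with zero_and_mark_pages(). */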
static void
remap_free_pages (page_index_t from, page_index_t to, int forcibly)
{
    page_index_t first_page, last_page;

    if (forcibly)
        return remap_page_range(from, to);

    for (first_page = from; first_page <= to; first_page++) {
        if (page_allocated_p(first_page) ||
            (page_table[first_page].need_to_zero == 0))
            continue;

        last_page = first_page + 1;
        while (page_free_p(last_page) &&
               (last_page <= to) &&
               (page_table[last_page].need_to_zero == 1))
            last_page++;

        remap_page_range(first_page, last_page-1);

        first_page = last_page;
    }
}
generation_index_t small_generation_limit = 1;

/* GC all generations newer than last_gen, raising the objects in each
 * to the next older generation - we finish when all generations below
 * last_gen are empty. Then if last_gen is due for a GC, or if
 * last_gen==NUM_GENERATIONS (the scratch generation? eh?) we GC that
 * too. The valid range for last_gen is: 0,1,...,NUM_GENERATIONS.
 *
 * We stop collecting at gencgc_oldest_gen_to_gc, even if this is less than
 * last_gen (oh, and note that by default it is NUM_GENERATIONS-1) */
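/* Usage note (added, not part of the original source): a routine nursery
 * collection is collect_garbage(0); gc_and_save() near the end of this
 * file passes HIGHEST_NORMAL_GENERATION+1 so that every generation up to
 * gencgc_oldest_gen_to_gc is collected. Values above that are clamped to
 * a level 0 GC by the check at the top of the function. */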
void
collect_garbage(generation_index_t last_gen)
{
    generation_index_t gen = 0, i;
    int raise, more = 0;
    int gen_to_wp;
    /* The largest value of last_free_page seen since the time
     * remap_free_pages was called. */
    static page_index_t high_water_mark = 0;

    FSHOW((stderr, "/entering collect_garbage(%d)\n", last_gen));
    log_generation_stats(gc_logfile, "=== GC Start ===");

    if (last_gen > HIGHEST_NORMAL_GENERATION+1) {
        FSHOW((stderr,
               "/collect_garbage: last_gen = %d, doing a level 0 GC\n",
               last_gen));
        last_gen = 0;
    }

    /* Flush the alloc regions updating the tables. */
    gc_alloc_update_all_page_tables();

    /* Verify the new objects created by Lisp code. */
    if (pre_verify_gen_0) {
        FSHOW((stderr, "pre-checking generation 0\n"));
        verify_generation(0);
    }

    if (gencgc_verbose > 1)
        print_generation_stats();

    do {
        /* Collect the generation. */

        if (more || (gen >= gencgc_oldest_gen_to_gc)) {
            /* Never raise the oldest generation. Never raise the extra generation
             * collected due to more-flag. */
            raise = 0;
            more = 0;
        } else {
            raise =
                (gen < last_gen)
                || (generations[gen].num_gc >= generations[gen].number_of_gcs_before_promotion);
            /* If we would not normally raise this one, but we're
             * running low on space in comparison to the object-sizes
             * we've been seeing, raise it and collect the next one
             * too. */
            if (!raise && gen == last_gen) {
                more = (2*large_allocation) >= (dynamic_space_size - bytes_allocated);
                raise = more;
            }
        }

        if (gencgc_verbose > 1) {
            FSHOW((stderr,
                   "starting GC of generation %d with raise=%d alloc=%d trig=%d GCs=%d\n",
                   gen,
                   raise,
                   generations[gen].bytes_allocated,
                   generations[gen].gc_trigger,
                   generations[gen].num_gc));
        }

        /* If an older generation is being filled, then update its
         * memory age. */
        if (raise) {
            generations[gen+1].cum_sum_bytes_allocated +=
                generations[gen+1].bytes_allocated;
        }

        garbage_collect_generation(gen, raise);

        /* Reset the memory age cum_sum. */
        generations[gen].cum_sum_bytes_allocated = 0;

        if (gencgc_verbose > 1) {
            FSHOW((stderr, "GC of generation %d finished:\n", gen));
            print_generation_stats();
        }

        gen++;
    } while ((gen <= gencgc_oldest_gen_to_gc)
             && ((gen < last_gen)
                 || more
                 || (raise
                     && (generations[gen].bytes_allocated
                         > generations[gen].gc_trigger)
                     && (generation_average_age(gen)
                         > generations[gen].minimum_age_before_gc))));

    /* Now if gen-1 was raised all generations before gen are empty.
     * If it wasn't raised then all generations before gen-1 are empty.
     *
     * Now objects within this gen's pages cannot point to younger
     * generations unless they are written to. This can be exploited
     * by write-protecting the pages of gen; then when younger
     * generations are GCed only the pages which have been written
     * need scanning. */
    if (raise)
        gen_to_wp = gen;
    else
        gen_to_wp = gen - 1;

    /* There's not much point in WPing pages in generation 0 as it is
     * never scavenged (except promoted pages). */
    if ((gen_to_wp > 0) && enable_page_protection) {
        /* Check that they are all empty. */
        for (i = 0; i < gen_to_wp; i++) {
            if (generations[i].bytes_allocated)
                lose("trying to write-protect gen. %d when gen. %d nonempty\n",
                     gen_to_wp, i);
        }
        write_protect_generation_pages(gen_to_wp);
    }

    /* Set gc_alloc() back to generation 0. The current regions should
     * be flushed after the above GCs. */
    gc_assert((boxed_region.free_pointer - boxed_region.start_addr) == 0);
    gc_alloc_generation = 0;

    /* Save the high-water mark before updating last_free_page */
    if (last_free_page > high_water_mark)
        high_water_mark = last_free_page;

    update_dynamic_space_free_pointer();

    /* Update auto_gc_trigger. Make sure we trigger the next GC before
     * running out of heap! */
    if (bytes_consed_between_gcs <= (dynamic_space_size - bytes_allocated))
        auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
    else
        auto_gc_trigger = bytes_allocated + (dynamic_space_size - bytes_allocated)/2;

    if(gencgc_verbose)
        fprintf(stderr,"Next gc when %"OS_VM_SIZE_FMT" bytes have been consed\n",
                auto_gc_trigger);

    /* If we did a big GC (arbitrarily defined as gen > 1), release memory
     * back to the OS.
     */
    if (gen > small_generation_limit) {
        if (last_free_page > high_water_mark)
            high_water_mark = last_free_page;
        remap_free_pages(0, high_water_mark, 0);
        high_water_mark = 0;
    }

    large_allocation = 0;

    log_generation_stats(gc_logfile, "=== GC End ===");
    SHOW("returning from collect_garbage");
}
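
/* Worked example (added note, not from the original source): with a 1 GB
 * dynamic space and the default bytes_consed_between_gcs of 5% of that
 * (about 51 MB, see gc_init() below), a collection that finishes with
 * 200 MB still allocated sets auto_gc_trigger to roughly 251 MB, so the
 * next GC is requested after about one nursery's worth of further
 * allocation. Only when less than bytes_consed_between_gcs remains free
 * does the trigger fall back to the halfway point of the remaining room. */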
/* This is called by Lisp PURIFY when it is finished. All live objects
 * will have been moved to the RO and Static heaps. The dynamic space
 * will need a full re-initialization. We don't bother having Lisp
 * PURIFY flush the current gc_alloc() region, as the page_tables are
 * re-initialized, and every page is zeroed to be sure. */
void
gc_free_heap(void)
{
    page_index_t page, last_page;

    if (gencgc_verbose > 1) {
        SHOW("entering gc_free_heap");
    }

    for (page = 0; page < page_table_pages; page++) {
        /* Skip free pages which should already be zero filled. */
        if (page_allocated_p(page)) {
            void *page_start;
            for (last_page = page;
                 (last_page < page_table_pages) && page_allocated_p(last_page);
                 last_page++) {
                /* Mark the page free. The other slots are assumed invalid
                 * when it is a FREE_PAGE_FLAG and bytes_used is 0 and it
                 * should not be write-protected -- except that the
                 * generation is used for the current region but it sets
                 * them to 0 anyway. */
                page_table[last_page].allocated = FREE_PAGE_FLAG;
                page_table[last_page].bytes_used = 0;
                page_table[last_page].write_protected = 0;
            }

#ifndef LISP_FEATURE_WIN32 /* Pages already zeroed on win32? Not sure
                            * about this change. */
            page_start = (void *)page_address(page);
            os_protect(page_start, npage_bytes(last_page-page), OS_VM_PROT_ALL);
            remap_free_pages(page, last_page-1, 1);
            page = last_page-1;
#endif
        } else if (gencgc_zero_check_during_free_heap) {
            /* Double-check that the page is zero filled. */
            sword_t *page_start;
            page_index_t i;
            gc_assert(page_free_p(page));
            gc_assert(page_table[page].bytes_used == 0);
            page_start = (sword_t *)page_address(page);
            for (i=0; i<GENCGC_CARD_BYTES/sizeof(sword_t); i++) {
                if (page_start[i] != 0) {
                    lose("free region not zero at %x\n", page_start + i);
                }
            }
        }
    }

    bytes_allocated = 0;

    /* Initialize the generations. */
    for (page = 0; page < NUM_GENERATIONS; page++) {
        generations[page].alloc_start_page = 0;
        generations[page].alloc_unboxed_start_page = 0;
        generations[page].alloc_large_start_page = 0;
        generations[page].alloc_large_unboxed_start_page = 0;
        generations[page].bytes_allocated = 0;
        generations[page].gc_trigger = 2000000;
        generations[page].num_gc = 0;
        generations[page].cum_sum_bytes_allocated = 0;
    }

    if (gencgc_verbose > 1)
        print_generation_stats();

    /* Initialize gc_alloc(). */
    gc_alloc_generation = 0;

    gc_set_region_empty(&boxed_region);
    gc_set_region_empty(&unboxed_region);

    last_free_page = 0;
    set_alloc_pointer((lispobj)((char *)heap_base));

    if (verify_after_free_heap) {
        /* Check whether purify has left any bad pointers. */
        FSHOW((stderr, "checking after free_heap\n"));
        verify_gc();
    }
}
void
gc_init(void)
{
    page_index_t i;

#if defined(LISP_FEATURE_SB_SAFEPOINT)
    alloc_gc_page();
#endif

    /* Compute the number of pages needed for the dynamic space.
     * Dynamic space size should be aligned on page size. */
    page_table_pages = dynamic_space_size/GENCGC_CARD_BYTES;
    gc_assert(dynamic_space_size == npage_bytes(page_table_pages));

    /* Default nursery size to 5% of the total dynamic space size,
     * min 1Mb. */
    bytes_consed_between_gcs = dynamic_space_size/(os_vm_size_t)20;
    if (bytes_consed_between_gcs < (1024*1024))
        bytes_consed_between_gcs = 1024*1024;

    /* The page_table must be allocated using "calloc" to initialize
     * the page structures correctly. There used to be a separate
     * initialization loop (now commented out; see below) but that was
     * unnecessary and did hurt startup time. */
    page_table = calloc(page_table_pages, sizeof(struct page));
    gc_assert(page_table);

    gc_init_tables();
    scavtab[WEAK_POINTER_WIDETAG] = scav_weak_pointer;
    transother[SIMPLE_ARRAY_WIDETAG] = trans_boxed_large;

    heap_base = (void*)DYNAMIC_SPACE_START;

    /* The page structures are initialized implicitly when page_table
     * is allocated with "calloc" above. Formerly we had the following
     * explicit initialization here (comments converted to C99 style
     * for readability as C's block comments don't nest):
     *
     * // Initialize each page structure.
     * for (i = 0; i < page_table_pages; i++) {
     *     // Initialize all pages as free.
     *     page_table[i].allocated = FREE_PAGE_FLAG;
     *     page_table[i].bytes_used = 0;
     *
     *     // Pages are not write-protected at startup.
     *     page_table[i].write_protected = 0;
     * }
     *
     * Without this loop the image starts up much faster when dynamic
     * space is large -- which it is on 64-bit platforms already by
     * default -- and when "calloc" for large arrays is implemented
     * using copy-on-write of a page of zeroes -- which it is at least
     * on Linux. In this case the pages that page_table_pages is stored
     * in are mapped and cleared not before the corresponding part of
     * dynamic space is used. For example, this saves clearing 16 MB of
     * memory at startup if the page size is 4 KB and the size of
     * dynamic space is 4 GB.
     * FREE_PAGE_FLAG must be 0 for this to work correctly which is
     * asserted below: */
    {
        /* Compile time assertion: If triggered, declares an array
         * of dimension -1 forcing a syntax error. The intent of the
         * assignment is to avoid an "unused variable" warning. */
        char assert_free_page_flag_0[(FREE_PAGE_FLAG) ? -1 : 1];
        assert_free_page_flag_0[0] = assert_free_page_flag_0[0];
    }

    bytes_allocated = 0;

    /* Initialize the generations.
     *
     * FIXME: very similar to code in gc_free_heap(), should be shared */
    for (i = 0; i < NUM_GENERATIONS; i++) {
        generations[i].alloc_start_page = 0;
        generations[i].alloc_unboxed_start_page = 0;
        generations[i].alloc_large_start_page = 0;
        generations[i].alloc_large_unboxed_start_page = 0;
        generations[i].bytes_allocated = 0;
        generations[i].gc_trigger = 2000000;
        generations[i].num_gc = 0;
        generations[i].cum_sum_bytes_allocated = 0;
        /* the tune-able parameters */
        generations[i].bytes_consed_between_gc
            = bytes_consed_between_gcs/(os_vm_size_t)HIGHEST_NORMAL_GENERATION;
        generations[i].number_of_gcs_before_promotion = 1;
        generations[i].minimum_age_before_gc = 0.75;
    }

    /* Initialize gc_alloc. */
    gc_alloc_generation = 0;
    gc_set_region_empty(&boxed_region);
    gc_set_region_empty(&unboxed_region);

    last_free_page = 0;
}
/* Pick up the dynamic space from after a core load.
 *
 * The ALLOCATION_POINTER points to the end of the dynamic space.
 */

static void
gencgc_pickup_dynamic(void)
{
    page_index_t page = 0;
    void *alloc_ptr = (void *)get_alloc_pointer();
    lispobj *prev=(lispobj *)page_address(page);
    generation_index_t gen = PSEUDO_STATIC_GENERATION;

    bytes_allocated = 0;

    do {
        lispobj *first,*ptr= (lispobj *)page_address(page);

        if (!gencgc_partial_pickup || page_allocated_p(page)) {
            /* It is possible, though rare, for the saved page table
             * to contain free pages below alloc_ptr. */
            page_table[page].gen = gen;
            page_table[page].bytes_used = GENCGC_CARD_BYTES;
            page_table[page].large_object = 0;
            page_table[page].write_protected = 0;
            page_table[page].write_protected_cleared = 0;
            page_table[page].dont_move = 0;
            page_table[page].need_to_zero = 1;

            bytes_allocated += GENCGC_CARD_BYTES;
        }

        if (!gencgc_partial_pickup) {
            page_table[page].allocated = BOXED_PAGE_FLAG;
            first=gc_search_space(prev,(ptr+2)-prev,ptr);
            if(ptr == first)
                prev=ptr;
            page_table[page].scan_start_offset =
                page_address(page) - (void *)prev;
        }
        page++;
    } while (page_address(page) < alloc_ptr);

    last_free_page = page;

    generations[gen].bytes_allocated = bytes_allocated;

    gc_alloc_update_all_page_tables();
    write_protect_generation_pages(gen);
}

void
gc_initialize_pointers(void)
{
    gencgc_pickup_dynamic();
}
/* alloc(..) is the external interface for memory allocation. It
 * allocates to generation 0. It is not called from within the garbage
 * collector as it is only external uses that need the check for heap
 * size (GC trigger) and to disable the interrupts (interrupts are
 * always disabled during a GC).
 *
 * The vops that call alloc(..) assume that the returned space is zero-filled.
 * (E.g. the most significant word of a 2-word bignum in MOVE-FROM-UNSIGNED.)
 *
 * The check for a GC trigger is only performed when the current
 * region is full, so in most cases it's not needed. */
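
/* Illustrative example (added note, not from the original source): a
 * two-word cons cell arrives here as nbytes = 2*N_WORD_BYTES, which is
 * already a multiple of LOWTAG_MASK+1, so the alignment assertion in
 * general_alloc_internal() holds and the request is normally satisfied
 * by the fast path that just bumps region->free_pointer. */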
static inline lispobj *
general_alloc_internal(sword_t nbytes, int page_type_flag, struct alloc_region *region,
                       struct thread *thread)
{
#ifndef LISP_FEATURE_WIN32
    lispobj alloc_signal;
#endif
    void *new_obj;
    void *new_free_pointer;
    os_vm_size_t trigger_bytes = 0;

    gc_assert(nbytes>0);

    /* Check for alignment allocation problems. */
    gc_assert((((uword_t)region->free_pointer & LOWTAG_MASK) == 0)
              && ((nbytes & LOWTAG_MASK) == 0));

#if !(defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD))
    /* Must be inside a PA section. */
    gc_assert(get_pseudo_atomic_atomic(thread));
#endif

    if (nbytes > large_allocation)
        large_allocation = nbytes;

    /* maybe we can do this quickly ... */
    new_free_pointer = region->free_pointer + nbytes;
    if (new_free_pointer <= region->end_addr) {
        new_obj = (void*)(region->free_pointer);
        region->free_pointer = new_free_pointer;
        return(new_obj);        /* yup */
    }

    /* We don't want to count nbytes against auto_gc_trigger unless we
     * have to: it speeds up the tenuring of objects and slows down
     * allocation. However, unless we do so when allocating _very_
     * large objects we are in danger of exhausting the heap without
     * running sufficient GCs.
     */
    if (nbytes >= bytes_consed_between_gcs)
        trigger_bytes = nbytes;
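
    /* Worked example (added note, not from the original source): a single
     * 100 MB vector allocation, with bytes_consed_between_gcs at, say,
     * 50 MB, counts in full against auto_gc_trigger via trigger_bytes, so
     * the check below requests a GC even if very little else has been
     * consed since the previous collection. Smaller requests ignore
     * nbytes here and only trip the trigger once bytes_allocated itself
     * crosses it. */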
    /* we have to go the long way around, it seems. Check whether we
     * should GC in the near future
     */
    if (auto_gc_trigger && (bytes_allocated+trigger_bytes > auto_gc_trigger)) {
        /* Don't flood the system with interrupts if the need to gc is
         * already noted. This can happen for example when SUB-GC
         * allocates or after a gc triggered in a WITHOUT-GCING. */
        if (SymbolValue(GC_PENDING,thread) == NIL) {
            /* set things up so that GC happens when we finish the PA
             * section */
            SetSymbolValue(GC_PENDING,T,thread);
            if (SymbolValue(GC_INHIBIT,thread) == NIL) {
#ifdef LISP_FEATURE_SB_SAFEPOINT
                thread_register_gc_trigger();
#else
                set_pseudo_atomic_interrupted(thread);
#ifdef GENCGC_IS_PRECISE
                /* PPC calls alloc() from a trap or from pa_alloc(),
                 * look up the most context if it's from a trap. */
                {
                    os_context_t *context =
                        thread->interrupt_data->allocation_trap_context;
                    maybe_save_gc_mask_and_block_deferrables
                        (context ? os_context_sigmask_addr(context) : NULL);
                }
#else
                maybe_save_gc_mask_and_block_deferrables(NULL);
#endif
#endif
            }
        }
    }
    new_obj = gc_alloc_with_region(nbytes, page_type_flag, region, 0);

#ifndef LISP_FEATURE_WIN32
    /* for sb-prof, and not supported on Windows yet */
    alloc_signal = SymbolValue(ALLOC_SIGNAL,thread);
    if ((alloc_signal & FIXNUM_TAG_MASK) == 0) {
        if ((sword_t) alloc_signal <= 0) {
            SetSymbolValue(ALLOC_SIGNAL, T, thread);
            raise(SIGPROF);
        } else {
            SetSymbolValue(ALLOC_SIGNAL,
                           alloc_signal - (1 << N_FIXNUM_TAG_BITS),
                           thread);
        }
    }
#endif

    return (new_obj);
}

lispobj *
general_alloc(sword_t nbytes, int page_type_flag)
{
    struct thread *thread = arch_os_get_current_thread();
    /* Select correct region, and call general_alloc_internal with it.
     * For other than boxed allocation we must lock first, since the
     * region is shared. */
    if (BOXED_PAGE_FLAG & page_type_flag) {
#ifdef LISP_FEATURE_SB_THREAD
        struct alloc_region *region = (thread ? &(thread->alloc_region) : &boxed_region);
#else
        struct alloc_region *region = &boxed_region;
#endif
        return general_alloc_internal(nbytes, page_type_flag, region, thread);
    } else if (UNBOXED_PAGE_FLAG == page_type_flag) {
        lispobj *obj;
        gc_assert(0 == thread_mutex_lock(&allocation_lock));
        obj = general_alloc_internal(nbytes, page_type_flag, &unboxed_region, thread);
        gc_assert(0 == thread_mutex_unlock(&allocation_lock));
        return obj;
    } else {
        lose("bad page type flag: %d", page_type_flag);
    }
}
lispobj AMD64_SYSV_ABI *
alloc(sword_t nbytes)
{
#ifdef LISP_FEATURE_SB_SAFEPOINT_STRICTLY
    struct thread *self = arch_os_get_current_thread();
    int was_pseudo_atomic = get_pseudo_atomic_atomic(self);
    if (!was_pseudo_atomic)
        set_pseudo_atomic_atomic(self);
#else
    gc_assert(get_pseudo_atomic_atomic(arch_os_get_current_thread()));
#endif

    lispobj *result = general_alloc(nbytes, BOXED_PAGE_FLAG);

#ifdef LISP_FEATURE_SB_SAFEPOINT_STRICTLY
    if (!was_pseudo_atomic)
        clear_pseudo_atomic_atomic(self);
#endif

    return result;
}

/*
 * shared support for the OS-dependent signal handlers which
 * catch GENCGC-related write-protect violations
 */
void unhandled_sigmemoryfault(void* addr);

/* Depending on which OS we're running under, different signals might
 * be raised for a violation of write protection in the heap. This
 * function factors out the common generational GC magic which needs
 * to be invoked in this case, and should be called from whatever signal
 * handler is appropriate for the OS we're running under.
 *
 * Return true if this signal is a normal generational GC thing that
 * we were able to handle, or false if it was abnormal and control
 * should fall through to the general SIGSEGV/SIGBUS/whatever logic.
 *
 * We have two control flags for this: one causes us to ignore faults
 * on unprotected pages completely, and the second complains to stderr
 * but allows us to continue without losing.
 */
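
/* Illustrative sketch (added note, not from the original source): an
 * OS-specific SIGSEGV/SIGBUS handler is expected to try this function
 * first and only fall through to the general fault machinery when it
 * returns false, roughly:
 *
 * // hypothetical handler; the surrounding names are made up
 * void handle_memory_fault(void *fault_addr)
 * {
 *     if (gencgc_handle_wp_violation(fault_addr))
 *         return;               // write barrier hit: page unprotected, retry
 *     // ... otherwise hand the fault to the generic SIGSEGV/SIGBUS logic ...
 * }
 */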
extern boolean ignore_memoryfaults_on_unprotected_pages;
boolean ignore_memoryfaults_on_unprotected_pages = 0;

extern boolean continue_after_memoryfault_on_unprotected_pages;
boolean continue_after_memoryfault_on_unprotected_pages = 0;

int
gencgc_handle_wp_violation(void* fault_addr)
{
    page_index_t page_index = find_page_index(fault_addr);

    FSHOW((stderr, "heap WP violation? fault_addr=%x, page_index=%d\n",
           fault_addr, page_index));

    /* Check whether the fault is within the dynamic space. */
    if (page_index == (-1)) {

        /* It can be helpful to be able to put a breakpoint on this
         * case to help diagnose low-level problems. */
        unhandled_sigmemoryfault(fault_addr);

        /* not within the dynamic space -- not our responsibility */
        return 0;

    } else {
        int ret;
        ret = thread_mutex_lock(&free_pages_lock);
        gc_assert(ret == 0);
        if (page_table[page_index].write_protected) {
            /* Unprotect the page. */
            os_protect(page_address(page_index), GENCGC_CARD_BYTES, OS_VM_PROT_ALL);
            page_table[page_index].write_protected_cleared = 1;
            page_table[page_index].write_protected = 0;
        } else if (!ignore_memoryfaults_on_unprotected_pages) {
            /* The only acceptable reason for this signal on a heap
             * access is that GENCGC write-protected the page.
             * However, if two CPUs hit a wp page near-simultaneously,
             * we had better not have the second one lose here if it
             * does this test after the first one has already set wp=0
             */
            if(page_table[page_index].write_protected_cleared != 1) {
                void lisp_backtrace(int frames);
                lisp_backtrace(10);
                fprintf(stderr,
                        "Fault @ %p, page %"PAGE_INDEX_FMT" not marked as write-protected:\n"
                        "  boxed_region.first_page: %"PAGE_INDEX_FMT","
                        "  boxed_region.last_page %"PAGE_INDEX_FMT"\n"
                        "  page.scan_start_offset: %"OS_VM_SIZE_FMT"\n"
                        "  page.bytes_used: %"PAGE_BYTES_FMT"\n"
                        "  page.allocated: %d\n"
                        "  page.write_protected: %d\n"
                        "  page.write_protected_cleared: %d\n"
                        "  page.generation: %d\n",
                        fault_addr,
                        page_index,
                        boxed_region.first_page,
                        boxed_region.last_page,
                        page_table[page_index].scan_start_offset,
                        page_table[page_index].bytes_used,
                        page_table[page_index].allocated,
                        page_table[page_index].write_protected,
                        page_table[page_index].write_protected_cleared,
                        page_table[page_index].gen);
                if (!continue_after_memoryfault_on_unprotected_pages)
                    lose("Feh.\n");
            }
        }
        ret = thread_mutex_unlock(&free_pages_lock);
        gc_assert(ret == 0);
        /* Don't worry, we can handle it. */
        return 1;
    }
}

/* This is to be called when we catch a SIGSEGV/SIGBUS, determine that
 * it's not just a case of the program hitting the write barrier, and
 * are about to let Lisp deal with it. It's basically just a
 * convenient place to set a gdb breakpoint. */
void
unhandled_sigmemoryfault(void *addr)
{}

void gc_alloc_update_all_page_tables(void)
{
    /* Flush the alloc regions updating the tables. */
    struct thread *th;
    for_each_thread(th) {
        gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->alloc_region);
#if defined(LISP_FEATURE_SB_SAFEPOINT_STRICTLY) && !defined(LISP_FEATURE_WIN32)
        gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->sprof_alloc_region);
#endif
    }
    gc_alloc_update_page_tables(UNBOXED_PAGE_FLAG, &unboxed_region);
    gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &boxed_region);
}

void
gc_set_region_empty(struct alloc_region *region)
{
    region->first_page = 0;
    region->last_page = -1;
    region->start_addr = page_address(0);
    region->free_pointer = page_address(0);
    region->end_addr = page_address(0);
}

static void
zero_all_free_pages()
{
    page_index_t i;

    for (i = 0; i < last_free_page; i++) {
        if (page_free_p(i)) {
#ifdef READ_PROTECT_FREE_PAGES
            os_protect(page_address(i),
                       GENCGC_CARD_BYTES,
                       OS_VM_PROT_ALL);
#endif
            zero_pages(i, i);
        }
    }
}

/* Things to do before doing a final GC before saving a core (without
 * running Lisp code):
 *
 * + Pages in large_object pages aren't moved by the GC, so we need to
 *   unset that flag from all pages.
 * + The pseudo-static generation isn't normally collected, but it seems
 *   reasonable to collect it at least when saving a core. So move the
 *   pages to a normal generation.
 */
static void
prepare_for_final_gc ()
{
    page_index_t i;
    for (i = 0; i < last_free_page; i++) {
        page_table[i].large_object = 0;
        if (page_table[i].gen == PSEUDO_STATIC_GENERATION) {
            int used = page_table[i].bytes_used;
            page_table[i].gen = HIGHEST_NORMAL_GENERATION;
            generations[PSEUDO_STATIC_GENERATION].bytes_allocated -= used;
            generations[HIGHEST_NORMAL_GENERATION].bytes_allocated += used;
        }
    }
}

/* Do a non-conservative GC, and then save a core with the initial
 * function being set to the value of the static symbol
 * SB!VM:RESTART-LISP-FUNCTION */
void
gc_and_save(char *filename, boolean prepend_runtime,
            boolean save_runtime_options, boolean compressed,
            int compression_level, int application_type)
{
    FILE *file;
    void *runtime_bytes = NULL;
    size_t runtime_size;

    file = prepare_to_save(filename, prepend_runtime, &runtime_bytes,
                           &runtime_size);
    if (file == NULL)
        return;

    conservative_stack = 0;

    /* The filename might come from Lisp, and be moved by the now
     * non-conservative GC. */
    filename = strdup(filename);

    /* Collect twice: once into relatively high memory, and then back
     * into low memory. This compacts the retained data into the lower
     * pages, minimizing the size of the core file.
     */
    prepare_for_final_gc();
    gencgc_alloc_start_page = last_free_page;
    collect_garbage(HIGHEST_NORMAL_GENERATION+1);

    prepare_for_final_gc();
    gencgc_alloc_start_page = -1;
    collect_garbage(HIGHEST_NORMAL_GENERATION+1);

    if (prepend_runtime)
        save_runtime_to_filehandle(file, runtime_bytes, runtime_size,
                                   application_type);

    /* The dumper doesn't know that pages need to be zeroed before use. */
    zero_all_free_pages();
    save_to_filehandle(file, filename, SymbolValue(RESTART_LISP_FUNCTION,0),
                       prepend_runtime, save_runtime_options,
                       compressed ? compression_level : COMPRESSION_LEVEL_NONE);
    /* Oops. Save still managed to fail. Since we've mangled the stack
     * beyond hope, there's not much we can do.
     * (beyond FUNCALLing RESTART_LISP_FUNCTION, but I suspect that's
     * going to be rather unsatisfactory too... */
    lose("Attempt to save core after non-conservative GC failed.\n");
}