/*
 * GENerational Conservative Garbage Collector for SBCL
 */

/*
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 */

/*
 * For a review of garbage collection techniques (e.g. generational
 * GC) and terminology (e.g. "scavenging") see Paul R. Wilson,
 * "Uniprocessor Garbage Collection Techniques" available at
 *   <https://www.cs.rice.edu/~javaplt/311/Readings/wilson92uniprocessor.pdf>
 * or
 *   <ftp://ftp.cs.utexas.edu/pub/garbage/bigsurv.ps>.
 */
#if defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD)
#include "pthreads_win32.h"
#endif
#include "interrupt.h"
#include "gc-internal.h"
#include "gc-private.h"
#include "gencgc-private.h"
#include "pseudo-atomic.h"
#include "genesis/gc-tables.h"
#include "genesis/vector.h"
#include "genesis/weak-pointer.h"
#include "genesis/fdefn.h"
#include "genesis/simple-fun.h"
#include "genesis/hash-table.h"
#include "genesis/instance.h"
#include "genesis/layout.h"
#include "hopscotch.h"
#include "genesis/cons.h"
#include "forwarding-ptr.h"
/* forward declarations */
page_index_t gc_find_freeish_pages(page_index_t *restart_page_ptr, sword_t nbytes,
                                   int page_type_flag);
/* As usually configured, generations 0-5 are normal collected generations,
   6 is pseudo-static (the objects in which are never moved nor reclaimed),
   and 7 is scratch space used when collecting a generation without promotion,
   wherein the condemned generation's live objects are moved to generation 7
   and back again.
 */
enum {
    SCRATCH_GENERATION = PSEUDO_STATIC_GENERATION+1,
    NUM_GENERATIONS
};
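
/* A minimal sketch (not a verbatim excerpt of the collector) of how a
 * no-promotion collection of generation N uses the scratch generation:
 *
 *     from_space = N;
 *     new_space  = SCRATCH_GENERATION;     // survivors are copied to gen 7
 *     ... copy and scavenge the live objects ...
 *     // afterwards every surviving page is relabeled:
 *     page_table[page].gen = N;            // scratch pages become gen N again
 *
 * so live data ends up back in generation N instead of being promoted. */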
/* Largest allocation seen since last GC. */
os_vm_size_t large_allocation = 0;
/* the verbosity level. All non-error messages are disabled at level 0;
 * and only a few rare messages are printed at level 1. */
#if QSHOW == 2
boolean gencgc_verbose = 1;
#else
boolean gencgc_verbose = 0;
#endif
/* FIXME: At some point enable the various error-checking things below
 * and see what they say. */

/* We hunt for pointers to old-space, when GCing generations >= verify_gens.
 * Set verify_gens to HIGHEST_NORMAL_GENERATION + 2 to disable this kind of
 * check. */
generation_index_t verify_gens = HIGHEST_NORMAL_GENERATION + 2;
/* Should we do a pre-scan verify of generation 0 before it's GCed? */
boolean pre_verify_gen_0 = 0;
/* Should we check that newly allocated regions are zero filled? */
boolean gencgc_zero_check = 0;

/* Should we check that the free space is zero filled? */
/* Don't use this - you'll get more mileage out of READ_PROTECT_FREE_PAGES,
 * because we zero-fill lazily. This switch should probably be removed. */
boolean gencgc_enable_verify_zero_fill = 0;
/* When loading a core, don't do a full scan of the memory for the
 * memory region boundaries. (Set to true by coreparse.c if the core
 * contained a pagetable entry). */
boolean gencgc_partial_pickup = 0;

/* If defined, free pages are read-protected to ensure that nothing
 * accesses them. */

/* #define READ_PROTECT_FREE_PAGES */
/*
 * GC structures and variables
 */

/* the total bytes allocated. These are seen by Lisp DYNAMIC-USAGE. */
os_vm_size_t bytes_allocated = 0;
os_vm_size_t auto_gc_trigger = 0;
/* the source and destination generations. These are set before a GC starts
 * scavenging. */
generation_index_t from_space;
generation_index_t new_space;
/* Set to 1 when in GC */
boolean gc_active_p = 0;

/* Should the GC be conservative on stack? If false (only right before
 * saving a core), don't scan the stack / don't mark pages dont_move. */
static boolean conservative_stack = 1;
/* An array of page structures is allocated on gc initialization.
 * This helps to quickly map between an address and its page structure.
 * page_table_pages is set from the size of the dynamic space. */
page_index_t page_table_pages;
struct page *page_table;
lispobj gc_object_watcher;
int gc_traceroot_criterion;
#ifdef PIN_GRANULARITY_LISPOBJ
struct hopscotch_table pinned_objects;
#endif

/* This is always 0 except during gc_and_save() */
lispobj lisp_init_function;
/// Constants defined in gc-internal:
///   #define BOXED_PAGE_FLAG 1
///   #define UNBOXED_PAGE_FLAG 2
///   #define OPEN_REGION_PAGE_FLAG 4
/// Return true if 'allocated' bits are: {001, 010, 011}, false if 1zz or 000.
static inline boolean page_allocated_no_region_p(page_index_t page) {
    return (page_table[page].allocated ^ OPEN_REGION_PAGE_FLAG) > OPEN_REGION_PAGE_FLAG;
}
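
/* A worked check of the XOR trick above: OPEN_REGION_PAGE_FLAG is #b100,
 * so the XOR flips bit 2, and the comparison against 4 sorts the cases:
 *   allocated=001 (boxed):    001^100 = 101 = 5 > 4   -> true
 *   allocated=010 (unboxed):  010^100 = 110 = 6 > 4   -> true
 *   allocated=011 (code):     011^100 = 111 = 7 > 4   -> true
 *   allocated=000 (free):     000^100 = 100 = 4, not > 4 -> false
 *   allocated=1zz (open):     1zz^100 = 0zz <= 3      -> false
 */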
static inline boolean page_free_p(page_index_t page) {
    return (page_table[page].allocated == FREE_PAGE_FLAG);
}
static inline boolean page_boxed_p(page_index_t page) {
    return (page_table[page].allocated & BOXED_PAGE_FLAG);
}
/// Return true if 'allocated' bits are: {001, 011}, false otherwise.
/// i.e. true of pages which could hold boxed or partially boxed objects.
static inline boolean page_boxed_no_region_p(page_index_t page) {
    return (page_table[page].allocated & 5) == BOXED_PAGE_FLAG;
}
/// Return true if page MUST NOT hold boxed objects (including code).
static inline boolean page_unboxed_p(page_index_t page) {
    /* Both flags set == boxed code page */
    return (page_table[page].allocated & 3) == UNBOXED_PAGE_FLAG;
}
static inline boolean protect_page_p(page_index_t page, generation_index_t generation) {
    return (page_boxed_no_region_p(page)
            && (page_bytes_used(page) != 0)
            && !page_table[page].dont_move
            && (page_table[page].gen == generation));
}
/* Calculate the start address for the given page number. */
inline void *
page_address(page_index_t page_num)
{
    return (void*)(DYNAMIC_SPACE_START + (page_num * GENCGC_CARD_BYTES));
}
/* Calculate the address where the allocation region associated with
 * the page starts. */
static inline void *
page_scan_start(page_index_t page_index)
{
    return page_address(page_index)-page_scan_start_offset(page_index);
}
/* True if the page starts a contiguous block. */
static inline boolean
page_starts_contiguous_block_p(page_index_t page_index)
{
    // Don't use the preprocessor macro: 0 means 0.
    return page_table[page_index].scan_start_offset_ == 0;
}
/* True if the page is the last page in a contiguous block. */
static inline boolean
page_ends_contiguous_block_p(page_index_t page_index, generation_index_t gen)
{
    // There is *always* a next page in the page table.
    boolean answer = page_bytes_used(page_index) < GENCGC_CARD_BYTES
                  || page_starts_contiguous_block_p(page_index+1);
#ifdef DEBUG
    boolean safe_answer =
           (/* page doesn't fill block */
            (page_bytes_used(page_index) < GENCGC_CARD_BYTES)
            /* page is last allocated page */
            || ((page_index + 1) >= last_free_page)
            /* next page contains no data */
            || !page_bytes_used(page_index + 1)
            /* next page is in different generation */
            || (page_table[page_index + 1].gen != gen)
            /* next page starts its own contiguous block */
            || (page_starts_contiguous_block_p(page_index + 1)));
    gc_assert(answer == safe_answer);
#endif
    return answer;
}
/* We maintain the invariant that pages with FREE_PAGE_FLAG have
 * scan_start of zero, to optimize page_ends_contiguous_block_p().
 * Clear all other flags as well, since they don't mean anything,
 * and a store is simpler than a bitwise operation. */
static inline void reset_page_flags(page_index_t page) {
    page_table[page].scan_start_offset_ = 0;
    // Any C compiler worth its salt should merge these into one store
    page_table[page].allocated = page_table[page].write_protected
        = page_table[page].write_protected_cleared
        = page_table[page].dont_move = page_table[page].has_pins
        = page_table[page].large_object = 0;
}
/// External function for calling from Lisp.
page_index_t ext_find_page_index(void *addr) { return find_page_index(addr); }
static os_vm_size_t
npage_bytes(page_index_t npages)
{
    gc_assert(npages>=0);
    return ((os_vm_size_t)npages)*GENCGC_CARD_BYTES;
}
/* Check that X is a higher address than Y and return offset from Y to
 * X in bytes. */
static inline os_vm_size_t
addr_diff(void *x, void *y)
{
    gc_assert(x >= y);
    return (uintptr_t)x - (uintptr_t)y;
}
/* a structure to hold the state of a generation
 *
 * CAUTION: If you modify this, make sure to touch up the alien
 * definition in src/code/gc.lisp accordingly. ...or better yet,
 * deal with the FIXME there... */
struct generation {
#if SEGREGATED_CODE
    // A distinct start page per nonzero value of 'page_type_flag'.
    // The zeroth index is the large object start page.
    page_index_t alloc_start_page_[4];
#define alloc_large_start_page alloc_start_page_[0]
#define alloc_start_page alloc_start_page_[BOXED_PAGE_FLAG]
#define alloc_unboxed_start_page alloc_start_page_[UNBOXED_PAGE_FLAG]
#else
    /* the first page that gc_alloc_large (boxed) considers on its next
     * call. (Although it always allocates after the boxed_region.) */
    page_index_t alloc_large_start_page;

    /* the first page that gc_alloc() checks on its next call */
    page_index_t alloc_start_page;

    /* the first page that gc_alloc_unboxed() checks on its next call */
    page_index_t alloc_unboxed_start_page;
#endif

    /* the bytes allocated to this generation */
    os_vm_size_t bytes_allocated;

    /* the number of bytes at which to trigger a GC */
    os_vm_size_t gc_trigger;

    /* to calculate a new level for gc_trigger */
    os_vm_size_t bytes_consed_between_gc;

    /* the number of GCs since the last raise */
    int num_gc;

    /* the number of GCs to run on the generations before raising objects to the
     * next generation */
    int number_of_gcs_before_promotion;

    /* the cumulative sum of the bytes allocated to this generation. It is
     * cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation. Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC. */
    os_vm_size_t cum_sum_bytes_allocated;

    /* a minimum average memory age before a GC will occur helps
     * prevent a GC when a large number of new live objects have been
     * added, in which case a GC could be a waste of time */
    double minimum_age_before_gc;
};
/* an array of generation structures. There needs to be one more
 * generation structure than actual generations as the oldest
 * generation is temporarily raised then lowered. */
struct generation generations[NUM_GENERATIONS];
/* the oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... HIGHEST_NORMAL_GENERATION
 *
 * The default of HIGHEST_NORMAL_GENERATION enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided. */
generation_index_t gencgc_oldest_gen_to_gc = HIGHEST_NORMAL_GENERATION;
/* META: Is nobody aside from me bothered by this especially misleading
 * use of the word "last"? It could mean either "ultimate" or "prior",
 * but in fact means neither. It is the *FIRST* page that should be grabbed
 * for more space, so it is the min free page, or 1+ the max used page. */
/* The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the Lisp code. */
page_index_t last_free_page;
#ifdef LISP_FEATURE_SB_THREAD
/* This lock is to prevent multiple threads from simultaneously
 * allocating new regions which overlap each other. Note that the
 * majority of GC is single-threaded, but alloc() may be called from
 * >1 thread at a time and must be thread-safe. This lock must be
 * seized before all accesses to generations[] or to parts of
 * page_table[] that other threads may want to see */
static pthread_mutex_t free_pages_lock = PTHREAD_MUTEX_INITIALIZER;
/* This lock is used to protect non-thread-local allocation. */
static pthread_mutex_t allocation_lock = PTHREAD_MUTEX_INITIALIZER;
#endif
extern os_vm_size_t gencgc_release_granularity;
os_vm_size_t gencgc_release_granularity = GENCGC_RELEASE_GRANULARITY;

extern os_vm_size_t gencgc_alloc_granularity;
os_vm_size_t gencgc_alloc_granularity = GENCGC_ALLOC_GRANULARITY;
/*
 * miscellaneous heap functions
 */
/* Count the number of pages which are write-protected within the
 * given generation. */
static page_index_t
count_write_protect_generation_pages(generation_index_t generation)
{
    page_index_t i, count = 0;

    for (i = 0; i < last_free_page; i++)
        if (!page_free_p(i)
            && (page_table[i].gen == generation)
            && page_table[i].write_protected)
            count++;
    return count;
}
/* Count the number of pages within the given generation. */
static page_index_t
count_generation_pages(generation_index_t generation)
{
    page_index_t i;
    page_index_t count = 0;

    for (i = 0; i < last_free_page; i++)
        if (!page_free_p(i) && page_table[i].gen == generation)
            count++;
    return count;
}
static page_index_t
count_dont_move_pages(void)
{
    page_index_t i;
    page_index_t count = 0;
    for (i = 0; i < last_free_page; i++) {
        if (!page_free_p(i) && page_table[i].dont_move) {
            count++;
        }
    }
    return count;
}
/* Work through the pages and add up the number of bytes used for the
 * given generation. */
static __attribute__((unused)) os_vm_size_t
count_generation_bytes_allocated (generation_index_t gen)
{
    page_index_t i;
    os_vm_size_t result = 0;
    for (i = 0; i < last_free_page; i++) {
        if (!page_free_p(i) && page_table[i].gen == gen)
            result += page_bytes_used(i);
    }
    return result;
}
/* Return the average age of the memory in a generation. */
extern double
generation_average_age(generation_index_t gen)
{
    if (generations[gen].bytes_allocated == 0)
        return 0.0;

    return
        ((double)generations[gen].cum_sum_bytes_allocated)
        / ((double)generations[gen].bytes_allocated);
}
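
/* Worked example with invented numbers: if cum_sum_bytes_allocated has grown
 * to 25MB while bytes_allocated sits at 10MB, the average age is
 * 25/10 = 2.5, i.e. the typical byte in this generation has survived roughly
 * two and a half GCs of this generation since it was last collected. */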
#ifdef LISP_FEATURE_X86
extern void fpu_save(void *);
extern void fpu_restore(void *);
#endif

#define PAGE_INDEX_FMT PRIdPTR
extern void
write_generation_stats(FILE *file)
{
    generation_index_t i;

#ifdef LISP_FEATURE_X86
    int fpu_state[27];

    /* Can end up here after calling alloc_tramp which doesn't prepare
     * the x87 state, and the C ABI uses a different mode */
    fpu_save(fpu_state);
#endif

    /* Print the heap stats. */
    fprintf(file,
            " Gen StaPg UbSta LaSta Boxed Unbox LB LUB !move Alloc Waste Trig WP GCs Mem-age\n");

    for (i = 0; i <= SCRATCH_GENERATION; i++) {
        page_index_t j;
        page_index_t boxed_cnt = 0;
        page_index_t unboxed_cnt = 0;
        page_index_t large_boxed_cnt = 0;
        page_index_t large_unboxed_cnt = 0;
        page_index_t pinned_cnt=0;

        for (j = 0; j < last_free_page; j++)
            if (page_table[j].gen == i) {

                /* Count the number of boxed pages within the given
                 * generation. */
                if (page_boxed_p(j)) {
                    if (page_table[j].large_object)
                        large_boxed_cnt++;
                    else
                        boxed_cnt++;
                }
                if(page_table[j].dont_move) pinned_cnt++;
                /* Count the number of unboxed pages within the given
                 * generation. */
                if (page_unboxed_p(j)) {
                    if (page_table[j].large_object)
                        large_unboxed_cnt++;
                    else
                        unboxed_cnt++;
                }
            }

        gc_assert(generations[i].bytes_allocated
                  == count_generation_bytes_allocated(i));
        fprintf(file,
                " %1d: %5ld %5ld %5ld",
                i,
                (long)generations[i].alloc_start_page,
                (long)generations[i].alloc_unboxed_start_page,
                (long)generations[i].alloc_large_start_page);
        fprintf(file,
                " %5"PAGE_INDEX_FMT" %5"PAGE_INDEX_FMT" %5"PAGE_INDEX_FMT
                " %5"PAGE_INDEX_FMT" %5"PAGE_INDEX_FMT,
                boxed_cnt, unboxed_cnt, large_boxed_cnt,
                large_unboxed_cnt, pinned_cnt);
        fprintf(file,
                " %8"OS_VM_SIZE_FMT
                " %6"OS_VM_SIZE_FMT
                " %8"OS_VM_SIZE_FMT
                " %4"PAGE_INDEX_FMT" %3d %7.4f\n",
                generations[i].bytes_allocated,
                (npage_bytes(count_generation_pages(i)) - generations[i].bytes_allocated),
                generations[i].gc_trigger,
                count_write_protect_generation_pages(i),
                generations[i].num_gc,
                generation_average_age(i));
    }
    fprintf(file," Total bytes allocated = %"OS_VM_SIZE_FMT"\n", bytes_allocated);
    fprintf(file," Dynamic-space-size bytes = %"OS_VM_SIZE_FMT"\n", dynamic_space_size);

#ifdef LISP_FEATURE_X86
    fpu_restore(fpu_state);
#endif
}
extern void
write_heap_exhaustion_report(FILE *file, long available, long requested,
                             struct thread *thread)
{
    fprintf(file,
            "Heap exhausted during %s: %ld bytes available, %ld requested.\n",
            gc_active_p ? "garbage collection" : "allocation",
            available, requested);
    write_generation_stats(file);
    fprintf(file, "GC control variables:\n");
    fprintf(file, " *GC-INHIBIT* = %s\n *GC-PENDING* = %s\n",
            read_TLS(GC_INHIBIT,thread)==NIL ? "false" : "true",
            (read_TLS(GC_PENDING, thread) == T) ?
            "true" : ((read_TLS(GC_PENDING, thread) == NIL) ?
                      "false" : "in progress"));
#ifdef LISP_FEATURE_SB_THREAD
    fprintf(file, " *STOP-FOR-GC-PENDING* = %s\n",
            read_TLS(STOP_FOR_GC_PENDING,thread)==NIL ? "false" : "true");
#endif
}
extern void
print_generation_stats(void)
{
    write_generation_stats(stderr);
}
extern char* gc_logfile;
char * gc_logfile = NULL;
extern void
log_generation_stats(char *logfile, char *header)
{
    if (logfile) {
        FILE * log = fopen(logfile, "a");
        if (log) {
            fprintf(log, "%s\n", header);
            write_generation_stats(log);
            fclose(log);
        } else {
            fprintf(stderr, "Could not open gc logfile: %s\n", logfile);
            fflush(stderr);
        }
    }
}
extern void
report_heap_exhaustion(long available, long requested, struct thread *th)
{
    if (gc_logfile) {
        FILE * log = fopen(gc_logfile, "a");
        if (log) {
            write_heap_exhaustion_report(log, available, requested, th);
            fclose(log);
        } else {
            fprintf(stderr, "Could not open gc logfile: %s\n", gc_logfile);
            fflush(stderr);
        }
    }
    /* Always to stderr as well. */
    write_heap_exhaustion_report(stderr, available, requested, th);
}
#if defined(LISP_FEATURE_X86)
void fast_bzero(void*, size_t); /* in <arch>-assem.S */
#endif
/* Zero the pages from START to END (inclusive), but use mmap/munmap instead
 * of zeroing them ourselves, i.e. in practice give the memory back to the
 * OS. Generally done after a large GC. */
void zero_pages_with_mmap(page_index_t start, page_index_t end) {
    page_index_t i;
    void *addr = page_address(start), *new_addr;
    os_vm_size_t length = npage_bytes(1+end-start);

    if (start > end)
        return;

    gc_assert(length >= gencgc_release_granularity);
    gc_assert((length % gencgc_release_granularity) == 0);

#ifdef LISP_FEATURE_LINUX
    // We use MADV_DONTNEED only on Linux due to differing semantics from BSD.
    // Linux treats it as a demand that the memory be 0-filled, or refreshed
    // from a file that backs the range. BSD takes it as a hint that you don't
    // care if the memory has to be brought in from swap when next accessed,
    // i.e. it's not a request to make a user-visible alteration to memory.
    // So in theory this can bring a page in from the core file, if we happen
    // to hit a page that resides in the portion of memory mapped by coreparse.
    // In practice this should not happen because objects from a core file can't
    // become garbage. Except in save-lisp-and-die they can, and we must be
    // cautious not to resurrect bytes that originally came from the file.
    if ((os_vm_address_t)addr >= anon_dynamic_space_start) {
        if (madvise(addr, length, MADV_DONTNEED) != 0)
            lose("madvise failed\n");
    } else
#endif
    {
        os_invalidate(addr, length);
        new_addr = os_validate(NOT_MOVABLE, addr, length);
        if (new_addr == NULL || new_addr != addr) {
            lose("remap_free_pages: page moved, 0x%08x ==> 0x%08x",
                 start, new_addr);
        }
    }

    for (i = start; i <= end; i++)
        set_page_need_to_zero(i, 0);
}
/* Zero the pages from START to END (inclusive). Generally done just after
 * a new region has been allocated. */
static void
zero_pages(page_index_t start, page_index_t end) {
    if (start > end)
        return;

#if defined(LISP_FEATURE_X86)
    fast_bzero(page_address(start), npage_bytes(1+end-start));
#else
    bzero(page_address(start), npage_bytes(1+end-start));
#endif
}
static void
zero_and_mark_pages(page_index_t start, page_index_t end) {
    page_index_t i;

    zero_pages(start, end);
    for (i = start; i <= end; i++)
        set_page_need_to_zero(i, 0);
}
/* Zero the pages from START to END (inclusive), except for those
 * pages which are known to already be zeroed. Mark all pages in the
 * range as non-zeroed.
 */
static void
zero_dirty_pages(page_index_t start, page_index_t end) {
    page_index_t i, j;

#ifdef READ_PROTECT_FREE_PAGES
    os_protect(page_address(start), npage_bytes(1+end-start), OS_VM_PROT_ALL);
#endif
    for (i = start; i <= end; i++) {
        if (!page_need_to_zero(i)) continue;
        for (j = i+1; (j <= end) && page_need_to_zero(j) ; j++)
            ; /* empty body: just extend the run of dirty pages */
        zero_pages(i, j-1);
        i = j;
    }

    for (i = start; i <= end; i++) {
        set_page_need_to_zero(i, 1);
    }
}
/*
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties
 * e.g. boxed/unboxed, generation, ages; there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page. Many
 * features of memory use are noted on a page-wise basis, e.g. the
 * generation; so if a region starts within an existing allocated page
 * it must be consistent with that page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region. It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list which jumps all over the place to clean up the list. It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g. to detect pointers to the oldspace.
 * Thus it's important that the allocation regions have the correct
 * properties set when allocated, and not just set when closed. The
 * region allocation routines return regions with the specified
 * properties, and grab all the pages, setting their properties
 * appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer. The actual space used by the region is not reflected
 * in the page tables until it is closed. It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used, returning unused
 * space. Further, it may be noted in the new regions, which is
 * necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region; the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects and so
 * don't need scavenging. Further, they can't contain pointers to
 * younger generations so WP is not needed. By allocating pages to
 * unboxed objects the whole page never needs scavenging or
 * write-protecting. */
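
/* A minimal sketch of the pointer-bump fast path that these regions enable
 * (compare gc_alloc_with_region() further below; 'region' is an open
 * struct alloc_region*):
 *
 *     char *obj = region->free_pointer;
 *     if (obj + nbytes <= (char*)region->end_addr) {
 *         region->free_pointer = obj + nbytes;  // bump the free pointer...
 *         return obj;                           // ...no page table work at all
 *     }
 *     // else: close this region, open a new one, and retry (slow path)
 */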
/* We use either two or three regions for the current newspace generation. */
#if SEGREGATED_CODE
struct alloc_region gc_alloc_region[3];
#define boxed_region   gc_alloc_region[BOXED_PAGE_FLAG-1]
#define unboxed_region gc_alloc_region[UNBOXED_PAGE_FLAG-1]
#define code_region    gc_alloc_region[CODE_PAGE_FLAG-1]
#else
struct alloc_region boxed_region;
struct alloc_region unboxed_region;
#endif

/* The generation currently being allocated to. */
static generation_index_t gc_alloc_generation;
static inline page_index_t
generation_alloc_start_page(generation_index_t generation, int page_type_flag, int large)
{
    if (!(page_type_flag >= 1 && page_type_flag <= 3))
        lose("bad page_type_flag: %d", page_type_flag);
    if (large)
        return generations[generation].alloc_large_start_page;
#if SEGREGATED_CODE
    return generations[generation].alloc_start_page_[page_type_flag];
#else
    if (UNBOXED_PAGE_FLAG == page_type_flag)
        return generations[generation].alloc_unboxed_start_page;
    /* Both code and data. */
    return generations[generation].alloc_start_page;
#endif
}
static inline void
set_generation_alloc_start_page(generation_index_t generation, int page_type_flag, int large,
                                page_index_t page)
{
    if (!(page_type_flag >= 1 && page_type_flag <= 3))
        lose("bad page_type_flag: %d", page_type_flag);
    if (large)
        generations[generation].alloc_large_start_page = page;
#if SEGREGATED_CODE
    else
        generations[generation].alloc_start_page_[page_type_flag] = page;
#else
    else if (UNBOXED_PAGE_FLAG == page_type_flag)
        generations[generation].alloc_unboxed_start_page = page;
    else /* Both code and data. */
        generations[generation].alloc_start_page = page;
#endif
}
/* Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page. So
 * may pick up from the previous region if there is enough space. This
 * keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables(), and will thus be in an empty state.
 *
 * To assist the scavenging functions, write-protected pages are not
 * used. Free pages should not be write-protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found to within the current
 * region and the page generation must be set so that pointers to the
 * from space can be recognized. Therefore the generation of pages in
 * the region are set to gc_alloc_generation. To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void
gc_alloc_new_region(sword_t nbytes, int page_type_flag, struct alloc_region *alloc_region)
{
    page_index_t first_page;
    page_index_t last_page;
    page_index_t i;
    int ret;

    FSHOW((stderr,
           "/alloc_new_region for %d bytes from gen %d\n",
           nbytes, gc_alloc_generation));

    /* Check that the region is in a reset state. */
    gc_assert((alloc_region->first_page == 0)
              && (alloc_region->last_page == -1)
              && (alloc_region->free_pointer == alloc_region->end_addr));
    ret = thread_mutex_lock(&free_pages_lock);
    gc_assert(ret == 0);
    first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0);
    last_page = gc_find_freeish_pages(&first_page, nbytes, page_type_flag);

    /* Set up the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_address(first_page) + page_bytes_used(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = page_address(last_page+1);

    /* Set up the pages. */

    /* The first page may have already been in use. */
    /* If so, just assert that it's consistent, otherwise, set it up. */
    if (page_bytes_used(first_page)) {
        gc_assert(page_table[first_page].allocated == page_type_flag);
        gc_assert(page_table[first_page].gen == gc_alloc_generation);
        gc_dcheck(page_table[first_page].large_object == 0);
    } else {
        page_table[first_page].allocated = page_type_flag;
        page_table[first_page].gen = gc_alloc_generation;
    }
    page_table[first_page].allocated |= OPEN_REGION_PAGE_FLAG;

    for (i = first_page+1; i <= last_page; i++) {
        page_table[i].allocated = page_type_flag;
        page_table[i].gen = gc_alloc_generation;
        set_page_scan_start_offset(i,
            addr_diff(page_address(i), alloc_region->start_addr));
        page_table[i].allocated |= OPEN_REGION_PAGE_FLAG;
    }

    /* Bump up last_free_page. */
    if (last_page+1 > last_free_page) {
        last_free_page = last_page+1;
        /* do we only want to call this on special occasions? like for
         * boxed_region? */
        set_alloc_pointer((lispobj)page_address(last_free_page));
    }
    ret = thread_mutex_unlock(&free_pages_lock);
    gc_assert(ret == 0);

    /* If the first page was only partial, don't check whether it's
     * zeroed (it won't be) and don't zero it (since the parts that
     * we're interested in are guaranteed to be zeroed).
     */
    if (page_bytes_used(first_page)) {
        first_page++;
    }

    zero_dirty_pages(first_page, last_page);

    /* we can do this after releasing free_pages_lock */
    if (gencgc_zero_check) {
        lispobj *p;
        for (p = alloc_region->start_addr;
             (void*)p < alloc_region->end_addr; p++) {
            if (*p != 0)
                lose("The new region is not zero at %p (start=%p, end=%p).\n",
                     p, alloc_region->start_addr, alloc_region->end_addr);
        }
    }
}
/* If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then it is only recorded if the first page of the
 * current region is <= new_areas_ignore_page. This helps avoid
 * unnecessary recording when doing a full scavenge pass.
 *
 * The new_object structure holds the page, byte offset, and size of
 * new regions of objects. Each new area is placed in the array of
 * these structures pointed to by new_areas. new_areas_index holds the
 * offset into new_areas.
 *
 * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation. */
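
/* Worked example with invented numbers: if the last recorded area is
 * {page=10, offset=0, size=GENCGC_CARD_BYTES} and add_new_area() is then
 * called with first_page=11, offset=0, size=128, the new area's start
 * address equals the previous area's end address, so the previous record
 * simply grows by 128 bytes instead of a new record being appended. This
 * keeps the array short during newspace scavenging. */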
#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static page_index_t new_areas_ignore_page;
struct new_area {
    page_index_t page;
    size_t offset;
    size_t size;
};
static struct new_area (*new_areas)[];
static size_t new_areas_index;
size_t max_new_areas;
/* Add a new area to new_areas. */
static void
add_new_area(page_index_t first_page, size_t offset, size_t size)
{
    size_t new_area_start, c;
    ssize_t i;

    /* Ignore if full. */
    if (new_areas_index >= NUM_NEW_AREAS)
        return;

    switch (record_new_objects) {
    case 0:
        return;
    case 1:
        if (first_page > new_areas_ignore_page)
            return;
        break;
    case 2:
        break;
    default:
        gc_abort();
    }

    new_area_start = npage_bytes(first_page) + offset;

    /* Search backwards for a prior area that this follows from. If
       found this will save adding a new area. */
    for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) {
        size_t area_end =
            npage_bytes((*new_areas)[i].page)
            + (*new_areas)[i].offset
            + (*new_areas)[i].size;
        /*FSHOW((stderr,
               "/add_new_area S1 %d %d %d %d\n",
               i, c, new_area_start, area_end));*/
        if (new_area_start == area_end) {
            /*FSHOW((stderr,
                   "/adding to [%d] %d %d %d with %d %d %d:\n",
                   i,
                   (*new_areas)[i].page,
                   (*new_areas)[i].offset,
                   (*new_areas)[i].size,
                   first_page,
                   offset,
                   size));*/
            (*new_areas)[i].size += size;
            return;
        }
    }

    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
    /*FSHOW((stderr,
           "/new_area %d page %d offset %d size %d\n",
           new_areas_index, first_page, offset, size));*/
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
        max_new_areas = new_areas_index;
}
/* Update the tables for the alloc_region. The region may be added to
 * the new_areas.
 *
 * When done the alloc_region is set up so that the next quick alloc
 * will fail safely and thus a new region will be allocated. Further,
 * it is safe to try to re-update the page table of this reset
 * alloc_region. */
void
gc_alloc_update_page_tables(int page_type_flag, struct alloc_region *alloc_region)
{
    /* Catch an unused alloc_region. */
    if (alloc_region->last_page == -1)
        return;

    page_index_t first_page = alloc_region->first_page;
    page_index_t next_page = first_page+1;
    char *page_base = page_address(first_page);
    char *free_pointer = alloc_region->free_pointer;

    // page_bytes_used() can be done without holding a lock. Nothing else
    // affects the usage on the first page of a region owned by this thread.
    page_bytes_t orig_first_page_bytes_used = page_bytes_used(first_page);
    gc_assert(alloc_region->start_addr == page_base + orig_first_page_bytes_used);

    int ret = thread_mutex_lock(&free_pages_lock);
    gc_assert(ret == 0);

    // Mark the region as closed on its first page.
    page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);

    if (free_pointer != alloc_region->start_addr) {
        /* some bytes were allocated in the region */

        /* All the pages used need to be updated */

        /* Update the first page. */
        if (!orig_first_page_bytes_used)
            gc_assert(page_starts_contiguous_block_p(first_page));
        page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);

#if SEGREGATED_CODE
        gc_assert(page_table[first_page].allocated == page_type_flag);
#else
        gc_assert(page_table[first_page].allocated & page_type_flag);
#endif
        gc_assert(page_table[first_page].gen == gc_alloc_generation);
        gc_assert(page_table[first_page].large_object == 0);

        /* Calculate the number of bytes used in this page. This is not
         * always the number of new bytes, unless it was free. */
        boolean more;
        os_vm_size_t bytes_used = addr_diff(free_pointer, page_base);
        if ((more = (bytes_used > GENCGC_CARD_BYTES)))
            bytes_used = GENCGC_CARD_BYTES;
        set_page_bytes_used(first_page, bytes_used);

        /* 'region_size' will be the sum of new bytes consumed by the region,
         * EXCLUDING any part of the first page already in use,
         * and any unused part of the final used page */
        os_vm_size_t region_size = bytes_used - orig_first_page_bytes_used;

        /* All the rest of the pages should be accounted for. */
        while (more) {
            page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
#if SEGREGATED_CODE
            gc_assert(page_table[next_page].allocated == page_type_flag);
#else
            gc_assert(page_table[next_page].allocated & page_type_flag);
#endif
            gc_assert(page_bytes_used(next_page) == 0);
            gc_assert(page_table[next_page].gen == gc_alloc_generation);
            gc_assert(page_table[next_page].large_object == 0);
            page_base += GENCGC_CARD_BYTES;
            gc_assert(page_scan_start_offset(next_page) ==
                      addr_diff(page_base, alloc_region->start_addr));

            /* Calculate the number of bytes used in this page. */
            bytes_used = addr_diff(free_pointer, page_base);
            if ((more = (bytes_used > GENCGC_CARD_BYTES)))
                bytes_used = GENCGC_CARD_BYTES;
            set_page_bytes_used(next_page, bytes_used);
            region_size += bytes_used;

            next_page++;
        }

        // Now 'next_page' is 1 page beyond those fully accounted for.
        gc_assert(addr_diff(free_pointer, alloc_region->start_addr) == region_size);
        // Update the global totals
        bytes_allocated += region_size;
        generations[gc_alloc_generation].bytes_allocated += region_size;

        /* Set the generation's alloc restart page to the last page of
         * the region. */
        set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0, next_page-1);

        /* Add the region to the new_areas if requested. */
        if (BOXED_PAGE_FLAG & page_type_flag)
            add_new_area(first_page,orig_first_page_bytes_used, region_size);

    } else if (!orig_first_page_bytes_used) {
        /* The first page is completely unused. Unallocate it */
        reset_page_flags(first_page);
    }

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
        gc_assert(page_bytes_used(next_page) == 0);
        reset_page_flags(next_page);
        next_page++;
    }
    ret = thread_mutex_unlock(&free_pages_lock);
    gc_assert(ret == 0);

    /* alloc_region is per-thread, we're ok to do this unlocked */
    gc_set_region_empty(alloc_region);
}
/* Allocate a possibly large object. */
static void *
gc_alloc_large(sword_t nbytes, int page_type_flag, struct alloc_region *alloc_region)
{
    page_index_t first_page, next_page, last_page;
    os_vm_size_t byte_cnt;
    os_vm_size_t bytes_used;
    int ret;

    ret = thread_mutex_lock(&free_pages_lock);
    gc_assert(ret == 0);

    first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 1);
    // FIXME: really we want to try looking for space following the highest of
    // the last page of all other small object regions. That's impossible - there's
    // not enough information. At best we can skip some work in only the case where
    // the supplied region was the one most recently created. To do this right
    // would entail a malloc-like allocator at the page granularity.
    if (first_page <= alloc_region->last_page) {
        first_page = alloc_region->last_page+1;
    }

    last_page = gc_find_freeish_pages(&first_page, nbytes, page_type_flag);

    gc_assert(first_page > alloc_region->last_page);

    set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 1, last_page);

    /* Large objects don't share pages with other objects. */
    gc_assert(page_bytes_used(first_page) == 0);

    /* Set up the pages. */
    page_table[first_page].allocated = page_type_flag;
    page_table[first_page].gen = gc_alloc_generation;
    page_table[first_page].large_object = 1;

    byte_cnt = 0;

    /* Calc. the number of bytes used in this page. This is not
     * always the number of new bytes, unless it was free. */
    if ((bytes_used = nbytes) > GENCGC_CARD_BYTES) {
        bytes_used = GENCGC_CARD_BYTES;
    }
    set_page_bytes_used(first_page, bytes_used);
    byte_cnt += bytes_used;

    next_page = first_page+1;

    /* All the rest of the pages should be free. We need to set their
     * scan_start_offset pointer to the start of the region, and set
     * the bytes_used. */
    while (byte_cnt < (size_t)nbytes) {
        gc_assert(page_free_p(next_page));
        gc_assert(page_bytes_used(next_page) == 0);
        page_table[next_page].allocated = page_type_flag;
        page_table[next_page].gen = gc_alloc_generation;
        page_table[next_page].large_object = 1;

        set_page_scan_start_offset(next_page, npage_bytes(next_page-first_page));

        /* Calculate the number of bytes used in this page. */
        bytes_used = nbytes - byte_cnt;
        if (bytes_used > GENCGC_CARD_BYTES) {
            bytes_used = GENCGC_CARD_BYTES;
        }
        set_page_bytes_used(next_page, bytes_used);
        byte_cnt += bytes_used;
        next_page++;
    }

    gc_assert(byte_cnt == (size_t)nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (BOXED_PAGE_FLAG & page_type_flag)
        add_new_area(first_page, 0, nbytes);

    /* Bump up last_free_page */
    if (last_page+1 > last_free_page) {
        last_free_page = last_page+1;
        set_alloc_pointer((lispobj)(page_address(last_free_page)));
    }
    ret = thread_mutex_unlock(&free_pages_lock);
    gc_assert(ret == 0);

    zero_dirty_pages(first_page, last_page);

    return page_address(first_page);
}
static page_index_t gencgc_alloc_start_page = -1;
void
gc_heap_exhausted_error_or_lose (sword_t available, sword_t requested)
{
    struct thread *thread = arch_os_get_current_thread();
    /* Write basic information before doing anything else: if we don't
     * call to lisp this is a must, and even if we do there is always
     * the danger that we bounce back here before the error has been
     * handled, or indeed even printed.
     */
    report_heap_exhaustion(available, requested, thread);
    if (gc_active_p || (available == 0)) {
        /* If we are in GC, or totally out of memory there is no way
         * to sanely transfer control to the lisp-side of things.
         */
        lose("Heap exhausted, game over.");
    }
    else {
        /* FIXME: assert free_pages_lock held */
        (void)thread_mutex_unlock(&free_pages_lock);
#if !(defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD))
        gc_assert(get_pseudo_atomic_atomic(thread));
        clear_pseudo_atomic_atomic(thread);
        if (get_pseudo_atomic_interrupted(thread))
            do_pending_interrupt();
#endif
        /* Another issue is that signalling HEAP-EXHAUSTED error leads
         * to running user code at arbitrary places, even in a
         * WITHOUT-INTERRUPTS which may lead to a deadlock without
         * running out of the heap. So at this point all bets are
         * off. */
        if (read_TLS(INTERRUPTS_ENABLED,thread) == NIL)
            corruption_warning_and_maybe_lose
                ("Signalling HEAP-EXHAUSTED in a WITHOUT-INTERRUPTS.");
        /* available and requested should be double word aligned, thus
           they can be passed as fixnums and shifted later. */
        funcall2(StaticSymbolFunction(HEAP_EXHAUSTED_ERROR), available, requested);
        lose("HEAP-EXHAUSTED-ERROR fell through");
    }
}
/* Test whether page 'index' can continue a non-large-object region
 * having specified 'gen' and 'allocated' values. */
static inline boolean
page_extensible_p(page_index_t index, generation_index_t gen, int allocated) {
#ifdef LISP_FEATURE_BIG_ENDIAN /* TODO: implement the simpler test */
    /* Counterintuitively, gcc prefers to see sequential tests of the bitfields,
     * versus one test "!(p.large_object | p.write_protected | p.dont_move)".
     * When expressed as separate tests, it figures out that this can be optimized
     * as an AND. On the other hand, by attempting to *force* it to do that,
     * it shifts each field to the right to line them all up at bit index 0 to
     * test that 1 bit, which is a literal rendering of the user-written code.
     */
    return page_table[index].allocated == allocated
        && page_table[index].gen == gen
        && !page_table[index].large_object
        && !page_table[index].write_protected
        && !page_table[index].dont_move;
#else
    /* Test all 5 conditions above as a single comparison against a mask.
     * (The C compiler doesn't understand how to do that)
     * Any bit that has a 1 in this mask must match the desired input.
     * The two 0 bits are for "has_pins" and "write_protected_cleared".
     * has_pins is irrelevant- it won't be 1 except during gc.
     * wp_cleared is probably 0, but needs to be masked out to be sure.
     * All other flag bits must be zero to pass the test.
     *
     * #b11111111_10101111
     *    !move /  \ allocated
     *
     * The flags reside at 1 byte prior to 'gen' in the page structure.
     */
    return (*(int16_t*)(&page_table[index].gen-1) & 0xFFAF) == ((gen<<8)|allocated);
#endif
}
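
/* Worked example: a closed boxed page (allocated=#b001) in generation 2 with
 * no other flags set reads as #b00000010_00000001 (gen in the high byte,
 * flags in the low byte, assuming little-endian order). The 0xFFAF mask
 * clears nothing here, and (2<<8)|1 produces the same value, so the page is
 * extensible. Setting any flag covered by the mask - e.g. write_protected -
 * changes the left-hand side and the comparison fails. */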
/* Search for at least nbytes of space, possibly picking up any
 * remaining space on the tail of a page that was not fully used.
 *
 * Non-small allocations are guaranteed to be page-aligned.
 */
page_index_t
gc_find_freeish_pages(page_index_t *restart_page_ptr, sword_t bytes,
                      int page_type_flag)
{
    page_index_t most_bytes_found_from = 0, most_bytes_found_to = 0;
    page_index_t first_page, last_page, restart_page = *restart_page_ptr;
    os_vm_size_t nbytes = bytes;
    os_vm_size_t nbytes_goal = nbytes;
    os_vm_size_t bytes_found = 0;
    os_vm_size_t most_bytes_found = 0;
    /* Note that this definition of "small" is not the complement
     * of "large" as used in gc_alloc_large(). That's fine.
     * The constraint we must respect is that a large object
     * MUST NOT share any of its pages with another object.
     * It should also be page-aligned, though that's not a restriction
     * per se, but a fairly obvious consequence of not sharing.
     */
    boolean small_object = nbytes < GENCGC_CARD_BYTES;
    /* FIXME: assert(free_pages_lock is held); */

    if (nbytes_goal < gencgc_alloc_granularity)
        nbytes_goal = gencgc_alloc_granularity;
#if !defined(LISP_FEATURE_64_BIT) && SEGREGATED_CODE
    // Increase the region size to avoid excessive fragmentation
    if (page_type_flag == CODE_PAGE_FLAG && nbytes_goal < 65536)
        nbytes_goal = 65536;
#endif

    /* Toggled by gc_and_save for heap compaction, normally -1. */
    if (gencgc_alloc_start_page != -1) {
        restart_page = gencgc_alloc_start_page;
    }

    /* FIXME: This is on bytes instead of nbytes pending cleanup of
     * long from the interface. */
    gc_assert(bytes>=0);
    first_page = restart_page;
    while (first_page < page_table_pages) {
        if (page_free_p(first_page)) {
            gc_dcheck(!page_bytes_used(first_page));
            bytes_found = GENCGC_CARD_BYTES;
        } else if (small_object &&
                   page_extensible_p(first_page, gc_alloc_generation, page_type_flag)) {
            bytes_found = GENCGC_CARD_BYTES - page_bytes_used(first_page);
            // XXX: Prefer to start non-code on new pages.
            //      This is temporary until scavenging of small-object pages
            //      is made a little more intelligent (work in progress).
            if (bytes_found < nbytes && page_type_flag != CODE_PAGE_FLAG) {
                if (bytes_found > most_bytes_found)
                    most_bytes_found = bytes_found;
                first_page++;
                continue;
            }
        } else {
            first_page++;
            continue;
        }

        gc_dcheck(!page_table[first_page].write_protected);
        /* page_free_p() can legally be used at index 'page_table_pages'
         * because the array dimension is 1+page_table_pages */
        for (last_page = first_page+1;
             bytes_found < nbytes_goal &&
               page_free_p(last_page) && last_page < page_table_pages;
             last_page++) {
            /* page_free_p() implies 0 bytes used, thus GENCGC_CARD_BYTES available.
             * It also implies !write_protected, and if the OS's conception were
             * otherwise, lossage would routinely occur in the fault handler) */
            bytes_found += GENCGC_CARD_BYTES;
            gc_dcheck(0 == page_bytes_used(last_page));
            gc_dcheck(!page_table[last_page].write_protected);
        }

        if (bytes_found > most_bytes_found) {
            most_bytes_found = bytes_found;
            most_bytes_found_from = first_page;
            most_bytes_found_to = last_page;
        }
        if (bytes_found >= nbytes_goal)
            break;

        first_page = last_page;
    }

    bytes_found = most_bytes_found;
    restart_page = first_page + 1;

    /* Check for a failure */
    if (bytes_found < nbytes) {
        gc_assert(restart_page >= page_table_pages);
        gc_heap_exhausted_error_or_lose(most_bytes_found, nbytes);
    }

    gc_assert(most_bytes_found_to);
    *restart_page_ptr = most_bytes_found_from;
    return most_bytes_found_to-1;
}
/* Allocate bytes. All the rest of the special-purpose allocation
 * functions will eventually call this. */
void *
gc_alloc_with_region(sword_t nbytes,int page_type_flag, struct alloc_region *my_region,
                     int quick_p)
{
    void *new_free_pointer;

    if (nbytes>=LARGE_OBJECT_SIZE)
        return gc_alloc_large(nbytes, page_type_flag, my_region);

    /* Check whether there is room in the current alloc region. */
    new_free_pointer = (char*)my_region->free_pointer + nbytes;

    /* fprintf(stderr, "alloc %d bytes from %p to %p\n", nbytes,
       my_region->free_pointer, new_free_pointer); */

    if (new_free_pointer <= my_region->end_addr) {
        /* If so then allocate from the current alloc region. */
        void *new_obj = my_region->free_pointer;
        my_region->free_pointer = new_free_pointer;

        /* Unless a `quick' alloc was requested, check whether the
           alloc region is almost empty. */
        if (!quick_p &&
            addr_diff(my_region->end_addr,my_region->free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(page_type_flag, my_region);
            /* Set up a new region. */
            gc_alloc_new_region(32 /*bytes*/, page_type_flag, my_region);
        }

        return((void *)new_obj);
    }

    /* Else not enough free space in the current region: retry with a
     * new region. */

    gc_alloc_update_page_tables(page_type_flag, my_region);
    gc_alloc_new_region(nbytes, page_type_flag, my_region);
    return gc_alloc_with_region(nbytes, page_type_flag, my_region,0);
}
/* Copy a large object. If the object is on a large object page then
 * it is simply promoted, else it is copied.
 *
 * Bignums and vectors may have shrunk. If the object is not copied
 * the space needs to be reclaimed, and the page_tables corrected.
 *
 * Code objects can't shrink, but it's not worth adding an extra test
 * for large code just to avoid the loop that performs adjustment, so
 * go through the adjustment motions even though nothing happens.
 *
 * An object that is on non-large object pages will never move
 * to large object pages, thus ensuring that the assignment of
 * '.large_object = 0' in prepare_for_final_gc() is meaningful.
 * The saved core should have no large object pages. */
lispobj
copy_large_object(lispobj object, sword_t nwords, int page_type_flag)
{
    lispobj *new;
    page_index_t first_page;
    boolean boxedp = page_type_flag != UNBOXED_PAGE_FLAG;

    CHECK_COPY_PRECONDITIONS(object, nwords);

    if ((nwords > 1024*1024) && gencgc_verbose) {
        FSHOW((stderr, "/general_copy_large_object: %d bytes\n",
               nwords*N_WORD_BYTES));
    }

    /* Check whether it's a large object. */
    first_page = find_page_index((void *)object);
    gc_assert(first_page >= 0);

    // An object that shrank but was allocated on a large-object page
    // is a candidate for copying if its current size is non-large.
    if (page_table[first_page].large_object
        && nwords >= LARGE_OBJECT_SIZE / N_WORD_BYTES) {
        /* Promote the object. Note: Unboxed objects may have been
         * allocated to a BOXED region so it may be necessary to
         * change the region to UNBOXED. */
        os_vm_size_t remaining_bytes;
        os_vm_size_t bytes_freed;
        page_index_t next_page;
        page_bytes_t old_bytes_used;

        /* FIXME: This comment is somewhat stale.
         *
         * Note: Any page write-protection must be removed, else a
         * later scavenge_newspace may incorrectly not scavenge these
         * pages. This would not be necessary if they are added to the
         * new areas, but let's do it for them all (they'll probably
         * be written anyway?). */

        gc_assert(page_starts_contiguous_block_p(first_page));
        next_page = first_page;
        remaining_bytes = nwords*N_WORD_BYTES;

        /* FIXME: can we share code with maybe_adjust_large_object ? */
        while (remaining_bytes > GENCGC_CARD_BYTES) {
            gc_assert(page_table[next_page].gen == from_space);
            gc_assert(page_table[next_page].large_object);
            gc_assert(page_scan_start_offset(next_page) ==
                      npage_bytes(next_page-first_page));
            gc_assert(page_bytes_used(next_page) == GENCGC_CARD_BYTES);
            /* Should have been unprotected by unprotect_oldspace()
             * for boxed objects, and after promotion unboxed ones
             * should not be on protected pages at all. */
            gc_assert(!page_table[next_page].write_protected);

            if (boxedp)
                gc_assert(page_boxed_p(next_page));
            else {
                gc_assert(page_allocated_no_region_p(next_page));
                page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
            }
            page_table[next_page].gen = new_space;

            remaining_bytes -= GENCGC_CARD_BYTES;
            next_page++;
        }

        /* Now only one page remains, but the object may have shrunk so
         * there may be more unused pages which will be freed. */

        /* Object may have shrunk but shouldn't have grown - check. */
        gc_assert(page_bytes_used(next_page) >= remaining_bytes);

        page_table[next_page].gen = new_space;

        if (boxedp)
            gc_assert(page_boxed_p(next_page));
        else
            page_table[next_page].allocated = UNBOXED_PAGE_FLAG;

        /* Adjust the bytes_used. */
        old_bytes_used = page_bytes_used(next_page);
        set_page_bytes_used(next_page, remaining_bytes);

        bytes_freed = old_bytes_used - remaining_bytes;

        /* Free any remaining pages; needs care. */
        next_page++;
        while ((old_bytes_used == GENCGC_CARD_BYTES) &&
               (page_table[next_page].gen == from_space) &&
               /* FIXME: It is not obvious to me why this is necessary
                * as a loop condition: it seems to me that the
                * scan_start_offset test should be sufficient, but
                * experimentally that is not the case. --NS */
               (boxedp ?
                page_boxed_p(next_page) :
                page_allocated_no_region_p(next_page)) &&
               page_table[next_page].large_object &&
               (page_scan_start_offset(next_page) ==
                npage_bytes(next_page - first_page))) {
            /* Checks out OK, free the page. Don't need to bother zeroing
             * pages as this should have been done before shrinking the
             * object. These pages shouldn't be write-protected, even if
             * boxed they should be zero filled. */
            gc_assert(!page_table[next_page].write_protected);

            old_bytes_used = page_bytes_used(next_page);
            reset_page_flags(next_page);
            set_page_bytes_used(next_page, 0);
            bytes_freed += old_bytes_used;
            next_page++;
        }

        if ((bytes_freed > 0) && gencgc_verbose) {
            FSHOW((stderr,
                   "/general_copy_large_object bytes_freed=%"OS_VM_SIZE_FMT"\n",
                   bytes_freed));
        }

        generations[from_space].bytes_allocated -= nwords*N_WORD_BYTES
            + bytes_freed;
        generations[new_space].bytes_allocated += nwords*N_WORD_BYTES;
        bytes_allocated -= bytes_freed;

        /* Add the region to the new_areas if requested. */
        if (boxedp)
            add_new_area(first_page,0,nwords*N_WORD_BYTES);

        return(object);
    }
    else {
        /* Allocate space. */
        new = gc_general_alloc(nwords*N_WORD_BYTES, page_type_flag, ALLOC_QUICK);

        /* Copy the object. */
        memcpy(new,native_pointer(object),nwords*N_WORD_BYTES);

        /* Return Lisp pointer of new object. */
        return make_lispobj(new, lowtag_of(object));
    }
}
/* to copy unboxed objects */
lispobj
copy_unboxed_object(lispobj object, sword_t nwords)
{
    return gc_general_copy_object(object, nwords, UNBOXED_PAGE_FLAG);
}
static sword_t
scav_weak_pointer(lispobj *where, lispobj object)
{
    struct weak_pointer * wp = (struct weak_pointer *)where;

    if (!wp->next && weak_pointer_breakable_p(wp)) {
        /* All weak pointers refer to objects at least as old as themselves,
         * because there is no slot setter for WEAK-POINTER-VALUE.
         * (i.e. You can't reference an object that didn't already exist,
         * assuming that users don't stuff a new value in via low-level hacks)
         * A weak pointer is breakable only if it points to an object in the
         * condemned generation, which must be as young as, or younger than
         * the weak pointer itself. Per the initial claim, it can't be younger.
         * So it must be in the same generation. Therefore, if the pointee
         * is condemned, the pointer itself must be condemned. Hence it must
         * not be on a write-protected page. Assert this, to be sure.
         * (This assertion is compiled out in a normal build,
         * so even if incorrect, it should be relatively harmless.)
         */
        gc_dcheck(!page_table[find_page_index(wp)].write_protected);
        add_to_weak_pointer_list(wp);
    }

    /* Do not let GC scavenge the value slot of the weak pointer.
     * (That is why it is a weak pointer.) */

    return WEAK_POINTER_NWORDS;
}
/* a faster version for searching the dynamic space. This will work even
 * if the object is in a current allocation region. */
lispobj *
search_dynamic_space(void *pointer)
{
    page_index_t page_index = find_page_index(pointer);
    lispobj *start;

    /* The address may be invalid, so do some checks. */
    if ((page_index == -1) || page_free_p(page_index))
        return NULL;
    start = (lispobj *)page_scan_start(page_index);
    return gc_search_space(start, pointer);
}
#if !GENCGC_IS_PRECISE
// Return the starting address of the object containing 'addr'
// if and only if the object is one which would be evacuated from 'from_space'
// were it allowed to be either discarded as garbage or moved.
// 'addr_page_index' is the page containing 'addr' and must not be -1.
// Return 0 if there is no such object - that is, if addr is past the
// end of the used bytes, or its pages are not in 'from_space' etc.
static lispobj*
conservative_root_p(lispobj addr, page_index_t addr_page_index)
{
    /* quick check 1: Address is quite likely to have been invalid. */
    struct page* page = &page_table[addr_page_index];
    if ((addr & (GENCGC_CARD_BYTES - 1)) >= page_bytes_used(addr_page_index) ||
        (!is_lisp_pointer(addr) && page->allocated != CODE_PAGE_FLAG) ||
        (compacting_p() && (page->gen != from_space ||
                            (page->large_object && page->dont_move))))
        return 0;
    gc_assert(!(page->allocated & OPEN_REGION_PAGE_FLAG));

    /* quick check 2: Unless the page can hold code, the pointer's lowtag must
     * correspond to the widetag of the object. The object header can safely
     * be read even if it turns out that the pointer is not valid,
     * because the pointer was in bounds for the page.
     * Note that this can falsely pass if looking at the interior of an unboxed
     * array that masquerades as a Lisp object header by pure luck.
     * But if this doesn't pass, there's no point in proceeding to the
     * definitive test which involves searching for the containing object. */

    if (page->allocated != CODE_PAGE_FLAG) {
        lispobj* obj = native_pointer(addr);
        if (lowtag_of(addr) == LIST_POINTER_LOWTAG) {
            if (!is_cons_half(obj[0]) || !is_cons_half(obj[1]))
                return 0;
        } else {
            unsigned char widetag = widetag_of(*obj);
            if (!other_immediate_lowtag_p(widetag) ||
                lowtag_of(addr) != lowtag_for_widetag[widetag>>2])
                return 0;
        }
    }

    /* Don't gc_search_space() more than once for any object.
     * Doesn't apply to code since the base address is unknown */
    if (pinned_p(addr, addr_page_index)) return 0;

    /* Filter out anything which can't be a pointer to a Lisp object
     * (or, as a special case which also requires dont_move, a return
     * address referring to something in a CodeObject). This is
     * expensive but important, since it vastly reduces the
     * probability that random garbage will be bogusly interpreted as
     * a pointer which prevents a page from moving. */
    lispobj* object_start = search_dynamic_space((void*)addr);
    if (!object_start) return 0;

    /* If the containing object is a code object and 'addr' points
     * anywhere beyond the boxed words,
     * presume it to be a valid unboxed return address. */
    if (instruction_ptr_p((void*)addr, object_start))
        return object_start;

    /* Large object pages only contain ONE object, and it will never
     * be a CONS. However, arrays and bignums can be allocated larger
     * than necessary and then shrunk to fit, leaving what look like
     * (0 . 0) CONSes at the end. These appear valid to
     * properly_tagged_descriptor_p(), so pick them off here. */
    if (((lowtag_of(addr) == LIST_POINTER_LOWTAG) &&
         page_table[addr_page_index].large_object)
        || !properly_tagged_descriptor_p((void*)addr, object_start))
        return 0;

    return object_start;
}
#endif
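/* A worked example of quick check 1, assuming GENCGC_CARD_BYTES = 32768:
 * if a page has page_bytes_used = 4096 and an ambiguous root is
 * page_base + 0x5000, then (addr & 0x7fff) = 20480 >= 4096, so the
 * "pointer" aims past the used bytes and is rejected without ever being
 * dereferenced. Only roots surviving both quick checks pay for the
 * gc_search_space() walk. */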
/* Adjust large bignum and vector objects. This will adjust the
 * allocated region if the size has shrunk, and change boxed pages
 * into unboxed pages. The pages are not promoted here, and the
 * object is not added to the new_regions; this is really
 * only designed to be called from preserve_pointer(). Shouldn't fail
 * if this is missed, just may delay the moving of objects to unboxed
 * pages, and the freeing of pages. */
static void
maybe_adjust_large_object(page_index_t first_page, sword_t nwords)
{
    lispobj* where = (lispobj*)page_address(first_page);
    page_index_t next_page;

    uword_t remaining_bytes;
    uword_t bytes_freed;
    uword_t old_bytes_used;

    int page_type_flag;

    /* Check whether it's a vector or bignum object. */
    lispobj widetag = widetag_of(where[0]);
    if (widetag == SIMPLE_VECTOR_WIDETAG)
        page_type_flag = BOXED_PAGE_FLAG;
    else if (specialized_vector_widetag_p(widetag) || widetag == BIGNUM_WIDETAG)
        page_type_flag = UNBOXED_PAGE_FLAG;
    else
        return;

    /* Note: Any page write-protection must be removed, else a later
     * scavenge_newspace may incorrectly not scavenge these pages.
     * This would not be necessary if they are added to the new areas,
     * but let's do it for them all (they'll probably be written
     * anyway?). */

    gc_assert(page_starts_contiguous_block_p(first_page));

    next_page = first_page;
    remaining_bytes = nwords*N_WORD_BYTES;

    while (remaining_bytes > GENCGC_CARD_BYTES) {
        gc_assert(page_table[next_page].gen == from_space);
        // We can't assert that page_table[next_page].allocated is correct,
        // because unboxed objects are initially allocated on boxed pages.
        gc_assert(page_allocated_no_region_p(next_page));
        gc_assert(page_table[next_page].large_object);
        gc_assert(page_scan_start_offset(next_page) ==
                  npage_bytes(next_page-first_page));
        gc_assert(page_bytes_used(next_page) == GENCGC_CARD_BYTES);

        // This affects only one object, since large objects don't share pages.
        page_table[next_page].allocated = page_type_flag;

        /* Shouldn't be write-protected at this stage. Essential that the
         * pages aren't. */
        gc_assert(!page_table[next_page].write_protected);
        remaining_bytes -= GENCGC_CARD_BYTES;
        next_page++;
    }

    /* Now only one page remains, but the object may have shrunk so
     * there may be more unused pages which will be freed. */

    /* Object may have shrunk but shouldn't have grown - check. */
    gc_assert(page_bytes_used(next_page) >= remaining_bytes);

    page_table[next_page].allocated = page_type_flag;

    /* Adjust the bytes_used. */
    old_bytes_used = page_bytes_used(next_page);
    set_page_bytes_used(next_page, remaining_bytes);

    bytes_freed = old_bytes_used - remaining_bytes;

    /* Free any remaining pages; needs care. */
    next_page++;
    while ((old_bytes_used == GENCGC_CARD_BYTES) &&
           (page_table[next_page].gen == from_space) &&
           page_allocated_no_region_p(next_page) &&
           page_table[next_page].large_object &&
           (page_scan_start_offset(next_page) ==
            npage_bytes(next_page - first_page))) {
        /* It checks out OK, free the page. We don't need to bother zeroing
         * pages as this should have been done before shrinking the
         * object. These pages shouldn't be write protected as they
         * should be zero filled. */
        gc_assert(!page_table[next_page].write_protected);

        old_bytes_used = page_bytes_used(next_page);
        reset_page_flags(next_page);
        set_page_bytes_used(next_page, 0);
        bytes_freed += old_bytes_used;
        next_page++;
    }

    if ((bytes_freed > 0) && gencgc_verbose) {
        FSHOW((stderr,
               "/maybe_adjust_large_object() freed %d\n",
               bytes_freed));
    }

    generations[from_space].bytes_allocated -= bytes_freed;
    bytes_allocated -= bytes_freed;
}
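/* Example of the accounting above, assuming GENCGC_CARD_BYTES = 32768:
 * a large vector that originally filled four pages but has been shrunk
 * to 2.5 pages keeps its first two pages at full card size, has the
 * third page's bytes_used lowered to 16384, and has the fourth page
 * reset and zero-length, for bytes_freed = 16384 + 32768, all of it
 * credited back against from_space and the global bytes_allocated. */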
#ifdef PIN_GRANULARITY_LISPOBJ
/* After scavenging of the roots is done, we go back to the pinned objects
 * and look within them for pointers. While heap_scavenge() could certainly
 * do this, it would potentially lead to extra work, since we can't know
 * whether any given object has been examined at least once, since there is
 * no telltale forwarding-pointer. The easiest thing to do is defer all
 * pinned objects to a subsequent pass, as is done here.
 */
static void
scavenge_pinned_ranges()
{
    int i;
    lispobj key;
    for_each_hopscotch_key(i, key, pinned_objects) {
        lispobj* obj = native_pointer(key);
        lispobj header = *obj;
        // Never invoke scavenger on a simple-fun, just code components.
        if (is_cons_half(header))
            scavenge(obj, 2);
        else if (widetag_of(header) != SIMPLE_FUN_WIDETAG)
            scavtab[widetag_of(header)](obj, header);
    }
}
/* Deposit filler objects on small object pinned pages
 * from the page start to the first pinned object and in between pairs
 * of pinned objects. Zero-fill bytes following the last pinned object.
 * Also ensure that no scan_start_offset points to a page in
 * oldspace that will be freed.
 */
static void
wipe_nonpinned_words()
{
    void gc_heapsort_uwords(uword_t*, int);

    if (!pinned_objects.count)
        return;

    // Loop over the keys in pinned_objects and pack them densely into
    // the same array - pinned_objects.keys[] - but skip any simple-funs.
    // Admittedly this is abstraction breakage.
    int limit = hopscotch_max_key_index(pinned_objects);
    int n_pins = 0, i;
    for (i = 0; i <= limit; ++i) {
        lispobj key = pinned_objects.keys[i];
        if (key) {
            lispobj* obj = native_pointer(key);
            // No need to check for is_cons_half() - it will be false
            // on a simple-fun header, and that's the correct answer.
            if (widetag_of(*obj) != SIMPLE_FUN_WIDETAG)
                pinned_objects.keys[n_pins++] = (uword_t)obj;
        }
    }
    // Don't touch pinned_objects.count in case the reset function uses it
    // to decide how to resize for next use (which it doesn't, but could).
    gc_n_stack_pins = n_pins;
    // Order by ascending address, stopping short of the sentinel.
    gc_heapsort_uwords(pinned_objects.keys, n_pins);
#if 0
    fprintf(stderr, "Sorted pin list:\n");
    for (i = 0; i < n_pins; ++i) {
        lispobj* obj = (lispobj*)pinned_objects.keys[i];
        lispobj word = *obj;
        int widetag = widetag_of(word);
        if (is_cons_half(word))
            fprintf(stderr, "%p: (cons)\n", obj);
        else
            fprintf(stderr, "%p: %d words (%s)\n", obj,
                    (int)sizetab[widetag](obj), widetag_names[widetag>>2]);
    }
#endif

#define page_base(x) ALIGN_DOWN(x, GENCGC_CARD_BYTES)
// This macro asserts that space accounting happens exactly
// once per affected page (a page with any pins, no matter how many)
#define adjust_gen_usage(i) \
            gc_assert(page_table[i].has_pins); \
            page_table[i].has_pins = 0; \
            bytes_moved += page_bytes_used(i); \
            page_table[i].gen = new_space

    // Store a sentinel at the end. Even if n_pins = table capacity (unlikely),
    // it is safe to write one more word, because the hops[] array immediately
    // follows the keys[] array in memory. At worst, 2 elements of hops[]
    // are clobbered, which is irrelevant since the table has already been
    // rendered unusable by stealing its key array for a different purpose.
    pinned_objects.keys[n_pins] = ~(uword_t)0;

    // Each pinned object begets two ranges of bytes to be turned into filler:
    // - the range preceding it back to its page start or predecessor object
    // - the range after it, up to the lesser of page bytes used or successor object

    uword_t fill_from = page_base(pinned_objects.keys[0]);
    os_vm_size_t bytes_moved = 0; // i.e. virtually moved
    os_vm_size_t bytes_freed = 0; // bytes after last pinned object per page

    for (i = 0; i < n_pins; ++i) {
        lispobj* obj = (lispobj*)pinned_objects.keys[i];
        page_index_t begin_page_index = find_page_index(obj);
        // Create a filler object occupying space from 'fill_from' up to but
        // excluding 'obj'. If obj directly abuts its predecessor then don't.
        if ((uword_t)obj > fill_from) {
            lispobj* filler = (lispobj*)fill_from;
            int nwords = obj - filler;
            if (page_table[begin_page_index].allocated != CODE_PAGE_FLAG) {
                // On pages holding non-code, the filler is an array
                filler[0] = SIMPLE_ARRAY_WORD_WIDETAG;
                filler[1] = make_fixnum(nwords - 2);
            } else if (nwords > 2) {
                // Otherwise try to keep a strict code/non-code distinction
                filler[0] = 2<<N_WIDETAG_BITS | CODE_HEADER_WIDETAG;
                filler[1] = make_fixnum((nwords - 2) * N_WORD_BYTES);
            } else {
                // But as an exception, use a NIL array for tiny code filler
                // (If the ENSURE-CODE/DATA-SEPARATION test fails again,
                // it may need to ignore these objects. Hasn't happened yet)
                filler[0] = SIMPLE_ARRAY_NIL_WIDETAG;
                filler[1] = make_fixnum(0xDEAD);
            }
        }
        if (fill_from == page_base((uword_t)obj)) {
            adjust_gen_usage(begin_page_index);
            // This pinned object started a new page of pins.
            // scan_start must not see any page prior to this page,
            // as those might be in oldspace and about to be marked free.
            set_page_scan_start_offset(begin_page_index, 0);
        }
        // If 'obj' spans pages, move its successive page(s) to newspace and
        // ensure that those pages' scan_starts point at the same address
        // that this page's scan start does, which could be this page or earlier.
        size_t nwords = OBJECT_SIZE(*obj, obj);
        lispobj* obj_end = obj + nwords; // non-inclusive address bound
        page_index_t end_page_index = find_page_index(obj_end - 1); // inclusive bound

        if (end_page_index > begin_page_index) {
            char *scan_start = page_scan_start(begin_page_index);
            page_index_t index;
            for (index = begin_page_index + 1; index <= end_page_index; ++index) {
                set_page_scan_start_offset(index,
                                           addr_diff(page_address(index), scan_start));
                adjust_gen_usage(index);
            }
        }

        // Compute page base address of last page touched by this obj.
        uword_t obj_end_pageaddr = page_base((uword_t)obj_end - 1);
        // See if there's another pinned object on this page.
        // There is always a next object, due to the sentinel.
        if (pinned_objects.keys[i+1] < obj_end_pageaddr + GENCGC_CARD_BYTES) {
            // Next object starts within the same page.
            fill_from = (uword_t)obj_end;
        } else {
            // Next pinned object does not start on the same page this obj ends on.
            // Any bytes following 'obj' up to its page end are garbage.
            uword_t page_end = obj_end_pageaddr + page_bytes_used(end_page_index);
            long nbytes = page_end - (uword_t)obj_end;
            gc_assert(nbytes >= 0);
            if (nbytes) {
                // Bytes beyond a page's highest used byte must be zero.
                memset(obj_end, 0, nbytes);
                bytes_freed += nbytes;
                set_page_bytes_used(end_page_index,
                                    (uword_t)obj_end - obj_end_pageaddr);
            }
            fill_from = page_base(pinned_objects.keys[i+1]);
        }
    }
    generations[from_space].bytes_allocated -= bytes_moved;
    generations[new_space].bytes_allocated += bytes_moved - bytes_freed;
    bytes_allocated -= bytes_freed;
#undef adjust_gen_usage
}
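/* To illustrate: if one from_space page holds exactly two pinned conses
 * at word offsets 100 and 300, the first iteration writes a filler over
 * words 0..99, accounts the page once via adjust_gen_usage(), and sets
 * fill_from to the end of the first cons; the second iteration writes a
 * filler over words 102..299, and the tail of the page past word 302 is
 * zeroed with bytes_used pared down to match. The page is left looking
 * like a well-formed object sequence in which only the two conses are
 * live data. */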
/* Add 'object' to the hashtable, and if the object is a code component,
 * then also add all of the embedded simple-funs.
 * The rationale for the extra work on code components is that without it,
 * every test of pinned_p() on an object would have to check if the pointer
 * is to a simple-fun - entailing an extra read of the header - and mapping
 * to its code component if so. Since more calls to pinned_p occur than to
 * pin_object, the extra burden should be on this function.
 * Experimentation bears out that this is the better technique.
 * Also, we wouldn't often expect code components in the collected generation
 * so the extra work here is quite minimal, even if it can generally add to
 * the number of keys in the hashtable.
 */
static void
pin_object(lispobj* base_addr)
{
    lispobj object = compute_lispobj(base_addr);
    if (!hopscotch_containsp(&pinned_objects, object)) {
        hopscotch_insert(&pinned_objects, object, 1);
        struct code* maybe_code = (struct code*)native_pointer(object);
        if (widetag_of(maybe_code->header) == CODE_HEADER_WIDETAG) {
            for_each_simple_fun(i, fun, maybe_code, 0, {
                hopscotch_insert(&pinned_objects,
                                 make_lispobj(fun, FUN_POINTER_LOWTAG),
                                 1);
            })
        }
    }
}
#else
#  define scavenge_pinned_ranges()
#  define wipe_nonpinned_words()
#endif
/* Take a possible pointer to a Lisp object and mark its page in the
 * page_table so that it will not be relocated during a GC.
 *
 * This involves locating the page it points to, then backing up to
 * the start of its region, then marking all pages dont_move from there
 * up to the first page that's not full or has a different generation.
 *
 * It is assumed that all the page static flags have been cleared at
 * the start of a GC.
 *
 * It is also assumed that the current gc_alloc() region has been
 * flushed and the tables updated. */

// TODO: there's probably a way to be a little more efficient here.
// As things are, we start by finding the object that encloses 'addr',
// then we see if 'addr' was a "valid" Lisp pointer to that object
// - meaning we expect the correct lowtag on the pointer - except
// that for code objects we don't require a correct lowtag
// and we allow a pointer to anywhere in the object.
//
// It should be possible to avoid calling search_dynamic_space
// more of the time. First, check if the page pointed to might hold code.
// If it does, then we continue regardless of the pointer's lowtag
// (because of the special allowance). If the page definitely does *not*
// hold code, then we require up front that the lowtag make sense,
// by doing the same checks that are in properly_tagged_descriptor_p.
//
// Problem: when code is allocated from a per-thread region,
// does it ensure that the occupied pages are flagged as having code?

#if defined(__GNUC__) && defined(MEMORY_SANITIZER)
#define NO_SANITIZE_MEMORY __attribute__((no_sanitize_memory))
#else
#define NO_SANITIZE_MEMORY
#endif
static void NO_SANITIZE_MEMORY
preserve_pointer(void *addr)
{
#ifdef LISP_FEATURE_IMMOBILE_SPACE
    /* Immobile space MUST be lower than dynamic space,
       or else this test needs to be revised */
    if (addr < (void*)IMMOBILE_SPACE_END) {
        extern void immobile_space_preserve_pointer(void*);
        immobile_space_preserve_pointer(addr);
        return;
    }
#endif
    page_index_t page = find_page_index(addr);
    lispobj *object_start;

#if GENCGC_IS_PRECISE
    /* If we're in precise gencgc (non-x86oid as of this writing) then
     * we are only called on valid object pointers in the first place,
     * so we just have to do a bounds-check against the heap, a
     * generation check, and the already-pinned check. */
    if (page < 0 ||
        (compacting_p() && (page_table[page].gen != from_space ||
                            (page_table[page].large_object &&
                             page_table[page].dont_move))))
        return;
    object_start = native_pointer((lispobj)addr);
    switch (widetag_of(*object_start)) {
    case SIMPLE_FUN_WIDETAG:
#ifdef RETURN_PC_WIDETAG
    case RETURN_PC_WIDETAG:
#endif
        object_start = fun_code_header(object_start);
    }
#else
    if (page < 0 || (object_start = conservative_root_p((lispobj)addr, page)) == NULL)
        return;
#endif

    if (!compacting_p()) {
        /* Just mark it. No distinction between large and small objects. */
        gc_mark_obj(compute_lispobj(object_start));
        return;
    }

    page_index_t first_page = find_page_index(object_start);
    size_t nwords = OBJECT_SIZE(*object_start, object_start);
    page_index_t last_page = find_page_index(object_start + nwords - 1);

    for (page = first_page; page <= last_page; ++page) {
        /* Oldspace pages were unprotected at start of GC.
         * Assert this here, because the previous logic used to,
         * and page protection bugs are scary */
        gc_assert(!page_table[page].write_protected);

        /* Mark the page static. */
        page_table[page].dont_move = 1;
        page_table[page].has_pins = !page_table[page].large_object;
    }

    if (page_table[first_page].large_object)
        maybe_adjust_large_object(first_page, nwords);
    else
        pin_object(object_start);
}
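/* E.g. if a machine register happens to contain the address of a
 * three-page large vector, the loop above sets dont_move on all three
 * pages; has_pins stays clear because for a large object the pin covers
 * whole pages, so there are no interior gaps for wipe_nonpinned_words()
 * to fill. */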
#define IN_REGION_P(a,kind) (kind##_region.start_addr<=a && a<=kind##_region.free_pointer)
#ifdef LISP_FEATURE_SEGREGATED_CODE
#define IN_BOXED_REGION_P(a) IN_REGION_P(a,boxed)||IN_REGION_P(a,code)
#else
#define IN_BOXED_REGION_P(a) IN_REGION_P(a,boxed)
#endif
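/* So IN_REGION_P(p,unboxed) expands to
 *   unboxed_region.start_addr<=p && p<=unboxed_region.free_pointer
 * which catches pointers into an open allocation region whose pages
 * still show zero bytes_used in the page table. */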
/* If the given page is not write-protected, then scan it for pointers
 * to younger generations or the top temp. generation. If no
 * suspicious pointers are found, then the page is write-protected.
 *
 * Care is taken to check for pointers to the current gc_alloc()
 * region if it is a younger generation or the temp. generation. This
 * frees the caller from doing a gc_alloc_update_page_tables(). Actually
 * the gc_alloc_generation does not need to be checked as this is only
 * called from scavenge_generation() when the gc_alloc generation is
 * younger, so it just checks if there is a pointer to the current
 * region.
 *
 * We return 1 if the page was write-protected, else 0.
 *
 * Note that because of the existence of some words which have fixnum lowtag
 * but are actually pointers, you might think it would be possible for this
 * function to go wrong, protecting a page that contains old->young pointers.
 * Well, it seems fine mostly. Why: two of the guilty parties are CLOSURE-FUN
 * and FDEFN-RAW-ADDR. Closure-fun is a fixnum (on x86) which when treated
 * as a pointer indicates the entry point to call. Its function can never
 * be an object younger than itself. (An invariant of any immutable object.)
 * Fdefn-raw-address is more subtle. In set-fdefn-fun we first store 'fun'
 * and then 'raw-addr', where a stop-for-GC could occur in between.
 * So if the fdefn was, before the first store:
 *     fun -> younger object
 *     raw-addr -> younger object
 * and then after the first store:
 *     fun -> older object | <- interrupt occurred after this store
 *     raw-addr -> younger object
 * then we have a page that may look like it has no traceable pointers
 * to younger objects (the raw-addr is untraceable by the algorithm below).
 * But because the fdefn is in a register, it is pinned, therefore it is live,
 * therefore all its slots will be traced on this GC.
 * In fact update_page_write_prot() won't even be called on the fdefn's page.
 * The final problem is compact-instance-header layouts. Conditions and
 * structures can't point to younger layouts, so that much is easy.
 * Standard-objects can. I think those layouts are kept live by the
 * voluminous amount of metadata that CLOS insists on maintaining,
 * though I'm not 100% sure, and would not be surprised if there is a bug
 * related to GC of those layouts.
 */
static int
update_page_write_prot(page_index_t page)
{
    generation_index_t gen = page_table[page].gen;
    sword_t j;
    int wp_it = 1;
    void **page_addr = (void **)page_address(page);
    sword_t num_words = page_bytes_used(page) / N_WORD_BYTES;

    /* Shouldn't be a free page. */
    gc_dcheck(!page_free_p(page)); // Implied by the next assertion
    gc_assert(page_bytes_used(page) != 0);

    if (!ENABLE_PAGE_PROTECTION) return 0;

    /* Skip if it's already write-protected, pinned, or unboxed */
    if (page_table[page].write_protected
        /* FIXME: What's the reason for not write-protecting pinned pages? */
        || page_table[page].dont_move
        || page_unboxed_p(page))
        return (0);

    /* Scan the page for pointers to younger generations or the
     * top temp. generation. */

    /* This is conservative: any word satisfying is_lisp_pointer() is
     * assumed to be a pointer. To do otherwise would require a family
     * of scavenge-like functions. */
    for (j = 0; j < num_words; j++) {
        void *ptr = *(page_addr+j);
        page_index_t index;
        lispobj __attribute__((unused)) header;

        if (!is_lisp_pointer((lispobj)ptr))
            continue;
        /* Check that it's in the dynamic space */
        if ((index = find_page_index(ptr)) != -1) {
            if (/* Does it point to a younger or the temp. generation? */
                ((page_bytes_used(index) != 0)
                 && ((page_table[index].gen < gen)
                     || (page_table[index].gen == SCRATCH_GENERATION)))

                /* Or does it point within a current gc_alloc() region? */
                || (IN_BOXED_REGION_P(ptr) || IN_REGION_P(ptr,unboxed))) {
                wp_it = 0;
                break;
            }
        }
#ifdef LISP_FEATURE_IMMOBILE_SPACE
        else if (immobile_space_p((lispobj)ptr) &&
                 other_immediate_lowtag_p(header = *native_pointer((lispobj)ptr))) {
            // This is *possibly* a pointer to an object in immobile space,
            // given that the above two conditions were satisfied.
            // But unlike in the dynamic space case, we need to read a byte
            // from the object to determine its generation, which requires care.
            // Consider an unboxed word that looks like a pointer to a word that
            // looks like fun-header-widetag. We can't naively back up to the
            // underlying code object since the alleged header might not be one.
            int obj_gen = gen; // Make comparison fail if we fall through
            if (lowtag_of((lispobj)ptr) == FUN_POINTER_LOWTAG &&
                widetag_of(header) == SIMPLE_FUN_WIDETAG) {
                lispobj* code = fun_code_header((lispobj)ptr - FUN_POINTER_LOWTAG);
                // This is a heuristic, since we're not actually looking for
                // an object boundary. Precise scanning of 'page' would obviate
                // the guard conditions here.
                if ((lispobj)code >= IMMOBILE_VARYOBJ_SUBSPACE_START
                    && widetag_of(*code) == CODE_HEADER_WIDETAG)
                    obj_gen = __immobile_obj_generation(code);
            } else {
                obj_gen = __immobile_obj_generation(native_pointer((lispobj)ptr));
            }
            // A bogus generation number implies a not-really-pointer,
            // but it won't cause misbehavior.
            if (obj_gen < gen || obj_gen == SCRATCH_GENERATION) {
                wp_it = 0;
                break;
            }
        }
#endif
    }

    if (wp_it == 1)
        protect_page(page_addr, page);

    return (wp_it);
}
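/* Concretely: a generation-5 page whose words hold only pointers to
 * other generation-5 (or older) objects, and none into an open
 * allocation region, leaves the loop with wp_it still 1 and gets
 * protected; a single word pointing at a generation-0 object is enough
 * to keep the page writable so later GCs know to scavenge it. */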
/* Is this page holding a normal (non-hashtable) large-object
 * simple-vector? */
static inline boolean
large_simple_vector_p(page_index_t page) {
    if (!page_table[page].large_object)
        return 0;
    lispobj header = *(lispobj *)page_address(page);
    return widetag_of(header) == SIMPLE_VECTOR_WIDETAG &&
           is_vector_subtype(header, VectorNormal);
}
/* Scavenge all generations from FROM to TO, inclusive, except for
 * new_space which needs special handling, as new objects may be
 * added which are not checked here - use scavenge_newspace_generation.
 *
 * Write-protected pages should not have any pointers to the
 * from_space so do not need scavenging; thus write-protected pages are
 * not always scavenged. There is some code to check that these pages
 * are not written; but to check fully the write-protected pages need
 * to be scavenged by disabling the code to skip them.
 *
 * Under the current scheme when a generation is GCed the younger
 * generations will be empty. So, when a generation is being GCed it
 * is only necessary to scavenge the older generations for pointers
 * not the younger. So a page that does not have pointers to younger
 * generations does not need to be scavenged.
 *
 * The write-protection can be used to note pages that don't have
 * pointers to younger pages. But pages can be written without having
 * pointers to younger generations. After the pages are scavenged here
 * they can be scanned for pointers to younger generations and if
 * there are none the page can be write-protected.
 *
 * One complication is when the newspace is the top temp. generation.
 *
 * Enabling SC_GEN_CK scavenges the write-protected pages and checks
 * that none were written, which they shouldn't be as they should have
 * no pointers to younger generations. This breaks down for weak
 * pointers as the objects contain a link to the next and are written
 * if a weak pointer is scavenged. Still it's a useful check. */
static void
scavenge_generations(generation_index_t from, generation_index_t to)
{
    page_index_t i;
    page_index_t num_wp = 0;

#define SC_GEN_CK 0
#if SC_GEN_CK
    /* Clear the write_protected_cleared flags on all pages. */
    for (i = 0; i < page_table_pages; i++)
        page_table[i].write_protected_cleared = 0;
#endif

    for (i = 0; i < last_free_page; i++) {
        generation_index_t generation = page_table[i].gen;
        if (page_boxed_p(i)
            && (page_bytes_used(i) != 0)
            && (generation != new_space)
            && (generation >= from)
            && (generation <= to)) {

            /* This should be the start of a region */
            gc_assert(page_starts_contiguous_block_p(i));

            if (large_simple_vector_p(i)) {
                /* Scavenge only the unprotected pages of a
                 * large-object vector, other large objects could be
                 * handled as well, but vectors are easier to deal
                 * with and are more likely to grow to very large
                 * sizes where avoiding scavenging the whole thing is
                 * worthwhile */
                if (!page_table[i].write_protected) {
                    scavenge((lispobj*)page_address(i) + 2,
                             GENCGC_CARD_BYTES / N_WORD_BYTES - 2);
                    update_page_write_prot(i);
                }
                while (!page_ends_contiguous_block_p(i, generation)) {
                    ++i;
                    if (!page_table[i].write_protected) {
                        scavenge((lispobj*)page_address(i),
                                 page_bytes_used(i) / N_WORD_BYTES);
                        update_page_write_prot(i);
                    }
                }
            } else {
                page_index_t last_page, j;
                boolean write_protected = 1;
                /* Now work forward until the end of the region */
                for (last_page = i; ; last_page++) {
                    write_protected =
                        write_protected && page_table[last_page].write_protected;
                    if (page_ends_contiguous_block_p(last_page, generation))
                        break;
                }
                if (!write_protected) {
                    heap_scavenge((lispobj*)page_address(i),
                                  (lispobj*)(page_address(last_page)
                                             + page_bytes_used(last_page)));

                    /* Now scan the pages and write protect those that
                     * don't have pointers to younger generations. */
                    for (j = i; j <= last_page; j++)
                        num_wp += update_page_write_prot(j);

                    if ((gencgc_verbose > 1) && (num_wp != 0)) {
                        FSHOW((stderr,
                               "/write protected %d pages within generation %d\n",
                               num_wp, generation));
                    }
                }
                i = last_page;
            }
        }
    }

#if SC_GEN_CK
    /* Check that none of the write_protected pages in this generation
     * have been written to. */
    for (i = 0; i < page_table_pages; i++) {
        if ((page_bytes_used(i) != 0)
            && (page_table[i].gen == generation)
            && (page_table[i].write_protected_cleared != 0)) {
            FSHOW((stderr, "/scavenge_generation() %d\n", generation));
            FSHOW((stderr,
                   "/page bytes_used=%d scan_start_offset=%lu dont_move=%d\n",
                   page_bytes_used(i),
                   scan_start_offset(page_table[i]),
                   page_table[i].dont_move));
            lose("write to protected page %d in scavenge_generation()\n", i);
        }
    }
#endif
}
/* Scavenge a newspace generation. As it is scavenged new objects may
 * be allocated to it; these will also need to be scavenged. This
 * repeats until there are no more objects unscavenged in the
 * newspace generation.
 *
 * To help improve the efficiency, areas written are recorded by
 * gc_alloc() and only these scavenged. Sometimes a little more will be
 * scavenged, but this causes no harm. An easy check is done that the
 * scavenged bytes equal the number allocated in the previous
 * scavenge.
 *
 * Write-protected pages are not scanned except if they are marked
 * dont_move in which case they may have been promoted and still have
 * pointers to the from space.
 *
 * Write-protected pages could potentially be written by alloc however
 * to avoid having to handle re-scavenging of write-protected pages
 * gc_alloc() does not write to write-protected pages.
 *
 * New areas of objects allocated are recorded alternately in the two
 * new_areas arrays below. */
static struct new_area new_areas_1[NUM_NEW_AREAS];
static struct new_area new_areas_2[NUM_NEW_AREAS];

#ifdef LISP_FEATURE_IMMOBILE_SPACE
extern unsigned int immobile_scav_queue_count;
extern void
  update_immobile_nursery_bits(),
  scavenge_immobile_roots(generation_index_t,generation_index_t),
  scavenge_immobile_newspace(),
  sweep_immobile_space(int raise),
  write_protect_immobile_space();
#else
#define immobile_scav_queue_count 0
#endif
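/* Each new_area is a (page, offset, size) record of a run of bytes
 * written by gc_alloc(); e.g. copying a five-word object to offset
 * 0x340 of page 12 would record roughly {12, 0x340, 48} (sizes stay a
 * multiple of the two-word allocation granularity), and the re-scan
 * loop in scavenge_newspace_generation() scavenges exactly that span. */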
/* Do one full scan of the new space generation. This is not enough to
 * complete the job as new objects may be added to the generation in
 * the process which are not scavenged. */
static void
scavenge_newspace_generation_one_scan(generation_index_t generation)
{
    page_index_t i;

    FSHOW((stderr,
           "/starting one full scan of newspace generation %d\n",
           generation));
    for (i = 0; i < last_free_page; i++) {
        /* Note that this skips over open regions when it encounters them. */
        if (page_boxed_p(i)
            && (page_bytes_used(i) != 0)
            && (page_table[i].gen == generation)
            && (!page_table[i].write_protected
                /* (This may be redundant as write_protected is now
                 * cleared before promotion.) */
                || page_table[i].dont_move)) {
            page_index_t last_page;
            int all_wp = 1;

            /* The scavenge will start at the scan_start_offset of
             * page i.
             *
             * We need to find the full extent of this contiguous
             * block in case objects span pages.
             *
             * Now work forward until the end of this contiguous area
             * is found. A small area is preferred as there is a
             * better chance of its pages being write-protected. */
            for (last_page = i; ;last_page++) {
                /* If all pages are write-protected and movable,
                 * then no need to scavenge */
                all_wp = all_wp && page_table[last_page].write_protected &&
                         !page_table[last_page].dont_move;

                /* Check whether this is the last page in this
                 * contiguous block */
                if (page_ends_contiguous_block_p(last_page, generation))
                    break;
            }

            /* Do a limited check for write-protected pages. */
            if (!all_wp) {
                new_areas_ignore_page = last_page;
                heap_scavenge(page_scan_start(i),
                              (lispobj*)(page_address(last_page)
                                         + page_bytes_used(last_page)));
            }
            i = last_page;
        }
    }
    FSHOW((stderr,
           "/done with one full scan of newspace generation %d\n",
           generation));
}
/* Do a complete scavenge of the newspace generation. */
static void
scavenge_newspace_generation(generation_index_t generation)
{
    size_t i;

    /* the new_areas array currently being written to by gc_alloc() */
    struct new_area (*current_new_areas)[] = &new_areas_1;
    size_t current_new_areas_index;

    /* the new_areas created by the previous scavenge cycle */
    struct new_area (*previous_new_areas)[] = NULL;
    size_t previous_new_areas_index;

    /* Flush the current regions updating the tables. */
    gc_alloc_update_all_page_tables(0);

    /* Turn on the recording of new areas by gc_alloc(). */
    new_areas = current_new_areas;
    new_areas_index = 0;

    /* Don't need to record new areas that get scavenged anyway during
     * scavenge_newspace_generation_one_scan. */
    record_new_objects = 1;

    /* Start with a full scavenge. */
    scavenge_newspace_generation_one_scan(generation);

    /* Record all new areas now. */
    record_new_objects = 2;

    /* Give a chance to weak hash tables to make other objects live.
     * FIXME: The algorithm implemented here for weak hash table gcing
     * is O(W^2+N) as Bruno Haible warns in
     * http://www.haible.de/bruno/papers/cs/weak/WeakDatastructures-writeup.html
     * see "Implementation 2". */
    scav_weak_hash_tables(weak_ht_alivep_funs, gc_scav_pair);

    /* Flush the current regions updating the tables. */
    gc_alloc_update_all_page_tables(0);

    /* Grab new_areas_index. */
    current_new_areas_index = new_areas_index;

    /*FSHOW((stderr,
             "The first scan is finished; current_new_areas_index=%d.\n",
             current_new_areas_index));*/

    while (current_new_areas_index > 0 || immobile_scav_queue_count) {
        /* Move the current to the previous new areas */
        previous_new_areas = current_new_areas;
        previous_new_areas_index = current_new_areas_index;

        /* Scavenge all the areas in previous new areas. Any new areas
         * allocated are saved in current_new_areas. */

        /* Allocate an array for current_new_areas; alternating between
         * new_areas_1 and 2 */
        if (previous_new_areas == &new_areas_1)
            current_new_areas = &new_areas_2;
        else
            current_new_areas = &new_areas_1;

        /* Set up for gc_alloc(). */
        new_areas = current_new_areas;
        new_areas_index = 0;

#ifdef LISP_FEATURE_IMMOBILE_SPACE
        scavenge_immobile_newspace();
#endif
        /* Check whether previous_new_areas had overflowed. */
        if (previous_new_areas_index >= NUM_NEW_AREAS) {

            /* New areas of objects allocated have been lost so need to do a
             * full scan to be sure! If this becomes a problem try
             * increasing NUM_NEW_AREAS. */
            if (gencgc_verbose) {
                SHOW("new_areas overflow, doing full scavenge");
            }

            /* Don't need to record new areas that get scavenged
             * anyway during scavenge_newspace_generation_one_scan. */
            record_new_objects = 1;

            scavenge_newspace_generation_one_scan(generation);

            /* Record all new areas now. */
            record_new_objects = 2;

        } else {

            /* Work through previous_new_areas. */
            for (i = 0; i < previous_new_areas_index; i++) {
                page_index_t page = (*previous_new_areas)[i].page;
                size_t offset = (*previous_new_areas)[i].offset;
                size_t size = (*previous_new_areas)[i].size;
                gc_assert(size % (2*N_WORD_BYTES) == 0);
                lispobj *start = (lispobj*)(page_address(page) + offset);
                heap_scavenge(start, (lispobj*)((char*)start + size));
            }

        }

        scav_weak_hash_tables(weak_ht_alivep_funs, gc_scav_pair);

        /* Flush the current regions updating the tables. */
        gc_alloc_update_all_page_tables(0);

        current_new_areas_index = new_areas_index;

        /*FSHOW((stderr,
                 "The re-scan has finished; current_new_areas_index=%d.\n",
                 current_new_areas_index));*/
    }

    /* Turn off recording of areas allocated by gc_alloc(). */
    record_new_objects = 0;

#ifdef SC_NS_GEN_CK
    {
        page_index_t i;
        /* Check that none of the write_protected pages in this generation
         * have been written to. */
        for (i = 0; i < page_table_pages; i++) {
            if ((page_bytes_used(i) != 0)
                && (page_table[i].gen == generation)
                && (page_table[i].write_protected_cleared != 0)
                && (page_table[i].dont_move == 0)) {
                lose("write protected page %d written to in scavenge_newspace_generation\ngeneration=%d dont_move=%d\n",
                     i, generation, page_table[i].dont_move);
            }
        }
    }
#endif
}
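/* The loop above is a fixpoint computation: each pass scavenges the
 * areas produced by the previous pass, which may copy further objects
 * and so produce new areas; it stops only when a pass produces none
 * (and the immobile queue, if any, is drained). Termination is assured
 * because each object is transported at most once per GC. */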
/* Un-write-protect all the pages in from_space. This is done at the
 * start of a GC else there may be many page faults while scavenging
 * the newspace (I've seen it drive the system time to 99%). These pages
 * would need to be unprotected anyway before unmapping in
 * free_oldspace; not sure what effect this has on paging.. */
static void
unprotect_oldspace(void)
{
    page_index_t i;
    char *region_addr = 0;
    char *page_addr = 0;
    uword_t region_bytes = 0;

    for (i = 0; i < last_free_page; i++) {
        if ((page_bytes_used(i) != 0)
            && (page_table[i].gen == from_space)) {

            /* Remove any write-protection. We should be able to rely
             * on the write-protect flag to avoid redundant calls. */
            if (page_table[i].write_protected) {
                page_table[i].write_protected = 0;
                page_addr = page_address(i);
                if (!region_addr) {
                    /* First region. */
                    region_addr = page_addr;
                    region_bytes = GENCGC_CARD_BYTES;
                } else if (region_addr + region_bytes == page_addr) {
                    /* Region continue. */
                    region_bytes += GENCGC_CARD_BYTES;
                } else {
                    /* Unprotect previous region. */
                    os_protect(region_addr, region_bytes, OS_VM_PROT_ALL);
                    /* First page in new region. */
                    region_addr = page_addr;
                    region_bytes = GENCGC_CARD_BYTES;
                }
            }
        }
    }
    if (region_addr) {
        /* Unprotect last region. */
        os_protect(region_addr, region_bytes, OS_VM_PROT_ALL);
    }
}
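/* The region bookkeeping above batches mprotect work: if pages 5..12 of
 * from_space are all write-protected, they are unprotected with one
 * os_protect() call spanning eight cards rather than eight separate
 * system calls. */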
/* Work through all the pages and free any in from_space. This
 * assumes that all objects have been copied or promoted to an older
 * generation. Bytes_allocated and the generation bytes_allocated
 * counter are updated. The number of bytes freed is returned. */
static uword_t
free_oldspace(void)
{
    uword_t bytes_freed = 0;
    page_index_t first_page, last_page;

    first_page = 0;

    do {
        /* Find a first page for the next region of pages. */
        while ((first_page < last_free_page)
               && ((page_bytes_used(first_page) == 0)
                   || (page_table[first_page].gen != from_space)))
            first_page++;

        if (first_page >= last_free_page)
            break;

        /* Find the last page of this region. */
        last_page = first_page;

        do {
            /* Free the page. */
            bytes_freed += page_bytes_used(last_page);
            generations[page_table[last_page].gen].bytes_allocated -=
                page_bytes_used(last_page);
            reset_page_flags(last_page);
            set_page_bytes_used(last_page, 0);
            /* Should already be unprotected by unprotect_oldspace(). */
            gc_assert(!page_table[last_page].write_protected);
            last_page++;
        }
        while ((last_page < last_free_page)
               && (page_bytes_used(last_page) != 0)
               && (page_table[last_page].gen == from_space));

#ifdef TRAVERSE_FREED_OBJECTS
        /* At this point we could attempt to recycle unused TLS indices
         * as follows: For each now-garbage symbol that had a nonzero index,
         * return that index to a "free TLS index" pool, perhaps a linked list
         * or bitmap. Then either always try the free pool first (for better
         * locality) or if ALLOC-TLS-INDEX detects exhaustion (for speed). */
        {
            lispobj* where = (lispobj*)page_address(first_page);
            lispobj* end = (lispobj*)page_address(last_page);
            while (where < end) {
                lispobj word = *where;
                if (forwarding_pointer_p(where)) {
                    word = *native_pointer(forwarding_pointer_value(where));
                    where += OBJECT_SIZE(word,
                                         native_pointer(forwarding_pointer_value(where)));
                } else if (is_cons_half(word)) {
                    // Print something maybe
                    where += 2;
                } else {
                    // Print something maybe
                    where += sizetab[widetag_of(word)](where);
                }
            }
        }
#endif

#ifdef READ_PROTECT_FREE_PAGES
        os_protect(page_address(first_page),
                   npage_bytes(last_page-first_page),
                   OS_VM_PROT_NONE);
#endif
        first_page = last_page;
    } while (first_page < last_free_page);

    bytes_allocated -= bytes_freed;
    return bytes_freed;
}
/* Print some information about a pointer at the given address. */
static void
print_ptr(lispobj *addr)
{
    /* If addr is in the dynamic space then print the page information. */
    page_index_t pi1 = find_page_index((void*)addr);

    if (pi1 != -1)
        fprintf(stderr,"  %p: page %d  alloc %d  gen %d  bytes_used %d  offset %lu  dont_move %d\n",
                addr,
                (int)pi1,
                page_table[pi1].allocated,
                page_table[pi1].gen,
                page_bytes_used(pi1),
                (unsigned long)scan_start_offset(page_table[pi1]),
                page_table[pi1].dont_move);
    fprintf(stderr,"  %x %x %x %x (%x) %x %x %x %x\n",
            *(addr-4), *(addr-3), *(addr-2), *(addr-1), *(addr-0),
            *(addr+1), *(addr+2), *(addr+3), *(addr+4));
}
static boolean
is_in_stack_space(lispobj ptr)
{
    /* For space verification: Pointers can be valid if they point
     * to a thread stack space. This would be faster if the thread
     * structures had page-table entries as if they were part of
     * the heap space. */
    /* Actually, no, how would that be faster?
     * If you have to examine thread structures, you have to examine
     * them all. This demands something like a binary search tree */
    struct thread *th;
    for_each_thread(th) {
        if ((th->control_stack_start <= (lispobj *)ptr) &&
            (th->control_stack_end >= (lispobj *)ptr)) {
            return 1;
        }
    }
    return 0;
}

struct verify_state {
    lispobj *object_start, *object_end;
    lispobj *virtual_where;
    uword_t flags;
    int errors;
    generation_index_t object_gen;
};

#define VERIFY_VERBOSE    1
/* AGGRESSIVE = always call valid_lisp_pointer_p() on pointers.
 * Otherwise, do only a quick check that widetag/lowtag correspond */
#define VERIFY_AGGRESSIVE 2
/* VERIFYING_foo indicates internal state, not a caller's option */
#define VERIFYING_HEAP_OBJECTS 8
// NOTE: This function can produce false failure indications,
// usually related to dynamic space pointing to the stack of a
// dead thread, but there may be other reasons as well.
static void
verify_range(lispobj *where, sword_t nwords, struct verify_state *state)
{
    extern int valid_lisp_pointer_p(lispobj);
    boolean is_in_readonly_space =
        (READ_ONLY_SPACE_START <= (uword_t)where &&
         where < read_only_space_free_pointer);
    boolean is_in_immobile_space = 0;
#ifdef LISP_FEATURE_IMMOBILE_SPACE
    is_in_immobile_space =
        (IMMOBILE_SPACE_START <= (uword_t)where &&
         where < immobile_space_free_pointer);
#endif

    lispobj *end = where + nwords;
    size_t count;
    for ( ; where < end ; where += count) {
        // Keep track of object boundaries, unless verifying a non-heap space.
        if (where > state->object_end && (state->flags & VERIFYING_HEAP_OBJECTS)) {
            state->object_start = where;
            state->object_end = where + OBJECT_SIZE(*where, where) - 1;
        }
        count = 1;
        lispobj thing = *where;
        lispobj callee;

        if (is_lisp_pointer(thing)) {
            page_index_t page_index = find_page_index((void*)thing);
            boolean to_immobile_space = 0;
#ifdef LISP_FEATURE_IMMOBILE_SPACE
            to_immobile_space =
                (IMMOBILE_SPACE_START <= thing &&
                 thing < (lispobj)immobile_fixedobj_free_pointer) ||
                (IMMOBILE_VARYOBJ_SUBSPACE_START <= thing &&
                 thing < (lispobj)immobile_space_free_pointer);
#endif

    /* unlike lose(), fprintf detects format mismatch, hence the casts */
#define FAIL_IF(what, why) if (what) { \
    if (++state->errors > 25) lose("Too many errors"); \
    else fprintf(stderr, "Ptr %p @ %"OBJ_FMTX" sees %s\n", \
                 (void*)(uintptr_t)thing, \
                 (lispobj)(state->virtual_where ? state->virtual_where : where), \
                 why); }

            /* Does it point to the dynamic space? */
            if (page_index != -1) {
                /* If it's within the dynamic space it should point to a used page. */
                FAIL_IF(page_free_p(page_index), "free page");
                FAIL_IF(!(page_table[page_index].allocated & OPEN_REGION_PAGE_FLAG)
                        && (thing & (GENCGC_CARD_BYTES-1)) >= page_bytes_used(page_index),
                        "unallocated space");
                /* Check that it doesn't point to a forwarding pointer! */
                FAIL_IF(*native_pointer(thing) == 0x01, "forwarding ptr");
                /* Check that it's not in the RO space as it would then be a
                 * pointer from the RO to the dynamic space. */
                FAIL_IF(is_in_readonly_space, "dynamic space from RO space");
            } else if (to_immobile_space) {
                // the object pointed to must not have been discarded as garbage
                FAIL_IF(!other_immediate_lowtag_p(*native_pointer(thing)) ||
                        filler_obj_p(native_pointer(thing)),
                        "trashed object");
            }
            /* Any pointer that points to non-static space is examined further.
             * You might think this should scan stacks first as a quick out,
             * but that would take time proportional to the number of threads. */
            if (page_index >= 0 || to_immobile_space) {
                int valid;
                /* If aggressive, or to/from immobile space, do a full search
                 * (as entailed by valid_lisp_pointer_p) */
                if ((state->flags & VERIFY_AGGRESSIVE)
                    || (is_in_immobile_space || to_immobile_space))
                    valid = valid_lisp_pointer_p(thing);
                else {
                    /* Efficiently decide whether 'thing' is plausible.
                     * This MUST NOT use properly_tagged_descriptor_p() which
                     * assumes a known good object base address, and would
                     * "dangerously" scan a code component for embedded funs. */
                    int lowtag = lowtag_of(thing);
                    if (lowtag == LIST_POINTER_LOWTAG)
                        valid = is_cons_half(CONS(thing)->car)
                             && is_cons_half(CONS(thing)->cdr);
                    else {
                        lispobj word = *native_pointer(thing);
                        valid = other_immediate_lowtag_p(word) &&
                            lowtag_for_widetag[widetag_of(word)>>2] == lowtag;
                    }
                }
                /* If 'thing' points to a stack, we can only hope that the frame
                 * is not clobbered, or the object at 'where' is unreachable. */
                FAIL_IF(!valid && !is_in_stack_space(thing), "junk");
            }
            continue;
        }
        int widetag = widetag_of(thing);
        if (is_lisp_immediate(thing) || widetag == NO_TLS_VALUE_MARKER_WIDETAG) {
            /* skip immediates */
        } else if (!(other_immediate_lowtag_p(widetag)
                     && lowtag_for_widetag[widetag>>2])) {
            lose("Unhandled widetag %p at %p\n", widetag, where);
        } else if (unboxed_obj_widetag_p(widetag)) {
            count = sizetab[widetag](where);
        } else switch(widetag) {
            /* boxed or partially boxed objects */
            // FIXME: x86-64 can have partially unboxed FINs. The raw words
            // are at the moment valid fixnums by blind luck.
            case INSTANCE_WIDETAG:
                if (instance_layout(where)) {
                    sword_t nslots = instance_length(thing) | 1;
                    lispobj bitmap = LAYOUT(instance_layout(where))->bitmap;
                    gc_assert(fixnump(bitmap)
                              || widetag_of(*native_pointer(bitmap)) == BIGNUM_WIDETAG);
                    instance_scan((void (*)(lispobj*, sword_t, uword_t))verify_range,
                                  where+1, nslots, bitmap, (uintptr_t)state);
                    count = 1 + nslots;
                }
                break;
            case CODE_HEADER_WIDETAG:
                {
                struct code *code = (struct code *) where;
                sword_t nheader_words = code_header_words(code->header);
                /* Scavenge the boxed section of the code data block */
                verify_range(where + 1, nheader_words - 1, state);

                /* Scavenge the boxed section of each function
                 * object in the code data block. */
                for_each_simple_fun(i, fheaderp, code, 1, {
#if defined(LISP_FEATURE_COMPACT_INSTANCE_HEADER)
                    lispobj __attribute__((unused)) layout =
                        function_layout((lispobj*)fheaderp);
                    gc_assert(!layout || layout == SYMBOL(FUNCTION_LAYOUT)->value >> 32);
#endif
                    verify_range(SIMPLE_FUN_SCAV_START(fheaderp),
                                 SIMPLE_FUN_SCAV_NWORDS(fheaderp),
                                 state); });
                count = nheader_words + code_instruction_words(code->code_size);
                break;
                }
            case FDEFN_WIDETAG:
                verify_range(where + 1, 2, state);
                callee = fdefn_callee_lispobj((struct fdefn*)where);
                /* For a more intelligible error, don't say that the word that
                 * contains an errant pointer is in stack space if it isn't. */
                state->virtual_where = where + 3;
                verify_range(&callee, 1, state);
                state->virtual_where = 0;
                count = ALIGN_UP(sizeof (struct fdefn)/sizeof(lispobj), 2);
                break;
        }
    }
}
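/* A failure report from FAIL_IF looks like (addresses hypothetical):
 *   Ptr 0x1000500017 @ 10004a0008 sees free page
 * meaning the word at 0x10004a0008 held 0x1000500017, which pointed
 * into a page no longer in use. The virtual_where hack above substitutes
 * the fdefn's raw-addr slot address so such reports stay intelligible. */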
static uword_t verify_space(lispobj start, lispobj* end, uword_t flags) {
    struct verify_state state;
    memset(&state, 0, sizeof state);
    state.flags = flags;
    verify_range((lispobj*)start, end-(lispobj*)start, &state);
    if (state.errors) lose("verify failed: %d error(s)", state.errors);
    return 0;
}
static uword_t verify_gen_aux(lispobj start, lispobj* end, struct verify_state* state)
{
    verify_range((lispobj*)start, end-(lispobj*)start, state);
    return 0;
}

static void verify_generation(generation_index_t generation, uword_t flags)
{
    struct verify_state state;
    memset(&state, 0, sizeof state);
    state.flags = flags;
    walk_generation((uword_t(*)(lispobj*,lispobj*,uword_t))verify_gen_aux,
                    generation, (uword_t)&state);
    if (state.errors) lose("verify failed: %d error(s)", state.errors);
}
void verify_gc(uword_t flags)
{
    int verbose = flags & VERIFY_VERBOSE;

    flags |= VERIFYING_HEAP_OBJECTS;

#ifdef LISP_FEATURE_IMMOBILE_SPACE
#  ifdef __linux__
    // Try this verification if immobile-space was compiled with extra debugging.
    // But weak symbols don't work on macOS.
    extern void __attribute__((weak)) check_varyobj_pages();
    if (&check_varyobj_pages) check_varyobj_pages();
#  endif
    if (verbose)
        printf("Verifying immobile space\n");
    verify_space(IMMOBILE_SPACE_START, immobile_fixedobj_free_pointer, flags);
    verify_space(IMMOBILE_VARYOBJ_SUBSPACE_START, immobile_space_free_pointer, flags);
#endif
    struct thread *th;
    if (verbose)
        printf("Verifying binding stacks\n");
    for_each_thread(th) {
        verify_space((lispobj)th->binding_stack_start,
                     (lispobj*)get_binding_stack_pointer(th),
                     flags ^ VERIFYING_HEAP_OBJECTS);
#ifdef LISP_FEATURE_SB_THREAD
        verify_space((lispobj)(th+1),
                     (lispobj*)(SymbolValue(FREE_TLS_INDEX,0)
                                + (char*)((union per_thread_data*)th)->dynamic_values),
                     flags ^ VERIFYING_HEAP_OBJECTS);
#endif
    }
    if (verbose)
        printf("Verifying RO space\n");
    verify_space(READ_ONLY_SPACE_START, read_only_space_free_pointer, flags);
    if (verbose)
        printf("Verifying static space\n");
    verify_space(STATIC_SPACE_START, static_space_free_pointer, flags);
    if (verbose)
        printf("Verifying dynamic space\n");
    verify_generation(-1, flags);
}
/* Call 'proc' with pairs of addresses demarcating ranges in the
 * specified generation.
 * Stop if any invocation returns non-zero, and return that value */
static uword_t
walk_generation(uword_t (*proc)(lispobj*,lispobj*,uword_t),
                generation_index_t generation, uword_t extra)
{
    page_index_t i;
    int genmask = generation >= 0 ? 1 << generation : ~0;

    for (i = 0; i < last_free_page; i++) {
        if ((page_bytes_used(i) != 0) && ((1 << page_table[i].gen) & genmask)) {
            page_index_t last_page;

            /* This should be the start of a contiguous block */
            gc_assert(page_starts_contiguous_block_p(i));

            /* Need to find the full extent of this contiguous block in case
               objects span pages. */

            /* Now work forward until the end of this contiguous area is
               found. */
            for (last_page = i; ;last_page++)
                /* Check whether this is the last page in this contiguous
                 * block. */
                if (page_ends_contiguous_block_p(last_page, page_table[i].gen))
                    break;

            uword_t result =
                proc((lispobj*)page_address(i),
                     (lispobj*)(page_bytes_used(last_page) + page_address(last_page)),
                     extra);
            if (result) return result;

            i = last_page;
        }
    }
    return 0;
}
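/* Usage sketch (with a hypothetical callback): a walker that tallies
 * objects per generation would be invoked as
 *   walk_generation(count_objects_in_range, 2, (uword_t)&counter);
 * and a generation argument of -1 means "all generations", per the
 * genmask computation above. */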
/* Check that all the free space is zero filled. */
static void
verify_zero_fill(void)
{
    page_index_t page;

    for (page = 0; page < last_free_page; page++) {
        if (page_free_p(page)) {
            /* The whole page should be zero filled. */
            sword_t *start_addr = (sword_t *)page_address(page);
            sword_t i;
            for (i = 0; i < (sword_t)GENCGC_CARD_BYTES/N_WORD_BYTES; i++) {
                if (start_addr[i] != 0) {
                    lose("free page not zero at %p\n", start_addr + i);
                }
            }
        } else {
            sword_t free_bytes = GENCGC_CARD_BYTES - page_bytes_used(page);
            if (free_bytes > 0) {
                sword_t *start_addr =
                    (sword_t *)(page_address(page) + page_bytes_used(page));
                sword_t size = free_bytes / N_WORD_BYTES;
                sword_t i;
                for (i = 0; i < size; i++) {
                    if (start_addr[i] != 0) {
                        lose("free region not zero at %p\n", start_addr + i);
                    }
                }
            }
        }
    }
}

/* External entry point for verify_zero_fill */
void
gencgc_verify_zero_fill(void)
{
    /* Flush the alloc regions updating the tables. */
    gc_alloc_update_all_page_tables(1);
    SHOW("verifying zero fill");
    verify_zero_fill();
}
/* Write-protect all the dynamic boxed pages in the given generation. */
static void
write_protect_generation_pages(generation_index_t generation)
{
    page_index_t start;

    gc_assert(generation < SCRATCH_GENERATION);

    for (start = 0; start < last_free_page; start++) {
        if (protect_page_p(start, generation)) {
            void *page_start;
            page_index_t last;

            /* Note the page as protected in the page tables. */
            page_table[start].write_protected = 1;

            for (last = start + 1; last < last_free_page; last++) {
                if (!protect_page_p(last, generation))
                    break;
                page_table[last].write_protected = 1;
            }

            page_start = page_address(start);

            os_protect(page_start,
                       npage_bytes(last - start),
                       OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);

            start = last;
        }
    }

    if (gencgc_verbose > 1) {
        FSHOW((stderr,
               "/write protected %d of %d pages in generation %d\n",
               count_write_protect_generation_pages(generation),
               count_generation_pages(generation),
               generation));
    }
}
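/* Pages are left readable and executable but not writable, so a stray
 * write into a protected page faults; the fault handler is expected to
 * lift the protection and set write_protected_cleared, marking the page
 * as needing to be re-scavenged before it can be protected again. */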
#if !GENCGC_IS_PRECISE
static void
preserve_context_registers (void (*proc)(os_context_register_t), os_context_t *c)
{
#ifdef LISP_FEATURE_SB_THREAD
    void **ptr;
    /* On Darwin the signal context isn't a contiguous block of memory,
     * so just preserve_pointering its contents won't be sufficient.
     */
#if defined(LISP_FEATURE_DARWIN)||defined(LISP_FEATURE_WIN32)
#if defined LISP_FEATURE_X86
    proc(*os_context_register_addr(c,reg_EAX));
    proc(*os_context_register_addr(c,reg_ECX));
    proc(*os_context_register_addr(c,reg_EDX));
    proc(*os_context_register_addr(c,reg_EBX));
    proc(*os_context_register_addr(c,reg_ESI));
    proc(*os_context_register_addr(c,reg_EDI));
    proc(*os_context_pc_addr(c));
#elif defined LISP_FEATURE_X86_64
    proc(*os_context_register_addr(c,reg_RAX));
    proc(*os_context_register_addr(c,reg_RCX));
    proc(*os_context_register_addr(c,reg_RDX));
    proc(*os_context_register_addr(c,reg_RBX));
    proc(*os_context_register_addr(c,reg_RSI));
    proc(*os_context_register_addr(c,reg_RDI));
    proc(*os_context_register_addr(c,reg_R8));
    proc(*os_context_register_addr(c,reg_R9));
    proc(*os_context_register_addr(c,reg_R10));
    proc(*os_context_register_addr(c,reg_R11));
    proc(*os_context_register_addr(c,reg_R12));
    proc(*os_context_register_addr(c,reg_R13));
    proc(*os_context_register_addr(c,reg_R14));
    proc(*os_context_register_addr(c,reg_R15));
    proc(*os_context_pc_addr(c));
#else
    #error "preserve_context_registers needs to be tweaked for non-x86 Darwin"
#endif
#endif
#if !defined(LISP_FEATURE_WIN32)
    for(ptr = ((void **)(c+1))-1; ptr>=(void **)c; ptr--) {
        proc((os_context_register_t)*ptr);
    }
#endif
#endif // LISP_FEATURE_SB_THREAD
}
#endif
static void
move_pinned_pages_to_newspace()
{
    page_index_t i;

    /* scavenge() will evacuate all oldspace pages, but no newspace
     * pages. Pinned pages are precisely those pages which must not
     * be evacuated, so move them to newspace directly. */

    for (i = 0; i < last_free_page; i++) {
        if (page_table[i].dont_move &&
            /* dont_move is cleared lazily, so test the 'gen' field as well. */
            page_table[i].gen == from_space) {
            if (page_table[i].has_pins) {
                // do not move to newspace after all, this will be word-wiped
                continue;
            }
            page_table[i].gen = new_space;
            /* And since we're moving the pages wholesale, also adjust
             * the generation allocation counters. */
            int used = page_bytes_used(i);
            generations[new_space].bytes_allocated += used;
            generations[from_space].bytes_allocated -= used;
        }
    }
}

#if defined(__GNUC__) && defined(ADDRESS_SANITIZER)
#define NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
#else
#define NO_SANITIZE_ADDRESS
#endif
3268 /* Garbage collect a generation. If raise is 0 then the remains of the
3269 * generation are not raised to the next generation. */
3270 static void NO_SANITIZE_ADDRESS
3271 garbage_collect_generation(generation_index_t generation
, int raise
)
3276 gc_assert(generation
<= PSEUDO_STATIC_GENERATION
);
3278 /* The oldest generation can't be raised. */
3279 gc_assert(!raise
|| generation
< HIGHEST_NORMAL_GENERATION
);
3281 /* Check that weak hash tables were processed in the previous GC. */
3282 gc_assert(weak_hash_tables
== NULL
);
3283 gc_assert(weak_AND_hash_tables
== NULL
);
3285 /* Initialize the weak pointer list. */
3286 weak_pointers
= NULL
;
3288 /* When a generation is not being raised it is transported to a
3289 * temporary generation (NUM_GENERATIONS), and lowered when
3290 * done. Set up this new generation. There should be no pages
3291 * allocated to it yet. */
3293 gc_assert(generations
[SCRATCH_GENERATION
].bytes_allocated
== 0);
3296 /* Set the global src and dest. generations */
3297 if (generation
< PSEUDO_STATIC_GENERATION
) {
3299 from_space
= generation
;
3301 new_space
= generation
+1;
3303 new_space
= SCRATCH_GENERATION
;
3305 /* Change to a new space for allocation, resetting the alloc_start_page */
3306 gc_alloc_generation
= new_space
;
3308 bzero(generations
[new_space
].alloc_start_page_
,
3309 sizeof generations
[new_space
].alloc_start_page_
);
3311 generations
[new_space
].alloc_start_page
= 0;
3312 generations
[new_space
].alloc_unboxed_start_page
= 0;
3313 generations
[new_space
].alloc_large_start_page
= 0;
3316 #ifdef PIN_GRANULARITY_LISPOBJ
3317 hopscotch_reset(&pinned_objects
);
        /* Before any pointers are preserved, the dont_move flags on the
         * pages need to be cleared. */
        /* FIXME: consider moving this bitmap into its own range of words,
         * out of the page table. Then we can just bzero() it.
         * This will also obviate the extra test at the comment
         * "dont_move is cleared lazily" in move_pinned_pages_to_newspace().
         */
        for (i = 0; i < last_free_page; i++)
            if (page_table[i].gen == from_space)
                page_table[i].dont_move = 0;

        /* Un-write-protect the old-space pages. This is essential for the
         * promoted pages as they may contain pointers into the old-space
         * which need to be scavenged. It also helps avoid unnecessary page
         * faults as forwarding pointers are written into them. They need to
         * be un-protected anyway before unmapping later. */
        if (ENABLE_PAGE_PROTECTION)
            unprotect_oldspace();
    } else { // "full" [sic] GC

        /* This is a full mark-and-sweep of all generations without compacting
         * and without returning free space to the allocator. The intent is to
         * break chains of objects causing accidental reachability.
         * Subsequent GC cycles will compact and reclaim space as usual. */
        from_space = new_space = -1;

        // Unprotect the dynamic space but leave page_table bits alone
        if (ENABLE_PAGE_PROTECTION)
            os_protect(page_address(0), npage_bytes(last_free_page),
                       OS_VM_PROT_ALL);

        // Allocate pages from dynamic space for the work queue.
        extern void prepare_for_full_mark_phase();
        prepare_for_full_mark_phase();

    }
    /* Scavenge the stacks' conservative roots. */

    /* there are potentially two stacks for each thread: the main
     * stack, which may contain Lisp pointers, and the alternate stack.
     * We don't ever run Lisp code on the altstack, but it may
     * host a sigcontext with lisp objects in it */

    /* what we need to do: (1) find the stack pointer for the main
     * stack; scavenge it (2) find the interrupt context on the
     * alternate stack that might contain lisp values, and scavenge
     * that */

    /* we assume that none of the preceding applies to the thread that
     * initiates GC. If you ever call GC from inside an altstack
     * handler, you will lose. */
#if !GENCGC_IS_PRECISE
    /* And if we're saving a core, there's no point in being conservative. */
    if (conservative_stack) {
        for_each_thread(th) {
            void **ptr;
            void **esp = (void **)-1;
            if (th->state == STATE_DEAD)
                continue;
# if defined(LISP_FEATURE_SB_SAFEPOINT)
            /* Conservative collect_garbage is always invoked with a
             * foreign C call or an interrupt handler on top of every
             * existing thread, so the stored SP in each thread
             * structure is valid, no matter which thread we are looking
             * at. For threads that were running Lisp code, the pitstop
             * and edge functions maintain this value within the
             * interrupt or exception handler. */
            esp = os_get_csp(th);
            assert_on_stack(th, esp);

            /* In addition to pointers on the stack, also preserve the
             * return PC, the only value from the context that we need
             * in addition to the SP. The return PC gets saved by the
             * foreign call wrapper, and removed from the control stack
             * into a register. */
            preserve_pointer(th->pc_around_foreign_call);

            /* And on platforms with interrupts: scavenge ctx registers. */

            /* Disabled on Windows, because it does not have an explicit
             * stack of `interrupt_contexts'. The reported CSP has been
             * chosen so that the current context on the stack is
             * covered by the stack scan. See also set_csp_from_context(). */
#  ifndef LISP_FEATURE_WIN32
            if (th != arch_os_get_current_thread()) {
                long k = fixnum_value(
                    read_TLS(FREE_INTERRUPT_CONTEXT_INDEX,th));
                while (k > 0)
                    preserve_context_registers((void(*)(os_context_register_t))preserve_pointer,
                                               th->interrupt_contexts[--k]);
            }
#  endif
# elif defined(LISP_FEATURE_SB_THREAD)
            sword_t i,free;
            if (th == arch_os_get_current_thread()) {
                /* Somebody is going to burn in hell for this, but casting
                 * it in two steps shuts gcc up about strict aliasing. */
                esp = (void **)((void *)&raise);
            } else {
                void **esp1;
                free = fixnum_value(read_TLS(FREE_INTERRUPT_CONTEXT_INDEX,th));
                for (i = free-1; i >= 0; i--) {
                    os_context_t *c = th->interrupt_contexts[i];
                    esp1 = (void **) *os_context_register_addr(c,reg_SP);
                    if (esp1 >= (void **)th->control_stack_start &&
                        esp1 < (void **)th->control_stack_end) {
                        if (esp1 < esp) esp = esp1;
                        preserve_context_registers((void(*)(os_context_register_t))preserve_pointer,
                                                   c);
                    }
                }
            }
# else
            esp = (void **)((void *)&raise);
# endif
            if (!esp || esp == (void*) -1)
                lose("garbage_collect: no SP known for thread %x (OS %x)",
                     th, th->os_thread);
            for (ptr = ((void **)th->control_stack_end)-1; ptr >= esp; ptr--) {
                preserve_pointer(*ptr);
            }
        }
    }
#else
    /* Non-x86oid systems don't have "conservative roots" as such, but
     * the same mechanism is used for objects pinned for use by alien
     * code. */
    for_each_thread(th) {
        lispobj pin_list = read_TLS(PINNED_OBJECTS,th);
        while (pin_list != NIL) {
            preserve_pointer((void*)(CONS(pin_list)->car));
            pin_list = CONS(pin_list)->cdr;
        }
    }
#endif
    if (gencgc_verbose > 1) {
        sword_t num_dont_move_pages = count_dont_move_pages();
        fprintf(stderr,
                "/non-movable pages due to conservative pointers = %ld (%lu bytes)\n",
                num_dont_move_pages,
                npage_bytes(num_dont_move_pages));
    }
    /* Now that all of the pinned (dont_move) pages are known, and
     * before we start to scavenge (and thus relocate) objects,
     * relocate the pinned pages to newspace, so that the scavenger
     * will not attempt to relocate their contents. */
    if (compacting_p())
        move_pinned_pages_to_newspace();
    /* Scavenge all the rest of the roots. */

#if GENCGC_IS_PRECISE
    /*
     * If not x86, we need to scavenge the interrupt context(s) and the
     * control stack.
     */
    for_each_thread(th) {
        scavenge_interrupt_contexts(th);
        scavenge_control_stack(th);
    }

# ifdef LISP_FEATURE_SB_SAFEPOINT
    /* In this case, scrub all stacks right here from the GCing thread
     * instead of doing what the comment below says. Suboptimal, but
     * easier. */
    for_each_thread(th)
        scrub_thread_control_stack(th);
# else
    /* Scrub the unscavenged control stack space, so that we can't run
     * into any stale pointers in a later GC (this is done by the
     * stop-for-gc handler in the other threads). */
    scrub_control_stack();
# endif
#endif
    /* Scavenge the Lisp functions of the interrupt handlers, taking
     * care to avoid SIG_DFL and SIG_IGN. */
    for (i = 0; i < NSIG; i++) {
        union interrupt_handler handler = interrupt_handlers[i];
        if (!ARE_SAME_HANDLER(handler.c, SIG_IGN) &&
            !ARE_SAME_HANDLER(handler.c, SIG_DFL) &&
            is_lisp_pointer(handler.lisp)) {
            if (compacting_p())
                scavenge((lispobj *)(interrupt_handlers + i), 1);
            else
                gc_mark_obj(handler.lisp);
        }
    }
    /* Scavenge the binding stacks. */
    for_each_thread(th) {
        scav_binding_stack((lispobj*)th->binding_stack_start,
                           (lispobj*)get_binding_stack_pointer(th),
                           compacting_p() ? 0 : gc_mark_obj);
#ifdef LISP_FEATURE_SB_THREAD
        /* do the tls as well */
        sword_t len;
        len = (SymbolValue(FREE_TLS_INDEX,0) >> WORD_SHIFT) -
              (sizeof (struct thread))/(sizeof (lispobj));
        if (compacting_p())
            scavenge((lispobj *) (th+1), len);
        else
            gc_mark_range((lispobj *) (th+1), len);
#endif
    }
    if (!compacting_p()) {
        extern void execute_full_mark_phase();
        extern void execute_full_sweep_phase();
        execute_full_mark_phase();
        execute_full_sweep_phase();
        goto maybe_verify;
    }
    /* Scavenge static space. */
    if (gencgc_verbose > 1) {
        FSHOW((stderr,
               "/scavenge static space: %d bytes\n",
               (uword_t)static_space_free_pointer - STATIC_SPACE_START));
    }
    heap_scavenge((lispobj*)STATIC_SPACE_START, static_space_free_pointer);
    /* All generations but the generation being GCed need to be
     * scavenged. The new_space generation needs special handling as
     * objects may be moved in - it is handled separately below. */
#ifdef LISP_FEATURE_IMMOBILE_SPACE
    scavenge_immobile_roots(generation+1, SCRATCH_GENERATION);
#endif
    scavenge_generations(generation+1, PSEUDO_STATIC_GENERATION);

    if (gc_object_watcher) scavenge(&gc_object_watcher, 1);
    scavenge_pinned_ranges();
    /* The Lisp start function is stored in the core header, not a static
     * symbol. It is passed to gc_and_save() in this C variable */
    if (lisp_init_function) scavenge(&lisp_init_function, 1);

    /* Finally scavenge the new_space generation. Keep going until no
     * more objects are moved into the new generation */
    scavenge_newspace_generation(new_space);
    /* FIXME: I tried reenabling this check when debugging unrelated
     * GC weirdness ca. sbcl-0.6.12.45, and it failed immediately.
     * Since the current GC code seems to work well, I'm guessing that
     * this debugging code is just stale, but I haven't tried to
     * figure it out. It should be figured out and then either made to
     * work or just deleted. */

#define RESCAN_CHECK 0
#if RESCAN_CHECK
    /* As a check re-scavenge the newspace once; no new objects should
     * be found. */
    {
        os_vm_size_t old_bytes_allocated = bytes_allocated;
        os_vm_size_t bytes_allocated;

        /* Start with a full scavenge. */
        scavenge_newspace_generation_one_scan(new_space);

        /* Flush the current regions, updating the tables. */
        gc_alloc_update_all_page_tables(1);

        bytes_allocated = bytes_allocated - old_bytes_allocated;

        if (bytes_allocated != 0) {
            lose("Rescan of new_space allocated %d more bytes.\n",
                 bytes_allocated);
        }
    }
#endif
    scan_binding_stack();
    scan_weak_hash_tables(weak_ht_alivep_funs);
    scan_weak_pointers();
    wipe_nonpinned_words();
#ifdef LISP_FEATURE_IMMOBILE_SPACE
    // Do this last, because until wipe_nonpinned_words() happens,
    // not all page table entries have the 'gen' value updated,
    // which we need to correctly find all old->young pointers.
    sweep_immobile_space(raise);
#endif

    gc_assert(boxed_region.last_page < 0);
    gc_assert(unboxed_region.last_page < 0);
#if SEGREGATED_CODE
    gc_assert(gc_alloc_region[2].last_page < 0);
#endif
#ifdef PIN_GRANULARITY_LISPOBJ
    hopscotch_log_stats(&pinned_objects, "pins");
#endif
    /* Free the pages in oldspace, but not those marked dont_move. */
    free_oldspace();

    /* If the GC is not raising the age then lower the generation back
     * to its normal generation number */
    if (!raise) {
        for (i = 0; i < last_free_page; i++)
            if ((page_bytes_used(i) != 0)
                && (page_table[i].gen == SCRATCH_GENERATION))
                page_table[i].gen = generation;
        gc_assert(generations[generation].bytes_allocated == 0);
        generations[generation].bytes_allocated =
            generations[SCRATCH_GENERATION].bytes_allocated;
        generations[SCRATCH_GENERATION].bytes_allocated = 0;
    }

    /* Reset the alloc_start_page for generation. */
#if SEGREGATED_CODE
    bzero(generations[generation].alloc_start_page_,
          sizeof generations[generation].alloc_start_page_);
#else
    generations[generation].alloc_start_page = 0;
    generations[generation].alloc_unboxed_start_page = 0;
    generations[generation].alloc_large_start_page = 0;
#endif

    /* Set the new gc trigger for the GCed generation. */
    generations[generation].gc_trigger =
        generations[generation].bytes_allocated
        + generations[generation].bytes_consed_between_gc;

    if (raise)
        generations[generation].num_gc = 0;
    else
        ++generations[generation].num_gc;

 maybe_verify:
    if (generation >= verify_gens) {
        if (gencgc_verbose) {
            SHOW("verifying");
        }
        verify_gc(0);
    }
}
static page_index_t
find_last_free_page(void)
{
    page_index_t last_page = -1, i;

    for (i = 0; i < last_free_page; i++)
        if (page_bytes_used(i) != 0)
            last_page = i;

    /* The last free page is actually the first available page */
    return last_page + 1;
}

static void
update_dynamic_space_free_pointer(void)
{
    set_alloc_pointer((lispobj)(page_address(find_last_free_page())));
}
static void
remap_page_range (page_index_t from, page_index_t to)
{
    /* There's a mysterious Solaris/x86 problem with using mmap
     * tricks for memory zeroing. See sbcl-devel thread
     * "Re: patch: standalone executable redux".
     */
#if defined(LISP_FEATURE_SUNOS)
    zero_and_mark_pages(from, to);
#else
    const page_index_t
            release_granularity = gencgc_release_granularity/GENCGC_CARD_BYTES,
                   release_mask = release_granularity-1,
                            end = to+1,
                   aligned_from = (from+release_mask)&~release_mask,
                    aligned_end = (end&~release_mask);

    if (aligned_from < aligned_end) {
        zero_pages_with_mmap(aligned_from, aligned_end-1);
        if (aligned_from != from)
            zero_and_mark_pages(from, aligned_from-1);
        if (aligned_end != end)
            zero_and_mark_pages(aligned_end, end-1);
    } else {
        zero_and_mark_pages(from, to);
    }
#endif
}
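
/* A worked example of the alignment arithmetic above, with hypothetical
 * numbers: if gencgc_release_granularity spans 4 GC cards, then
 * release_mask = 3. Remapping pages 5..13 gives end = 14,
 * aligned_from = (5+3)&~3 = 8 and aligned_end = 14&~3 = 12, so pages
 * 8..11 are handed back to the OS via zero_pages_with_mmap() while the
 * unaligned fringes 5..7 and 12..13 fall back to zero_and_mark_pages(). */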
static void
remap_free_pages (page_index_t from, page_index_t to)
{
    page_index_t first_page, last_page;

    for (first_page = from; first_page <= to; first_page++) {
        if (!page_free_p(first_page) || !page_need_to_zero(first_page))
            continue;

        last_page = first_page + 1;
        while (page_free_p(last_page) &&
               (last_page <= to) &&
               (page_need_to_zero(last_page)))
            last_page++;

        remap_page_range(first_page, last_page-1);

        first_page = last_page;
    }
}
generation_index_t small_generation_limit = 1;
/* GC all generations newer than last_gen, raising the objects in each
 * to the next older generation - we finish when all generations below
 * last_gen are empty. Then if last_gen is due for a GC, or if
 * last_gen==NUM_GENERATIONS (the scratch generation? eh?) we GC that
 * too. The valid range for last_gen is: 0,1,...,NUM_GENERATIONS.
 *
 * We stop collecting at gencgc_oldest_gen_to_gc, even if this is less than
 * last_gen (oh, and note that by default it is NUM_GENERATIONS-1) */
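
/* Illustrative invocations (a sketch of how callers use this, not an
 * exhaustive list of call sites):
 *   collect_garbage(0);                           // nursery-only collection
 *   collect_garbage(HIGHEST_NORMAL_GENERATION+1); // full GC, as gc_and_save() does
 *   collect_garbage(1+PSEUDO_STATIC_GENERATION);  // non-moving mark/sweep that
 *                                                 // includes pseudo-static space
 */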
void
collect_garbage(generation_index_t last_gen)
{
    generation_index_t gen = 0, i;
    boolean gc_mark_only = 0;
    int raise, more = 0;
    int gen_to_wp;
    /* The largest value of last_free_page seen since the time
     * remap_free_pages was called. */
    static page_index_t high_water_mark = 0;

    FSHOW((stderr, "/entering collect_garbage(%d)\n", last_gen));
    log_generation_stats(gc_logfile, "=== GC Start ===");

    gc_active_p = 1;
    if (last_gen == 1+PSEUDO_STATIC_GENERATION) {
        // Pseudostatic space undergoes a non-moving collection
        last_gen = PSEUDO_STATIC_GENERATION;
        gc_mark_only = 1;
    } else if (last_gen > 1+PSEUDO_STATIC_GENERATION) {
        // This is a completely non-obvious thing to do, but whatever...
        FSHOW((stderr,
               "/collect_garbage: last_gen = %d, doing a level 0 GC\n",
               last_gen));
        last_gen = 0;
    }
    /* Flush the alloc regions updating the tables. */
    gc_alloc_update_all_page_tables(1);

    /* Verify the new objects created by Lisp code. */
    if (pre_verify_gen_0) {
        FSHOW((stderr, "pre-checking generation 0\n"));
        verify_generation(0, 0);
    }

    if (gencgc_verbose > 1)
        print_generation_stats();

#ifdef LISP_FEATURE_IMMOBILE_SPACE
    /* Immobile space generation bits are lazily updated for gen0
       (not touched on every object allocation) so do it now */
    update_immobile_nursery_bits();
#endif

    if (gc_mark_only) {
        garbage_collect_generation(PSEUDO_STATIC_GENERATION, 0);
        goto finish;
    }
    do {
        /* Collect the generation. */

        if (more || (gen >= gencgc_oldest_gen_to_gc)) {
            /* Never raise the oldest generation. Never raise the extra generation
             * collected due to more-flag. */
            raise = 0;
            more = 0;
        } else {
            raise =
                (gen < last_gen)
                || (generations[gen].num_gc >= generations[gen].number_of_gcs_before_promotion);
            /* If we would not normally raise this one, but we're
             * running low on space in comparison to the object-sizes
             * we've been seeing, raise it and collect the next one
             * too. */
            if (!raise && gen == last_gen) {
                more = (2*large_allocation) >= (dynamic_space_size - bytes_allocated);
                raise = more;
            }
        }

        if (gencgc_verbose > 1) {
            FSHOW((stderr,
                   "starting GC of generation %d with raise=%d alloc=%d trig=%d GCs=%d\n",
                   gen,
                   raise,
                   generations[gen].bytes_allocated,
                   generations[gen].gc_trigger,
                   generations[gen].num_gc));
        }

        /* If an older generation is being filled, then update its
         * memory age. */
        if (raise == 1) {
            generations[gen+1].cum_sum_bytes_allocated +=
                generations[gen+1].bytes_allocated;
        }

        garbage_collect_generation(gen, raise);

        /* Reset the memory age cum_sum. */
        generations[gen].cum_sum_bytes_allocated = 0;

        if (gencgc_verbose > 1) {
            FSHOW((stderr, "GC of generation %d finished:\n", gen));
            print_generation_stats();
        }

        gen++;
    } while ((gen <= gencgc_oldest_gen_to_gc)
             && ((gen < last_gen)
                 || more
                 || (raise
                     && (generations[gen].bytes_allocated
                         > generations[gen].gc_trigger)
                     && (generation_average_age(gen)
                         > generations[gen].minimum_age_before_gc))));
    /* Now if gen-1 was raised all generations before gen are empty.
     * If it wasn't raised then all generations before gen-1 are empty.
     *
     * Now objects within this gen's pages cannot point to younger
     * generations unless they are written to. This can be exploited
     * by write-protecting the pages of gen; then when younger
     * generations are GCed only the pages which have been written
     * need scanning. */
    if (raise)
        gen_to_wp = gen;
    else
        gen_to_wp = gen - 1;

    /* There's not much point in WPing pages in generation 0 as it is
     * never scavenged (except promoted pages). */
    if ((gen_to_wp > 0) && ENABLE_PAGE_PROTECTION) {
        /* Check that they are all empty. */
        for (i = 0; i < gen_to_wp; i++) {
            if (generations[i].bytes_allocated)
                lose("trying to write-protect gen. %d when gen. %d nonempty\n",
                     gen_to_wp, i);
        }
        write_protect_generation_pages(gen_to_wp);
    }
    /* Set gc_alloc() back to generation 0. The current regions should
     * be flushed after the above GCs. */
    gc_assert(boxed_region.free_pointer == boxed_region.start_addr);
    gc_alloc_generation = 0;

    /* Save the high-water mark before updating last_free_page */
    if (last_free_page > high_water_mark)
        high_water_mark = last_free_page;

    update_dynamic_space_free_pointer();

    /* Update auto_gc_trigger. Make sure we trigger the next GC before
     * running out of heap! */
    if (bytes_consed_between_gcs <= (dynamic_space_size - bytes_allocated))
        auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
    else
        auto_gc_trigger = bytes_allocated + (dynamic_space_size - bytes_allocated)/2;

    if (gencgc_verbose) {
#define MESSAGE ("Next gc when %"OS_VM_SIZE_FMT" bytes have been consed\n")
        char buf[64];
        int n;
        // fprintf() can - and does - cause deadlock here.
        // snprintf() seems to work fine.
        n = snprintf(buf, sizeof buf, MESSAGE, auto_gc_trigger);
        ignore_value(write(2, buf, n));
#undef MESSAGE
    }
    /* If we did a big GC (arbitrarily defined as gen > 1), release memory
     * back to the OS.
     */
    if (gen > small_generation_limit) {
        if (last_free_page > high_water_mark)
            high_water_mark = last_free_page;
        remap_free_pages(0, high_water_mark);
        high_water_mark = 0;
    }

    large_allocation = 0;
 finish:
#ifdef LISP_FEATURE_IMMOBILE_SPACE
    write_protect_immobile_space();
#endif
    gc_active_p = 0;

    if (gc_object_watcher) {
        extern void gc_prove_liveness(void(*)(), lispobj, int, uword_t*, int);
#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
        gc_prove_liveness(preserve_context_registers,
                          gc_object_watcher,
                          gc_n_stack_pins, pinned_objects.keys,
                          gc_traceroot_criterion);
#else
        gc_prove_liveness(0, gc_object_watcher, 0, 0, gc_traceroot_criterion);
#endif
    }

    log_generation_stats(gc_logfile, "=== GC End ===");
    SHOW("returning from collect_garbage");
}
/* Initialization of gencgc metadata is split into three steps:
 * 1. gc_init() - allocation of a fixed-address space via mmap(),
 *    failing which there's no reason to go on. (safepoint only)
 * 2. gc_allocate_ptes() - page table entries
 * 3. gencgc_pickup_dynamic() - calculation of scan start offsets
 * Steps (2) and (3) are combined in self-build because there is
 * no PAGE_TABLE_CORE_ENTRY_TYPE_CODE core entry. */
void
gc_init(void)
{
#if defined(LISP_FEATURE_SB_SAFEPOINT)
    alloc_gc_page();
#endif
}

void gc_allocate_ptes()
{
    page_index_t i;
    /* Compute the number of pages needed for the dynamic space.
     * Dynamic space size should be aligned on page size. */
    page_table_pages = dynamic_space_size/GENCGC_CARD_BYTES;
    gc_assert(dynamic_space_size == npage_bytes(page_table_pages));

    /* Default nursery size to 5% of the total dynamic space size,
     * min 1Mb. */
    bytes_consed_between_gcs = dynamic_space_size/(os_vm_size_t)20;
    if (bytes_consed_between_gcs < (1024*1024))
        bytes_consed_between_gcs = 1024*1024;

    /* The page_table is allocated using "calloc" to zero-initialize it.
     * The C library typically implements this efficiently with mmap() if the
     * size is large enough. To further avoid touching each page structure
     * until first use, FREE_PAGE_FLAG must be 0, statically asserted here:
     */
    {
        /* Compile time assertion: If triggered, declares an array
         * of dimension -1 forcing a syntax error. The intent of the
         * assignment is to avoid an "unused variable" warning. */
        char __attribute__((unused)) assert_free_page_flag_0[(FREE_PAGE_FLAG) ? -1 : 1];
    }
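
    /* Where C11 is available, _Static_assert expresses the same check
     * directly (shown for comparison only; the array trick above is
     * what this file actually uses):
     *   _Static_assert(FREE_PAGE_FLAG == 0, "FREE_PAGE_FLAG must be 0");
     */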
    /* An extra struct exists at the end as a sentinel. Its 'scan_start_offset'
     * and 'bytes_used' must be zero.
     * Doing so avoids testing in page_ends_contiguous_block_p() whether the
     * next page_index is within bounds, and whether that page contains data.
     */
    page_table = calloc(1+page_table_pages, sizeof(struct page));
    gc_assert(page_table);

    hopscotch_init();
#ifdef PIN_GRANULARITY_LISPOBJ
    hopscotch_create(&pinned_objects, HOPSCOTCH_HASH_FUN_DEFAULT, 0 /* hashset */,
                     32 /* logical bin count */, 0 /* default range */);
#endif

    scavtab[WEAK_POINTER_WIDETAG] = scav_weak_pointer;

    bytes_allocated = 0;

    /* Initialize the generations. */
    for (i = 0; i < NUM_GENERATIONS; i++) {
        generations[i].alloc_start_page = 0;
        generations[i].alloc_unboxed_start_page = 0;
        generations[i].alloc_large_start_page = 0;
        generations[i].bytes_allocated = 0;
        generations[i].gc_trigger = 2000000;
        generations[i].num_gc = 0;
        generations[i].cum_sum_bytes_allocated = 0;
        /* the tune-able parameters */
        generations[i].bytes_consed_between_gc
            = bytes_consed_between_gcs/(os_vm_size_t)HIGHEST_NORMAL_GENERATION;
        generations[i].number_of_gcs_before_promotion = 1;
        generations[i].minimum_age_before_gc = 0.75;
    }

    /* Initialize gc_alloc. */
    gc_alloc_generation = 0;
    gc_set_region_empty(&boxed_region);
    gc_set_region_empty(&unboxed_region);
#if SEGREGATED_CODE
    gc_set_region_empty(&code_region);
#endif
}
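
/* Sizing example for the above (hypothetical figures): with a 512MB
 * dynamic space and 32KB cards, page_table holds 512MB/32KB = 16384
 * entries plus the sentinel, and the default nursery is 512MB/20
 * = 25.6MB of consing between GCs, comfortably above the 1MB floor. */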
/* Pick up the dynamic space from after a core load.
 *
 * The ALLOCATION_POINTER points to the end of the dynamic space.
 */
static void
gencgc_pickup_dynamic(void)
{
    page_index_t page = 0;
    char *alloc_ptr = (char *)get_alloc_pointer();
    lispobj *prev = (lispobj *)page_address(page);
    generation_index_t gen = PSEUDO_STATIC_GENERATION;

    bytes_allocated = 0;

    do {
        lispobj *first, *ptr = (lispobj *)page_address(page);

        if (!gencgc_partial_pickup || !page_free_p(page)) {
            page_bytes_t bytes_used = GENCGC_CARD_BYTES;
            /* It is possible, though rare, for the saved page table
             * to contain free pages below alloc_ptr. */
            page_table[page].gen = gen;
            if (gencgc_partial_pickup)
                bytes_used = page_bytes_used(page);
            else
                set_page_bytes_used(page, GENCGC_CARD_BYTES);
            page_table[page].large_object = 0;
            page_table[page].write_protected = 0;
            page_table[page].write_protected_cleared = 0;
            page_table[page].dont_move = 0;
            set_page_need_to_zero(page, 1);

            bytes_allocated += bytes_used;
        }

        if (!gencgc_partial_pickup) {
#if SEGREGATED_CODE
            // Make the most general assumption: any page *might* contain code.
            page_table[page].allocated = CODE_PAGE_FLAG;
#else
            page_table[page].allocated = BOXED_PAGE_FLAG;
#endif
            first = gc_search_space3(ptr, prev, (ptr+2));
            if (ptr == first)
                prev = ptr;
            set_page_scan_start_offset(page, page_address(page) - (char*)prev);
        }
        page++;
    } while (page_address(page) < alloc_ptr);

    last_free_page = page;

    generations[gen].bytes_allocated = bytes_allocated;

    gc_alloc_update_all_page_tables(1);
    if (ENABLE_PAGE_PROTECTION)
        write_protect_generation_pages(gen);
}
void
gc_initialize_pointers(void)
{
    /* !page_table_pages happens once only in self-build and not again */
    if (!page_table_pages)
        gc_allocate_ptes();
    gencgc_pickup_dynamic();
}
/* alloc(..) is the external interface for memory allocation. It
 * allocates to generation 0. It is not called from within the garbage
 * collector as it is only external uses that need the check for heap
 * size (GC trigger) and to disable the interrupts (interrupts are
 * always disabled during a GC).
 *
 * The vops that call alloc(..) assume that the returned space is zero-filled.
 * (E.g. the most significant word of a 2-word bignum in MOVE-FROM-UNSIGNED.)
 *
 * The check for a GC trigger is only performed when the current
 * region is full, so in most cases it's not needed. */
static inline lispobj *
general_alloc_internal(sword_t nbytes, int page_type_flag, struct alloc_region *region,
                       struct thread *thread)
{
#ifndef LISP_FEATURE_WIN32
    lispobj alloc_signal;
#endif
    void *new_obj;
    void *new_free_pointer;
    os_vm_size_t trigger_bytes = 0;

    gc_assert(nbytes > 0);

    /* Check for alignment allocation problems. */
    gc_assert((((uword_t)region->free_pointer & LOWTAG_MASK) == 0)
              && ((nbytes & LOWTAG_MASK) == 0));

#if !(defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD))
    /* Must be inside a PA section. */
    gc_assert(get_pseudo_atomic_atomic(thread));
#endif

    if ((os_vm_size_t) nbytes > large_allocation)
        large_allocation = nbytes;

    /* maybe we can do this quickly ... */
    new_free_pointer = (char*)region->free_pointer + nbytes;
    if (new_free_pointer <= region->end_addr) {
        new_obj = (void*)(region->free_pointer);
        region->free_pointer = new_free_pointer;
        return(new_obj);        /* yup */
    }

    /* We don't want to count nbytes against auto_gc_trigger unless we
     * have to: it speeds up the tenuring of objects and slows down
     * allocation. However, unless we do so when allocating _very_
     * large objects we are in danger of exhausting the heap without
     * running sufficient GCs.
     */
    if ((os_vm_size_t) nbytes >= bytes_consed_between_gcs)
        trigger_bytes = nbytes;

    /* we have to go the long way around, it seems. Check whether we
     * should GC in the near future
     */
    if (auto_gc_trigger && (bytes_allocated+trigger_bytes > auto_gc_trigger)) {
        /* Don't flood the system with interrupts if the need to gc is
         * already noted. This can happen for example when SUB-GC
         * allocates or after a gc triggered in a WITHOUT-GCING. */
        if (read_TLS(GC_PENDING,thread) == NIL) {
            /* set things up so that GC happens when we finish the PA
             * section */
            write_TLS(GC_PENDING,T,thread);
            if (read_TLS(GC_INHIBIT,thread) == NIL) {
#ifdef LISP_FEATURE_SB_SAFEPOINT
                thread_register_gc_trigger();
#else
                set_pseudo_atomic_interrupted(thread);
#if GENCGC_IS_PRECISE
                /* PPC calls alloc() from a trap
                 * look up the most context if it's from a trap. */
                {
                    os_context_t *context =
                        thread->interrupt_data->allocation_trap_context;
                    maybe_save_gc_mask_and_block_deferrables
                        (context ? os_context_sigmask_addr(context) : NULL);
                }
#else
                maybe_save_gc_mask_and_block_deferrables(NULL);
#endif
#endif
            }
        }
    }
    new_obj = gc_alloc_with_region(nbytes, page_type_flag, region, 0);

#ifndef LISP_FEATURE_WIN32
    /* for sb-prof, and not supported on Windows yet */
    alloc_signal = read_TLS(ALLOC_SIGNAL,thread);
    if ((alloc_signal & FIXNUM_TAG_MASK) == 0) {
        if ((sword_t) alloc_signal <= 0) {
            write_TLS(ALLOC_SIGNAL, T, thread);
            raise(SIGPROF);
        } else {
            write_TLS(ALLOC_SIGNAL,
                      alloc_signal - (1 << N_FIXNUM_TAG_BITS),
                      thread);
        }
    }
#endif

    return (new_obj);
}
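
/* Example of the trigger_bytes accounting above (hypothetical numbers):
 * with bytes_consed_between_gcs = 25MB, a single 30MB allocation counts
 * against auto_gc_trigger immediately, so the allocation itself can
 * schedule a GC at the end of the pseudo-atomic section instead of
 * waiting until later small allocations overflow a region. */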
lispobj *
general_alloc(sword_t nbytes, int page_type_flag)
{
    struct thread *thread = arch_os_get_current_thread();
    /* Select correct region, and call general_alloc_internal with it.
     * For other than boxed allocation we must lock first, since the
     * region is shared. */
#if !SEGREGATED_CODE
    if (page_type_flag == BOXED_PAGE_FLAG) {
#else
    if (BOXED_PAGE_FLAG & page_type_flag) {
#endif
#ifdef LISP_FEATURE_SB_THREAD
        struct alloc_region *region = (thread ? &(thread->alloc_region) : &boxed_region);
#else
        struct alloc_region *region = &boxed_region;
#endif
        return general_alloc_internal(nbytes, page_type_flag, region, thread);
#if SEGREGATED_CODE
    } else if (page_type_flag == UNBOXED_PAGE_FLAG ||
               page_type_flag == CODE_PAGE_FLAG) {
        struct alloc_region *region =
            page_type_flag == CODE_PAGE_FLAG ? &code_region : &unboxed_region;
#else
    } else if (UNBOXED_PAGE_FLAG == page_type_flag) {
        struct alloc_region *region = &unboxed_region;
#endif
        lispobj *obj;
        int result;
        result = thread_mutex_lock(&allocation_lock);
        gc_assert(!result);
        obj = general_alloc_internal(nbytes, page_type_flag, region, thread);
        result = thread_mutex_unlock(&allocation_lock);
        gc_assert(!result);
        return obj;
    } else {
        lose("bad page type flag: %d", page_type_flag);
    }
}
lispobj AMD64_SYSV_ABI *
alloc(sword_t nbytes)
{
#ifdef LISP_FEATURE_SB_SAFEPOINT_STRICTLY
    struct thread *self = arch_os_get_current_thread();
    int was_pseudo_atomic = get_pseudo_atomic_atomic(self);
    if (!was_pseudo_atomic)
        set_pseudo_atomic_atomic(self);
#else
    gc_assert(get_pseudo_atomic_atomic(arch_os_get_current_thread()));
#endif

    lispobj *result = general_alloc(nbytes, BOXED_PAGE_FLAG);

#ifdef LISP_FEATURE_SB_SAFEPOINT_STRICTLY
    if (!was_pseudo_atomic)
        clear_pseudo_atomic_atomic(self);
#endif

    return result;
}
 * shared support for the OS-dependent signal handlers which
 * catch GENCGC-related write-protect violations
 */
void unhandled_sigmemoryfault(void* addr);

/* Depending on which OS we're running under, different signals might
 * be raised for a violation of write protection in the heap. This
 * function factors out the common generational GC magic which needs
 * to be invoked in this case, and should be called from whatever signal
 * handler is appropriate for the OS we're running under.
 *
 * Return true if this signal is a normal generational GC thing that
 * we were able to handle, or false if it was abnormal and control
 * should fall through to the general SIGSEGV/SIGBUS/whatever logic.
 *
 * We have two control flags for this: one causes us to ignore faults
 * on unprotected pages completely, and the second complains to stderr
 * but allows us to continue without losing.
 */
extern boolean ignore_memoryfaults_on_unprotected_pages;
boolean ignore_memoryfaults_on_unprotected_pages = 0;

extern boolean continue_after_memoryfault_on_unprotected_pages;
boolean continue_after_memoryfault_on_unprotected_pages = 0;
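
/* A minimal sketch of the intended caller (the real handlers live in
 * the OS-specific interrupt code; the handler name and locals here are
 * hypothetical):
 *
 *   void memory_fault_handler(int sig, siginfo_t *info, os_context_t *ctx)
 *   {
 *       if (gencgc_handle_wp_violation(info->si_addr))
 *           return;   // a write-barrier fault; fully handled below
 *       ... fall through to the general SIGSEGV/SIGBUS logic ...
 *   }
 */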
int
gencgc_handle_wp_violation(void* fault_addr)
{
    page_index_t page_index = find_page_index(fault_addr);

#if QSHOW_SIGNALS
    FSHOW((stderr,
           "heap WP violation? fault_addr=%p, page_index=%"PAGE_INDEX_FMT"\n",
           fault_addr, page_index));
#endif

    /* Check whether the fault is within the dynamic space. */
    if (page_index == (-1)) {
#ifdef LISP_FEATURE_IMMOBILE_SPACE
        extern int immobile_space_handle_wp_violation(void*);
        if (immobile_space_handle_wp_violation(fault_addr))
            return 1;
#endif

        /* It can be helpful to be able to put a breakpoint on this
         * case to help diagnose low-level problems. */
        unhandled_sigmemoryfault(fault_addr);

        /* not within the dynamic space -- not our responsibility */
        return 0;

    } else {
        int ret;
        ret = thread_mutex_lock(&free_pages_lock);
        gc_assert(ret == 0);
        if (page_table[page_index].write_protected) {
            unprotect_page_index(page_index);
        } else if (!ignore_memoryfaults_on_unprotected_pages) {
            /* The only acceptable reason for this signal on a heap
             * access is that GENCGC write-protected the page.
             * However, if two CPUs hit a wp page near-simultaneously,
             * we had better not have the second one lose here if it
             * does this test after the first one has already set wp=0
             */
            if (page_table[page_index].write_protected_cleared != 1) {
                void lisp_backtrace(int frames);
                lisp_backtrace(10);
                fprintf(stderr,
                        "Fault @ %p, page %"PAGE_INDEX_FMT" not marked as write-protected:\n"
                        "  boxed_region.first_page: %"PAGE_INDEX_FMT","
                        "  boxed_region.last_page %"PAGE_INDEX_FMT"\n"
                        "  page.scan_start_offset: %"OS_VM_SIZE_FMT"\n"
                        "  page.bytes_used: %u\n"
                        "  page.allocated: %d\n"
                        "  page.write_protected: %d\n"
                        "  page.write_protected_cleared: %d\n"
                        "  page.generation: %d\n",
                        fault_addr,
                        page_index,
                        boxed_region.first_page,
                        boxed_region.last_page,
                        page_scan_start_offset(page_index),
                        page_bytes_used(page_index),
                        page_table[page_index].allocated,
                        page_table[page_index].write_protected,
                        page_table[page_index].write_protected_cleared,
                        page_table[page_index].gen);
                if (!continue_after_memoryfault_on_unprotected_pages)
                    lose("Feh.\n");
            }
        }
        ret = thread_mutex_unlock(&free_pages_lock);
        gc_assert(ret == 0);
        /* Don't worry, we can handle it. */
        return 1;
    }
}
/* This is to be called when we catch a SIGSEGV/SIGBUS, determine that
 * it's not just a case of the program hitting the write barrier, and
 * are about to let Lisp deal with it. It's basically just a
 * convenient place to set a gdb breakpoint. */
void
unhandled_sigmemoryfault(void *addr)
{}

static void
update_thread_page_tables(struct thread *th)
{
    gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->alloc_region);
#if defined(LISP_FEATURE_SB_SAFEPOINT_STRICTLY) && !defined(LISP_FEATURE_WIN32)
    gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->sprof_alloc_region);
#endif
}
/* GC is single-threaded and all memory allocations during a
   collection happen in the GC thread, so it is sufficient to update
   all the page tables once at the beginning of a collection and
   update only page tables of the GC thread during the collection. */
void gc_alloc_update_all_page_tables(int for_all_threads)
{
    /* Flush the alloc regions updating the tables. */
    struct thread *th;
    if (for_all_threads) {
        for_each_thread(th) {
            update_thread_page_tables(th);
        }
    }
    else {
        th = arch_os_get_current_thread();
        if (th) {
            update_thread_page_tables(th);
        }
    }
#if SEGREGATED_CODE
    gc_alloc_update_page_tables(CODE_PAGE_FLAG, &code_region);
#endif
    gc_alloc_update_page_tables(UNBOXED_PAGE_FLAG, &unboxed_region);
    gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &boxed_region);
}
void
gc_set_region_empty(struct alloc_region *region)
{
    region->first_page = 0;
    region->last_page = -1;
    region->start_addr = page_address(0);
    region->free_pointer = page_address(0);
    region->end_addr = page_address(0);
}
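
/* Observation (an added note): an "empty" region has first_page = 0 and
 * last_page = -1, which is exactly the condition the post-collection
 * gc_assert(...last_page < 0) checks test for; and since free_pointer
 * equals end_addr, the inline allocation fast path fails immediately,
 * forcing the next allocation to open a fresh region. */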
static void
zero_all_free_pages() /* called only by gc_and_save() */
{
    page_index_t i;

    for (i = 0; i < last_free_page; i++) {
        if (page_free_p(i)) {
#ifdef READ_PROTECT_FREE_PAGES
            os_protect(page_address(i), GENCGC_CARD_BYTES, OS_VM_PROT_ALL);
#endif
            zero_pages(i, i);
        }
    }
}
/* Things to do before doing a final GC before saving a core (without
 * running Lisp code):
 *
 * + Pages in large_object pages aren't moved by the GC, so we need to
 *   unset that flag from all pages.
 * + The pseudo-static generation isn't normally collected, but it seems
 *   reasonable to collect it at least when saving a core. So move the
 *   pages to a normal generation.
 */
static void
prepare_for_final_gc ()
{
    page_index_t i;

#ifdef LISP_FEATURE_IMMOBILE_SPACE
    extern void prepare_immobile_space_for_final_gc();
    prepare_immobile_space_for_final_gc ();
#endif
    for (i = 0; i < last_free_page; i++) {
        page_table[i].large_object = 0;
        if (page_table[i].gen == PSEUDO_STATIC_GENERATION) {
            int used = page_bytes_used(i);
            page_table[i].gen = HIGHEST_NORMAL_GENERATION;
            generations[PSEUDO_STATIC_GENERATION].bytes_allocated -= used;
            generations[HIGHEST_NORMAL_GENERATION].bytes_allocated += used;
        }
    }

#ifdef PINNED_OBJECTS
    struct thread *th;
    for_each_thread(th) {
        write_TLS(PINNED_OBJECTS, NIL, th);
    }
#endif
}
/* Set this switch to 1 for coalescing of strings dumped to fasl,
 * or 2 for coalescing of those,
 * plus literal strings in code compiled to memory. */
char gc_coalesce_string_literals = 0;
/* Do a non-conservative GC, and then save a core with the initial
 * function being set to the value of 'lisp_init_function' */
void
gc_and_save(char *filename, boolean prepend_runtime,
            boolean save_runtime_options, boolean compressed,
            int compression_level, int application_type)
{
    FILE *file;
    void *runtime_bytes = NULL;
    size_t runtime_size;
    extern void coalesce_similar_objects();
    extern struct lisp_startup_options lisp_startup_options;
    boolean verbose = !lisp_startup_options.noinform;

    file = prepare_to_save(filename, prepend_runtime, &runtime_bytes,
                           &runtime_size);
    if (file == NULL)
        return;

    conservative_stack = 0;

    /* The filename might come from Lisp, and be moved by the now
     * non-conservative GC. */
    filename = strdup(filename);

    /* We're committed to process death at this point, and interrupts can not
     * possibly be handled in Lisp. Let the installed handler closures become
     * garbage, since new ones will be made by ENABLE-INTERRUPT on restart */
#ifndef LISP_FEATURE_WIN32
    {
        int i;
        for (i=0; i<NSIG; ++i)
            if (lowtag_of(interrupt_handlers[i].lisp) == FUN_POINTER_LOWTAG)
                interrupt_handlers[i].lisp = 0;
    }
#endif

    /* Collect twice: once into relatively high memory, and then back
     * into low memory. This compacts the retained data into the lower
     * pages, minimizing the size of the core file.
     */
    prepare_for_final_gc();
    gencgc_alloc_start_page = last_free_page;
    collect_garbage(HIGHEST_NORMAL_GENERATION+1);

    // We always coalesce copyable numbers. Additional coalescing is done
    // only on request, in which case a message is shown (unless verbose=0).
    if (gc_coalesce_string_literals && verbose) {
        printf("[coalescing similar vectors... ");
        fflush(stdout);
    }
    coalesce_similar_objects();
    if (gc_coalesce_string_literals && verbose)
        printf("done]\n");

    /* FIXME: now that relocate_heap() works, can we just memmove() everything
     * down and perform a relocation instead of a collection? */
    prepare_for_final_gc();
    gencgc_alloc_start_page = -1;
    collect_garbage(HIGHEST_NORMAL_GENERATION+1);

    if (prepend_runtime)
        save_runtime_to_filehandle(file, runtime_bytes, runtime_size,
                                   application_type);

    /* The dumper doesn't know that pages need to be zeroed before use. */
    zero_all_free_pages();
    do_destructive_cleanup_before_save(lisp_init_function);

    save_to_filehandle(file, filename, lisp_init_function,
                       prepend_runtime, save_runtime_options,
                       compressed ? compression_level : COMPRESSION_LEVEL_NONE);
    /* Oops. Save still managed to fail. Since we've mangled the stack
     * beyond hope, there's not much we can do.
     * (beyond FUNCALLing lisp_init_function, but I suspect that's
     * going to be rather unsatisfactory too... */
    lose("Attempt to save core after non-conservative GC failed.\n");
}
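
/* A hypothetical invocation, for illustration only (the real call is
 * made from the core-saving path on behalf of SAVE-LISP-AND-DIE):
 *
 *   gc_and_save("app.core", 0, 0, 0, COMPRESSION_LEVEL_NONE, 0);
 *
 * i.e. no prepended runtime, no saved runtime options, no compression. */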
/* Convert corefile ptes to corresponding 'struct page' */
boolean gc_load_corefile_ptes(char data[], ssize_t bytes_read,
                              page_index_t npages, page_index_t* ppage)
{
    page_index_t page = *ppage;
    int i = 0;
    struct corefile_pte pte;

    while (bytes_read) {
        bytes_read -= sizeof(struct corefile_pte);
        memcpy(&pte, data+i*sizeof (struct corefile_pte), sizeof pte);
        set_page_bytes_used(page, pte.bytes_used);
        // Low 2 bits of the corefile_pte hold the 'allocated' flag.
        // The other bits become the scan_start_offset
        set_page_scan_start_offset(page, pte.sso & ~0x03);
        page_table[page].allocated = pte.sso & 0x03;
        if (++page == npages)
            return 0; // No more to go
        ++i;
    }
    *ppage = page;
    return 1; // More to go
}
/* Prepare the array of corefile_ptes for save */
void gc_store_corefile_ptes(struct corefile_pte *ptes)
{
    page_index_t i;
    for (i = 0; i < last_free_page; i++) {
        /* Thanks to alignment requirements, the two low bits
         * are always zero, so we can use them to store the
         * allocation type -- region is always closed, so only
         * the two low bits of allocation flags matter. */
        uword_t word = page_scan_start_offset(i);
        gc_assert((word & 0x03) == 0);
        ptes[i].sso = word | (0x03 & page_table[i].allocated);
        ptes[i].bytes_used = page_bytes_used(i);
    }
}
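
/* Illustrative sketch only (not used by the GC): the packing invariant
 * the two functions above rely on. Scan-start offsets are at least
 * word-aligned, so the low two bits are free to carry the page's
 * 'allocated' flags and can be recovered exactly at core load. */
static inline boolean corefile_pte_roundtrip_ok(uword_t scan_start, int allocated)
{
    uword_t sso = scan_start | (0x03 & allocated);      // as in gc_store_corefile_ptes()
    return (sso & ~0x03) == scan_start                  // recovered scan_start_offset
        && (sso & 0x03) == (uword_t)(allocated & 0x03); // recovered 'allocated' bits
}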