/*
 * GENerational Conservative Garbage Collector for SBCL
 */

/*
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 */

/*
 * For a review of garbage collection techniques (e.g. generational
 * GC) and terminology (e.g. "scavenging") see Paul R. Wilson,
 * "Uniprocessor Garbage Collection Techniques". As of 20000618, this
 * had been accepted for _ACM Computing Surveys_ and was available
 * as a PostScript preprint through
 *   <http://www.cs.utexas.edu/users/oops/papers.html>
 * as
 *   <ftp://ftp.cs.utexas.edu/pub/garbage/bigsurv.ps>.
 */
#if defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD)
#include "pthreads_win32.h"
#endif

#include "interrupt.h"
#include "gc-internal.h"
#include "pseudo-atomic.h"
#include "genesis/gc-tables.h"
#include "genesis/vector.h"
#include "genesis/weak-pointer.h"
#include "genesis/fdefn.h"
#include "genesis/simple-fun.h"
#include "genesis/hash-table.h"
#include "genesis/instance.h"
#include "genesis/layout.h"
#include "hopscotch.h"
#ifdef GENCGC_IS_PRECISE
#include "genesis/cons.h" /* for accessing *pinned-objects* */
#endif
#include "forwarding-ptr.h"
/* forward declarations */
page_index_t gc_find_freeish_pages(page_index_t *restart_page_ptr, sword_t nbytes,
                                   int page_type_flag);
/* As usually configured, generations 0-5 are normal collected generations,
   6 is pseudo-static (the objects in which are never moved nor reclaimed),
   and 7 is scratch space used when collecting a generation without promotion,
   wherein it is moved to generation 7 and back again.
 */
enum {
    SCRATCH_GENERATION = PSEUDO_STATIC_GENERATION+1,
    NUM_GENERATIONS
};
/* Largest allocation seen since last GC. */
os_vm_size_t large_allocation = 0;
/* the verbosity level. All non-error messages are disabled at level 0;
 * and only a few rare messages are printed at level 1. */
#if QSHOW == 2
boolean gencgc_verbose = 1;
#else
boolean gencgc_verbose = 0;
#endif
/* FIXME: At some point enable the various error-checking things below
 * and see what they say. */
/* We hunt for pointers to old-space, when GCing generations >= verify_gen.
 * Set verify_gens to HIGHEST_NORMAL_GENERATION + 1 to disable this kind of
 * check. */
generation_index_t verify_gens = HIGHEST_NORMAL_GENERATION + 1;
/* Should we do a pre-scan verify of generation 0 before it's GCed? */
boolean pre_verify_gen_0 = 0;
/* Should we check that newly allocated regions are zero filled? */
boolean gencgc_zero_check = 0;
/* Should we check that the free space is zero filled? */
boolean gencgc_enable_verify_zero_fill = 0;
/* When loading a core, don't do a full scan of the memory for the
 * memory region boundaries. (Set to true by coreparse.c if the core
 * contained a pagetable entry).
 */
boolean gencgc_partial_pickup = 0;
/* If defined, free pages are read-protected to ensure that nothing
 * accesses them.
 */

/* #define READ_PROTECT_FREE_PAGES */
/*
 * GC structures and variables
 */
/* the total bytes allocated. These are seen by Lisp DYNAMIC-USAGE. */
os_vm_size_t bytes_allocated = 0;
os_vm_size_t auto_gc_trigger = 0;
/* the source and destination generations. These are set before a GC starts
 * scavenging. */
generation_index_t from_space;
generation_index_t new_space;
/* Set to 1 when in GC */
boolean gc_active_p = 0;
/* should the GC be conservative on stack. If false (only right before
 * saving a core), don't scan the stack / mark pages dont_move. */
static boolean conservative_stack = 1;
/* An array of page structures is allocated on gc initialization.
 * This helps to quickly map between an address and its page structure.
 * page_table_pages is set from the size of the dynamic space. */
page_index_t page_table_pages;
struct page *page_table;
#ifdef LISP_FEATURE_SB_TRACEROOT
lispobj gc_object_watcher;
int gc_traceroot_criterion;
#endif
#ifdef PIN_GRANULARITY_LISPOBJ
struct hopscotch_table pinned_objects;
#endif
/* This is always 0 except during gc_and_save() */
lispobj lisp_init_function;
/// Constants defined in gc-internal:
///   #define BOXED_PAGE_FLAG 1
///   #define UNBOXED_PAGE_FLAG 2
///   #define OPEN_REGION_PAGE_FLAG 4
/// Return true if 'allocated' bits are: {001, 010, 011}, false if 1zz or 000.
static inline boolean page_allocated_no_region_p(page_index_t page) {
    return (page_table[page].allocated ^ OPEN_REGION_PAGE_FLAG) > OPEN_REGION_PAGE_FLAG;
}
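/* The XOR trick above works because OPEN_REGION_PAGE_FLAG (4) is the high
 * bit of the three 'allocated' bits: x ^ 4 exceeds 4 exactly when the
 * open-region bit of x is clear and at least one low bit is set. */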
static inline boolean page_free_p(page_index_t page) {
    return (page_table[page].allocated == FREE_PAGE_FLAG);
}
static inline boolean page_boxed_p(page_index_t page) {
    return (page_table[page].allocated & BOXED_PAGE_FLAG);
}
/// Return true if 'allocated' bits are: {001, 011}, false otherwise.
/// i.e. true of pages which could hold boxed or partially boxed objects.
static inline boolean page_boxed_no_region_p(page_index_t page) {
    return (page_table[page].allocated & 5) == BOXED_PAGE_FLAG;
}
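/* The mask 5 above is OPEN_REGION_PAGE_FLAG|BOXED_PAGE_FLAG, so the test
 * requires the boxed bit to be set and the open-region bit to be clear,
 * while ignoring the unboxed bit - hence exactly {001, 011}. */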
/// Return true if page MUST NOT hold boxed objects (including code).
static inline boolean page_unboxed_p(page_index_t page) {
    /* Both flags set == boxed code page */
    return (page_table[page].allocated & 3) == UNBOXED_PAGE_FLAG;
}
static inline boolean protect_page_p(page_index_t page, generation_index_t generation) {
    return (page_boxed_no_region_p(page)
            && (page_bytes_used(page) != 0)
            && !page_table[page].dont_move
            && (page_table[page].gen == generation));
}
/* Calculate the start address for the given page number. */
void *
page_address(page_index_t page_num)
{
    return (void*)(DYNAMIC_SPACE_START + (page_num * GENCGC_CARD_BYTES));
}
/* Calculate the address where the allocation region associated with
 * the page starts. */
static inline void *
page_scan_start(page_index_t page_index)
{
    return page_address(page_index)-page_scan_start_offset(page_index);
}
/* True if the page starts a contiguous block. */
static inline boolean
page_starts_contiguous_block_p(page_index_t page_index)
{
    // Don't use the preprocessor macro: 0 means 0.
    return page_table[page_index].scan_start_offset_ == 0;
}
/* True if the page is the last page in a contiguous block. */
static inline boolean
page_ends_contiguous_block_p(page_index_t page_index, generation_index_t gen)
{
    return (/* page doesn't fill block */
            (page_bytes_used(page_index) < GENCGC_CARD_BYTES)
            /* page is last allocated page */
            || ((page_index + 1) >= last_free_page)
            || page_free_p(page_index + 1)
            /* next page contains no data */
            || (page_bytes_used(page_index + 1) == 0)
            /* next page is in different generation */
            || (page_table[page_index + 1].gen != gen)
            /* next page starts its own contiguous block */
            || (page_starts_contiguous_block_p(page_index + 1)));
}
/// External function for calling from Lisp.
page_index_t ext_find_page_index(void *addr) { return find_page_index(addr); }
static os_vm_size_t
npage_bytes(page_index_t npages)
{
    gc_assert(npages>=0);
    return ((os_vm_size_t)npages)*GENCGC_CARD_BYTES;
}
/* Check that X is a higher address than Y and return offset from Y to
 * X in bytes. */
static inline os_vm_size_t
addr_diff(void *x, void *y)
{
    return (uintptr_t)x - (uintptr_t)y;
}
/* a structure to hold the state of a generation
 *
 * CAUTION: If you modify this, make sure to touch up the alien
 * definition in src/code/gc.lisp accordingly. ...or better yet,
 * deal with the FIXME there...
 */
struct generation {
#ifdef LISP_FEATURE_SEGREGATED_CODE
    // A distinct start page per nonzero value of 'page_type_flag'.
    // The zeroth index is the large object start page.
    page_index_t alloc_start_page_[4];
#define alloc_large_start_page alloc_start_page_[0]
#define alloc_start_page alloc_start_page_[BOXED_PAGE_FLAG]
#define alloc_unboxed_start_page alloc_start_page_[UNBOXED_PAGE_FLAG]
#else
    /* the first page that gc_alloc_large (boxed) considers on its next
     * call. (Although it always allocates after the boxed_region.) */
    page_index_t alloc_large_start_page;

    /* the first page that gc_alloc() checks on its next call */
    page_index_t alloc_start_page;

    /* the first page that gc_alloc_unboxed() checks on its next call */
    page_index_t alloc_unboxed_start_page;
#endif
    /* the bytes allocated to this generation */
    os_vm_size_t bytes_allocated;

    /* the number of bytes at which to trigger a GC */
    os_vm_size_t gc_trigger;

    /* to calculate a new level for gc_trigger */
    os_vm_size_t bytes_consed_between_gc;
    /* the number of GCs since the last raise */
    int num_gc;

    /* the number of GCs to run on the generations before raising objects to the
     * next generation */
    int number_of_gcs_before_promotion;
    /* the cumulative sum of the bytes allocated to this generation. It is
     * cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation. Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC. */
    os_vm_size_t cum_sum_bytes_allocated;
    /* a minimum average memory age before a GC will occur helps
     * prevent a GC when a large number of new live objects have been
     * added, in which case a GC could be a waste of time */
    double minimum_age_before_gc;
};
/* an array of generation structures. There needs to be one more
 * generation structure than actual generations as the oldest
 * generation is temporarily raised then lowered. */
struct generation generations[NUM_GENERATIONS];
/* the oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... HIGHEST_NORMAL_GENERATION
 *
 * The default of HIGHEST_NORMAL_GENERATION enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided. */
generation_index_t gencgc_oldest_gen_to_gc = HIGHEST_NORMAL_GENERATION;
/* META: Is nobody aside from me bothered by this especially misleading
 * use of the word "last"? It could mean either "ultimate" or "prior",
 * but in fact means neither. It is the *FIRST* page that should be grabbed
 * for more space, so it is min free page, or 1+ the max used page. */
/* The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the Lisp code. */

page_index_t last_free_page;
#ifdef LISP_FEATURE_SB_THREAD
/* This lock is to prevent multiple threads from simultaneously
 * allocating new regions which overlap each other. Note that the
 * majority of GC is single-threaded, but alloc() may be called from
 * >1 thread at a time and must be thread-safe. This lock must be
 * seized before all accesses to generations[] or to parts of
 * page_table[] that other threads may want to see */
static pthread_mutex_t free_pages_lock = PTHREAD_MUTEX_INITIALIZER;
/* This lock is used to protect non-thread-local allocation. */
static pthread_mutex_t allocation_lock = PTHREAD_MUTEX_INITIALIZER;
#endif
extern os_vm_size_t gencgc_release_granularity;
os_vm_size_t gencgc_release_granularity = GENCGC_RELEASE_GRANULARITY;

extern os_vm_size_t gencgc_alloc_granularity;
os_vm_size_t gencgc_alloc_granularity = GENCGC_ALLOC_GRANULARITY;
/*
 * miscellaneous heap functions
 */
/* Count the number of pages which are write-protected within the
 * given generation. */
static page_index_t
count_write_protect_generation_pages(generation_index_t generation)
{
    page_index_t i, count = 0;

    for (i = 0; i < last_free_page; i++)
        if (!page_free_p(i)
            && (page_table[i].gen == generation)
            && page_table[i].write_protected)
            count++;
    return count;
}
/* Count the number of pages within the given generation. */
static page_index_t
count_generation_pages(generation_index_t generation)
{
    page_index_t i;
    page_index_t count = 0;

    for (i = 0; i < last_free_page; i++)
        if (!page_free_p(i)
            && (page_table[i].gen == generation))
            count++;
    return count;
}
static page_index_t
count_dont_move_pages(void)
{
    page_index_t i;
    page_index_t count = 0;
    for (i = 0; i < last_free_page; i++) {
        if (!page_free_p(i)
            && (page_table[i].dont_move != 0)) {
            ++count;
        }
    }
    return count;
}
/* Work through the pages and add up the number of bytes used for the
 * given generation. */
static __attribute__((unused)) os_vm_size_t
count_generation_bytes_allocated (generation_index_t gen)
{
    page_index_t i;
    os_vm_size_t result = 0;
    for (i = 0; i < last_free_page; i++) {
        if (!page_free_p(i)
            && (page_table[i].gen == gen))
            result += page_bytes_used(i);
    }
    return result;
}
/* Return the average age of the memory in a generation. */
double
generation_average_age(generation_index_t gen)
{
    if (generations[gen].bytes_allocated == 0)
        return 0.0;

    return
        ((double)generations[gen].cum_sum_bytes_allocated)
        / ((double)generations[gen].bytes_allocated);
}
#ifdef LISP_FEATURE_X86
extern void fpu_save(void *);
extern void fpu_restore(void *);
#endif

#define PAGE_INDEX_FMT PRIdPTR
void
write_generation_stats(FILE *file)
{
    generation_index_t i;

#ifdef LISP_FEATURE_X86
    int fpu_state[27];

    /* Can end up here after calling alloc_tramp which doesn't prepare
     * the x87 state, and the C ABI uses a different mode */
    fpu_save(fpu_state);
#endif

    /* Print the heap stats. */
    fprintf(file,
            " Gen StaPg UbSta LaSta Boxed Unbox LB LUB !move Alloc Waste Trig WP GCs Mem-age\n");

    for (i = 0; i <= SCRATCH_GENERATION; i++) {
        page_index_t j;
        page_index_t boxed_cnt = 0;
        page_index_t unboxed_cnt = 0;
        page_index_t large_boxed_cnt = 0;
        page_index_t large_unboxed_cnt = 0;
        page_index_t pinned_cnt=0;

        for (j = 0; j < last_free_page; j++)
            if (page_table[j].gen == i) {
                /* Count the number of boxed pages within the given
                 * generation. */
                if (page_boxed_p(j)) {
                    if (page_table[j].large_object)
                        large_boxed_cnt++;
                    else
                        boxed_cnt++;
                }
                if(page_table[j].dont_move) pinned_cnt++;
                /* Count the number of unboxed pages within the given
                 * generation. */
                if (page_unboxed_p(j)) {
                    if (page_table[j].large_object)
                        large_unboxed_cnt++;
                    else
                        unboxed_cnt++;
                }
            }

        gc_assert(generations[i].bytes_allocated
                  == count_generation_bytes_allocated(i));
        fprintf(file,
                " %1d: %5ld %5ld %5ld",
                i,
                (long)generations[i].alloc_start_page,
                (long)generations[i].alloc_unboxed_start_page,
                (long)generations[i].alloc_large_start_page);
        fprintf(file,
                " %5"PAGE_INDEX_FMT" %5"PAGE_INDEX_FMT" %5"PAGE_INDEX_FMT
                " %5"PAGE_INDEX_FMT" %5"PAGE_INDEX_FMT,
                boxed_cnt, unboxed_cnt, large_boxed_cnt,
                large_unboxed_cnt, pinned_cnt);
        fprintf(file,
                " %8"OS_VM_SIZE_FMT" %6"OS_VM_SIZE_FMT" %8"OS_VM_SIZE_FMT
                " %4"PAGE_INDEX_FMT" %3d %7.4f\n",
                generations[i].bytes_allocated,
                (npage_bytes(count_generation_pages(i)) - generations[i].bytes_allocated),
                generations[i].gc_trigger,
                count_write_protect_generation_pages(i),
                generations[i].num_gc,
                generation_average_age(i));
    }
    fprintf(file," Total bytes allocated = %"OS_VM_SIZE_FMT"\n", bytes_allocated);
    fprintf(file," Dynamic-space-size bytes = %"OS_VM_SIZE_FMT"\n", dynamic_space_size);

#ifdef LISP_FEATURE_X86
    fpu_restore(fpu_state);
#endif
}
void
write_heap_exhaustion_report(FILE *file, long available, long requested,
                             struct thread *thread)
{
    fprintf(file,
            "Heap exhausted during %s: %ld bytes available, %ld requested.\n",
            gc_active_p ? "garbage collection" : "allocation",
            available, requested);
    write_generation_stats(file);
    fprintf(file, "GC control variables:\n");
    fprintf(file, " *GC-INHIBIT* = %s\n *GC-PENDING* = %s\n",
            SymbolValue(GC_INHIBIT,thread)==NIL ? "false" : "true",
            (SymbolValue(GC_PENDING, thread) == T) ?
            "true" : ((SymbolValue(GC_PENDING, thread) == NIL) ?
                      "false" : "in progress"));
#ifdef LISP_FEATURE_SB_THREAD
    fprintf(file, " *STOP-FOR-GC-PENDING* = %s\n",
            SymbolValue(STOP_FOR_GC_PENDING,thread)==NIL ? "false" : "true");
#endif
}
void
print_generation_stats(void)
{
    write_generation_stats(stderr);
}
extern char* gc_logfile;
char * gc_logfile = NULL;
void
log_generation_stats(char *logfile, char *header)
{
    if (logfile) {
        FILE * log = fopen(logfile, "a");
        if (log) {
            fprintf(log, "%s\n", header);
            write_generation_stats(log);
            fclose(log);
        } else {
            fprintf(stderr, "Could not open gc logfile: %s\n", logfile);
            fflush(stderr);
        }
    }
}
void
report_heap_exhaustion(long available, long requested, struct thread *th)
{
    if (gc_logfile) {
        FILE * log = fopen(gc_logfile, "a");
        if (log) {
            write_heap_exhaustion_report(log, available, requested, th);
            fclose(log);
        } else {
            fprintf(stderr, "Could not open gc logfile: %s\n", gc_logfile);
            fflush(stderr);
        }
    }
    /* Always to stderr as well. */
    write_heap_exhaustion_report(stderr, available, requested, th);
}
#if defined(LISP_FEATURE_X86)
void fast_bzero(void*, size_t); /* in <arch>-assem.S */
#endif
/* Zero the pages from START to END (inclusive), but use mmap/munmap instead
 * if zeroing it ourselves, i.e. in practice give the memory back to the
 * OS. Generally done after a large GC.
 */
void zero_pages_with_mmap(page_index_t start, page_index_t end) {
    page_index_t i;
    void *addr = page_address(start), *new_addr;
    os_vm_size_t length = npage_bytes(1+end-start);

    gc_assert(length >= gencgc_release_granularity);
    gc_assert((length % gencgc_release_granularity) == 0);

#ifdef LISP_FEATURE_LINUX
    // We use MADV_DONTNEED only on Linux due to differing semantics from BSD.
    // Linux treats it as a demand that the memory be 0-filled, or refreshed
    // from a file that backs the range. BSD takes it as a hint that you don't
    // care if the memory has to be brought in from swap when next accessed,
    // i.e. it's not a request to make a user-visible alteration to memory.
    // So in theory this can bring a page in from the core file, if we happen
    // to hit a page that resides in the portion of memory mapped by coreparse.
    // In practice this should not happen because objects from a core file can't
    // become garbage. Except in save-lisp-and-die they can, and we must be
    // cautious not to resurrect bytes that originally came from the file.
    if ((os_vm_address_t)addr >= anon_dynamic_space_start) {
        if (madvise(addr, length, MADV_DONTNEED) != 0)
            lose("madvise failed\n");
    } else
#endif
    {
        os_invalidate(addr, length);
        new_addr = os_validate(NOT_MOVABLE, addr, length);
        if (new_addr == NULL || new_addr != addr) {
            lose("remap_free_pages: page moved, 0x%08x ==> 0x%08x",
                 start, new_addr);
        }
    }

    for (i = start; i <= end; i++)
        set_page_need_to_zero(i, 0);
}
/* Zero the pages from START to END (inclusive). Generally done just after
 * a new region has been allocated.
 */
static void
zero_pages(page_index_t start, page_index_t end) {
#if defined(LISP_FEATURE_X86)
    fast_bzero(page_address(start), npage_bytes(1+end-start));
#else
    bzero(page_address(start), npage_bytes(1+end-start));
#endif
}
static void
zero_and_mark_pages(page_index_t start, page_index_t end) {
    page_index_t i;

    zero_pages(start, end);
    for (i = start; i <= end; i++)
        set_page_need_to_zero(i, 0);
}
/* Zero the pages from START to END (inclusive), except for those
 * pages that are known to be already zeroed. Mark all pages in the
 * ranges as non-zeroed.
 */
static void
zero_dirty_pages(page_index_t start, page_index_t end) {
    page_index_t i, j;

    for (i = start; i <= end; i++) {
        if (!page_need_to_zero(i)) continue;
        for (j = i+1; (j <= end) && page_need_to_zero(j) ; j++)
            ; /* find the end of this run of dirty pages */
        zero_pages(i, j-1);
        i = j;
    }

    for (i = start; i <= end; i++) {
        set_page_need_to_zero(i, 1);
    }
}
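/* The first loop above zeroes each maximal run of still-dirty pages with a
 * single zero_pages() call instead of one call per page; the second loop
 * then marks the whole range as needing zeroing again, since it is about to
 * be handed out for allocation. */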
/*
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties
 * e.g. boxed/unboxed, generation, ages; there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page. Many
 * features of memory use are noted on a page-wise basis, e.g. the
 * generation; so if a region starts within an existing allocated page
 * it must be consistent with this page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region. It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list which jumps all over the place to cleanup the list. It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g to detect pointers to the oldspace.
 * Thus it's important that the allocation regions have the correct
 * properties set when allocated, and not just set when closed. The
 * region allocation routines return regions with the specified
 * properties, and grab all the pages, setting their properties
 * appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer. The actual space used by the region is not reflected
 * in the page tables until it is closed. It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used, returning unused
 * space. Further it may be noted in the new regions which is
 * necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region, the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects and so
 * don't need scavenging. Further they can't contain pointers to
 * younger generations so WP is not needed. By allocating pages to
 * unboxed objects the whole page never needs scavenging or
 * write-protecting. */
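/* Illustrative sketch only (not actual code from this file): the inline fast
 * path that this region design enables looks roughly like
 *
 *     if ((char*)region->free_pointer + nbytes <= (char*)region->end_addr) {
 *         obj = region->free_pointer;
 *         region->free_pointer = (char*)obj + nbytes;
 *     } else
 *         obj = refill_region_and_retry(nbytes);   // hypothetical slow path
 *
 * The real slow path is gc_alloc_with_region() further below, which closes
 * the region and opens a new one before retrying. */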
/* We use either two or three regions for the current newspace generation. */
#ifdef LISP_FEATURE_SEGREGATED_CODE
struct alloc_region gc_alloc_regions[3];
#define boxed_region   gc_alloc_regions[BOXED_PAGE_FLAG-1]
#define unboxed_region gc_alloc_regions[UNBOXED_PAGE_FLAG-1]
#define code_region    gc_alloc_regions[CODE_PAGE_FLAG-1]
#else
struct alloc_region boxed_region;
struct alloc_region unboxed_region;
#endif
/* The generation currently being allocated to. */
static generation_index_t gc_alloc_generation;
static inline page_index_t
generation_alloc_start_page(generation_index_t generation, int page_type_flag, int large)
{
    if (!(page_type_flag >= 1 && page_type_flag <= 3))
        lose("bad page_type_flag: %d", page_type_flag);
    if (large)
        return generations[generation].alloc_large_start_page;
#ifdef LISP_FEATURE_SEGREGATED_CODE
    return generations[generation].alloc_start_page_[page_type_flag];
#else
    if (UNBOXED_PAGE_FLAG == page_type_flag)
        return generations[generation].alloc_unboxed_start_page;
    /* Both code and data. */
    return generations[generation].alloc_start_page;
#endif
}
static inline void
set_generation_alloc_start_page(generation_index_t generation, int page_type_flag, int large,
                                page_index_t page)
{
    if (!(page_type_flag >= 1 && page_type_flag <= 3))
        lose("bad page_type_flag: %d", page_type_flag);
    if (large)
        generations[generation].alloc_large_start_page = page;
#ifdef LISP_FEATURE_SEGREGATED_CODE
    else
        generations[generation].alloc_start_page_[page_type_flag] = page;
#else
    else if (UNBOXED_PAGE_FLAG == page_type_flag)
        generations[generation].alloc_unboxed_start_page = page;
    else /* Both code and data. */
        generations[generation].alloc_start_page = page;
#endif
}
/* Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page. So
 * may pick up from the previous region if there is enough space. This
 * keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables(), and will thus be in an empty state.
 *
 * To assist the scavenging functions write-protected pages are not
 * used. Free pages should not be write-protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found to within the current
 * region and the page generation must be set so that pointers to the
 * from space can be recognized. Therefore the generation of pages in
 * the region are set to gc_alloc_generation. To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void
gc_alloc_new_region(sword_t nbytes, int page_type_flag, struct alloc_region *alloc_region)
{
    page_index_t first_page;
    page_index_t last_page;
    page_index_t i;
    int ret;

    FSHOW((stderr,
           "/alloc_new_region for %d bytes from gen %d\n",
           nbytes, gc_alloc_generation));

    /* Check that the region is in a reset state. */
    gc_assert((alloc_region->first_page == 0)
              && (alloc_region->last_page == -1)
              && (alloc_region->free_pointer == alloc_region->end_addr));
    ret = thread_mutex_lock(&free_pages_lock);
    gc_assert(ret == 0);
    first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0);
    last_page = gc_find_freeish_pages(&first_page, nbytes, page_type_flag);

    /* Set up the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_address(first_page) + page_bytes_used(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = page_address(last_page+1);

    /* Set up the pages. */

    /* The first page may have already been in use. */
    /* If so, just assert that it's consistent, otherwise, set it up. */
    if (page_bytes_used(first_page)) {
        gc_assert(page_table[first_page].allocated == page_type_flag);
        gc_assert(page_table[first_page].gen == gc_alloc_generation);
        gc_assert(page_table[first_page].large_object == 0);
    } else {
        page_table[first_page].allocated = page_type_flag;
        page_table[first_page].gen = gc_alloc_generation;
        page_table[first_page].large_object = 0;
        set_page_scan_start_offset(first_page, 0);
    }
    page_table[first_page].allocated |= OPEN_REGION_PAGE_FLAG;

    for (i = first_page+1; i <= last_page; i++) {
        page_table[i].allocated = page_type_flag;
        page_table[i].gen = gc_alloc_generation;
        page_table[i].large_object = 0;
        /* This may not be necessary for unboxed regions (think it was
         * broken before!) */
        set_page_scan_start_offset(i,
            addr_diff(page_address(i), alloc_region->start_addr));
        page_table[i].allocated |= OPEN_REGION_PAGE_FLAG;
    }
    /* Bump up last_free_page. */
    if (last_page+1 > last_free_page) {
        last_free_page = last_page+1;
        /* do we only want to call this on special occasions? like for
         * boxed_region? */
        set_alloc_pointer((lispobj)page_address(last_free_page));
    }
    ret = thread_mutex_unlock(&free_pages_lock);
    gc_assert(ret == 0);

#ifdef READ_PROTECT_FREE_PAGES
    os_protect(page_address(first_page),
               npage_bytes(1+last_page-first_page),
               OS_VM_PROT_ALL);
#endif

    /* If the first page was only partial, don't check whether it's
     * zeroed (it won't be) and don't zero it (since the parts that
     * we're interested in are guaranteed to be zeroed).
     */
    if (page_bytes_used(first_page)) {
        first_page++;
    }

    zero_dirty_pages(first_page, last_page);

    /* we can do this after releasing free_pages_lock */
    if (gencgc_zero_check) {
        word_t *p;
        for (p = (word_t *)alloc_region->start_addr;
             p < (word_t *)alloc_region->end_addr; p++) {
            if (*p != 0) {
                lose("The new region is not zero at %p (start=%p, end=%p).\n",
                     p, alloc_region->start_addr, alloc_region->end_addr);
            }
        }
    }
}
/* If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then it is only recorded if the first page of the
 * current region is <= new_areas_ignore_page. This helps avoid
 * unnecessary recording when doing a full scavenge pass.
 *
 * The new_object structure holds the page, byte offset, and size of
 * new regions of objects. Each new area is placed in the array of
 * these structures pointed to by new_areas. new_areas_index holds the
 * offset into new_areas.
 *
 * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation. */
#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static page_index_t new_areas_ignore_page;
struct new_area {
    page_index_t page;
    size_t offset;
    size_t size;
};
static struct new_area (*new_areas)[];
static size_t new_areas_index;
size_t max_new_areas;
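/* new_areas points at an array with room for NUM_NEW_AREAS entries;
 * new_areas_index is the fill pointer into that array, and max_new_areas
 * records its high-water mark (see the end of add_new_area() below). */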
/* Add a new area to new_areas. */
static void
add_new_area(page_index_t first_page, size_t offset, size_t size)
{
    size_t new_area_start, c;
    ssize_t i;

    /* Ignore if full. */
    if (new_areas_index >= NUM_NEW_AREAS)
        return;

    switch (record_new_objects) {
    case 0:
        return;
    case 1:
        if (first_page > new_areas_ignore_page)
            return;
        break;
    case 2:
        break;
    default:
        gc_abort();
    }

    new_area_start = npage_bytes(first_page) + offset;

    /* Search backwards for a prior area that this follows from. If
       found this will save adding a new area. */
    for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) {
        size_t area_end =
            npage_bytes((*new_areas)[i].page)
            + (*new_areas)[i].offset
            + (*new_areas)[i].size;
        /*FSHOW((stderr,
               "/add_new_area S1 %d %d %d %d\n",
               i, c, new_area_start, area_end));*/
        if (new_area_start == area_end) {
            /*FSHOW((stderr,
                   "/adding to [%d] %d %d %d with %d %d %d:\n",
                   i,
                   (*new_areas)[i].page,
                   (*new_areas)[i].offset,
                   (*new_areas)[i].size,
                   first_page, offset, size));*/
            (*new_areas)[i].size += size;
            return;
        }
    }

    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
    /*FSHOW((stderr,
           "/new_area %d page %d offset %d size %d\n",
           new_areas_index, first_page, offset, size));*/
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
        max_new_areas = new_areas_index;
}
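/* Note that add_new_area() above first tries to extend one of the (at most)
 * eight most recently recorded areas - the backwards search bounded by
 * c < 8 - and only appends a fresh entry when the new area does not abut
 * any of them. */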
/* Update the tables for the alloc_region. The region may be added to
 * the new_areas.
 *
 * When done the alloc_region is set up so that the next quick alloc
 * will fail safely and thus a new region will be allocated. Further
 * it is safe to try to re-update the page table of this reset
 * alloc_region. */
void
gc_alloc_update_page_tables(int page_type_flag, struct alloc_region *alloc_region)
{
    boolean more;
    page_index_t first_page;
    page_index_t next_page;
    os_vm_size_t bytes_used;
    os_vm_size_t region_size;
    os_vm_size_t byte_cnt;
    page_bytes_t orig_first_page_bytes_used;
    int ret;

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if ((first_page == 0) && (alloc_region->last_page == -1))
        return;

    next_page = first_page+1;

    ret = thread_mutex_lock(&free_pages_lock);
    gc_assert(ret == 0);
    if (alloc_region->free_pointer != alloc_region->start_addr) {
        /* some bytes were allocated in the region */
        orig_first_page_bytes_used = page_bytes_used(first_page);

        gc_assert(alloc_region->start_addr ==
                  (page_address(first_page) + page_bytes_used(first_page)));

        /* All the pages used need to be updated */

        /* Update the first page. */

        /* If the page was free then set up the gen, and
         * scan_start_offset. */
        if (page_bytes_used(first_page) == 0)
            gc_assert(page_starts_contiguous_block_p(first_page));
        page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);

#ifdef LISP_FEATURE_SEGREGATED_CODE
        gc_assert(page_table[first_page].allocated == page_type_flag);
#else
        gc_assert(page_table[first_page].allocated & page_type_flag);
#endif
        gc_assert(page_table[first_page].gen == gc_alloc_generation);
        gc_assert(page_table[first_page].large_object == 0);

        byte_cnt = 0;

        /* Calculate the number of bytes used in this page. This is not
         * always the number of new bytes, unless it was free. */
        more = 0;
        if ((bytes_used = addr_diff(alloc_region->free_pointer,
                                    page_address(first_page)))
            >GENCGC_CARD_BYTES) {
            bytes_used = GENCGC_CARD_BYTES;
            more = 1;
        }
        set_page_bytes_used(first_page, bytes_used);
        byte_cnt += bytes_used;

        /* All the rest of the pages should be free. We need to set
         * their scan_start_offset pointer to the start of the
         * region, and set the bytes_used. */
        while (more) {
            page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
#ifdef LISP_FEATURE_SEGREGATED_CODE
            gc_assert(page_table[next_page].allocated == page_type_flag);
#else
            gc_assert(page_table[next_page].allocated & page_type_flag);
#endif
            gc_assert(page_bytes_used(next_page) == 0);
            gc_assert(page_table[next_page].gen == gc_alloc_generation);
            gc_assert(page_table[next_page].large_object == 0);
            gc_assert(page_scan_start_offset(next_page) ==
                      addr_diff(page_address(next_page),
                                alloc_region->start_addr));

            /* Calculate the number of bytes used in this page. */
            more = 0;
            if ((bytes_used = addr_diff(alloc_region->free_pointer,
                                        page_address(next_page)))>GENCGC_CARD_BYTES) {
                bytes_used = GENCGC_CARD_BYTES;
                more = 1;
            }
            set_page_bytes_used(next_page, bytes_used);
            byte_cnt += bytes_used;

            next_page++;
        }

        region_size = addr_diff(alloc_region->free_pointer,
                                alloc_region->start_addr);
        bytes_allocated += region_size;
        generations[gc_alloc_generation].bytes_allocated += region_size;

        gc_assert((byte_cnt - orig_first_page_bytes_used) == region_size);

        /* Set the generations alloc restart page to the last page of
         * the region. */
        set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0, next_page-1);

        /* Add the region to the new_areas if requested. */
        if (BOXED_PAGE_FLAG & page_type_flag)
            add_new_area(first_page,orig_first_page_bytes_used, region_size);

        FSHOW((stderr,
               "/gc_alloc_update_page_tables update %d bytes to gen %d\n",
               region_size,
               gc_alloc_generation));
    } else {
        /* There are no bytes allocated. Unallocate the first_page if
         * there are 0 bytes_used. */
        page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
        if (page_bytes_used(first_page) == 0)
            page_table[first_page].allocated = FREE_PAGE_FLAG;
    }

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
        gc_assert(page_bytes_used(next_page) == 0);
        page_table[next_page].allocated = FREE_PAGE_FLAG;
        next_page++;
    }
    ret = thread_mutex_unlock(&free_pages_lock);
    gc_assert(ret == 0);

    /* alloc_region is per-thread, we're ok to do this unlocked */
    gc_set_region_empty(alloc_region);
}
/* Allocate a possibly large object. */
static void *
gc_alloc_large(sword_t nbytes, int page_type_flag, struct alloc_region *alloc_region)
{
    boolean more;
    page_index_t first_page, next_page, last_page;
    os_vm_size_t byte_cnt;
    os_vm_size_t bytes_used;
    int ret;

    ret = thread_mutex_lock(&free_pages_lock);
    gc_assert(ret == 0);

    first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 1);
    // FIXME: really we want to try looking for space following the highest of
    // the last page of all other small object regions. That's impossible - there's
    // not enough information. At best we can skip some work in only the case where
    // the supplied region was the one most recently created. To do this right
    // would entail a malloc-like allocator at the page granularity.
    if (first_page <= alloc_region->last_page) {
        first_page = alloc_region->last_page+1;
    }

    last_page = gc_find_freeish_pages(&first_page, nbytes, page_type_flag);

    gc_assert(first_page > alloc_region->last_page);

    set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 1, last_page);

    /* Large objects don't share pages with other objects. */
    gc_assert(page_bytes_used(first_page) == 0);

    /* Set up the pages. */
    page_table[first_page].allocated = page_type_flag;
    page_table[first_page].gen = gc_alloc_generation;
    page_table[first_page].large_object = 1;
    set_page_scan_start_offset(first_page, 0);

    byte_cnt = 0;

    /* Calc. the number of bytes used in this page. This is not
     * always the number of new bytes, unless it was free. */
    more = 0;
    if ((bytes_used = nbytes) > GENCGC_CARD_BYTES) {
        bytes_used = GENCGC_CARD_BYTES;
        more = 1;
    }
    set_page_bytes_used(first_page, bytes_used);
    byte_cnt += bytes_used;

    next_page = first_page+1;

    /* All the rest of the pages should be free. We need to set their
     * scan_start_offset pointer to the start of the region, and set
     * the bytes_used. */
    while (more) {
        gc_assert(page_free_p(next_page));
        gc_assert(page_bytes_used(next_page) == 0);
        page_table[next_page].allocated = page_type_flag;
        page_table[next_page].gen = gc_alloc_generation;
        page_table[next_page].large_object = 1;

        set_page_scan_start_offset(next_page, npage_bytes(next_page-first_page));

        /* Calculate the number of bytes used in this page. */
        more = 0;
        bytes_used = nbytes - byte_cnt;
        if (bytes_used > GENCGC_CARD_BYTES) {
            bytes_used = GENCGC_CARD_BYTES;
            more = 1;
        }
        set_page_bytes_used(next_page, bytes_used);
        page_table[next_page].write_protected=0;
        page_table[next_page].dont_move=0;
        byte_cnt += bytes_used;
        next_page++;
    }

    gc_assert(byte_cnt == (size_t)nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (BOXED_PAGE_FLAG & page_type_flag)
        add_new_area(first_page, 0, nbytes);

    /* Bump up last_free_page */
    if (last_page+1 > last_free_page) {
        last_free_page = last_page+1;
        set_alloc_pointer((lispobj)(page_address(last_free_page)));
    }
    ret = thread_mutex_unlock(&free_pages_lock);
    gc_assert(ret == 0);

#ifdef READ_PROTECT_FREE_PAGES
    os_protect(page_address(first_page),
               npage_bytes(1+last_page-first_page),
               OS_VM_PROT_ALL);
#endif

    zero_dirty_pages(first_page, last_page);

    return page_address(first_page);
}
static page_index_t gencgc_alloc_start_page = -1;
void
gc_heap_exhausted_error_or_lose (sword_t available, sword_t requested)
{
    struct thread *thread = arch_os_get_current_thread();
    /* Write basic information before doing anything else: if we don't
     * call to lisp this is a must, and even if we do there is always
     * the danger that we bounce back here before the error has been
     * handled, or indeed even printed.
     */
    report_heap_exhaustion(available, requested, thread);
    if (gc_active_p || (available == 0)) {
        /* If we are in GC, or totally out of memory there is no way
         * to sanely transfer control to the lisp-side of things.
         */
        lose("Heap exhausted, game over.");
    }
    else {
        /* FIXME: assert free_pages_lock held */
        (void)thread_mutex_unlock(&free_pages_lock);
#if !(defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD))
        gc_assert(get_pseudo_atomic_atomic(thread));
        clear_pseudo_atomic_atomic(thread);
        if (get_pseudo_atomic_interrupted(thread))
            do_pending_interrupt();
#endif
        /* Another issue is that signalling HEAP-EXHAUSTED error leads
         * to running user code at arbitrary places, even in a
         * WITHOUT-INTERRUPTS which may lead to a deadlock without
         * running out of the heap. So at this point all bets are
         * off. */
        if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL)
            corruption_warning_and_maybe_lose
                ("Signalling HEAP-EXHAUSTED in a WITHOUT-INTERRUPTS.");
        /* available and requested should be double word aligned, thus
           they can be passed as fixnums and shifted later. */
        funcall2(StaticSymbolFunction(HEAP_EXHAUSTED_ERROR), available, requested);
        lose("HEAP-EXHAUSTED-ERROR fell through");
    }
}
page_index_t
gc_find_freeish_pages(page_index_t *restart_page_ptr, sword_t bytes,
                      int page_type_flag)
{
    page_index_t most_bytes_found_from = 0, most_bytes_found_to = 0;
    page_index_t first_page, last_page, restart_page = *restart_page_ptr;
    os_vm_size_t nbytes = bytes;
    os_vm_size_t nbytes_goal = nbytes;
    os_vm_size_t bytes_found = 0;
    os_vm_size_t most_bytes_found = 0;
    boolean small_object = nbytes < GENCGC_CARD_BYTES;
    /* FIXME: assert(free_pages_lock is held); */

    if (nbytes_goal < gencgc_alloc_granularity)
        nbytes_goal = gencgc_alloc_granularity;

    /* Toggled by gc_and_save for heap compaction, normally -1. */
    if (gencgc_alloc_start_page != -1) {
        restart_page = gencgc_alloc_start_page;
    }

    /* FIXME: This is on bytes instead of nbytes pending cleanup of
     * long from the interface. */
    gc_assert(bytes>=0);
    /* Search for a page with at least nbytes of space. We prefer
     * not to split small objects on multiple pages, to reduce the
     * number of contiguous allocation regions spanning multiple
     * pages: this helps avoid excessive conservatism.
     *
     * For other objects, we guarantee that they start on their own
     * page boundary. */
    first_page = restart_page;
    while (first_page < page_table_pages) {
        bytes_found = 0;
        if (page_free_p(first_page)) {
            gc_assert(0 == page_bytes_used(first_page));
            bytes_found = GENCGC_CARD_BYTES;
        } else if (small_object &&
                   (page_table[first_page].allocated == page_type_flag) &&
                   (!page_table[first_page].large_object) &&
                   (page_table[first_page].gen == gc_alloc_generation) &&
                   (!page_table[first_page].write_protected) &&
                   (!page_table[first_page].dont_move)) {
            bytes_found = GENCGC_CARD_BYTES - page_bytes_used(first_page);
            if (bytes_found < nbytes) {
                if (bytes_found > most_bytes_found)
                    most_bytes_found = bytes_found;
                first_page++;
                continue;
            }
        } else {
            first_page++;
            continue;
        }

        gc_assert(!page_table[first_page].write_protected);
        for (last_page = first_page+1;
             ((last_page < page_table_pages) &&
              page_free_p(last_page) &&
              (bytes_found < nbytes_goal));
             last_page++) {
            bytes_found += GENCGC_CARD_BYTES;
            gc_assert(0 == page_bytes_used(last_page));
            gc_assert(!page_table[last_page].write_protected);
        }

        if (bytes_found > most_bytes_found) {
            most_bytes_found = bytes_found;
            most_bytes_found_from = first_page;
            most_bytes_found_to = last_page;
        }
        if (bytes_found >= nbytes_goal)
            break;

        first_page = last_page;
    }

    bytes_found = most_bytes_found;
    restart_page = first_page + 1;

    /* Check for a failure */
    if (bytes_found < nbytes) {
        gc_assert(restart_page >= page_table_pages);
        gc_heap_exhausted_error_or_lose(most_bytes_found, nbytes);
    }

    gc_assert(most_bytes_found_to);
    *restart_page_ptr = most_bytes_found_from;
    return most_bytes_found_to-1;
}
/* Allocate bytes. All the rest of the special-purpose allocation
 * functions will eventually call this */
void *
gc_alloc_with_region(sword_t nbytes,int page_type_flag, struct alloc_region *my_region,
                     int quick_p)
{
    void *new_free_pointer;

    if (nbytes>=LARGE_OBJECT_SIZE)
        return gc_alloc_large(nbytes, page_type_flag, my_region);

    /* Check whether there is room in the current alloc region. */
    new_free_pointer = (char*)my_region->free_pointer + nbytes;

    /* fprintf(stderr, "alloc %d bytes from %p to %p\n", nbytes,
       my_region->free_pointer, new_free_pointer); */

    if (new_free_pointer <= my_region->end_addr) {
        /* If so then allocate from the current alloc region. */
        void *new_obj = my_region->free_pointer;
        my_region->free_pointer = new_free_pointer;

        /* Unless a `quick' alloc was requested, check whether the
           alloc region is almost empty. */
        if (!quick_p &&
            addr_diff(my_region->end_addr,my_region->free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(page_type_flag, my_region);
            /* Set up a new region. */
            gc_alloc_new_region(32 /*bytes*/, page_type_flag, my_region);
        }

        return((void *)new_obj);
    }

    /* Else not enough free space in the current region: retry with a
     * new region. */

    gc_alloc_update_page_tables(page_type_flag, my_region);
    gc_alloc_new_region(nbytes, page_type_flag, my_region);
    return gc_alloc_with_region(nbytes, page_type_flag, my_region,0);
}
/* Copy a large object. If the object is in a large object region then
 * it is simply promoted, else it is copied. If it's large enough then
 * it's copied to a large object region.
 *
 * Bignums and vectors may have shrunk. If the object is not copied
 * the space needs to be reclaimed, and the page_tables corrected. */
static lispobj
general_copy_large_object(lispobj object, word_t nwords, boolean boxedp)
{
    lispobj *new;
    page_index_t first_page;

    CHECK_COPY_PRECONDITIONS(object, nwords);

    if ((nwords > 1024*1024) && gencgc_verbose) {
        FSHOW((stderr, "/general_copy_large_object: %d bytes\n",
               nwords*N_WORD_BYTES));
    }

    /* Check whether it's a large object. */
    first_page = find_page_index((void *)object);
    gc_assert(first_page >= 0);

    if (page_table[first_page].large_object) {
        /* Promote the object. Note: Unboxed objects may have been
         * allocated to a BOXED region so it may be necessary to
         * change the region to UNBOXED. */
        os_vm_size_t remaining_bytes;
        os_vm_size_t bytes_freed;
        page_index_t next_page;
        page_bytes_t old_bytes_used;

        /* FIXME: This comment is somewhat stale.
         *
         * Note: Any page write-protection must be removed, else a
         * later scavenge_newspace may incorrectly not scavenge these
         * pages. This would not be necessary if they are added to the
         * new areas, but let's do it for them all (they'll probably
         * be written anyway?). */

        gc_assert(page_starts_contiguous_block_p(first_page));
        next_page = first_page;
        remaining_bytes = nwords*N_WORD_BYTES;

        while (remaining_bytes > GENCGC_CARD_BYTES) {
            gc_assert(page_table[next_page].gen == from_space);
            gc_assert(page_table[next_page].large_object);
            gc_assert(page_scan_start_offset(next_page) ==
                      npage_bytes(next_page-first_page));
            gc_assert(page_bytes_used(next_page) == GENCGC_CARD_BYTES);
            /* Should have been unprotected by unprotect_oldspace()
             * for boxed objects, and after promotion unboxed ones
             * should not be on protected pages at all. */
            gc_assert(!page_table[next_page].write_protected);

            if (boxedp)
                gc_assert(page_boxed_p(next_page));
            else {
                gc_assert(page_allocated_no_region_p(next_page));
                page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
            }
            page_table[next_page].gen = new_space;

            remaining_bytes -= GENCGC_CARD_BYTES;
            next_page++;
        }

        /* Now only one page remains, but the object may have shrunk so
         * there may be more unused pages which will be freed. */

        /* Object may have shrunk but shouldn't have grown - check. */
        gc_assert(page_bytes_used(next_page) >= remaining_bytes);

        page_table[next_page].gen = new_space;

        if (boxedp)
            gc_assert(page_boxed_p(next_page));
        else
            page_table[next_page].allocated = UNBOXED_PAGE_FLAG;

        /* Adjust the bytes_used. */
        old_bytes_used = page_bytes_used(next_page);
        set_page_bytes_used(next_page, remaining_bytes);

        bytes_freed = old_bytes_used - remaining_bytes;

        /* Free any remaining pages; needs care. */
        next_page++;
        while ((old_bytes_used == GENCGC_CARD_BYTES) &&
               (page_table[next_page].gen == from_space) &&
               /* FIXME: It is not obvious to me why this is necessary
                * as a loop condition: it seems to me that the
                * scan_start_offset test should be sufficient, but
                * experimentally that is not the case. --NS */
               (boxedp ?
                page_boxed_p(next_page) :
                page_allocated_no_region_p(next_page)) &&
               page_table[next_page].large_object &&
               (page_scan_start_offset(next_page) ==
                npage_bytes(next_page - first_page))) {
            /* Checks out OK, free the page. Don't need to bother zeroing
             * pages as this should have been done before shrinking the
             * object. These pages shouldn't be write-protected, even if
             * boxed they should be zero filled. */
            gc_assert(!page_table[next_page].write_protected);

            old_bytes_used = page_bytes_used(next_page);
            page_table[next_page].allocated = FREE_PAGE_FLAG;
            set_page_bytes_used(next_page, 0);
            bytes_freed += old_bytes_used;
            next_page++;
        }

        if ((bytes_freed > 0) && gencgc_verbose) {
            FSHOW((stderr,
                   "/general_copy_large_object bytes_freed=%"OS_VM_SIZE_FMT"\n",
                   bytes_freed));
        }

        generations[from_space].bytes_allocated -= nwords*N_WORD_BYTES
            + bytes_freed;
        generations[new_space].bytes_allocated += nwords*N_WORD_BYTES;
        bytes_allocated -= bytes_freed;

        /* Add the region to the new_areas if requested. */
        if (boxedp)
            add_new_area(first_page,0,nwords*N_WORD_BYTES);

        return(object);

    } else {
        /* Allocate space. */
        new = gc_general_alloc(nwords*N_WORD_BYTES,
                               (boxedp ? BOXED_PAGE_FLAG : UNBOXED_PAGE_FLAG),
                               ALLOC_QUICK);

        /* Copy the object. */
        memcpy(new,native_pointer(object),nwords*N_WORD_BYTES);

        /* Return Lisp pointer of new object. */
        return make_lispobj(new, lowtag_of(object));
    }
}
lispobj
copy_large_object(lispobj object, sword_t nwords)
{
    return general_copy_large_object(object, nwords, 1);
}
lispobj
copy_large_unboxed_object(lispobj object, sword_t nwords)
{
    return general_copy_large_object(object, nwords, 0);
}
/* to copy unboxed objects */
lispobj
copy_unboxed_object(lispobj object, sword_t nwords)
{
    return gc_general_copy_object(object, nwords, UNBOXED_PAGE_FLAG);
}
/* XX This is a hack adapted from cgc.c. These don't work too
 * efficiently with the gencgc as a list of the weak pointers is
 * maintained within the objects which causes writes to the pages. A
 * limited attempt is made to avoid unnecessary writes, but this needs
 * a re-think. */
/* FIXME: now that we have non-Lisp hashtables in the GC, it might make sense
 * to stop chaining weak pointers through a slot in the object, as a remedy to
 * the above concern. It would also shorten the object by 2 words. */
static sword_t
scav_weak_pointer(lispobj *where, lispobj object)
{
    /* Since we overwrite the 'next' field, we have to make
     * sure not to do so for pointers already in the list.
     * Instead of searching the list of weak_pointers each
     * time, we ensure that next is always NULL when the weak
     * pointer isn't in the list, and not NULL otherwise.
     * Since we can't use NULL to denote end of list, we
     * use a pointer back to the same weak_pointer.
     */
    struct weak_pointer * wp = (struct weak_pointer*)where;

    if (NULL == wp->next && weak_pointer_breakable_p(wp)) {
        wp->next = weak_pointers;
        weak_pointers = wp;
        if (NULL == wp->next)
            wp->next = wp;
    }

    /* Do not let GC scavenge the value slot of the weak pointer.
     * (That is why it is a weak pointer.) */

    return WEAK_POINTER_NWORDS;
}
lispobj *
search_read_only_space(void *pointer)
{
    lispobj *start = (lispobj *) READ_ONLY_SPACE_START;
    lispobj *end = read_only_space_free_pointer;
    if ((pointer < (void *)start) || (pointer >= (void *)end))
        return NULL;
    return gc_search_space(start, pointer);
}
lispobj *
search_static_space(void *pointer)
{
    lispobj *start = (lispobj *)STATIC_SPACE_START;
    lispobj *end = static_space_free_pointer;
    if ((pointer < (void *)start) || (pointer >= (void *)end))
        return NULL;
    return gc_search_space(start, pointer);
}
/* a faster version for searching the dynamic space. This will work even
 * if the object is in a current allocation region. */
lispobj *
search_dynamic_space(void *pointer)
{
    page_index_t page_index = find_page_index(pointer);
    lispobj *start;

    /* The address may be invalid, so do some checks. */
    if ((page_index == -1) || page_free_p(page_index))
        return NULL;
    start = (lispobj *)page_scan_start(page_index);
    return gc_search_space(start, pointer);
}
#ifndef GENCGC_IS_PRECISE
// Return the starting address of the object containing 'addr'
//  if and only if the object is one which would be evacuated from 'from_space'
//  were it allowed to be either discarded as garbage or moved.
// 'addr_page_index' is the page containing 'addr' and must not be -1.
// Return 0 if there is no such object - that is, if addr is past the
//  end of the used bytes, or its pages are not in 'from_space' etc.
static lispobj *
conservative_root_p(void *addr, page_index_t addr_page_index)
{
    /* quick check 1: Address is quite likely to have been invalid. */
    struct page* page = &page_table[addr_page_index];
    if (page->gen != from_space ||
#ifdef LISP_FEATURE_SEGREGATED_CODE
        (!is_lisp_pointer((lispobj)addr) && page->allocated != CODE_PAGE_FLAG) ||
#endif
        ((uword_t)addr & (GENCGC_CARD_BYTES - 1)) > page_bytes_used(addr_page_index) ||
        (page->large_object && page->dont_move))
        return 0;
    gc_assert(!(page->allocated & OPEN_REGION_PAGE_FLAG));

#ifdef LISP_FEATURE_SEGREGATED_CODE
    /* quick check 2: Unless the page can hold code, the pointer's lowtag must
     * correspond to the widetag of the object. The object header can safely
     * be read even if it turns out that the pointer is not valid,
     * because the pointer was in bounds for the page.
     * Note that this can falsely pass if looking at the interior of an unboxed
     * array that masquerades as a Lisp object header by pure luck.
     * But if this doesn't pass, there's no point in proceeding to the
     * definitive test which involves searching for the containing object. */
    if (page->allocated != CODE_PAGE_FLAG) {
        lispobj* obj = native_pointer((lispobj)addr);
        if (lowtag_of((lispobj)addr) == LIST_POINTER_LOWTAG) {
            if (!is_cons_half(obj[0]) || !is_cons_half(obj[1]))
                return 0;
        } else {
            unsigned char widetag = widetag_of(*obj);
            if (!other_immediate_lowtag_p(widetag) ||
                lowtag_of((lispobj)addr) != lowtag_for_widetag[widetag>>2])
                return 0;
        }
    }
#endif

    /* Filter out anything which can't be a pointer to a Lisp object
     * (or, as a special case which also requires dont_move, a return
     * address referring to something in a CodeObject). This is
     * expensive but important, since it vastly reduces the
     * probability that random garbage will be bogusly interpreted as
     * a pointer which prevents a page from moving. */
    lispobj* object_start = search_dynamic_space(addr);
    if (!object_start) return 0;

    /* If the containing object is a code object and 'addr' points
     * anywhere beyond the boxed words,
     * presume it to be a valid unboxed return address. */
    if (instruction_ptr_p(addr, object_start))
        return object_start;

    /* Large object pages only contain ONE object, and it will never
     * be a CONS. However, arrays and bignums can be allocated larger
     * than necessary and then shrunk to fit, leaving what look like
     * (0 . 0) CONSes at the end. These appear valid to
     * properly_tagged_descriptor_p(), so pick them off here. */
    if (((lowtag_of((lispobj)addr) == LIST_POINTER_LOWTAG) &&
         page_table[addr_page_index].large_object)
        || !properly_tagged_descriptor_p(addr, object_start))
        return 0;

    return object_start;
}
#endif
/* Adjust large bignum and vector objects. This will adjust the
 * allocated region if the size has shrunk, and move unboxed objects
 * into unboxed pages. The pages are not promoted here, and the
 * promoted region is not added to the new_regions; this is really
 * only designed to be called from preserve_pointer(). Shouldn't fail
 * if this is missed, just may delay the moving of objects to unboxed
 * pages, and the freeing of pages. */
static void
maybe_adjust_large_object(page_index_t first_page)
{
    lispobj* where = (lispobj*)page_address(first_page);
    page_index_t next_page;

    uword_t remaining_bytes;
    uword_t bytes_freed;
    uword_t old_bytes_used;

    int page_type_flag;

    /* Check whether it's a vector or bignum object. */
    lispobj widetag = widetag_of(where[0]);
    if (widetag == SIMPLE_VECTOR_WIDETAG)
        page_type_flag = BOXED_PAGE_FLAG;
    else if (specialized_vector_widetag_p(widetag) || widetag == BIGNUM_WIDETAG)
        page_type_flag = UNBOXED_PAGE_FLAG;
    else
        return;

    /* Find its current size. */
    sword_t nwords = sizetab[widetag](where);

    /* Note: Any page write-protection must be removed, else a later
     * scavenge_newspace may incorrectly not scavenge these pages.
     * This would not be necessary if they are added to the new areas,
     * but let's do it for them all (they'll probably be written
     * anyway?). */

    gc_assert(page_starts_contiguous_block_p(first_page));

    next_page = first_page;
    remaining_bytes = nwords*N_WORD_BYTES;
    while (remaining_bytes > GENCGC_CARD_BYTES) {
        gc_assert(page_table[next_page].gen == from_space);
        // We can't assert that page_table[next_page].allocated is correct,
        // because unboxed objects are initially allocated on boxed pages.
        gc_assert(page_allocated_no_region_p(next_page));
        gc_assert(page_table[next_page].large_object);
        gc_assert(page_scan_start_offset(next_page) ==
                  npage_bytes(next_page-first_page));
        gc_assert(page_bytes_used(next_page) == GENCGC_CARD_BYTES);

        // This affects only one object, since large objects don't share pages.
        page_table[next_page].allocated = page_type_flag;

        /* Shouldn't be write-protected at this stage. Essential that the
         * pages aren't. */
        gc_assert(!page_table[next_page].write_protected);
        remaining_bytes -= GENCGC_CARD_BYTES;
        next_page++;
    }

    /* Now only one page remains, but the object may have shrunk so
     * there may be more unused pages which will be freed. */

    /* Object may have shrunk but shouldn't have grown - check. */
    gc_assert(page_bytes_used(next_page) >= remaining_bytes);

    page_table[next_page].allocated = page_type_flag;

    /* Adjust the bytes_used. */
    old_bytes_used = page_bytes_used(next_page);
    set_page_bytes_used(next_page, remaining_bytes);

    bytes_freed = old_bytes_used - remaining_bytes;

    /* Free any remaining pages; needs care. */
    next_page++;
    while ((old_bytes_used == GENCGC_CARD_BYTES) &&
           (page_table[next_page].gen == from_space) &&
           page_allocated_no_region_p(next_page) &&
           page_table[next_page].large_object &&
           (page_scan_start_offset(next_page) ==
            npage_bytes(next_page - first_page))) {
        /* It checks out OK, free the page. We don't need to bother zeroing
         * pages as this should have been done before shrinking the
         * object. These pages shouldn't be write protected as they
         * should be zero filled. */
        gc_assert(!page_table[next_page].write_protected);

        old_bytes_used = page_bytes_used(next_page);
        page_table[next_page].allocated = FREE_PAGE_FLAG;
        set_page_bytes_used(next_page, 0);
        bytes_freed += old_bytes_used;
        next_page++;
    }

    if ((bytes_freed > 0) && gencgc_verbose) {
        FSHOW((stderr,
               "/maybe_adjust_large_object() freed %d\n",
               bytes_freed));
    }

    generations[from_space].bytes_allocated -= bytes_freed;
    bytes_allocated -= bytes_freed;
}
#ifdef PIN_GRANULARITY_LISPOBJ
/* After scavenging of the roots is done, we go back to the pinned objects
 * and look within them for pointers. While heap_scavenge() could certainly
 * do this, it would potentially lead to extra work, since we can't know
 * whether any given object has been examined at least once, since there is
 * no telltale forwarding-pointer. The easiest thing to do is defer all
 * pinned objects to a subsequent pass, as is done here.
 */
static void
scavenge_pinned_ranges()
{
    int i;
    lispobj key;
    for_each_hopscotch_key(i, key, pinned_objects) {
        lispobj* obj = native_pointer(key);
        lispobj header = *obj;
        // Never invoke scavenger on a simple-fun, just code components.
        if (is_cons_half(header))
            scavenge(obj, 2);
        else if (widetag_of(header) != SIMPLE_FUN_WIDETAG)
            scavtab[widetag_of(header)](obj, header);
    }
}
/* Create an array of machine words to consume the space between 'from' and 'to' */
static void deposit_filler(uword_t from, uword_t to)
{
    if (to > from) {
        lispobj* where = (lispobj*)from;
        sword_t nwords = (to - from) >> WORD_SHIFT;
        where[0] = SIMPLE_ARRAY_WORD_WIDETAG;
        where[1] = make_fixnum(nwords - 2);
    }
}
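/* A worked example of the filler layout (illustrative values only): filling
 * an 8-word gap leaves a pseudo-object that a heap walk steps over in one
 * sizetab[] call:
 *
 *   deposit_filler(gap_start, gap_start + 8*N_WORD_BYTES);
 *   // gap_start[0] == SIMPLE_ARRAY_WORD_WIDETAG    (header)
 *   // gap_start[1] == make_fixnum(6)               (8 words total - 2 header words)
 *   // gap_start[2..7] keep their stale contents but are never scanned as pointers.
 */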
/* Zero out the byte ranges on small object pages marked dont_move,
 * carefully skipping over objects in the pin hashtable.
 * TODO: by recording an additional bit per page indicating whether
 * there is more than one pinned object on it, we could avoid qsort()
 * except in the case where there is more than one. */
static void
wipe_nonpinned_words()
{
    void gc_heapsort_uwords(uword_t*, int);
    // Loop over the keys in pinned_objects and pack them densely into
    // the same array - pinned_objects.keys[] - but skip any simple-funs.
    // Admittedly this is abstraction breakage.
    int limit = hopscotch_max_key_index(pinned_objects);
    int n_pins = 0, i;
    for (i = 0; i <= limit; ++i) {
        lispobj key = pinned_objects.keys[i];
        if (key) {
            lispobj* obj = native_pointer(key);
            // No need to check for is_cons_half() - it will be false
            // on a simple-fun header, and that's the correct answer.
            if (widetag_of(*obj) != SIMPLE_FUN_WIDETAG)
                pinned_objects.keys[n_pins++] = (uword_t)obj;
        }
    }
    // Store a sentinel at the end. Even if n_pins = table capacity (unlikely),
    // it is safe to write one more word, because the hops[] array immediately
    // follows the keys[] array in memory. At worst, 2 elements of hops[]
    // are clobbered, which is irrelevant since the table has already been
    // rendered unusable by stealing its key array for a different purpose.
    pinned_objects.keys[n_pins] = 0;
    // Don't touch pinned_objects.count in case the reset function uses it
    // to decide how to resize for next use (which it doesn't, but could).
    gc_n_stack_pins = n_pins;
    // Order by ascending address, stopping short of the sentinel.
    gc_heapsort_uwords(pinned_objects.keys, n_pins);
#if 0
    printf("Sorted pin list:\n");
    for (i = 0; i < n_pins; ++i) {
        lispobj* obj = (lispobj*)pinned_objects.keys[i];
        if (!is_cons_half(*obj))
            printf("%p: %5d words\n", obj, (int)sizetab[widetag_of(*obj)](obj));
        else printf("%p: CONS\n", obj);
    }
#endif
    // Each entry in the pinned objects demarcates two ranges to be cleared:
    // - the range preceding it back to either the page start, or prior object.
    // - the range after it, up to the lesser of page bytes used or next object.
    uword_t preceding_object = 0;
    uword_t this_page_end = 0;
#define page_base_address(x) (x&~(GENCGC_CARD_BYTES-1))
    for (i = 0; i < n_pins; ++i) {
        // Handle the preceding range. If this object is on the same page as
        // its predecessor, then intervening bytes were already zeroed.
        // If not, then start a new page and do some bookkeeping.
        lispobj* obj = (lispobj*)pinned_objects.keys[i];
        uword_t this_page_base = page_base_address((uword_t)obj);
        /* printf("i=%d obj=%p base=%p\n", i, obj, (void*)this_page_base); */
        if (this_page_base > page_base_address(preceding_object)) {
            deposit_filler(this_page_base, (lispobj)obj);
            // Move the page to newspace
            page_index_t page = find_page_index(obj);
            int used = page_bytes_used(page);
            this_page_end = this_page_base + used;
            /* printf("  Clearing %p .. %p (limit=%p)\n",
                      (void*)this_page_base, obj, (void*)this_page_end); */
            generations[new_space].bytes_allocated += used;
            generations[page_table[page].gen].bytes_allocated -= used;
            page_table[page].gen = new_space;
            page_table[page].has_pins = 0;
        }
        // Handle the following range.
        lispobj word = *obj;
        size_t nwords = is_cons_half(word) ? 2 : sizetab[widetag_of(word)](obj);
        uword_t range_start = (uword_t)(obj + nwords);
        uword_t range_end = this_page_end;
        // There is always an i+1'th key due to the sentinel value.
        if (page_base_address(pinned_objects.keys[i+1]) == this_page_base)
            range_end = pinned_objects.keys[i+1];
        /* printf("  Clearing %p .. %p\n", (void*)range_start, (void*)range_end); */
        deposit_filler(range_start, range_end);
        preceding_object = (uword_t)obj;
    }
}
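/* A sketch of the wiping performed above, with hypothetical addresses.
 * Suppose a dont_move card starting at 0x10000 holds, in order, pinned
 * object A at 0x10040, dead data, then pinned object B at 0x10100:
 *   - the range 0x10000..0x10040 (page start up to A) gets a filler,
 *   - the range from A's end up to 0x10100 gets a filler (truncated at B,
 *     since B is on the same page),
 *   - the range from B's end up to the page's bytes-used limit gets a filler.
 * Afterwards only the words inside A and B can still be misread as pointers. */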
/* Add 'object' to the hashtable, and if the object is a code component,
 * then also add all of the embedded simple-funs.
 * The rationale for the extra work on code components is that without it,
 * every test of pinned_p() on an object would have to check if the pointer
 * is to a simple-fun - entailing an extra read of the header - and mapping
 * to its code component if so. Since more calls to pinned_p occur than to
 * pin_object, the extra burden should be on this function.
 * Experimentation bears out that this is the better technique.
 * Also, we wouldn't often expect code components in the collected generation
 * so the extra work here is quite minimal, even if it can generally add to
 * the number of keys in the hashtable.
 */
static void
pin_object(lispobj* base_addr)
{
    lispobj object = compute_lispobj(base_addr);
    if (!hopscotch_containsp(&pinned_objects, object)) {
        hopscotch_insert(&pinned_objects, object, 1);
        struct code* maybe_code = (struct code*)native_pointer(object);
        if (widetag_of(maybe_code->header) == CODE_HEADER_WIDETAG) {
            for_each_simple_fun(i, fun, maybe_code, 0, {
                hopscotch_insert(&pinned_objects,
                                 make_lispobj(fun, FUN_POINTER_LOWTAG),
                                 1);
            })
        }
    }
}
#else
#  define scavenge_pinned_ranges()
#  define wipe_nonpinned_words()
#endif
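/* Illustrative example for pin_object() above (hypothetical object): pinning
 * a code component that embeds two simple-funs inserts three keys into
 * pinned_objects:
 *
 *   pin_object(code_base);
 *   // keys now contain:  code_base    | OTHER_POINTER_LOWTAG
 *   //                    simple_fun_0 | FUN_POINTER_LOWTAG
 *   //                    simple_fun_1 | FUN_POINTER_LOWTAG
 *
 * so a later pinned_p() on an interior simple-fun pointer can succeed without
 * first mapping it back to its code header. */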
/* Take a possible pointer to a Lisp object and mark its page in the
 * page_table so that it will not be relocated during a GC.
 *
 * This involves locating the page it points to, then backing up to
 * the start of its region, then marking all pages dont_move from there
 * up to the first page that's not full or has a different generation.
 *
 * It is assumed that all the page static flags have been cleared at
 * the start of a GC.
 *
 * It is also assumed that the current gc_alloc() region has been
 * flushed and the tables updated. */

// TODO: there's probably a way to be a little more efficient here.
// As things are, we start by finding the object that encloses 'addr',
// then we see if 'addr' was a "valid" Lisp pointer to that object
// - meaning we expect the correct lowtag on the pointer - except
// that for code objects we don't require a correct lowtag
// and we allow a pointer to anywhere in the object.
//
// It should be possible to avoid calling search_dynamic_space
// more of the time. First, check if the page pointed to might hold code.
// If it does, then we continue regardless of the pointer's lowtag
// (because of the special allowance). If the page definitely does *not*
// hold code, then we require up front that the lowtag make sense,
// by doing the same checks that are in properly_tagged_descriptor_p.
//
// Problem: when code is allocated from a per-thread region,
// does it ensure that the occupied pages are flagged as having code?

#if defined(__GNUC__) && defined(MEMORY_SANITIZER)
#define NO_SANITIZE_MEMORY __attribute__((no_sanitize_memory))
#else
#define NO_SANITIZE_MEMORY
#endif

static void NO_SANITIZE_MEMORY
preserve_pointer(void *addr)
{
#ifdef LISP_FEATURE_IMMOBILE_SPACE
    /* Immobile space MUST be lower than dynamic space,
       or else this test needs to be revised */
    if (addr < (void*)IMMOBILE_SPACE_END) {
        extern void immobile_space_preserve_pointer(void*);
        immobile_space_preserve_pointer(addr);
        return;
    }
#endif
    page_index_t addr_page_index = find_page_index(addr);

#ifdef GENCGC_IS_PRECISE
    /* If we're in precise gencgc (non-x86oid as of this writing) then
     * we are only called on valid object pointers in the first place,
     * so we just have to do a bounds-check against the heap, a
     * generation check, and the already-pinned check. */
    if (addr_page_index == -1
        || (page_table[addr_page_index].gen != from_space)
        || page_table[addr_page_index].dont_move)
        return;
#else
    lispobj *object_start;
    if (addr_page_index == -1
        || (object_start = conservative_root_p(addr, addr_page_index)) == 0)
        return;
#endif

    /* (Now that we know that addr_page_index is in range, it's
     * safe to index into page_table[] with it.) */
    unsigned int region_allocation = page_table[addr_page_index].allocated;

    /* Find the beginning of the region. Note that there may be
     * objects in the region preceding the one that we were passed a
     * pointer to: if this is the case, we will write-protect all the
     * previous objects' pages too. */

#if 0
    /* I think this'd work just as well, but without the assertions.
     * -dan 2004.01.01 */
    page_index_t first_page = find_page_index(page_scan_start(addr_page_index))
#else
    page_index_t first_page = addr_page_index;
    while (!page_starts_contiguous_block_p(first_page)) {
        --first_page;
        /* Do some checks. */
        gc_assert(page_bytes_used(first_page) == GENCGC_CARD_BYTES);
        gc_assert(page_table[first_page].gen == from_space);
        gc_assert(page_table[first_page].allocated == region_allocation);
    }
#endif

    /* Adjust any large objects before promotion as they won't be
     * copied after promotion. */
    if (page_table[first_page].large_object) {
        maybe_adjust_large_object(first_page);
        /* It may have moved to unboxed pages. */
        region_allocation = page_table[first_page].allocated;
    }

    /* Now work forward until the end of this contiguous area is found,
     * marking all pages as dont_move. */
    page_index_t i;
    for (i = first_page; ;i++) {
        gc_assert(page_table[i].allocated == region_allocation);

        /* Mark the page static. */
        page_table[i].dont_move = 1;

        /* It is essential that the pages are not write protected as
         * they may have pointers into the old-space which need
         * scavenging. They shouldn't be write protected at this
         * stage. */
        gc_assert(!page_table[i].write_protected);

        /* Check whether this is the last page in this contiguous block.. */
        if (page_ends_contiguous_block_p(i, from_space))
            break;
    }

#ifdef PIN_GRANULARITY_LISPOBJ
    /* Do not do this for multi-page objects. Those pages do not need
     * object wipeout anyway. */
    if (i == first_page) { // single-page object
        pin_object(object_start);
        page_table[i].has_pins = 1;
    }
#endif

    /* Check that the page is now static. */
    gc_assert(page_table[addr_page_index].dont_move != 0);
}
#define IN_REGION_P(a,kind) (kind##_region.start_addr<=a && a<=kind##_region.free_pointer)
#ifdef LISP_FEATURE_SEGREGATED_CODE
#define IN_BOXED_REGION_P(a) IN_REGION_P(a,boxed)||IN_REGION_P(a,code)
#else
#define IN_BOXED_REGION_P(a) IN_REGION_P(a,boxed)
#endif
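/* For instance, IN_REGION_P(p,unboxed) token-pastes 'unboxed' and expands to
 *   (unboxed_region.start_addr <= p && p <= unboxed_region.free_pointer)
 * i.e. "does p fall inside the currently open unboxed allocation region?" */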
/* If the given page is not write-protected, then scan it for pointers
 * to younger generations or the top temp. generation, if no
 * suspicious pointers are found then the page is write-protected.
 *
 * Care is taken to check for pointers to the current gc_alloc()
 * region if it is a younger generation or the temp. generation. This
 * frees the caller from doing a gc_alloc_update_page_tables(). Actually
 * the gc_alloc_generation does not need to be checked as this is only
 * called from scavenge_generation() when the gc_alloc generation is
 * younger, so it just checks if there is a pointer to the current
 * region.
 *
 * We return 1 if the page was write-protected, else 0. */
static int
update_page_write_prot(page_index_t page)
{
    generation_index_t gen = page_table[page].gen;
    sword_t j;
    int wp_it = 1;
    void **page_addr = (void **)page_address(page);
    sword_t num_words = page_bytes_used(page) / N_WORD_BYTES;

    /* Shouldn't be a free page. */
    gc_assert(!page_free_p(page));
    gc_assert(page_bytes_used(page) != 0);

    if (!ENABLE_PAGE_PROTECTION) return 0;

    /* Skip if it's already write-protected, pinned, or unboxed */
    if (page_table[page].write_protected
        /* FIXME: What's the reason for not write-protecting pinned pages? */
        || page_table[page].dont_move
        || page_unboxed_p(page))
        return (0);

    /* Scan the page for pointers to younger generations or the
     * top temp. generation. */

    /* This is conservative: any word satisfying is_lisp_pointer() is
     * assumed to be a pointer. To do otherwise would require a family
     * of scavenge-like functions. */
    for (j = 0; j < num_words; j++) {
        void *ptr = *(page_addr+j);
        page_index_t index;
        lispobj __attribute__((unused)) header;

        if (!is_lisp_pointer((lispobj)ptr))
            continue;
        /* Check that it's in the dynamic space */
        if ((index = find_page_index(ptr)) != -1) {
            if (/* Does it point to a younger or the temp. generation? */
                (!page_free_p(index)
                 && (page_bytes_used(index) != 0)
                 && ((page_table[index].gen < gen)
                     || (page_table[index].gen == SCRATCH_GENERATION)))

                /* Or does it point within a current gc_alloc() region? */
                || (IN_BOXED_REGION_P(ptr) || IN_REGION_P(ptr,unboxed))) {
                wp_it = 0;
                break;
            }
        }
#ifdef LISP_FEATURE_IMMOBILE_SPACE
        else if ((index = find_immobile_page_index(ptr)) >= 0 &&
                 other_immediate_lowtag_p(header = *native_pointer((lispobj)ptr))) {
            // This is *possibly* a pointer to an object in immobile space,
            // given that above two conditions were satisfied.
            // But unlike in the dynamic space case, we need to read a byte
            // from the object to determine its generation, which requires care.
            // Consider an unboxed word that looks like a pointer to a word that
            // looks like fun-header-widetag. We can't naively back up to the
            // underlying code object since the alleged header might not be one.
            int obj_gen = gen; // Make comparison fail if we fall through
            if (lowtag_of((lispobj)ptr) != FUN_POINTER_LOWTAG) {
                obj_gen = __immobile_obj_generation(native_pointer((lispobj)ptr));
            } else if (widetag_of(header) == SIMPLE_FUN_WIDETAG) {
                lispobj* code = fun_code_header((lispobj)ptr - FUN_POINTER_LOWTAG);
                // This is a heuristic, since we're not actually looking for
                // an object boundary. Precise scanning of 'page' would obviate
                // the guard conditions here.
                if ((lispobj)code >= IMMOBILE_VARYOBJ_SUBSPACE_START
                    && widetag_of(*code) == CODE_HEADER_WIDETAG)
                    obj_gen = __immobile_obj_generation(code);
            }
            // A bogus generation number implies a not-really-pointer,
            // but it won't cause misbehavior.
            if (obj_gen < gen || obj_gen == SCRATCH_GENERATION) {
                wp_it = 0;
                break;
            }
        }
#endif
    }

    if (wp_it == 1) {
        /* Write-protect the page. */
        /*FSHOW((stderr, "/write-protecting page %d gen %d\n", page, gen));*/

        os_protect((void *)page_addr,
                   GENCGC_CARD_BYTES,
                   OS_VM_PROT_READ|OS_VM_PROT_EXECUTE);

        /* Note the page as protected in the page tables. */
        page_table[page].write_protected = 1;
    }

    return (wp_it);
}
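/* The invariant this maintains: once a page of generation 'gen' is
 * write-protected here, no word on it points to a younger generation (nor to
 * the scratch generation), so collections of the younger generations may skip
 * the page. The first mutator write faults, the fault handler clears
 * write_protected, and the page is rescanned (and possibly re-protected) on
 * the next call here. */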
/* Is this page holding a normal (non-hashtable) large-object
 * simple-vector? */
static inline boolean large_simple_vector_p(page_index_t page) {
    if (!page_table[page].large_object)
        return 0;
    lispobj object = *(lispobj *)page_address(page);
    return widetag_of(object) == SIMPLE_VECTOR_WIDETAG &&
        (HeaderValue(object) & 0xFF) == subtype_VectorNormal;
}
/* Scavenge all generations from FROM to TO, inclusive, except for
 * new_space which needs special handling, as new objects may be
 * added which are not checked here - use scavenge_newspace generation.
 *
 * Write-protected pages should not have any pointers to the
 * from_space so do not need scavenging; thus write-protected pages are
 * not always scavenged. There is some code to check that these pages
 * are not written; but to check fully the write-protected pages need
 * to be scavenged by disabling the code to skip them.
 *
 * Under the current scheme when a generation is GCed the younger
 * generations will be empty. So, when a generation is being GCed it
 * is only necessary to scavenge the older generations for pointers
 * not the younger. So a page that does not have pointers to younger
 * generations does not need to be scavenged.
 *
 * The write-protection can be used to note pages that don't have
 * pointers to younger pages. But pages can be written without having
 * pointers to younger generations. After the pages are scavenged here
 * they can be scanned for pointers to younger generations and if
 * there are none the page can be write-protected.
 *
 * One complication is when the newspace is the top temp. generation.
 *
 * Enabling SC_GEN_CK scavenges the write-protected pages and checks
 * that none were written, which they shouldn't be as they should have
 * no pointers to younger generations. This breaks down for weak
 * pointers as the objects contain a link to the next and are written
 * if a weak pointer is scavenged. Still it's a useful check. */
static void
scavenge_generations(generation_index_t from, generation_index_t to)
{
    page_index_t i;
    page_index_t num_wp = 0;

#if SC_GEN_CK
    /* Clear the write_protected_cleared flags on all pages. */
    for (i = 0; i < page_table_pages; i++)
        page_table[i].write_protected_cleared = 0;
#endif

    for (i = 0; i < last_free_page; i++) {
        generation_index_t generation = page_table[i].gen;
        if (page_boxed_p(i)
            && (page_bytes_used(i) != 0)
            && (generation != new_space)
            && (generation >= from)
            && (generation <= to)) {
            page_index_t last_page,j;
            int write_protected=1;

            /* This should be the start of a region */
            gc_assert(page_starts_contiguous_block_p(i));

            if (large_simple_vector_p(i)) {
                /* Scavenge only the unprotected pages of a
                 * large-object vector, other large objects could be
                 * handled as well, but vectors are easier to deal
                 * with and are more likely to grow to very large
                 * sizes where avoiding scavenging the whole thing is
                 * worthwhile. */
                if (!page_table[i].write_protected) {
                    scavenge((lispobj*)page_address(i) + 2,
                             GENCGC_CARD_BYTES / N_WORD_BYTES - 2);
                    update_page_write_prot(i);
                }
                for (last_page = i + 1; ; last_page++) {
                    lispobj* start = (lispobj*)page_address(last_page);
                    write_protected = page_table[last_page].write_protected;
                    if (page_ends_contiguous_block_p(last_page, generation)) {
                        if (!write_protected) {
                            scavenge(start, page_bytes_used(last_page) / N_WORD_BYTES);
                            update_page_write_prot(last_page);
                        }
                        break;
                    }
                    if (!write_protected) {
                        scavenge(start, GENCGC_CARD_BYTES / N_WORD_BYTES);
                        update_page_write_prot(last_page);
                    }
                }
            } else {
                /* Now work forward until the end of the region */
                for (last_page = i; ; last_page++) {
                    write_protected =
                        write_protected && page_table[last_page].write_protected;
                    if (page_ends_contiguous_block_p(last_page, generation))
                        break;
                }
                if (!write_protected) {
                    heap_scavenge((lispobj*)page_address(i),
                                  (lispobj*)(page_address(last_page)
                                             + page_bytes_used(last_page)));

                    /* Now scan the pages and write protect those that
                     * don't have pointers to younger generations. */
                    if (ENABLE_PAGE_PROTECTION) {
                        for (j = i; j <= last_page; j++) {
                            num_wp += update_page_write_prot(j);
                        }
                    }
                    if ((gencgc_verbose > 1) && (num_wp != 0)) {
                        FSHOW((stderr,
                               "/write protected %d pages within generation %d\n",
                               num_wp, generation));
                    }
                }
            }
            i = last_page;
        }
    }

#if SC_GEN_CK
    /* Check that none of the write_protected pages in this generation
     * have been written to. */
    for (i = 0; i < page_table_pages; i++) {
        if (!page_free_p(i)
            && (page_bytes_used(i) != 0)
            && (page_table[i].gen == generation)
            && (page_table[i].write_protected_cleared != 0)) {
            FSHOW((stderr, "/scavenge_generation() %d\n", generation));
            FSHOW((stderr,
                   "/page bytes_used=%d scan_start_offset=%lu dont_move=%d\n",
                   page_bytes_used(i),
                   scan_start_offset(page_table[i]),
                   page_table[i].dont_move));
            lose("write to protected page %d in scavenge_generation()\n", i);
        }
    }
#endif
}
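/* For instance, when collecting generation 0 this is invoked as
 * scavenge_generations(1, PSEUDO_STATIC_GENERATION): every boxed page of an
 * older generation that is not write-protected gets scanned for pointers into
 * generation 0, and pages found clean are (re-)protected on the way out. */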
/* Scavenge a newspace generation. As it is scavenged new objects may
 * be allocated to it; these will also need to be scavenged. This
 * repeats until there are no more objects unscavenged in the
 * newspace generation.
 *
 * To help improve the efficiency, areas written are recorded by
 * gc_alloc() and only these scavenged. Sometimes a little more will be
 * scavenged, but this causes no harm. An easy check is done that the
 * scavenged bytes equals the number allocated in the previous
 * scavenge.
 *
 * Write-protected pages are not scanned except if they are marked
 * dont_move in which case they may have been promoted and still have
 * pointers to the from space.
 *
 * Write-protected pages could potentially be written by alloc however
 * to avoid having to handle re-scavenging of write-protected pages
 * gc_alloc() does not write to write-protected pages.
 *
 * New areas of objects allocated are recorded alternatively in the two
 * new_areas arrays below. */
static struct new_area new_areas_1[NUM_NEW_AREAS];
static struct new_area new_areas_2[NUM_NEW_AREAS];

#ifdef LISP_FEATURE_IMMOBILE_SPACE
extern unsigned int immobile_scav_queue_count;
extern void
  update_immobile_nursery_bits(),
  scavenge_immobile_roots(generation_index_t,generation_index_t),
  scavenge_immobile_newspace(),
  sweep_immobile_space(int raise),
  write_protect_immobile_space();
#else
#define immobile_scav_queue_count 0
#endif
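/* Sketch of the double-buffering above (illustrative): pass N scavenges the
 * areas recorded in new_areas_1 while gc_alloc() appends any freshly copied
 * regions to new_areas_2; pass N+1 swaps the roles. The loop in
 * scavenge_newspace_generation() below terminates once a pass records no new
 * areas (and the immobile-space queue, if any, is empty). */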
/* Do one full scan of the new space generation. This is not enough to
 * complete the job as new objects may be added to the generation in
 * the process which are not scavenged. */
static void
scavenge_newspace_generation_one_scan(generation_index_t generation)
{
    page_index_t i;

    FSHOW((stderr,
           "/starting one full scan of newspace generation %d\n",
           generation));
    for (i = 0; i < last_free_page; i++) {
        /* Note that this skips over open regions when it encounters them. */
        if (page_boxed_p(i)
            && (page_bytes_used(i) != 0)
            && (page_table[i].gen == generation)
            && (!page_table[i].write_protected
                /* (This may be redundant as write_protected is now
                 * cleared before promotion.) */
                || page_table[i].dont_move)) {
            page_index_t last_page;
            int all_wp=1;

            /* The scavenge will start at the scan_start_offset of
             * page i.
             *
             * We need to find the full extent of this contiguous
             * block in case objects span pages.
             *
             * Now work forward until the end of this contiguous area
             * is found. A small area is preferred as there is a
             * better chance of its pages being write-protected. */
            for (last_page = i; ;last_page++) {
                /* If all pages are write-protected and movable,
                 * then no need to scavenge */
                all_wp=all_wp && page_table[last_page].write_protected &&
                    !page_table[last_page].dont_move;

                /* Check whether this is the last page in this
                 * contiguous block */
                if (page_ends_contiguous_block_p(last_page, generation))
                    break;
            }

            /* Do a limited check for write-protected pages. */
            if (!all_wp) {
                new_areas_ignore_page = last_page;
                heap_scavenge(page_scan_start(i),
                              (lispobj*)(page_address(last_page)
                                         + page_bytes_used(last_page)));
            }
            i = last_page;
        }
    }
    FSHOW((stderr,
           "/done with one full scan of newspace generation %d\n",
           generation));
}
/* Do a complete scavenge of the newspace generation. */
static void
scavenge_newspace_generation(generation_index_t generation)
{
    size_t i;

    /* the new_areas array currently being written to by gc_alloc() */
    struct new_area (*current_new_areas)[] = &new_areas_1;
    size_t current_new_areas_index;

    /* the new_areas created by the previous scavenge cycle */
    struct new_area (*previous_new_areas)[] = NULL;
    size_t previous_new_areas_index;

    /* Flush the current regions updating the tables. */
    gc_alloc_update_all_page_tables(0);

    /* Turn on the recording of new areas by gc_alloc(). */
    new_areas = current_new_areas;
    new_areas_index = 0;

    /* Don't need to record new areas that get scavenged anyway during
     * scavenge_newspace_generation_one_scan. */
    record_new_objects = 1;

    /* Start with a full scavenge. */
    scavenge_newspace_generation_one_scan(generation);

    /* Record all new areas now. */
    record_new_objects = 2;

    /* Give a chance to weak hash tables to make other objects live.
     * FIXME: The algorithm implemented here for weak hash table gcing
     * is O(W^2+N) as Bruno Haible warns in
     * http://www.haible.de/bruno/papers/cs/weak/WeakDatastructures-writeup.html
     * see "Implementation 2". */
    scav_weak_hash_tables();

    /* Flush the current regions updating the tables. */
    gc_alloc_update_all_page_tables(0);

    /* Grab new_areas_index. */
    current_new_areas_index = new_areas_index;

    /*FSHOW((stderr,
             "The first scan is finished; current_new_areas_index=%d.\n",
             current_new_areas_index));*/

    while (current_new_areas_index > 0 || immobile_scav_queue_count) {
        /* Move the current to the previous new areas */
        previous_new_areas = current_new_areas;
        previous_new_areas_index = current_new_areas_index;

        /* Scavenge all the areas in previous new areas. Any new areas
         * allocated are saved in current_new_areas. */

        /* Allocate an array for current_new_areas; alternating between
         * new_areas_1 and 2 */
        if (previous_new_areas == &new_areas_1)
            current_new_areas = &new_areas_2;
        else
            current_new_areas = &new_areas_1;

        /* Set up for gc_alloc(). */
        new_areas = current_new_areas;
        new_areas_index = 0;

#ifdef LISP_FEATURE_IMMOBILE_SPACE
        scavenge_immobile_newspace();
#endif
        /* Check whether previous_new_areas had overflowed. */
        if (previous_new_areas_index >= NUM_NEW_AREAS) {

            /* New areas of objects allocated have been lost so need to do a
             * full scan to be sure! If this becomes a problem try
             * increasing NUM_NEW_AREAS. */
            if (gencgc_verbose) {
                SHOW("new_areas overflow, doing full scavenge");
            }

            /* Don't need to record new areas that get scavenged
             * anyway during scavenge_newspace_generation_one_scan. */
            record_new_objects = 1;

            scavenge_newspace_generation_one_scan(generation);

            /* Record all new areas now. */
            record_new_objects = 2;

            scav_weak_hash_tables();

            /* Flush the current regions updating the tables. */
            gc_alloc_update_all_page_tables(0);

        } else {

            /* Work through previous_new_areas. */
            for (i = 0; i < previous_new_areas_index; i++) {
                page_index_t page = (*previous_new_areas)[i].page;
                size_t offset = (*previous_new_areas)[i].offset;
                size_t size = (*previous_new_areas)[i].size;
                gc_assert(size % N_WORD_BYTES == 0);
                lispobj *start = (lispobj*)(page_address(page) + offset);
                heap_scavenge(start, (lispobj*)((char*)start + size));
            }

            scav_weak_hash_tables();

            /* Flush the current regions updating the tables. */
            gc_alloc_update_all_page_tables(0);
        }

        current_new_areas_index = new_areas_index;

        /*FSHOW((stderr,
                 "The re-scan has finished; current_new_areas_index=%d.\n",
                 current_new_areas_index));*/
    }

    /* Turn off recording of areas allocated by gc_alloc(). */
    record_new_objects = 0;

#if SC_NS_GEN_CK
    {
        page_index_t i;
        /* Check that none of the write_protected pages in this generation
         * have been written to. */
        for (i = 0; i < page_table_pages; i++) {
            if (!page_free_p(i)
                && (page_bytes_used(i) != 0)
                && (page_table[i].gen == generation)
                && (page_table[i].write_protected_cleared != 0)
                && (page_table[i].dont_move == 0)) {
                lose("write protected page %d written to in scavenge_newspace_generation\ngeneration=%d dont_move=%d\n",
                     i, generation, page_table[i].dont_move);
            }
        }
    }
#endif
}
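/* Concretely: if one scavenge pass copies more than NUM_NEW_AREAS distinct
 * regions, new_areas_index exceeds the array size and the extents of the
 * excess regions are lost; the overflow branch above then performs one full
 * scavenge_newspace_generation_one_scan() pass instead of trusting the
 * truncated list. Raising NUM_NEW_AREAS trades memory for avoiding that
 * full rescan. */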
/* Un-write-protect all the pages in from_space. This is done at the
 * start of a GC else there may be many page faults while scavenging
 * the newspace (I've seen it drive the system time to 99%). These pages
 * would need to be unprotected anyway before unmapping in
 * free_oldspace; not sure what effect this has on paging.. */
static void
unprotect_oldspace(void)
{
    page_index_t i;
    char *region_addr = 0;
    char *page_addr = 0;
    uword_t region_bytes = 0;

    for (i = 0; i < last_free_page; i++) {
        if (!page_free_p(i)
            && (page_bytes_used(i) != 0)
            && (page_table[i].gen == from_space)) {

            /* Remove any write-protection. We should be able to rely
             * on the write-protect flag to avoid redundant calls. */
            if (page_table[i].write_protected) {
                page_table[i].write_protected = 0;
                page_addr = page_address(i);
                if (!region_addr) {
                    /* First region. */
                    region_addr = page_addr;
                    region_bytes = GENCGC_CARD_BYTES;
                } else if (region_addr + region_bytes == page_addr) {
                    /* Region continue. */
                    region_bytes += GENCGC_CARD_BYTES;
                } else {
                    /* Unprotect previous region. */
                    os_protect(region_addr, region_bytes, OS_VM_PROT_ALL);
                    /* First page in new region. */
                    region_addr = page_addr;
                    region_bytes = GENCGC_CARD_BYTES;
                }
            }
        }
    }
    if (region_addr) {
        /* Unprotect last region. */
        os_protect(region_addr, region_bytes, OS_VM_PROT_ALL);
    }
}
/* Work through all the pages and free any in from_space. This
 * assumes that all objects have been copied or promoted to an older
 * generation. Bytes_allocated and the generation bytes_allocated
 * counter are updated. The number of bytes freed is returned. */
static uword_t
free_oldspace(void)
{
    uword_t bytes_freed = 0;
    page_index_t first_page, last_page;

    first_page = 0;

    do {
        /* Find a first page for the next region of pages. */
        while ((first_page < last_free_page)
               && (page_free_p(first_page)
                   || (page_bytes_used(first_page) == 0)
                   || (page_table[first_page].gen != from_space)))
            first_page++;

        if (first_page >= last_free_page)
            break;

        /* Find the last page of this region. */
        last_page = first_page;

        do {
            /* Free the page. */
            bytes_freed += page_bytes_used(last_page);
            generations[page_table[last_page].gen].bytes_allocated -=
                page_bytes_used(last_page);
            page_table[last_page].allocated = FREE_PAGE_FLAG;
            set_page_bytes_used(last_page, 0);
            /* Should already be unprotected by unprotect_oldspace(). */
            gc_assert(!page_table[last_page].write_protected);
            last_page++;
        }
        while ((last_page < last_free_page)
               && !page_free_p(last_page)
               && (page_bytes_used(last_page) != 0)
               && (page_table[last_page].gen == from_space));

#ifdef READ_PROTECT_FREE_PAGES
        os_protect(page_address(first_page),
                   npage_bytes(last_page-first_page),
                   OS_VM_PROT_NONE);
#endif
        first_page = last_page;
    } while (first_page < last_free_page);

    bytes_allocated -= bytes_freed;
    return bytes_freed;
}
/* Print some information about a pointer at the given address. */
static void
print_ptr(lispobj *addr)
{
    /* If addr is in the dynamic space then print out the page information. */
    page_index_t pi1 = find_page_index((void*)addr);

    if (pi1 != -1)
        fprintf(stderr,"  %p: page %d  alloc %d  gen %d  bytes_used %d  offset %lu  dont_move %d\n",
                addr,
                (int)pi1,
                page_table[pi1].allocated,
                page_table[pi1].gen,
                (int)page_bytes_used(pi1),
                (unsigned long)scan_start_offset(page_table[pi1]),
                page_table[pi1].dont_move);
    fprintf(stderr,"  %x %x %x %x (%x) %x %x %x %x\n",
            *(addr-4),
            *(addr-3),
            *(addr-2),
            *(addr-1),
            *(addr-0),
            *(addr+1),
            *(addr+2),
            *(addr+3),
            *(addr+4));
}
static boolean
is_in_stack_space(lispobj ptr)
{
    /* For space verification: Pointers can be valid if they point
     * to a thread stack space. This would be faster if the thread
     * structures had page-table entries as if they were part of
     * the heap space. */
    struct thread *th;
    for_each_thread(th) {
        if ((th->control_stack_start <= (lispobj *)ptr) &&
            (th->control_stack_end >= (lispobj *)ptr)) {
            return 1;
        }
    }
    return 0;
}
// NOTE: This function can produce false failure indications,
// usually related to dynamic space pointing to the stack of a
// dead thread, but there may be other reasons as well.
static void
verify_range(lispobj *start, size_t words)
{
    extern int valid_lisp_pointer_p(lispobj);
    int is_in_readonly_space =
        (READ_ONLY_SPACE_START <= (uword_t)start &&
         start < read_only_space_free_pointer);
#ifdef LISP_FEATURE_IMMOBILE_SPACE
    int is_in_immobile_space =
        (IMMOBILE_SPACE_START <= (uword_t)start &&
         (uword_t)start < SymbolValue(IMMOBILE_SPACE_FREE_POINTER,0));
#endif

    lispobj *end = start + words;
    size_t count;
    for ( ; start < end ; start += count) {
        count = 1;
        lispobj thing = *start;
        lispobj __attribute__((unused)) pointee;

        if (is_lisp_pointer(thing)) {
            page_index_t page_index = find_page_index((void*)thing);
            sword_t to_readonly_space =
                (READ_ONLY_SPACE_START <= thing &&
                 thing < (lispobj)read_only_space_free_pointer);
            sword_t to_static_space =
                (STATIC_SPACE_START <= thing &&
                 thing < (lispobj)static_space_free_pointer);
#ifdef LISP_FEATURE_IMMOBILE_SPACE
            sword_t to_immobile_space =
                (IMMOBILE_SPACE_START <= thing &&
                 thing < SymbolValue(IMMOBILE_FIXEDOBJ_FREE_POINTER,0)) ||
                (IMMOBILE_VARYOBJ_SUBSPACE_START <= thing &&
                 thing < SymbolValue(IMMOBILE_SPACE_FREE_POINTER,0));
#endif

            /* Does it point to the dynamic space? */
            if (page_index != -1) {
                /* If it's within the dynamic space it should point to a used page. */
                if (page_free_p(page_index))
                    lose ("Ptr %p @ %p sees free page.\n", thing, start);
                if ((thing & (GENCGC_CARD_BYTES-1)) >= page_bytes_used(page_index))
                    lose ("Ptr %p @ %p sees unallocated space.\n", thing, start);
                /* Check that it doesn't point to a forwarding pointer! */
                if (*native_pointer(thing) == 0x01) {
                    lose("Ptr %p @ %p sees forwarding ptr.\n", thing, start);
                }
                /* Check that it's not in the RO space as it would then be a
                 * pointer from the RO to the dynamic space. */
                if (is_in_readonly_space) {
                    lose("ptr to dynamic space %p from RO space %x\n",
                         thing, start);
                }
#ifdef LISP_FEATURE_IMMOBILE_SPACE
                // verify all immobile space -> dynamic space pointers
                if (is_in_immobile_space && !valid_lisp_pointer_p(thing)) {
                    lose("Ptr %p @ %p sees junk.\n", thing, start);
                }
#endif
                /* Does it point to a plausible object? This check slows
                 * it down a lot (so it's commented out).
                 *
                 * "a lot" is serious: it ate 50 minutes cpu time on
                 * my duron 950 before I came back from lunch and
                 * killed it.
                 *
                 * FIXME: Add a variable to enable this
                 * dynamically. */
                /*
                if (!valid_lisp_pointer_p((lispobj *)thing)) {
                    lose("ptr %p to invalid object %p\n", thing, start);
                }
                */
#ifdef LISP_FEATURE_IMMOBILE_SPACE
            } else if (to_immobile_space) {
                // the object pointed to must not have been discarded as garbage
                if (!other_immediate_lowtag_p(*native_pointer(thing))
                    || immobile_filler_p(native_pointer(thing)))
                    lose("Ptr %p @ %p sees trashed object.\n", (void*)thing, start);
                // verify all pointers to immobile space
                if (!valid_lisp_pointer_p(thing))
                    lose("Ptr %p @ %p sees junk.\n", thing, start);
#endif
            } else {
                extern char __attribute__((unused)) funcallable_instance_tramp;
                /* Verify that it points to another valid space. */
                if (!to_readonly_space && !to_static_space
                    && !is_in_stack_space(thing)) {
                    lose("Ptr %p @ %p sees junk.\n", thing, start);
                }
            }
            continue;
        }
        int widetag = widetag_of(thing);
        if (is_lisp_immediate(thing) || widetag == NO_TLS_VALUE_MARKER_WIDETAG) {
            /* skip immediates */
        } else if (!(other_immediate_lowtag_p(widetag)
                     && lowtag_for_widetag[widetag>>2])) {
            lose("Unhandled widetag %p at %p\n", widetag, start);
        } else if (unboxed_obj_widetag_p(widetag)) {
            count = sizetab[widetag](start);
        } else switch(widetag) {
            /* boxed or partially boxed objects */
            // FIXME: x86-64 can have partially unboxed FINs. The raw words
            // are at the moment valid fixnums by blind luck.
            case INSTANCE_WIDETAG:
                if (instance_layout(start)) {
                    sword_t nslots = instance_length(thing) | 1;
                    instance_scan(verify_range, start+1, nslots,
                                  LAYOUT(instance_layout(start))->bitmap);
                    count = 1 + nslots;
                }
                break;
            case CODE_HEADER_WIDETAG:
                {
                struct code *code = (struct code *) start;
                sword_t nheader_words = code_header_words(code->header);
                /* Scavenge the boxed section of the code data block */
                verify_range(start + 1, nheader_words - 1);

                /* Scavenge the boxed section of each function
                 * object in the code data block. */
                for_each_simple_fun(i, fheaderp, code, 1, {
                    verify_range(SIMPLE_FUN_SCAV_START(fheaderp),
                                 SIMPLE_FUN_SCAV_NWORDS(fheaderp)); });
                count = nheader_words + code_instruction_words(code->code_size);
                break;
                }
#ifdef LISP_FEATURE_IMMOBILE_CODE
            case FDEFN_WIDETAG:
                verify_range(start + 1, 2);
                pointee = fdefn_raw_referent((struct fdefn*)start);
                verify_range(&pointee, 1);
                count = CEILING(sizeof (struct fdefn)/sizeof(lispobj), 2);
                break;
#endif
        }
    }
}
static uword_t verify_space(lispobj start, lispobj* end) {
    verify_range((lispobj*)start, end-(lispobj*)start);
    return 0;
}

static void verify_dynamic_space();
static void
verify_gc(void)
{
#ifdef LISP_FEATURE_IMMOBILE_SPACE
#  ifdef __linux__
    // Try this verification if marknsweep was compiled with extra debugging.
    // But weak symbols don't work on macOS.
    extern void __attribute__((weak)) check_varyobj_pages();
    if (&check_varyobj_pages) check_varyobj_pages();
#  endif
    verify_space(IMMOBILE_SPACE_START,
                 (lispobj*)SymbolValue(IMMOBILE_FIXEDOBJ_FREE_POINTER,0));
    verify_space(IMMOBILE_VARYOBJ_SUBSPACE_START,
                 (lispobj*)SymbolValue(IMMOBILE_SPACE_FREE_POINTER,0));
#endif
    struct thread *th;
    for_each_thread(th) {
        verify_space((lispobj)th->binding_stack_start,
                     get_binding_stack_pointer(th));
    }
    verify_space(READ_ONLY_SPACE_START, read_only_space_free_pointer);
    verify_space(STATIC_SPACE_START, static_space_free_pointer);
    verify_dynamic_space();
}
/* Call 'proc' with pairs of addresses demarcating ranges in the
 * specified generation.
 * Stop if any invocation returns non-zero, and return that value */
static uword_t
walk_generation(uword_t (*proc)(lispobj*,lispobj*,uword_t),
                generation_index_t generation, uword_t extra)
{
    page_index_t i;
    int genmask = generation >= 0 ? 1 << generation : ~0;

    for (i = 0; i < last_free_page; i++) {
        if (!page_free_p(i)
            && (page_bytes_used(i) != 0)
            && ((1 << page_table[i].gen) & genmask)) {
            page_index_t last_page;

            /* This should be the start of a contiguous block */
            gc_assert(page_starts_contiguous_block_p(i));

            /* Need to find the full extent of this contiguous block in case
               objects span pages. */

            /* Now work forward until the end of this contiguous area is
               found. */
            for (last_page = i; ;last_page++)
                /* Check whether this is the last page in this contiguous
                 * block. */
                if (page_ends_contiguous_block_p(last_page, page_table[i].gen))
                    break;

            uword_t result =
                proc((lispobj*)page_address(i),
                     (lispobj*)(page_bytes_used(last_page) + page_address(last_page)),
                     extra);
            if (result) return result;

            i = last_page;
        }
    }
    return 0;
}
static void verify_generation(generation_index_t generation)
{
    walk_generation((uword_t(*)(lispobj*,lispobj*,uword_t))verify_space,
                    generation, 0);
}
/* Check that all the free space is zero filled. */
static void
verify_zero_fill(void)
{
    page_index_t page;

    for (page = 0; page < last_free_page; page++) {
        if (page_free_p(page)) {
            /* The whole page should be zero filled. */
            sword_t *start_addr = (sword_t *)page_address(page);
            sword_t i;
            for (i = 0; i < (sword_t)GENCGC_CARD_BYTES/N_WORD_BYTES; i++) {
                if (start_addr[i] != 0) {
                    lose("free page not zero at %p\n", start_addr + i);
                }
            }
        } else {
            sword_t free_bytes = GENCGC_CARD_BYTES - page_bytes_used(page);
            if (free_bytes > 0) {
                sword_t *start_addr =
                    (sword_t *)(page_address(page) + page_bytes_used(page));
                sword_t size = free_bytes / N_WORD_BYTES;
                sword_t i;
                for (i = 0; i < size; i++) {
                    if (start_addr[i] != 0) {
                        lose("free region not zero at %p\n", start_addr + i);
                    }
                }
            }
        }
    }
}

/* External entry point for verify_zero_fill */
void
gencgc_verify_zero_fill(void)
{
    /* Flush the alloc regions updating the tables. */
    gc_alloc_update_all_page_tables(1);
    SHOW("verifying zero fill");
    verify_zero_fill();
}

static void
verify_dynamic_space(void)
{
    verify_generation(-1);
    if (gencgc_enable_verify_zero_fill)
        verify_zero_fill();
}
/* Write-protect all the dynamic boxed pages in the given generation. */
static void
write_protect_generation_pages(generation_index_t generation)
{
    page_index_t start;

    gc_assert(generation < SCRATCH_GENERATION);

    for (start = 0; start < last_free_page; start++) {
        if (protect_page_p(start, generation)) {
            void *page_start;
            page_index_t last;

            /* Note the page as protected in the page tables. */
            page_table[start].write_protected = 1;

            for (last = start + 1; last < last_free_page; last++) {
                if (!protect_page_p(last, generation))
                    break;
                page_table[last].write_protected = 1;
            }

            page_start = page_address(start);

            os_protect(page_start,
                       npage_bytes(last - start),
                       OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);

            start = last;
        }
    }

    if (gencgc_verbose > 1) {
        FSHOW((stderr,
               "/write protected %d of %d pages in generation %d\n",
               count_write_protect_generation_pages(generation),
               count_generation_pages(generation),
               generation));
    }
}
#ifndef GENCGC_IS_PRECISE
static void
preserve_context_registers (void (*proc)(os_context_register_t), os_context_t *c)
{
#ifdef LISP_FEATURE_SB_THREAD
    void **ptr;
    /* On Darwin the signal context isn't a contiguous block of memory,
     * so just preserve_pointering its contents won't be sufficient.
     */
#if defined(LISP_FEATURE_DARWIN)||defined(LISP_FEATURE_WIN32)
#if defined LISP_FEATURE_X86
    proc(*os_context_register_addr(c,reg_EAX));
    proc(*os_context_register_addr(c,reg_ECX));
    proc(*os_context_register_addr(c,reg_EDX));
    proc(*os_context_register_addr(c,reg_EBX));
    proc(*os_context_register_addr(c,reg_ESI));
    proc(*os_context_register_addr(c,reg_EDI));
    proc(*os_context_pc_addr(c));
#elif defined LISP_FEATURE_X86_64
    proc(*os_context_register_addr(c,reg_RAX));
    proc(*os_context_register_addr(c,reg_RCX));
    proc(*os_context_register_addr(c,reg_RDX));
    proc(*os_context_register_addr(c,reg_RBX));
    proc(*os_context_register_addr(c,reg_RSI));
    proc(*os_context_register_addr(c,reg_RDI));
    proc(*os_context_register_addr(c,reg_R8));
    proc(*os_context_register_addr(c,reg_R9));
    proc(*os_context_register_addr(c,reg_R10));
    proc(*os_context_register_addr(c,reg_R11));
    proc(*os_context_register_addr(c,reg_R12));
    proc(*os_context_register_addr(c,reg_R13));
    proc(*os_context_register_addr(c,reg_R14));
    proc(*os_context_register_addr(c,reg_R15));
    proc(*os_context_pc_addr(c));
#else
    #error "preserve_context_registers needs to be tweaked for non-x86 Darwin"
#endif
#endif
#if !defined(LISP_FEATURE_WIN32)
    for(ptr = ((void **)(c+1))-1; ptr>=(void **)c; ptr--) {
        proc((os_context_register_t)*ptr);
    }
#endif
#endif // LISP_FEATURE_SB_THREAD
}
#endif
static void
move_pinned_pages_to_newspace()
{
    page_index_t i;

    /* scavenge() will evacuate all oldspace pages, but no newspace
     * pages. Pinned pages are precisely those pages which must not
     * be evacuated, so move them to newspace directly. */

    for (i = 0; i < last_free_page; i++) {
        if (page_table[i].dont_move &&
            /* dont_move is cleared lazily, so test the 'gen' field as well. */
            page_table[i].gen == from_space) {
            if (page_table[i].has_pins) {
                // do not move to newspace after all, this will be word-wiped
                continue;
            }
            page_table[i].gen = new_space;
            /* And since we're moving the pages wholesale, also adjust
             * the generation allocation counters. */
            int used = page_bytes_used(i);
            generations[new_space].bytes_allocated += used;
            generations[from_space].bytes_allocated -= used;
        }
    }
}
#if defined(__GNUC__) && defined(ADDRESS_SANITIZER)
#define NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
#else
#define NO_SANITIZE_ADDRESS
#endif
3166 /* Garbage collect a generation. If raise is 0 then the remains of the
3167 * generation are not raised to the next generation. */
3168 static void NO_SANITIZE_ADDRESS
3169 garbage_collect_generation(generation_index_t generation
, int raise
)
3174 gc_assert(generation
<= HIGHEST_NORMAL_GENERATION
);
3176 /* The oldest generation can't be raised. */
3177 gc_assert((generation
!= HIGHEST_NORMAL_GENERATION
) || (raise
== 0));
3179 /* Check if weak hash tables were processed in the previous GC. */
3180 gc_assert(weak_hash_tables
== NULL
);
3182 /* Initialize the weak pointer list. */
3183 weak_pointers
= NULL
;
3185 /* When a generation is not being raised it is transported to a
3186 * temporary generation (NUM_GENERATIONS), and lowered when
3187 * done. Set up this new generation. There should be no pages
3188 * allocated to it yet. */
3190 gc_assert(generations
[SCRATCH_GENERATION
].bytes_allocated
== 0);
3193 /* Set the global src and dest. generations */
3194 from_space
= generation
;
3196 new_space
= generation
+1;
3198 new_space
= SCRATCH_GENERATION
;
3200 /* Change to a new space for allocation, resetting the alloc_start_page */
3201 gc_alloc_generation
= new_space
;
3202 #ifdef LISP_FEATURE_SEGREGATED_CODE
3203 bzero(generations
[new_space
].alloc_start_page_
,
3204 sizeof generations
[new_space
].alloc_start_page_
);
3206 generations
[new_space
].alloc_start_page
= 0;
3207 generations
[new_space
].alloc_unboxed_start_page
= 0;
3208 generations
[new_space
].alloc_large_start_page
= 0;
3211 #ifdef PIN_GRANULARITY_LISPOBJ
3212 hopscotch_reset(&pinned_objects
);
3214 /* Before any pointers are preserved, the dont_move flags on the
3215 * pages need to be cleared. */
3216 /* FIXME: consider moving this bitmap into its own range of words,
3217 * out of the page table. Then we can just bzero() it.
3218 * This will also obviate the extra test at the comment
3219 * "dont_move is cleared lazily" in move_pinned_pages_to_newspace().
3221 for (i
= 0; i
< last_free_page
; i
++)
3222 if(page_table
[i
].gen
==from_space
) {
3223 page_table
[i
].dont_move
= 0;
3226 /* Un-write-protect the old-space pages. This is essential for the
3227 * promoted pages as they may contain pointers into the old-space
3228 * which need to be scavenged. It also helps avoid unnecessary page
3229 * faults as forwarding pointers are written into them. They need to
3230 * be un-protected anyway before unmapping later. */
3231 if (ENABLE_PAGE_PROTECTION
)
3232 unprotect_oldspace();
3234 /* Scavenge the stacks' conservative roots. */
3236 /* there are potentially two stacks for each thread: the main
3237 * stack, which may contain Lisp pointers, and the alternate stack.
3238 * We don't ever run Lisp code on the altstack, but it may
3239 * host a sigcontext with lisp objects in it */
3241 /* what we need to do: (1) find the stack pointer for the main
3242 * stack; scavenge it (2) find the interrupt context on the
3243 * alternate stack that might contain lisp values, and scavenge
3246 /* we assume that none of the preceding applies to the thread that
3247 * initiates GC. If you ever call GC from inside an altstack
3248 * handler, you will lose. */
3250 #ifndef GENCGC_IS_PRECISE
3251 /* And if we're saving a core, there's no point in being conservative. */
3252 if (conservative_stack
) {
3253 for_each_thread(th
) {
3255 void **esp
=(void **)-1;
3256 if (th
->state
== STATE_DEAD
)
3258 # if defined(LISP_FEATURE_SB_SAFEPOINT)
3259 /* Conservative collect_garbage is always invoked with a
3260 * foreign C call or an interrupt handler on top of every
3261 * existing thread, so the stored SP in each thread
3262 * structure is valid, no matter which thread we are looking
3263 * at. For threads that were running Lisp code, the pitstop
3264 * and edge functions maintain this value within the
3265 * interrupt or exception handler. */
3266 esp
= os_get_csp(th
);
3267 assert_on_stack(th
, esp
);
3269 /* In addition to pointers on the stack, also preserve the
3270 * return PC, the only value from the context that we need
3271 * in addition to the SP. The return PC gets saved by the
3272 * foreign call wrapper, and removed from the control stack
3273 * into a register. */
3274 preserve_pointer(th
->pc_around_foreign_call
);
3276 /* And on platforms with interrupts: scavenge ctx registers. */
3278 /* Disabled on Windows, because it does not have an explicit
3279 * stack of `interrupt_contexts'. The reported CSP has been
3280 * chosen so that the current context on the stack is
3281 * covered by the stack scan. See also set_csp_from_context(). */
3282 # ifndef LISP_FEATURE_WIN32
3283 if (th
!= arch_os_get_current_thread()) {
3284 long k
= fixnum_value(
3285 SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX
,th
));
3287 preserve_context_registers((void(*)(os_context_register_t
))preserve_pointer
,
3288 th
->interrupt_contexts
[--k
]);
3291 # elif defined(LISP_FEATURE_SB_THREAD)
3293 if(th
==arch_os_get_current_thread()) {
3294 /* Somebody is going to burn in hell for this, but casting
3295 * it in two steps shuts gcc up about strict aliasing. */
3296 esp
= (void **)((void *)&raise
);
3299 free
=fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX
,th
));
3300 for(i
=free
-1;i
>=0;i
--) {
3301 os_context_t
*c
=th
->interrupt_contexts
[i
];
3302 esp1
= (void **) *os_context_register_addr(c
,reg_SP
);
3303 if (esp1
>=(void **)th
->control_stack_start
&&
3304 esp1
<(void **)th
->control_stack_end
) {
3305 if(esp1
<esp
) esp
=esp1
;
3306 preserve_context_registers((void(*)(os_context_register_t
))preserve_pointer
,
3312 esp
= (void **)((void *)&raise
);
3314 if (!esp
|| esp
== (void*) -1)
3315 lose("garbage_collect: no SP known for thread %x (OS %x)",
3317 for (ptr
= ((void **)th
->control_stack_end
)-1; ptr
>= esp
; ptr
--) {
3318 preserve_pointer(*ptr
);
3323 /* Non-x86oid systems don't have "conservative roots" as such, but
3324 * the same mechanism is used for objects pinned for use by alien
3326 for_each_thread(th
) {
3327 lispobj pin_list
= SymbolTlValue(PINNED_OBJECTS
,th
);
3328 while (pin_list
!= NIL
) {
3329 preserve_pointer((void*)(CONS(pin_list
)->car
));
3330 pin_list
= CONS(pin_list
)->cdr
;
3336 if (gencgc_verbose
> 1) {
3337 sword_t num_dont_move_pages
= count_dont_move_pages();
3339 "/non-movable pages due to conservative pointers = %ld (%lu bytes)\n",
3340 num_dont_move_pages
,
3341 npage_bytes(num_dont_move_pages
));
3345 /* Now that all of the pinned (dont_move) pages are known, and
3346 * before we start to scavenge (and thus relocate) objects,
3347 * relocate the pinned pages to newspace, so that the scavenger
3348 * will not attempt to relocate their contents. */
3349 move_pinned_pages_to_newspace();
3351 /* Scavenge all the rest of the roots. */
3353 #ifdef GENCGC_IS_PRECISE
3355 * If not x86, we need to scavenge the interrupt context(s) and the
3360 for_each_thread(th
) {
3361 scavenge_interrupt_contexts(th
);
3362 scavenge_control_stack(th
);
3365 # ifdef LISP_FEATURE_SB_SAFEPOINT
3366 /* In this case, scrub all stacks right here from the GCing thread
3367 * instead of doing what the comment below says. Suboptimal, but
3370 scrub_thread_control_stack(th
);
3372 /* Scrub the unscavenged control stack space, so that we can't run
3373 * into any stale pointers in a later GC (this is done by the
3374 * stop-for-gc handler in the other threads). */
3375 scrub_control_stack();
3380 /* Scavenge the Lisp functions of the interrupt handlers, taking
3381 * care to avoid SIG_DFL and SIG_IGN. */
3382 for (i
= 0; i
< NSIG
; i
++) {
3383 union interrupt_handler handler
= interrupt_handlers
[i
];
3384 if (!ARE_SAME_HANDLER(handler
.c
, SIG_IGN
) &&
3385 !ARE_SAME_HANDLER(handler
.c
, SIG_DFL
)) {
3386 scavenge((lispobj
*)(interrupt_handlers
+ i
), 1);
3389 /* Scavenge the binding stacks. */
3392 for_each_thread(th
) {
3393 scav_binding_stack((lispobj
*)th
->binding_stack_start
,
3394 (lispobj
*)get_binding_stack_pointer(th
));
3395 #ifdef LISP_FEATURE_SB_THREAD
3396 /* do the tls as well */
3398 len
=(SymbolValue(FREE_TLS_INDEX
,0) >> WORD_SHIFT
) -
3399 (sizeof (struct thread
))/(sizeof (lispobj
));
3400 scavenge((lispobj
*) (th
+1),len
);
3405 /* Scavenge static space. */
3406 if (gencgc_verbose
> 1) {
3408 "/scavenge static space: %d bytes\n",
3409 (uword_t
)static_space_free_pointer
- STATIC_SPACE_START
));
3411 heap_scavenge((lispobj
*)STATIC_SPACE_START
, static_space_free_pointer
);
3413 /* All generations but the generation being GCed need to be
3414 * scavenged. The new_space generation needs special handling as
3415 * objects may be moved in - it is handled separately below. */
3416 #ifdef LISP_FEATURE_IMMOBILE_SPACE
3417 scavenge_immobile_roots(generation
+1, SCRATCH_GENERATION
);
3419 scavenge_generations(generation
+1, PSEUDO_STATIC_GENERATION
);
3421 #ifdef LISP_FEATURE_SB_TRACEROOT
3422 if (gc_object_watcher
) scavenge(&gc_object_watcher
, 1);
3424 scavenge_pinned_ranges();
3425 /* The Lisp start function is stored in the core header, not a static
3426 * symbol. It is passed to gc_and_save() in this C variable */
3427 if (lisp_init_function
) scavenge(&lisp_init_function
, 1);
3429 /* Finally scavenge the new_space generation. Keep going until no
3430 * more objects are moved into the new generation */
3431 scavenge_newspace_generation(new_space
);
3433 /* FIXME: I tried reenabling this check when debugging unrelated
3434 * GC weirdness ca. sbcl-0.6.12.45, and it failed immediately.
3435 * Since the current GC code seems to work well, I'm guessing that
3436 * this debugging code is just stale, but I haven't tried to
3437 * figure it out. It should be figured out and then either made to
3438 * work or just deleted. */
3440 #define RESCAN_CHECK 0
3442 /* As a check re-scavenge the newspace once; no new objects should
3445 os_vm_size_t old_bytes_allocated
= bytes_allocated
;
3446 os_vm_size_t bytes_allocated
;
3448 /* Start with a full scavenge. */
3449 scavenge_newspace_generation_one_scan(new_space
);
3451 /* Flush the current regions, updating the tables. */
3452 gc_alloc_update_all_page_tables(1);
3454 bytes_allocated
= bytes_allocated
- old_bytes_allocated
;
3456 if (bytes_allocated
!= 0) {
3457 lose("Rescan of new_space allocated %d more bytes.\n",
    scan_weak_hash_tables();
    scan_weak_pointers();
    wipe_nonpinned_words();
#ifdef LISP_FEATURE_IMMOBILE_SPACE
    // Do this last, because until wipe_nonpinned_words() happens,
    // not all page table entries have the 'gen' value updated,
    // which we need to correctly find all old->young pointers.
    sweep_immobile_space(raise);
#endif

    /* Flush the current regions, updating the tables. */
    gc_alloc_update_all_page_tables(0);
#ifdef PIN_GRANULARITY_LISPOBJ
    hopscotch_log_stats(&pinned_objects, "pins");
#endif
    /* Free the pages in oldspace, but not those marked dont_move. */
    free_oldspace();

    /* If the GC is not raising the age then lower the generation back
     * to its normal generation number */
    if (!raise) {
        for (i = 0; i < last_free_page; i++)
            if ((page_bytes_used(i) != 0)
                && (page_table[i].gen == SCRATCH_GENERATION))
                page_table[i].gen = generation;
        gc_assert(generations[generation].bytes_allocated == 0);
        generations[generation].bytes_allocated =
            generations[SCRATCH_GENERATION].bytes_allocated;
        generations[SCRATCH_GENERATION].bytes_allocated = 0;
    }

    /* Reset the alloc_start_page for generation. */
#ifdef LISP_FEATURE_SEGREGATED_CODE
    bzero(generations[generation].alloc_start_page_,
          sizeof generations[generation].alloc_start_page_);
#else
    generations[generation].alloc_start_page = 0;
    generations[generation].alloc_unboxed_start_page = 0;
    generations[generation].alloc_large_start_page = 0;
#endif

    if (generation >= verify_gens) {
        if (gencgc_verbose) {
            SHOW("verifying");
        }
        verify_gc();
    }

    /* Set the new gc trigger for the GCed generation. */
    generations[generation].gc_trigger =
        generations[generation].bytes_allocated
        + generations[generation].bytes_consed_between_gc;

    if (raise)
        generations[generation].num_gc = 0;
    else
        ++generations[generation].num_gc;
}
/* Update last_free_page, then SymbolValue(ALLOCATION_POINTER). */
sword_t
update_dynamic_space_free_pointer(void)
{
    page_index_t last_page = -1, i;

    for (i = 0; i < last_free_page; i++)
        if (!page_free_p(i) && (page_bytes_used(i) != 0))
            last_page = i;

    last_free_page = last_page+1;

    set_alloc_pointer((lispobj)(page_address(last_free_page)));
    return 0; /* dummy value: return something ... */
}
static void
remap_page_range (page_index_t from, page_index_t to)
{
    /* There's a mysterious Solaris/x86 problem with using mmap
     * tricks for memory zeroing. See sbcl-devel thread
     * "Re: patch: standalone executable redux".
     */
#if defined(LISP_FEATURE_SUNOS)
    zero_and_mark_pages(from, to);
#else
    const page_index_t
            release_granularity = gencgc_release_granularity/GENCGC_CARD_BYTES,
                   release_mask = release_granularity-1,
                            end = to+1,
                   aligned_from = (from+release_mask)&~release_mask,
                    aligned_end = (end&~release_mask);

    if (aligned_from < aligned_end) {
        zero_pages_with_mmap(aligned_from, aligned_end-1);
        if (aligned_from != from)
            zero_and_mark_pages(from, aligned_from-1);
        if (aligned_end != end)
            zero_and_mark_pages(aligned_end, end-1);
    } else {
        zero_and_mark_pages(from, to);
    }
#endif
}
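
/* Worked example (illustrative; the page counts are assumed, not taken from
 * the source): if gencgc_release_granularity corresponds to 4 GC cards,
 * release_mask is 3.  A call remap_page_range(5, 13) then computes end = 14,
 * aligned_from = 8 and aligned_end = 12, so cards 8..11 are handed back to
 * the OS via zero_pages_with_mmap() while the unaligned fringes 5..7 and
 * 12..13 are zeroed in place by zero_and_mark_pages(). */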
static void
remap_free_pages (page_index_t from, page_index_t to)
{
    page_index_t first_page, last_page;

    for (first_page = from; first_page <= to; first_page++) {
        if (!page_free_p(first_page) || !page_need_to_zero(first_page))
            continue;

        last_page = first_page + 1;
        while (page_free_p(last_page) &&
               (last_page <= to) &&
               (page_need_to_zero(last_page)))
            last_page++;

        remap_page_range(first_page, last_page-1);

        first_page = last_page;
    }
}

generation_index_t small_generation_limit = 1;
/* GC all generations newer than last_gen, raising the objects in each
 * to the next older generation - we finish when all generations below
 * last_gen are empty.  Then if last_gen is due for a GC, or if
 * last_gen==NUM_GENERATIONS (the scratch generation?  eh?) we GC that
 * too.  The valid range for last_gen is: 0,1,...,NUM_GENERATIONS.
 *
 * We stop collecting at gencgc_oldest_gen_to_gc, even if this is less than
 * last_gen (oh, and note that by default it is NUM_GENERATIONS-1) */
void
collect_garbage(generation_index_t last_gen)
{
    generation_index_t gen = 0, i;
    int raise, more = 0;
    int gen_to_wp;
    /* The largest value of last_free_page seen since the time
     * remap_free_pages was called. */
    static page_index_t high_water_mark = 0;

    FSHOW((stderr, "/entering collect_garbage(%d)\n", last_gen));
    log_generation_stats(gc_logfile, "=== GC Start ===");

    gc_active_p = 1;

    if (last_gen > HIGHEST_NORMAL_GENERATION+1) {
        FSHOW((stderr,
               "/collect_garbage: last_gen = %d, doing a level 0 GC\n",
               last_gen));
        last_gen = 0;
    }

    /* Flush the alloc regions updating the tables. */
    gc_alloc_update_all_page_tables(1);

    /* Verify the new objects created by Lisp code. */
    if (pre_verify_gen_0) {
        FSHOW((stderr, "pre-checking generation 0\n"));
        verify_generation(0);
    }

    if (gencgc_verbose > 1)
        print_generation_stats();

#ifdef LISP_FEATURE_IMMOBILE_SPACE
    /* Immobile space generation bits are lazily updated for gen0
       (not touched on every object allocation) so do it now */
    update_immobile_nursery_bits();
#endif

    do {
        /* Collect the generation. */

        if (more || (gen >= gencgc_oldest_gen_to_gc)) {
            /* Never raise the oldest generation. Never raise the extra generation
             * collected due to more-flag. */
            raise = 0;
            more = 0;
        } else {
            raise =
                (gen < last_gen)
                || (generations[gen].num_gc >= generations[gen].number_of_gcs_before_promotion);
            /* If we would not normally raise this one, but we're
             * running low on space in comparison to the object-sizes
             * we've been seeing, raise it and collect the next one
             * too. */
            if (!raise && gen == last_gen) {
                more = (2*large_allocation) >= (dynamic_space_size - bytes_allocated);
                raise = more;
            }
        }

        if (gencgc_verbose > 1) {
            FSHOW((stderr,
                   "starting GC of generation %d with raise=%d alloc=%d trig=%d GCs=%d\n",
                   gen,
                   raise,
                   generations[gen].bytes_allocated,
                   generations[gen].gc_trigger,
                   generations[gen].num_gc));
        }

        /* If an older generation is being filled, then update its
         * memory age. */
        if (raise == 1) {
            generations[gen+1].cum_sum_bytes_allocated +=
                generations[gen+1].bytes_allocated;
        }

        garbage_collect_generation(gen, raise);

        /* Reset the memory age cum_sum. */
        generations[gen].cum_sum_bytes_allocated = 0;

        if (gencgc_verbose > 1) {
            FSHOW((stderr, "GC of generation %d finished:\n", gen));
            print_generation_stats();
        }

        gen++;
    } while ((gen <= gencgc_oldest_gen_to_gc)
             && ((gen < last_gen)
                 || more
                 || (raise
                     && (generations[gen].bytes_allocated
                         > generations[gen].gc_trigger)
                     && (generation_average_age(gen)
                         > generations[gen].minimum_age_before_gc))));
    /* Now if gen-1 was raised all generations before gen are empty.
     * If it wasn't raised then all generations before gen-1 are empty.
     *
     * Now objects within this gen's pages cannot point to younger
     * generations unless they are written to. This can be exploited
     * by write-protecting the pages of gen; then when younger
     * generations are GCed only the pages which have been written
     * need scanning. */
    if (raise)
        gen_to_wp = gen;
    else
        gen_to_wp = gen - 1;

    /* There's not much point in WPing pages in generation 0 as it is
     * never scavenged (except promoted pages). */
    if ((gen_to_wp > 0) && ENABLE_PAGE_PROTECTION) {
        /* Check that they are all empty. */
        for (i = 0; i < gen_to_wp; i++) {
            if (generations[i].bytes_allocated)
                lose("trying to write-protect gen. %d when gen. %d nonempty\n",
                     gen_to_wp, i);
        }
        write_protect_generation_pages(gen_to_wp);
    }
#ifdef LISP_FEATURE_IMMOBILE_SPACE
    write_protect_immobile_space();
#endif
    /* Set gc_alloc() back to generation 0. The current regions should
     * be flushed after the above GCs. */
    gc_assert(boxed_region.free_pointer == boxed_region.start_addr);
    gc_alloc_generation = 0;

    /* Save the high-water mark before updating last_free_page */
    if (last_free_page > high_water_mark)
        high_water_mark = last_free_page;

    update_dynamic_space_free_pointer();
    /* Update auto_gc_trigger. Make sure we trigger the next GC before
     * running out of heap! */
    if (bytes_consed_between_gcs <= (dynamic_space_size - bytes_allocated))
        auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
    else
        auto_gc_trigger = bytes_allocated + (dynamic_space_size - bytes_allocated)/2;
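    /* Illustrative arithmetic (numbers assumed, not from the source): with a
     * 1 GiB dynamic space, 100 MiB allocated and bytes_consed_between_gcs of
     * 50 MiB, the trigger becomes 150 MiB.  If only 30 MiB of headroom were
     * left, the second branch would instead set the trigger halfway into
     * that headroom, i.e. bytes_allocated + 15 MiB. */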
    if(gencgc_verbose) {
#define MESSAGE ("Next gc when %"OS_VM_SIZE_FMT" bytes have been consed\n")
        char buf[64];
        int n;
        // fprintf() can - and does - cause deadlock here.
        // snprintf() seems to work fine.
        n = snprintf(buf, sizeof buf, MESSAGE, auto_gc_trigger);
        ignore_value(write(2, buf, n));
#undef MESSAGE
    }
    /* If we did a big GC (arbitrarily defined as gen > 1), release memory
     * back to the OS.
     */
    if (gen > small_generation_limit) {
        if (last_free_page > high_water_mark)
            high_water_mark = last_free_page;
        remap_free_pages(0, high_water_mark);
        high_water_mark = 0;
    }

    gc_active_p = 0;
    large_allocation = 0;

#ifdef LISP_FEATURE_SB_TRACEROOT
    if (gc_object_watcher) {
        extern void gc_prove_liveness(void(*)(), lispobj, int, uword_t*, int);
        gc_prove_liveness(preserve_context_registers,
                          gc_object_watcher,
                          gc_n_stack_pins, pinned_objects.keys,
                          gc_traceroot_criterion);
    }
#endif

    log_generation_stats(gc_logfile, "=== GC End ===");
    SHOW("returning from collect_garbage");
}
/* Initialization of gencgc metadata is split into three steps:
 * 1. gc_init() - allocation of a fixed-address space via mmap(),
 *    failing which there's no reason to go on. (safepoint only)
 * 2. gc_allocate_ptes() - page table entries
 * 3. gencgc_pickup_dynamic() - calculation of scan start offsets
 * Steps (2) and (3) are combined in self-build because there is
 * no PAGE_TABLE_CORE_ENTRY_TYPE_CODE core entry. */
void
gc_init(void)
{
#if defined(LISP_FEATURE_SB_SAFEPOINT)
    alloc_gc_page();
#endif
}
void gc_allocate_ptes()
{
    page_index_t i;

    /* Compute the number of pages needed for the dynamic space.
     * Dynamic space size should be aligned on page size. */
    page_table_pages = dynamic_space_size/GENCGC_CARD_BYTES;
    gc_assert(dynamic_space_size == npage_bytes(page_table_pages));

    /* Default nursery size to 5% of the total dynamic space size,
     * min 1Mb. */
    bytes_consed_between_gcs = dynamic_space_size/(os_vm_size_t)20;
    if (bytes_consed_between_gcs < (1024*1024))
        bytes_consed_between_gcs = 1024*1024;

    /* The page_table must be allocated using "calloc" to initialize
     * the page structures correctly. There used to be a separate
     * initialization loop (now commented out; see below) but that was
     * unnecessary and did hurt startup time. */
    page_table = calloc(page_table_pages, sizeof(struct page));
    gc_assert(page_table);

#ifdef PIN_GRANULARITY_LISPOBJ
    hopscotch_create(&pinned_objects, HOPSCOTCH_HASH_FUN_DEFAULT, 0 /* hashset */,
                     32 /* logical bin count */, 0 /* default range */);
#endif

    scavtab[WEAK_POINTER_WIDETAG] = scav_weak_pointer;

    /* The page structures are initialized implicitly when page_table
     * is allocated with "calloc" above. Formerly we had the following
     * explicit initialization here (comments converted to C99 style
     * for readability as C's block comments don't nest):
     *
     * // Initialize each page structure.
     * for (i = 0; i < page_table_pages; i++) {
     *     // Initialize all pages as free.
     *     page_table[i].allocated = FREE_PAGE_FLAG;
     *     page_table[i].bytes_used = 0;
     *
     *     // Pages are not write-protected at startup.
     *     page_table[i].write_protected = 0;
     * }
     *
     * Without this loop the image starts up much faster when dynamic
     * space is large -- which it is on 64-bit platforms already by
     * default -- and when "calloc" for large arrays is implemented
     * using copy-on-write of a page of zeroes -- which it is at least
     * on Linux. In this case the pages that page_table_pages is stored
     * in are mapped and cleared not before the corresponding part of
     * dynamic space is used. For example, this saves clearing 16 MB of
     * memory at startup if the page size is 4 KB and the size of
     * dynamic space is 4 GB.
     * FREE_PAGE_FLAG must be 0 for this to work correctly which is
     * asserted below: */
    {
        /* Compile time assertion: If triggered, declares an array
         * of dimension -1 forcing a syntax error. The intent of the
         * assignment is to avoid an "unused variable" warning. */
        char assert_free_page_flag_0[(FREE_PAGE_FLAG) ? -1 : 1];
        assert_free_page_flag_0[0] = assert_free_page_flag_0[0];
    }
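    /* (Aside, not in the original source: a C11 compiler could express the
     * same compile-time check as
     *     _Static_assert(FREE_PAGE_FLAG == 0, "FREE_PAGE_FLAG must be 0");
     * the array-of-negative-size trick above predates that facility.) */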
    bytes_allocated = 0;

    /* Initialize the generations. */
    for (i = 0; i < NUM_GENERATIONS; i++) {
        generations[i].alloc_start_page = 0;
        generations[i].alloc_unboxed_start_page = 0;
        generations[i].alloc_large_start_page = 0;
        generations[i].bytes_allocated = 0;
        generations[i].gc_trigger = 2000000;
        generations[i].num_gc = 0;
        generations[i].cum_sum_bytes_allocated = 0;
        /* the tune-able parameters */
        generations[i].bytes_consed_between_gc
            = bytes_consed_between_gcs/(os_vm_size_t)HIGHEST_NORMAL_GENERATION;
        generations[i].number_of_gcs_before_promotion = 1;
        generations[i].minimum_age_before_gc = 0.75;
    }

    /* Initialize gc_alloc. */
    gc_alloc_generation = 0;
    gc_set_region_empty(&boxed_region);
    gc_set_region_empty(&unboxed_region);
#ifdef LISP_FEATURE_SEGREGATED_CODE
    gc_set_region_empty(&code_region);
#endif
}
/* Pick up the dynamic space from after a core load.
 *
 * The ALLOCATION_POINTER points to the end of the dynamic space.
 */
static void
gencgc_pickup_dynamic(void)
{
    page_index_t page = 0;
    char *alloc_ptr = (char *)get_alloc_pointer();
    lispobj *prev = (lispobj *)page_address(page);
    generation_index_t gen = PSEUDO_STATIC_GENERATION;

    bytes_allocated = 0;

    do {
        lispobj *first, *ptr = (lispobj *)page_address(page);

        if (!gencgc_partial_pickup || !page_free_p(page)) {
            page_bytes_t bytes_used = GENCGC_CARD_BYTES;
            /* It is possible, though rare, for the saved page table
             * to contain free pages below alloc_ptr. */
            page_table[page].gen = gen;
            if (gencgc_partial_pickup)
                bytes_used = page_bytes_used(page);
            else
                set_page_bytes_used(page, GENCGC_CARD_BYTES);
            page_table[page].large_object = 0;
            page_table[page].write_protected = 0;
            page_table[page].write_protected_cleared = 0;
            page_table[page].dont_move = 0;
            set_page_need_to_zero(page, 1);

            bytes_allocated += bytes_used;
        }

        if (!gencgc_partial_pickup) {
#ifdef LISP_FEATURE_SEGREGATED_CODE
            // Make the most general assumption: any page *might* contain code.
            page_table[page].allocated = CODE_PAGE_FLAG;
#else
            page_table[page].allocated = BOXED_PAGE_FLAG;
#endif
            first = gc_search_space3(ptr, prev, (ptr+2));
            if (first == ptr)
                prev = ptr;
            set_page_scan_start_offset(page, page_address(page) - (char*)prev);
        }
        page++;
    } while (page_address(page) < alloc_ptr);

    last_free_page = page;

    generations[gen].bytes_allocated = bytes_allocated;

    gc_alloc_update_all_page_tables(1);
    if (ENABLE_PAGE_PROTECTION)
        write_protect_generation_pages(gen);
}

void
gc_initialize_pointers(void)
{
    /* !page_table_pages happens once only in self-build and not again */
    if (!page_table_pages)
        gc_allocate_ptes();
    gencgc_pickup_dynamic();
}
/* alloc(..) is the external interface for memory allocation. It
 * allocates to generation 0. It is not called from within the garbage
 * collector as it is only external uses that need the check for heap
 * size (GC trigger) and to disable the interrupts (interrupts are
 * always disabled during a GC).
 *
 * The vops that call alloc(..) assume that the returned space is zero-filled.
 * (E.g. the most significant word of a 2-word bignum in MOVE-FROM-UNSIGNED.)
 *
 * The check for a GC trigger is only performed when the current
 * region is full, so in most cases it's not needed. */

static inline lispobj *
general_alloc_internal(sword_t nbytes, int page_type_flag, struct alloc_region *region,
                       struct thread *thread)
{
#ifndef LISP_FEATURE_WIN32
    lispobj alloc_signal;
#endif
    void *new_obj;
    void *new_free_pointer;
    os_vm_size_t trigger_bytes = 0;

    gc_assert(nbytes > 0);

    /* Check for alignment allocation problems. */
    gc_assert((((uword_t)region->free_pointer & LOWTAG_MASK) == 0)
              && ((nbytes & LOWTAG_MASK) == 0));

#if !(defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD))
    /* Must be inside a PA section. */
    gc_assert(get_pseudo_atomic_atomic(thread));
#endif

    if ((os_vm_size_t) nbytes > large_allocation)
        large_allocation = nbytes;

    /* maybe we can do this quickly ... */
    new_free_pointer = (char*)region->free_pointer + nbytes;
    if (new_free_pointer <= region->end_addr) {
        new_obj = (void*)(region->free_pointer);
        region->free_pointer = new_free_pointer;
        return(new_obj);        /* yup */
    }

    /* We don't want to count nbytes against auto_gc_trigger unless we
     * have to: it speeds up the tenuring of objects and slows down
     * allocation. However, unless we do so when allocating _very_
     * large objects we are in danger of exhausting the heap without
     * running sufficient GCs.
     */
    if ((os_vm_size_t) nbytes >= bytes_consed_between_gcs)
        trigger_bytes = nbytes;

    /* we have to go the long way around, it seems. Check whether we
     * should GC in the near future
     */
    if (auto_gc_trigger && (bytes_allocated+trigger_bytes > auto_gc_trigger)) {
        /* Don't flood the system with interrupts if the need to gc is
         * already noted. This can happen for example when SUB-GC
         * allocates or after a gc triggered in a WITHOUT-GCING. */
        if (SymbolValue(GC_PENDING,thread) == NIL) {
            /* set things up so that GC happens when we finish the PA
             * section */
            SetSymbolValue(GC_PENDING,T,thread);
            if (SymbolValue(GC_INHIBIT,thread) == NIL) {
#ifdef LISP_FEATURE_SB_SAFEPOINT
                thread_register_gc_trigger();
#else
                set_pseudo_atomic_interrupted(thread);
#ifdef GENCGC_IS_PRECISE
                /* PPC calls alloc() from a trap;
                 * look up the most recent context if it's from a trap. */
                {
                    os_context_t *context =
                        thread->interrupt_data->allocation_trap_context;
                    maybe_save_gc_mask_and_block_deferrables
                        (context ? os_context_sigmask_addr(context) : NULL);
                }
#else
                maybe_save_gc_mask_and_block_deferrables(NULL);
#endif
#endif
            }
        }
    }
    new_obj = gc_alloc_with_region(nbytes, page_type_flag, region, 0);

#ifndef LISP_FEATURE_WIN32
    /* for sb-prof, and not supported on Windows yet */
    alloc_signal = SymbolValue(ALLOC_SIGNAL,thread);
    if ((alloc_signal & FIXNUM_TAG_MASK) == 0) {
        if ((sword_t) alloc_signal <= 0) {
            SetSymbolValue(ALLOC_SIGNAL, T, thread);
            raise(SIGPROF);
        } else {
            SetSymbolValue(ALLOC_SIGNAL,
                           alloc_signal - (1 << N_FIXNUM_TAG_BITS),
                           thread);
        }
    }
#endif

    return (new_obj);
}
lispobj *
general_alloc(sword_t nbytes, int page_type_flag)
{
    struct thread *thread = arch_os_get_current_thread();
    /* Select correct region, and call general_alloc_internal with it.
     * For other than boxed allocation we must lock first, since the
     * region is shared. */
#ifdef LISP_FEATURE_SEGREGATED_CODE
    if (page_type_flag == BOXED_PAGE_FLAG) {
#else
    if (BOXED_PAGE_FLAG & page_type_flag) {
#endif
#ifdef LISP_FEATURE_SB_THREAD
        struct alloc_region *region = (thread ? &(thread->alloc_region) : &boxed_region);
#else
        struct alloc_region *region = &boxed_region;
#endif
        return general_alloc_internal(nbytes, page_type_flag, region, thread);
#ifdef LISP_FEATURE_SEGREGATED_CODE
    } else if (page_type_flag == UNBOXED_PAGE_FLAG ||
               page_type_flag == CODE_PAGE_FLAG) {
        struct alloc_region *region =
            page_type_flag == CODE_PAGE_FLAG ? &code_region : &unboxed_region;
#else
    } else if (UNBOXED_PAGE_FLAG == page_type_flag) {
        struct alloc_region *region = &unboxed_region;
#endif
        lispobj *obj;
        int result;
        result = thread_mutex_lock(&allocation_lock);
        gc_assert(!result);
        obj = general_alloc_internal(nbytes, page_type_flag, region, thread);
        result = thread_mutex_unlock(&allocation_lock);
        gc_assert(!result);
        return obj;
    } else {
        lose("bad page type flag: %d", page_type_flag);
    }
}
lispobj AMD64_SYSV_ABI *
alloc(sword_t nbytes)
{
#ifdef LISP_FEATURE_SB_SAFEPOINT_STRICTLY
    struct thread *self = arch_os_get_current_thread();
    int was_pseudo_atomic = get_pseudo_atomic_atomic(self);
    if (!was_pseudo_atomic)
        set_pseudo_atomic_atomic(self);
#else
    gc_assert(get_pseudo_atomic_atomic(arch_os_get_current_thread()));
#endif

    lispobj *result = general_alloc(nbytes, BOXED_PAGE_FLAG);

#ifdef LISP_FEATURE_SB_SAFEPOINT_STRICTLY
    if (!was_pseudo_atomic)
        clear_pseudo_atomic_atomic(self);
#endif

    return result;
}
/*
 * shared support for the OS-dependent signal handlers which
 * catch GENCGC-related write-protect violations
 */

void unhandled_sigmemoryfault(void* addr);

/* Depending on which OS we're running under, different signals might
 * be raised for a violation of write protection in the heap. This
 * function factors out the common generational GC magic which needs
 * to be invoked in this case, and should be called from whatever signal
 * handler is appropriate for the OS we're running under.
 *
 * Return true if this signal is a normal generational GC thing that
 * we were able to handle, or false if it was abnormal and control
 * should fall through to the general SIGSEGV/SIGBUS/whatever logic.
 *
 * We have two control flags for this: one causes us to ignore faults
 * on unprotected pages completely, and the second complains to stderr
 * but allows us to continue without losing.
 */
extern boolean ignore_memoryfaults_on_unprotected_pages;
boolean ignore_memoryfaults_on_unprotected_pages = 0;

extern boolean continue_after_memoryfault_on_unprotected_pages;
boolean continue_after_memoryfault_on_unprotected_pages = 0;
int
gencgc_handle_wp_violation(void* fault_addr)
{
    page_index_t page_index = find_page_index(fault_addr);

#if QSHOW_SIGNALS
    FSHOW((stderr,
           "heap WP violation? fault_addr=%p, page_index=%"PAGE_INDEX_FMT"\n",
           fault_addr, page_index));
#endif

    /* Check whether the fault is within the dynamic space. */
    if (page_index == (-1)) {
#ifdef LISP_FEATURE_IMMOBILE_SPACE
        extern int immobile_space_handle_wp_violation(void*);
        if (immobile_space_handle_wp_violation(fault_addr))
            return 1;
#endif

        /* It can be helpful to be able to put a breakpoint on this
         * case to help diagnose low-level problems. */
        unhandled_sigmemoryfault(fault_addr);

        /* not within the dynamic space -- not our responsibility */
        return 0;

    } else {
        int ret;
        ret = thread_mutex_lock(&free_pages_lock);
        gc_assert(ret == 0);
        if (page_table[page_index].write_protected) {
            /* Unprotect the page. */
            os_protect(page_address(page_index), GENCGC_CARD_BYTES, OS_VM_PROT_ALL);
            page_table[page_index].write_protected_cleared = 1;
            page_table[page_index].write_protected = 0;
        } else if (!ignore_memoryfaults_on_unprotected_pages) {
            /* The only acceptable reason for this signal on a heap
             * access is that GENCGC write-protected the page.
             * However, if two CPUs hit a wp page near-simultaneously,
             * we had better not have the second one lose here if it
             * does this test after the first one has already set wp=0
             */
            if(page_table[page_index].write_protected_cleared != 1) {
                void lisp_backtrace(int frames);
                lisp_backtrace(10);
                fprintf(stderr,
                        "Fault @ %p, page %"PAGE_INDEX_FMT" not marked as write-protected:\n"
                        "  boxed_region.first_page: %"PAGE_INDEX_FMT","
                        "  boxed_region.last_page %"PAGE_INDEX_FMT"\n"
                        "  page.scan_start_offset: %"OS_VM_SIZE_FMT"\n"
                        "  page.bytes_used: %u\n"
                        "  page.allocated: %d\n"
                        "  page.write_protected: %d\n"
                        "  page.write_protected_cleared: %d\n"
                        "  page.generation: %d\n",
                        fault_addr,
                        page_index,
                        boxed_region.first_page,
                        boxed_region.last_page,
                        page_scan_start_offset(page_index),
                        page_bytes_used(page_index),
                        page_table[page_index].allocated,
                        page_table[page_index].write_protected,
                        page_table[page_index].write_protected_cleared,
                        page_table[page_index].gen);
                if (!continue_after_memoryfault_on_unprotected_pages)
                    lose("Feh.\n");
            }
        }
        ret = thread_mutex_unlock(&free_pages_lock);
        gc_assert(ret == 0);
        /* Don't worry, we can handle it. */
        return 1;
    }
}
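
/* Minimal sketch (not part of the runtime; handler name and wiring are
 * hypothetical) of how an OS-specific SIGSEGV/SIGBUS handler is expected
 * to use gencgc_handle_wp_violation(): try the write-barrier case first,
 * and only fall through to the general fault machinery if it declines. */
#if 0
static void example_memory_fault_handler(int sig, siginfo_t *info, void *context)
{
    if (gencgc_handle_wp_violation(info->si_addr))
        return;   /* a write-protected page was unprotected; just resume */
    /* Not a GC write barrier: hand off to the general SIGSEGV logic
     * (the OS-specific code in the real runtime does this). */
}
#endif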
/* This is to be called when we catch a SIGSEGV/SIGBUS, determine that
 * it's not just a case of the program hitting the write barrier, and
 * are about to let Lisp deal with it. It's basically just a
 * convenient place to set a gdb breakpoint. */
void
unhandled_sigmemoryfault(void *addr)
{}

static void
update_thread_page_tables(struct thread *th)
{
    gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->alloc_region);
#if defined(LISP_FEATURE_SB_SAFEPOINT_STRICTLY) && !defined(LISP_FEATURE_WIN32)
    gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->sprof_alloc_region);
#endif
}
/* GC is single-threaded and all memory allocations during a
   collection happen in the GC thread, so it is sufficient to update
   all the page tables once at the beginning of a collection and
   update only page tables of the GC thread during the collection. */
void gc_alloc_update_all_page_tables(int for_all_threads)
{
    /* Flush the alloc regions updating the tables. */
    struct thread *th;
    if (for_all_threads) {
        for_each_thread(th) {
            update_thread_page_tables(th);
        }
    }
    else {
        th = arch_os_get_current_thread();
        if (th) {
            update_thread_page_tables(th);
        }
    }
#ifdef LISP_FEATURE_SEGREGATED_CODE
    gc_alloc_update_page_tables(CODE_PAGE_FLAG, &code_region);
#endif
    gc_alloc_update_page_tables(UNBOXED_PAGE_FLAG, &unboxed_region);
    gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &boxed_region);
}
void
gc_set_region_empty(struct alloc_region *region)
{
    region->first_page = 0;
    region->last_page = -1;
    region->start_addr = page_address(0);
    region->free_pointer = page_address(0);
    region->end_addr = page_address(0);
}
static void
zero_all_free_pages()
{
    page_index_t i;

    for (i = 0; i < last_free_page; i++) {
        if (page_free_p(i)) {
#ifdef READ_PROTECT_FREE_PAGES
            os_protect(page_address(i),
                       GENCGC_CARD_BYTES,
                       OS_VM_PROT_ALL);
#endif
            zero_pages(i, i);
        }
    }
}
/* Things to do before doing a final GC before saving a core (without
 * being able to roll back).
 *
 * + Pages in large_object pages aren't moved by the GC, so we need to
 *   unset that flag from all pages.
 * + The pseudo-static generation isn't normally collected, but it seems
 *   reasonable to collect it at least when saving a core. So move the
 *   pages to a normal generation.
 */
static void
prepare_for_final_gc ()
{
    page_index_t i;

#ifdef LISP_FEATURE_IMMOBILE_SPACE
    extern void prepare_immobile_space_for_final_gc();
    prepare_immobile_space_for_final_gc ();
#endif
    for (i = 0; i < last_free_page; i++) {
        page_table[i].large_object = 0;
        if (page_table[i].gen == PSEUDO_STATIC_GENERATION) {
            int used = page_bytes_used(i);
            page_table[i].gen = HIGHEST_NORMAL_GENERATION;
            generations[PSEUDO_STATIC_GENERATION].bytes_allocated -= used;
            generations[HIGHEST_NORMAL_GENERATION].bytes_allocated += used;
        }
    }
}
/* Set this switch to 1 for coalescing of strings dumped to fasl,
 * or 2 for coalescing of those,
 * plus literal strings in code compiled to memory. */
char gc_coalesce_string_literals = 0;
/* Do a non-conservative GC, and then save a core with the initial
 * function being set to the value of 'lisp_init_function' */
void
gc_and_save(char *filename, boolean prepend_runtime,
            boolean save_runtime_options, boolean compressed,
            int compression_level, int application_type)
{
    FILE *file;
    void *runtime_bytes = NULL;
    size_t runtime_size;
    extern void coalesce_similar_objects();
    extern struct lisp_startup_options lisp_startup_options;
    boolean verbose = !lisp_startup_options.noinform;

    file = prepare_to_save(filename, prepend_runtime, &runtime_bytes,
                           &runtime_size);
    if (file == NULL)
        return;

    conservative_stack = 0;

    /* The filename might come from Lisp, and be moved by the now
     * non-conservative GC. */
    filename = strdup(filename);

    /* Collect twice: once into relatively high memory, and then back
     * into low memory. This compacts the retained data into the lower
     * pages, minimizing the size of the core file.
     */
    prepare_for_final_gc();
    gencgc_alloc_start_page = last_free_page;
    collect_garbage(HIGHEST_NORMAL_GENERATION+1);

    // We always coalesce copyable numbers. Additional coalescing is done
    // only on request, in which case a message is shown (unless verbose=0).
    if (gc_coalesce_string_literals && verbose) {
        printf("[coalescing similar vectors... ");
        fflush(stdout);
    }
    coalesce_similar_objects();
    if (gc_coalesce_string_literals && verbose)
        printf("done]\n");

    prepare_for_final_gc();
    gencgc_alloc_start_page = -1;
    collect_garbage(HIGHEST_NORMAL_GENERATION+1);

    if (prepend_runtime)
        save_runtime_to_filehandle(file, runtime_bytes, runtime_size,
                                   application_type);

    /* The dumper doesn't know that pages need to be zeroed before use. */
    zero_all_free_pages();
    save_to_filehandle(file, filename, lisp_init_function,
                       prepend_runtime, save_runtime_options,
                       compressed ? compression_level : COMPRESSION_LEVEL_NONE);
    /* Oops. Save still managed to fail. Since we've mangled the stack
     * beyond hope, there's not much we can do.
     * (beyond FUNCALLing lisp_init_function, but I suspect that's
     * going to be rather unsatisfactory too... */
    lose("Attempt to save core after non-conservative GC failed.\n");
}