src/runtime/gencgc.c
1 /*
2 * GENerational Conservative Garbage Collector for SBCL
3 */
5 /*
6 * This software is part of the SBCL system. See the README file for
7 * more information.
9 * This software is derived from the CMU CL system, which was
10 * written at Carnegie Mellon University and released into the
11 * public domain. The software is in the public domain and is
12 * provided with absolutely no warranty. See the COPYING and CREDITS
13 * files for more information.
17 * For a review of garbage collection techniques (e.g. generational
18 * GC) and terminology (e.g. "scavenging") see Paul R. Wilson,
19 * "Uniprocessor Garbage Collection Techniques". As of 20000618, this
20 * had been accepted for _ACM Computing Surveys_ and was available
21 * as a PostScript preprint through
22 * <http://www.cs.utexas.edu/users/oops/papers.html>
23 * as
24 * <ftp://ftp.cs.utexas.edu/pub/garbage/bigsurv.ps>.
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <errno.h>
30 #include <string.h>
31 #include <inttypes.h>
32 #include "sbcl.h"
33 #if defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD)
34 #include "pthreads_win32.h"
35 #else
36 #include <signal.h>
37 #endif
38 #include "runtime.h"
39 #include "os.h"
40 #include "interr.h"
41 #include "globals.h"
42 #include "interrupt.h"
43 #include "validate.h"
44 #include "lispregs.h"
45 #include "arch.h"
46 #include "gc.h"
47 #include "gc-internal.h"
48 #include "thread.h"
49 #include "pseudo-atomic.h"
50 #include "alloc.h"
51 #include "genesis/gc-tables.h"
52 #include "genesis/vector.h"
53 #include "genesis/weak-pointer.h"
54 #include "genesis/fdefn.h"
55 #include "genesis/simple-fun.h"
56 #include "save.h"
57 #include "genesis/hash-table.h"
58 #include "genesis/instance.h"
59 #include "genesis/layout.h"
60 #include "gencgc.h"
61 #include "hopscotch.h"
62 #ifdef GENCGC_IS_PRECISE
63 #include "genesis/cons.h" /* for accessing *pinned-objects* */
64 #endif
65 #include "forwarding-ptr.h"
67 /* forward declarations */
68 page_index_t gc_find_freeish_pages(page_index_t *restart_page_ptr, sword_t nbytes,
69 int page_type_flag);
73 * GC parameters
76 /* As usually configured, generations 0-5 are normally collected generations,
77 6 is pseudo-static (the objects in which are never moved nor reclaimed),
78 and 7 is scratch space used when collecting a generation without promotion,
79 wherein the generation is moved to generation 7 and back again.
81 enum {
82 SCRATCH_GENERATION = PSEUDO_STATIC_GENERATION+1,
83 NUM_GENERATIONS
86 /* Largest allocation seen since last GC. */
87 os_vm_size_t large_allocation = 0;
91 * debugging
94 /* the verbosity level. All non-error messages are disabled at level 0;
95 * and only a few rare messages are printed at level 1. */
96 #if QSHOW == 2
97 boolean gencgc_verbose = 1;
98 #else
99 boolean gencgc_verbose = 0;
100 #endif
102 /* FIXME: At some point enable the various error-checking things below
103 * and see what they say. */
105 /* We hunt for pointers to old-space when GCing generations >= verify_gens.
106 * Set verify_gens to HIGHEST_NORMAL_GENERATION + 1 to disable this kind of
107 * check. */
108 generation_index_t verify_gens = HIGHEST_NORMAL_GENERATION + 1;
110 /* Should we do a pre-scan verify of generation 0 before it's GCed? */
111 boolean pre_verify_gen_0 = 0;
113 /* Should we check that newly allocated regions are zero filled? */
114 boolean gencgc_zero_check = 0;
116 /* Should we check that the free space is zero filled? */
117 boolean gencgc_enable_verify_zero_fill = 0;
119 /* When loading a core, don't do a full scan of the memory for the
120 * memory region boundaries. (Set to true by coreparse.c if the core
121 * contained a pagetable entry).
123 boolean gencgc_partial_pickup = 0;
125 /* If defined, free pages are read-protected to ensure that nothing
126 * accesses them.
129 /* #define READ_PROTECT_FREE_PAGES */
133 * GC structures and variables
136 /* the total bytes allocated. These are seen by Lisp DYNAMIC-USAGE. */
137 os_vm_size_t bytes_allocated = 0;
138 os_vm_size_t auto_gc_trigger = 0;
140 /* the source and destination generations. These are set before a GC starts
141 * scavenging. */
142 generation_index_t from_space;
143 generation_index_t new_space;
145 /* Set to 1 when in GC */
146 boolean gc_active_p = 0;
148 /* Should the GC be conservative about the stack? If false (only right before
149  * saving a core), don't scan the stack / don't mark pages dont_move. */
150 static boolean conservative_stack = 1;
152 /* An array of page structures is allocated on gc initialization.
153 * This helps to quickly map between an address and its page structure.
154 * page_table_pages is set from the size of the dynamic space. */
155 page_index_t page_table_pages;
156 struct page *page_table;
157 #ifdef LISP_FEATURE_SB_TRACEROOT
158 lispobj gc_object_watcher;
159 int gc_traceroot_criterion;
160 #endif
161 #ifdef PIN_GRANULARITY_LISPOBJ
162 int gc_n_stack_pins;
163 struct hopscotch_table pinned_objects;
164 #endif
166 /* This is always 0 except during gc_and_save() */
167 lispobj lisp_init_function;
169 /// Constants defined in gc-internal:
170 /// #define BOXED_PAGE_FLAG 1
171 /// #define UNBOXED_PAGE_FLAG 2
172 /// #define OPEN_REGION_PAGE_FLAG 4
174 /// Return true if 'allocated' bits are: {001, 010, 011}, false if 1zz or 000.
175 static inline boolean page_allocated_no_region_p(page_index_t page) {
176 return (page_table[page].allocated ^ OPEN_REGION_PAGE_FLAG) > OPEN_REGION_PAGE_FLAG;
179 static inline boolean page_free_p(page_index_t page) {
180 return (page_table[page].allocated == FREE_PAGE_FLAG);
183 static inline boolean page_boxed_p(page_index_t page) {
184 return (page_table[page].allocated & BOXED_PAGE_FLAG);
187 /// Return true if 'allocated' bits are: {001, 011}, false otherwise.
188 /// i.e. true of pages which could hold boxed or partially boxed objects.
189 static inline boolean page_boxed_no_region_p(page_index_t page) {
190 return (page_table[page].allocated & 5) == BOXED_PAGE_FLAG;
193 /// Return true if page MUST NOT hold boxed objects (including code).
194 static inline boolean page_unboxed_p(page_index_t page) {
195 /* Both flags set == boxed code page */
196 return (page_table[page].allocated & 3) == UNBOXED_PAGE_FLAG;
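/* A minimal sketch (not part of the build) of the bit encodings the predicates
 * above rely on, assuming the flag values quoted in the comment block above
 * (BOXED=1, UNBOXED=2, OPEN_REGION=4, FREE=0). The function name is made up. */
#if 0
static void example_page_flag_encodings(void)
{
    /* allocated bits   meaning                         no_region_p  boxed_no_region_p
     *   000 (FREE)     free page                       false        false
     *   001 (BOXED)    boxed page                      true         true
     *   010 (UNBOXED)  unboxed page                    true         false
     *   011 (both)     code page                       true         true
     *   1xx (OPEN)     page is part of an open region  false        false       */
    gc_assert( (BOXED_PAGE_FLAG ^ OPEN_REGION_PAGE_FLAG) > OPEN_REGION_PAGE_FLAG);
    gc_assert(!((FREE_PAGE_FLAG ^ OPEN_REGION_PAGE_FLAG) > OPEN_REGION_PAGE_FLAG));
    gc_assert(!(((BOXED_PAGE_FLAG|OPEN_REGION_PAGE_FLAG) ^ OPEN_REGION_PAGE_FLAG)
                > OPEN_REGION_PAGE_FLAG));
    gc_assert(((BOXED_PAGE_FLAG|UNBOXED_PAGE_FLAG) & 5) == BOXED_PAGE_FLAG);
    gc_assert((UNBOXED_PAGE_FLAG & 5) != BOXED_PAGE_FLAG);
}
#endif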
199 static inline boolean protect_page_p(page_index_t page, generation_index_t generation) {
200 return (page_boxed_no_region_p(page)
201 && (page_bytes_used(page) != 0)
202 && !page_table[page].dont_move
203 && (page_table[page].gen == generation));
206 /* Calculate the start address for the given page number. */
207 inline char *
208 page_address(page_index_t page_num)
210 return (void*)(DYNAMIC_SPACE_START + (page_num * GENCGC_CARD_BYTES));
213 /* Calculate the address where the allocation region associated with
214 * the page starts. */
215 static inline void *
216 page_scan_start(page_index_t page_index)
218 return page_address(page_index)-page_scan_start_offset(page_index);
221 /* True if the page starts a contiguous block. */
222 static inline boolean
223 page_starts_contiguous_block_p(page_index_t page_index)
225 // Don't use the preprocessor macro: 0 means 0.
226 return page_table[page_index].scan_start_offset_ == 0;
229 /* True if the page is the last page in a contiguous block. */
230 static inline boolean
231 page_ends_contiguous_block_p(page_index_t page_index, generation_index_t gen)
233 return (/* page doesn't fill block */
234 (page_bytes_used(page_index) < GENCGC_CARD_BYTES)
235 /* page is last allocated page */
236 || ((page_index + 1) >= last_free_page)
237 /* next page free */
238 || page_free_p(page_index + 1)
239 /* next page contains no data */
240 || (page_bytes_used(page_index + 1) == 0)
241 /* next page is in different generation */
242 || (page_table[page_index + 1].gen != gen)
243 /* next page starts its own contiguous block */
244 || (page_starts_contiguous_block_p(page_index + 1)));
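/* A minimal sketch (not part of the build) of how the two predicates above are
 * typically used together: given any page of generation 'gen', walk forward to
 * the last page of the contiguous block it belongs to. The function name is
 * hypothetical. */
#if 0
static page_index_t example_end_of_block(page_index_t page, generation_index_t gen)
{
    /* page_scan_start() above recovers the *start* of the block; walking
     * forward with page_ends_contiguous_block_p() finds its last page. */
    while (!page_ends_contiguous_block_p(page, gen))
        page++;
    return page;
}
#endif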
247 /// External function for calling from Lisp.
248 page_index_t ext_find_page_index(void *addr) { return find_page_index(addr); }
250 static os_vm_size_t
251 npage_bytes(page_index_t npages)
253 gc_assert(npages>=0);
254 return ((os_vm_size_t)npages)*GENCGC_CARD_BYTES;
257 /* Check that X is a higher address than Y and return offset from Y to
258 * X in bytes. */
259 static inline os_vm_size_t
260 addr_diff(void *x, void *y)
262 gc_assert(x >= y);
263 return (uintptr_t)x - (uintptr_t)y;
266 /* a structure to hold the state of a generation
268 * CAUTION: If you modify this, make sure to touch up the alien
269  * definition in src/code/gc.lisp accordingly. ...or better yet,
270 * deal with the FIXME there...
272 struct generation {
274 #ifdef LISP_FEATURE_SEGREGATED_CODE
275 // A distinct start page per nonzero value of 'page_type_flag'.
276 // The zeroth index is the large object start page.
277 page_index_t alloc_start_page_[4];
278 #define alloc_large_start_page alloc_start_page_[0]
279 #define alloc_start_page alloc_start_page_[BOXED_PAGE_FLAG]
280 #define alloc_unboxed_start_page alloc_start_page_[UNBOXED_PAGE_FLAG]
281 #else
282 /* the first page that gc_alloc_large (boxed) considers on its next
283 * call. (Although it always allocates after the boxed_region.) */
284 page_index_t alloc_large_start_page;
286 /* the first page that gc_alloc() checks on its next call */
287 page_index_t alloc_start_page;
289 /* the first page that gc_alloc_unboxed() checks on its next call */
290 page_index_t alloc_unboxed_start_page;
291 #endif
293 /* the bytes allocated to this generation */
294 os_vm_size_t bytes_allocated;
296 /* the number of bytes at which to trigger a GC */
297 os_vm_size_t gc_trigger;
299 /* to calculate a new level for gc_trigger */
300 os_vm_size_t bytes_consed_between_gc;
302 /* the number of GCs since the last raise */
303 int num_gc;
305 /* the number of GCs to run on the generations before raising objects to the
306 * next generation */
307 int number_of_gcs_before_promotion;
309 /* the cumulative sum of the bytes allocated to this generation. It is
310  * cleared after a GC on this generation, and updated before new
311 * objects are added from a GC of a younger generation. Dividing by
312 * the bytes_allocated will give the average age of the memory in
313 * this generation since its last GC. */
314 os_vm_size_t cum_sum_bytes_allocated;
316 /* a minimum average memory age before a GC will occur. This helps
317  * prevent a GC when a large number of new live objects have been
318  * added, in which case a GC could be a waste of time. */
319 double minimum_age_before_gc;
322 /* an array of generation structures. There needs to be one more
323 * generation structure than actual generations as the oldest
324 * generation is temporarily raised then lowered. */
325 struct generation generations[NUM_GENERATIONS];
327 /* the oldest generation that will currently be GCed by default.
328 * Valid values are: 0, 1, ... HIGHEST_NORMAL_GENERATION
330 * The default of HIGHEST_NORMAL_GENERATION enables GC on all generations.
332 * Setting this to 0 effectively disables the generational nature of
333 * the GC. In some applications generational GC may not be useful
334 * because there are no long-lived objects.
336 * An intermediate value could be handy after moving long-lived data
337 * into an older generation so an unnecessary GC of this long-lived
338 * data can be avoided. */
339 generation_index_t gencgc_oldest_gen_to_gc = HIGHEST_NORMAL_GENERATION;
341 /* META: Is nobody aside from me bothered by this especially misleading
342 * use of the word "last"? It could mean either "ultimate" or "prior",
343 * but in fact means neither. It is the *FIRST* page that should be grabbed
344 * for more space, so it is min free page, or 1+ the max used page. */
345 /* The maximum free page in the heap is maintained and used to update
346 * ALLOCATION_POINTER which is used by the room function to limit its
347 * search of the heap. XX Gencgc obviously needs to be better
348 * integrated with the Lisp code. */
350 page_index_t last_free_page;
352 #ifdef LISP_FEATURE_SB_THREAD
353 /* This lock is to prevent multiple threads from simultaneously
354 * allocating new regions which overlap each other. Note that the
355 * majority of GC is single-threaded, but alloc() may be called from
356 * >1 thread at a time and must be thread-safe. This lock must be
357 * seized before all accesses to generations[] or to parts of
358 * page_table[] that other threads may want to see */
359 static pthread_mutex_t free_pages_lock = PTHREAD_MUTEX_INITIALIZER;
360 /* This lock is used to protect non-thread-local allocation. */
361 static pthread_mutex_t allocation_lock = PTHREAD_MUTEX_INITIALIZER;
362 #endif
364 extern os_vm_size_t gencgc_release_granularity;
365 os_vm_size_t gencgc_release_granularity = GENCGC_RELEASE_GRANULARITY;
367 extern os_vm_size_t gencgc_alloc_granularity;
368 os_vm_size_t gencgc_alloc_granularity = GENCGC_ALLOC_GRANULARITY;
372 * miscellaneous heap functions
375 /* Count the number of pages which are write-protected within the
376 * given generation. */
377 static page_index_t
378 count_write_protect_generation_pages(generation_index_t generation)
380 page_index_t i, count = 0;
382 for (i = 0; i < last_free_page; i++)
383 if (!page_free_p(i)
384 && (page_table[i].gen == generation)
385 && page_table[i].write_protected)
386 count++;
387 return count;
390 /* Count the number of pages within the given generation. */
391 static page_index_t
392 count_generation_pages(generation_index_t generation)
394 page_index_t i;
395 page_index_t count = 0;
397 for (i = 0; i < last_free_page; i++)
398 if (!page_free_p(i)
399 && (page_table[i].gen == generation))
400 count++;
401 return count;
404 #if QSHOW
405 static page_index_t
406 count_dont_move_pages(void)
408 page_index_t i;
409 page_index_t count = 0;
410 for (i = 0; i < last_free_page; i++) {
411 if (!page_free_p(i)
412 && (page_table[i].dont_move != 0)) {
413 ++count;
416 return count;
418 #endif /* QSHOW */
420 /* Work through the pages and add up the number of bytes used for the
421 * given generation. */
422 static __attribute__((unused)) os_vm_size_t
423 count_generation_bytes_allocated (generation_index_t gen)
425 page_index_t i;
426 os_vm_size_t result = 0;
427 for (i = 0; i < last_free_page; i++) {
428 if (!page_free_p(i)
429 && (page_table[i].gen == gen))
430 result += page_bytes_used(i);
432 return result;
435 /* Return the average age of the memory in a generation. */
436 extern double
437 generation_average_age(generation_index_t gen)
439 if (generations[gen].bytes_allocated == 0)
440 return 0.0;
442 return
443 ((double)generations[gen].cum_sum_bytes_allocated)
444 / ((double)generations[gen].bytes_allocated);
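/* Worked example (numbers are assumptions for illustration): right after this
 * generation is collected, cum_sum_bytes_allocated is cleared and, say,
 * bytes_allocated = 2MB. A younger-generation GC then promotes another 2MB
 * into it; per the struct comment above, cum_sum_bytes_allocated is first
 * bumped by the pre-promotion 2MB, giving cum_sum = 2MB and
 * bytes_allocated = 4MB, so generation_average_age() returns 2/4 = 0.5.
 * That value is what gets compared against minimum_age_before_gc when
 * deciding whether collecting this generation is worthwhile yet. */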
447 #ifdef LISP_FEATURE_X86
448 extern void fpu_save(void *);
449 extern void fpu_restore(void *);
450 #endif
452 #define PAGE_INDEX_FMT PRIdPTR
454 extern void
455 write_generation_stats(FILE *file)
457 generation_index_t i;
459 #ifdef LISP_FEATURE_X86
460 int fpu_state[27];
462 /* Can end up here after calling alloc_tramp which doesn't prepare
463 * the x87 state, and the C ABI uses a different mode */
464 fpu_save(fpu_state);
465 #endif
467 /* Print the heap stats. */
468 fprintf(file,
469 " Gen StaPg UbSta LaSta Boxed Unbox LB LUB !move Alloc Waste Trig WP GCs Mem-age\n");
471 for (i = 0; i <= SCRATCH_GENERATION; i++) {
472 page_index_t j;
473 page_index_t boxed_cnt = 0;
474 page_index_t unboxed_cnt = 0;
475 page_index_t large_boxed_cnt = 0;
476 page_index_t large_unboxed_cnt = 0;
477 page_index_t pinned_cnt=0;
479 for (j = 0; j < last_free_page; j++)
480 if (page_table[j].gen == i) {
482 /* Count the number of boxed pages within the given
483 * generation. */
484 if (page_boxed_p(j)) {
485 if (page_table[j].large_object)
486 large_boxed_cnt++;
487 else
488 boxed_cnt++;
490 if(page_table[j].dont_move) pinned_cnt++;
491 /* Count the number of unboxed pages within the given
492 * generation. */
493 if (page_unboxed_p(j)) {
494 if (page_table[j].large_object)
495 large_unboxed_cnt++;
496 else
497 unboxed_cnt++;
501 gc_assert(generations[i].bytes_allocated
502 == count_generation_bytes_allocated(i));
503 fprintf(file,
504 " %1d: %5ld %5ld %5ld",
506 (long)generations[i].alloc_start_page,
507 (long)generations[i].alloc_unboxed_start_page,
508 (long)generations[i].alloc_large_start_page);
509 fprintf(file,
510 " %5"PAGE_INDEX_FMT" %5"PAGE_INDEX_FMT" %5"PAGE_INDEX_FMT
511 " %5"PAGE_INDEX_FMT" %5"PAGE_INDEX_FMT,
512 boxed_cnt, unboxed_cnt, large_boxed_cnt,
513 large_unboxed_cnt, pinned_cnt);
514 fprintf(file,
515 " %8"OS_VM_SIZE_FMT
516 " %6"OS_VM_SIZE_FMT
517 " %8"OS_VM_SIZE_FMT
518 " %4"PAGE_INDEX_FMT" %3d %7.4f\n",
519 generations[i].bytes_allocated,
520 (npage_bytes(count_generation_pages(i)) - generations[i].bytes_allocated),
521 generations[i].gc_trigger,
522 count_write_protect_generation_pages(i),
523 generations[i].num_gc,
524 generation_average_age(i));
526 fprintf(file," Total bytes allocated = %"OS_VM_SIZE_FMT"\n", bytes_allocated);
527 fprintf(file," Dynamic-space-size bytes = %"OS_VM_SIZE_FMT"\n", dynamic_space_size);
529 #ifdef LISP_FEATURE_X86
530 fpu_restore(fpu_state);
531 #endif
534 extern void
535 write_heap_exhaustion_report(FILE *file, long available, long requested,
536 struct thread *thread)
538 fprintf(file,
539 "Heap exhausted during %s: %ld bytes available, %ld requested.\n",
540 gc_active_p ? "garbage collection" : "allocation",
541 available,
542 requested);
543 write_generation_stats(file);
544 fprintf(file, "GC control variables:\n");
545 fprintf(file, " *GC-INHIBIT* = %s\n *GC-PENDING* = %s\n",
546 SymbolValue(GC_INHIBIT,thread)==NIL ? "false" : "true",
547 (SymbolValue(GC_PENDING, thread) == T) ?
548 "true" : ((SymbolValue(GC_PENDING, thread) == NIL) ?
549 "false" : "in progress"));
550 #ifdef LISP_FEATURE_SB_THREAD
551 fprintf(file, " *STOP-FOR-GC-PENDING* = %s\n",
552 SymbolValue(STOP_FOR_GC_PENDING,thread)==NIL ? "false" : "true");
553 #endif
556 extern void
557 print_generation_stats(void)
559 write_generation_stats(stderr);
562 extern char* gc_logfile;
563 char * gc_logfile = NULL;
565 extern void
566 log_generation_stats(char *logfile, char *header)
568 if (logfile) {
569 FILE * log = fopen(logfile, "a");
570 if (log) {
571 fprintf(log, "%s\n", header);
572 write_generation_stats(log);
573 fclose(log);
574 } else {
575 fprintf(stderr, "Could not open gc logfile: %s\n", logfile);
576 fflush(stderr);
581 extern void
582 report_heap_exhaustion(long available, long requested, struct thread *th)
584 if (gc_logfile) {
585 FILE * log = fopen(gc_logfile, "a");
586 if (log) {
587 write_heap_exhaustion_report(log, available, requested, th);
588 fclose(log);
589 } else {
590 fprintf(stderr, "Could not open gc logfile: %s\n", gc_logfile);
591 fflush(stderr);
594 /* Always to stderr as well. */
595 write_heap_exhaustion_report(stderr, available, requested, th);
599 #if defined(LISP_FEATURE_X86)
600 void fast_bzero(void*, size_t); /* in <arch>-assem.S */
601 #endif
603 /* Zero the pages from START to END (inclusive), but use mmap/munmap instead
604  * of zeroing them ourselves, i.e. in practice give the memory back to the
605 * OS. Generally done after a large GC.
607 void zero_pages_with_mmap(page_index_t start, page_index_t end) {
608 page_index_t i;
609 void *addr = page_address(start), *new_addr;
610 os_vm_size_t length = npage_bytes(1+end-start);
612 if (start > end)
613 return;
615 gc_assert(length >= gencgc_release_granularity);
616 gc_assert((length % gencgc_release_granularity) == 0);
618 #ifdef LISP_FEATURE_LINUX
619 extern os_vm_address_t anon_dynamic_space_start;
620 // We use MADV_DONTNEED only on Linux due to differing semantics from BSD.
621 // Linux treats it as a demand that the memory be 0-filled, or refreshed
622 // from a file that backs the range. BSD takes it as a hint that you don't
623 // care if the memory has to be brought in from swap when next accessed,
624 // i.e. it's not a request to make a user-visible alteration to memory.
625 // So in theory this can bring a page in from the core file, if we happen
626 // to hit a page that resides in the portion of memory mapped by coreparse.
627 // In practice this should not happen because objects from a core file can't
628 // become garbage. Except in save-lisp-and-die they can, and we must be
629 // cautious not to resurrect bytes that originally came from the file.
630 if ((os_vm_address_t)addr >= anon_dynamic_space_start) {
631 if (madvise(addr, length, MADV_DONTNEED) != 0)
632 lose("madvise failed\n");
633 } else
634 #endif
636 os_invalidate(addr, length);
637 new_addr = os_validate(NOT_MOVABLE, addr, length);
638 if (new_addr == NULL || new_addr != addr) {
639 lose("remap_free_pages: page moved, 0x%08x ==> 0x%08x",
640 start, new_addr);
644 for (i = start; i <= end; i++)
645 set_page_need_to_zero(i, 0);
648 /* Zero the pages from START to END (inclusive). Generally done just after
649 * a new region has been allocated.
651 static void
652 zero_pages(page_index_t start, page_index_t end) {
653 if (start > end)
654 return;
656 #if defined(LISP_FEATURE_X86)
657 fast_bzero(page_address(start), npage_bytes(1+end-start));
658 #else
659 bzero(page_address(start), npage_bytes(1+end-start));
660 #endif
664 static void
665 zero_and_mark_pages(page_index_t start, page_index_t end) {
666 page_index_t i;
668 zero_pages(start, end);
669 for (i = start; i <= end; i++)
670 set_page_need_to_zero(i, 0);
673 /* Zero the pages from START to END (inclusive), except for those
674  * pages that are already known to be zeroed. Mark all pages in the
675  * range as non-zeroed.
677 static void
678 zero_dirty_pages(page_index_t start, page_index_t end) {
679 page_index_t i, j;
681 for (i = start; i <= end; i++) {
682 if (!page_need_to_zero(i)) continue;
683 for (j = i+1; (j <= end) && page_need_to_zero(j) ; j++)
684 ; /* empty body */
685 zero_pages(i, j-1);
686 i = j;
689 for (i = start; i <= end; i++) {
690 set_page_need_to_zero(i, 1);
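/* A minimal sketch (not part of the build) of what zero_dirty_pages() does for
 * a hypothetical range; page numbers are made up for illustration. */
#if 0
    /* For pages 10..15 where only 11, 12 and 14 still have need_to_zero set,
     * the first loop above issues zero_pages(11,12) and zero_pages(14,14),
     * skipping the already-zero pages; the second loop then marks all of
     * 10..15 as needing zeroing again, since the caller is about to dirty
     * them. */
    zero_dirty_pages(10, 15);
#endif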
696 * To support quick and inline allocation, regions of memory can be
697 * allocated and then allocated from with just a free pointer and a
698 * check against an end address.
700 * Since objects can be allocated to spaces with different properties
701 * e.g. boxed/unboxed, generation, ages; there may need to be many
702 * allocation regions.
704 * Each allocation region may start within a partly used page. Many
705  * features of memory use are noted on a per-page basis, e.g. the
706 * generation; so if a region starts within an existing allocated page
707 * it must be consistent with this page.
709 * During the scavenging of the newspace, objects will be transported
710 * into an allocation region, and pointers updated to point to this
711 * allocation region. It is possible that these pointers will be
712 * scavenged again before the allocation region is closed, e.g. due to
713  * trans_list which jumps all over the place to clean up the list. It
714 * is important to be able to determine properties of all objects
715 * pointed to when scavenging, e.g to detect pointers to the oldspace.
716 * Thus it's important that the allocation regions have the correct
717 * properties set when allocated, and not just set when closed. The
718 * region allocation routines return regions with the specified
719 * properties, and grab all the pages, setting their properties
720 * appropriately, except that the amount used is not known.
722 * These regions are used to support quicker allocation using just a
723 * free pointer. The actual space used by the region is not reflected
724 * in the pages tables until it is closed. It can't be scavenged until
725 * closed.
727 * When finished with the region it should be closed, which will
728  * update the page tables for the actual space used, returning any unused
729  * space. Further, the region may be noted in the new areas, which is
730  * necessary when scavenging the newspace.
732 * Large objects may be allocated directly without an allocation
733 * region, the page tables are updated immediately.
735 * Unboxed objects don't contain pointers to other objects and so
736 * don't need scavenging. Further they can't contain pointers to
737 * younger generations so WP is not needed. By allocating pages to
738 * unboxed objects the whole page never needs scavenging or
739 * write-protecting. */
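/* A minimal sketch (not part of the build) of the region lifecycle described
 * above, using functions defined later in this file. The sizes, page type and
 * function name are assumptions for illustration only. */
#if 0
static void example_region_lifecycle(void)
{
    struct alloc_region region;
    gc_set_region_empty(&region);               /* reset state expected below */

    /* Open: claims pages and sets their gen/type immediately (see above). */
    gc_alloc_new_region(1024, BOXED_PAGE_FLAG, &region);

    /* Allocate: inline allocation is a pointer bump checked against end_addr. */
    void *obj = region.free_pointer;
    region.free_pointer = (char*)region.free_pointer + 64;
    (void)obj;

    /* Close: writes the real bytes_used back into the page table and frees any
     * claimed-but-unused pages; the region can then be reopened. */
    gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &region);
}
#endif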
741 /* We use either two or three regions for the current newspace generation. */
742 #ifdef LISP_FEATURE_SEGREGATED_CODE
743 struct alloc_region gc_alloc_regions[3];
744 #define boxed_region gc_alloc_regions[BOXED_PAGE_FLAG-1]
745 #define unboxed_region gc_alloc_regions[UNBOXED_PAGE_FLAG-1]
746 #define code_region gc_alloc_regions[CODE_PAGE_FLAG-1]
747 #else
748 struct alloc_region boxed_region;
749 struct alloc_region unboxed_region;
750 #endif
752 /* The generation currently being allocated to. */
753 static generation_index_t gc_alloc_generation;
755 static inline page_index_t
756 generation_alloc_start_page(generation_index_t generation, int page_type_flag, int large)
758 if (!(page_type_flag >= 1 && page_type_flag <= 3))
759 lose("bad page_type_flag: %d", page_type_flag);
760 if (large)
761 return generations[generation].alloc_large_start_page;
762 #ifdef LISP_FEATURE_SEGREGATED_CODE
763 return generations[generation].alloc_start_page_[page_type_flag];
764 #else
765 if (UNBOXED_PAGE_FLAG == page_type_flag)
766 return generations[generation].alloc_unboxed_start_page;
767 /* Both code and data. */
768 return generations[generation].alloc_start_page;
769 #endif
772 static inline void
773 set_generation_alloc_start_page(generation_index_t generation, int page_type_flag, int large,
774 page_index_t page)
776 if (!(page_type_flag >= 1 && page_type_flag <= 3))
777 lose("bad page_type_flag: %d", page_type_flag);
778 if (large)
779 generations[generation].alloc_large_start_page = page;
780 #ifdef LISP_FEATURE_SEGREGATED_CODE
781 else
782 generations[generation].alloc_start_page_[page_type_flag] = page;
783 #else
784 else if (UNBOXED_PAGE_FLAG == page_type_flag)
785 generations[generation].alloc_unboxed_start_page = page;
786 else /* Both code and data. */
787 generations[generation].alloc_start_page = page;
788 #endif
791 /* Find a new region with room for at least the given number of bytes.
793  * It starts looking at the current generation's alloc_start_page. So it
794 * may pick up from the previous region if there is enough space. This
795 * keeps the allocation contiguous when scavenging the newspace.
797 * The alloc_region should have been closed by a call to
798 * gc_alloc_update_page_tables(), and will thus be in an empty state.
800 * To assist the scavenging functions write-protected pages are not
801 * used. Free pages should not be write-protected.
803 * It is critical to the conservative GC that the start of regions be
804 * known. To help achieve this only small regions are allocated at a
805 * time.
807 * During scavenging, pointers may be found to within the current
808 * region and the page generation must be set so that pointers to the
809  * from space can be recognized. Therefore the generation of the pages in
810  * the region is set to gc_alloc_generation. To prevent another
811 * allocation call using the same pages, all the pages in the region
812 * are allocated, although they will initially be empty.
814 static void
815 gc_alloc_new_region(sword_t nbytes, int page_type_flag, struct alloc_region *alloc_region)
817 page_index_t first_page;
818 page_index_t last_page;
819 page_index_t i;
820 int ret;
823 FSHOW((stderr,
824 "/alloc_new_region for %d bytes from gen %d\n",
825 nbytes, gc_alloc_generation));
828 /* Check that the region is in a reset state. */
829 gc_assert((alloc_region->first_page == 0)
830 && (alloc_region->last_page == -1)
831 && (alloc_region->free_pointer == alloc_region->end_addr));
832 ret = thread_mutex_lock(&free_pages_lock);
833 gc_assert(ret == 0);
834 first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0);
835 last_page=gc_find_freeish_pages(&first_page, nbytes, page_type_flag);
837 /* Set up the alloc_region. */
838 alloc_region->first_page = first_page;
839 alloc_region->last_page = last_page;
840 alloc_region->start_addr = page_address(first_page) + page_bytes_used(first_page);
841 alloc_region->free_pointer = alloc_region->start_addr;
842 alloc_region->end_addr = page_address(last_page+1);
844 /* Set up the pages. */
846 /* The first page may have already been in use. */
847 /* If so, just assert that it's consistent, otherwise, set it up. */
848 if (page_bytes_used(first_page)) {
849 gc_assert(page_table[first_page].allocated == page_type_flag);
850 gc_assert(page_table[first_page].gen == gc_alloc_generation);
851 gc_assert(page_table[first_page].large_object == 0);
852 } else {
853 page_table[first_page].allocated = page_type_flag;
854 page_table[first_page].gen = gc_alloc_generation;
855 page_table[first_page].large_object = 0;
856 set_page_scan_start_offset(first_page, 0);
858 page_table[first_page].allocated |= OPEN_REGION_PAGE_FLAG;
860 for (i = first_page+1; i <= last_page; i++) {
861 page_table[i].allocated = page_type_flag;
862 page_table[i].gen = gc_alloc_generation;
863 page_table[i].large_object = 0;
864 /* This may not be necessary for unboxed regions (think it was
865 * broken before!) */
866 set_page_scan_start_offset(i,
867 addr_diff(page_address(i), alloc_region->start_addr));
868 page_table[i].allocated |= OPEN_REGION_PAGE_FLAG;
870 /* Bump up last_free_page. */
871 if (last_page+1 > last_free_page) {
872 last_free_page = last_page+1;
873 /* do we only want to call this on special occasions? like for
874 * boxed_region? */
875 set_alloc_pointer((lispobj)page_address(last_free_page));
877 ret = thread_mutex_unlock(&free_pages_lock);
878 gc_assert(ret == 0);
880 #ifdef READ_PROTECT_FREE_PAGES
881 os_protect(page_address(first_page),
882 npage_bytes(1+last_page-first_page),
883 OS_VM_PROT_ALL);
884 #endif
886 /* If the first page was only partial, don't check whether it's
887 * zeroed (it won't be) and don't zero it (since the parts that
888 * we're interested in are guaranteed to be zeroed).
890 if (page_bytes_used(first_page)) {
891 first_page++;
894 zero_dirty_pages(first_page, last_page);
896 /* we can do this after releasing free_pages_lock */
897 if (gencgc_zero_check) {
898 word_t *p;
899 for (p = (word_t *)alloc_region->start_addr;
900 p < (word_t *)alloc_region->end_addr; p++) {
901 if (*p != 0) {
902 lose("The new region is not zero at %p (start=%p, end=%p).\n",
903 p, alloc_region->start_addr, alloc_region->end_addr);
909 /* If the record_new_objects flag is 2 then all new regions created
910 * are recorded.
912  * If it's 1 then it is only recorded if the first page of the
913  * current region is <= new_areas_ignore_page. This helps avoid
914  * unnecessary recording when doing a full scavenge pass.
916  * The new_area structure holds the page, byte offset, and size of
917  * new regions of objects. Each new area is placed in the array of
918  * these structures pointed to by new_areas. new_areas_index holds the
919  * offset into new_areas.
921  * If new_areas overflows NUM_NEW_AREAS then it stops adding them. The
922  * later code must detect this and handle it, probably by doing a full
923  * scavenge of a generation. */
924 #define NUM_NEW_AREAS 512
925 static int record_new_objects = 0;
926 static page_index_t new_areas_ignore_page;
927 struct new_area {
928 page_index_t page;
929 size_t offset;
930 size_t size;
932 static struct new_area (*new_areas)[];
933 static size_t new_areas_index;
934 size_t max_new_areas;
936 /* Add a new area to new_areas. */
937 static void
938 add_new_area(page_index_t first_page, size_t offset, size_t size)
940 size_t new_area_start, c;
941 ssize_t i;
943 /* Ignore if full. */
944 if (new_areas_index >= NUM_NEW_AREAS)
945 return;
947 switch (record_new_objects) {
948 case 0:
949 return;
950 case 1:
951 if (first_page > new_areas_ignore_page)
952 return;
953 break;
954 case 2:
955 break;
956 default:
957 gc_abort();
960 new_area_start = npage_bytes(first_page) + offset;
962  /* Search backwards for a prior area that this follows from. If
963  found, this saves adding a new area. */
964 for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) {
965 size_t area_end =
966 npage_bytes((*new_areas)[i].page)
967 + (*new_areas)[i].offset
968 + (*new_areas)[i].size;
969 /*FSHOW((stderr,
970 "/add_new_area S1 %d %d %d %d\n",
971 i, c, new_area_start, area_end));*/
972 if (new_area_start == area_end) {
973 /*FSHOW((stderr,
974 "/adding to [%d] %d %d %d with %d %d %d:\n",
976 (*new_areas)[i].page,
977 (*new_areas)[i].offset,
978 (*new_areas)[i].size,
979 first_page,
980 offset,
981 size);*/
982 (*new_areas)[i].size += size;
983 return;
987 (*new_areas)[new_areas_index].page = first_page;
988 (*new_areas)[new_areas_index].offset = offset;
989 (*new_areas)[new_areas_index].size = size;
990 /*FSHOW((stderr,
991 "/new_area %d page %d offset %d size %d\n",
992 new_areas_index, first_page, offset, size));*/
993 new_areas_index++;
995 /* Note the max new_areas used. */
996 if (new_areas_index > max_new_areas)
997 max_new_areas = new_areas_index;
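/* A minimal sketch (not part of the build) of the coalescing search above: an
 * area that abuts the previously recorded one extends it instead of consuming
 * a new slot. Page/offset/size values are made up; this assumes new_areas has
 * been pointed at a scratch array, as the GC does during a collection. */
#if 0
static void example_new_area_coalescing(void)
{
    record_new_objects = 2;        /* record everything */
    add_new_area(10, 0, 100);      /* new entry: page 10, bytes [0,100)        */
    add_new_area(10, 100, 50);     /* abuts the entry above, so its size grows */
                                   /* to 150 instead of adding a second entry  */
    add_new_area(12, 0, 8);        /* not adjacent: recorded as a new entry    */
}
#endif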
1000 /* Update the tables for the alloc_region. The region may be added to
1001 * the new_areas.
1003 * When done the alloc_region is set up so that the next quick alloc
1004 * will fail safely and thus a new region will be allocated. Further
1005 * it is safe to try to re-update the page table of this reset
1006 * alloc_region. */
1007 void
1008 gc_alloc_update_page_tables(int page_type_flag, struct alloc_region *alloc_region)
1010 boolean more;
1011 page_index_t first_page;
1012 page_index_t next_page;
1013 os_vm_size_t bytes_used;
1014 os_vm_size_t region_size;
1015 os_vm_size_t byte_cnt;
1016 page_bytes_t orig_first_page_bytes_used;
1017 int ret;
1020 first_page = alloc_region->first_page;
1022 /* Catch an unused alloc_region. */
1023 if ((first_page == 0) && (alloc_region->last_page == -1))
1024 return;
1026 next_page = first_page+1;
1028 ret = thread_mutex_lock(&free_pages_lock);
1029 gc_assert(ret == 0);
1030 if (alloc_region->free_pointer != alloc_region->start_addr) {
1031 /* some bytes were allocated in the region */
1032 orig_first_page_bytes_used = page_bytes_used(first_page);
1034 gc_assert(alloc_region->start_addr ==
1035 (page_address(first_page) + page_bytes_used(first_page)));
1037 /* All the pages used need to be updated */
1039 /* Update the first page. */
1041 /* If the page was free then set up the gen, and
1042 * scan_start_offset. */
1043 if (page_bytes_used(first_page) == 0)
1044 gc_assert(page_starts_contiguous_block_p(first_page));
1045 page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
1047 #ifdef LISP_FEATURE_SEGREGATED_CODE
1048 gc_assert(page_table[first_page].allocated == page_type_flag);
1049 #else
1050 gc_assert(page_table[first_page].allocated & page_type_flag);
1051 #endif
1052 gc_assert(page_table[first_page].gen == gc_alloc_generation);
1053 gc_assert(page_table[first_page].large_object == 0);
1055 byte_cnt = 0;
1057 /* Calculate the number of bytes used in this page. This is not
1058 * always the number of new bytes, unless it was free. */
1059 more = 0;
1060 if ((bytes_used = addr_diff(alloc_region->free_pointer,
1061 page_address(first_page)))
1062 >GENCGC_CARD_BYTES) {
1063 bytes_used = GENCGC_CARD_BYTES;
1064 more = 1;
1066 set_page_bytes_used(first_page, bytes_used);
1067 byte_cnt += bytes_used;
1070 /* All the rest of the pages should be free. We need to set
1071 * their scan_start_offset pointer to the start of the
1072 * region, and set the bytes_used. */
1073 while (more) {
1074 page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
1075 #ifdef LISP_FEATURE_SEGREGATED_CODE
1076 gc_assert(page_table[next_page].allocated == page_type_flag);
1077 #else
1078 gc_assert(page_table[next_page].allocated & page_type_flag);
1079 #endif
1080 gc_assert(page_bytes_used(next_page) == 0);
1081 gc_assert(page_table[next_page].gen == gc_alloc_generation);
1082 gc_assert(page_table[next_page].large_object == 0);
1083 gc_assert(page_scan_start_offset(next_page) ==
1084 addr_diff(page_address(next_page),
1085 alloc_region->start_addr));
1087 /* Calculate the number of bytes used in this page. */
1088 more = 0;
1089 if ((bytes_used = addr_diff(alloc_region->free_pointer,
1090 page_address(next_page)))>GENCGC_CARD_BYTES) {
1091 bytes_used = GENCGC_CARD_BYTES;
1092 more = 1;
1094 set_page_bytes_used(next_page, bytes_used);
1095 byte_cnt += bytes_used;
1097 next_page++;
1100 region_size = addr_diff(alloc_region->free_pointer,
1101 alloc_region->start_addr);
1102 bytes_allocated += region_size;
1103 generations[gc_alloc_generation].bytes_allocated += region_size;
1105 gc_assert((byte_cnt- orig_first_page_bytes_used) == region_size);
1107 /* Set the generations alloc restart page to the last page of
1108 * the region. */
1109 set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0, next_page-1);
1111 /* Add the region to the new_areas if requested. */
1112 if (BOXED_PAGE_FLAG & page_type_flag)
1113 add_new_area(first_page,orig_first_page_bytes_used, region_size);
1116 FSHOW((stderr,
1117 "/gc_alloc_update_page_tables update %d bytes to gen %d\n",
1118 region_size,
1119 gc_alloc_generation));
1121 } else {
1122 /* There are no bytes allocated. Unallocate the first_page if
1123 * there are 0 bytes_used. */
1124 page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
1125 if (page_bytes_used(first_page) == 0)
1126 page_table[first_page].allocated = FREE_PAGE_FLAG;
1129 /* Unallocate any unused pages. */
1130 while (next_page <= alloc_region->last_page) {
1131 gc_assert(page_bytes_used(next_page) == 0);
1132 page_table[next_page].allocated = FREE_PAGE_FLAG;
1133 next_page++;
1135 ret = thread_mutex_unlock(&free_pages_lock);
1136 gc_assert(ret == 0);
1138 /* alloc_region is per-thread, we're ok to do this unlocked */
1139 gc_set_region_empty(alloc_region);
1142 /* Allocate a possibly large object. */
1143 void *
1144 gc_alloc_large(sword_t nbytes, int page_type_flag, struct alloc_region *alloc_region)
1146 boolean more;
1147 page_index_t first_page, next_page, last_page;
1148 os_vm_size_t byte_cnt;
1149 os_vm_size_t bytes_used;
1150 int ret;
1152 ret = thread_mutex_lock(&free_pages_lock);
1153 gc_assert(ret == 0);
1155 first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 1);
1156 // FIXME: really we want to try looking for space following the highest of
1157 // the last page of all other small object regions. That's impossible - there's
1158 // not enough information. At best we can skip some work in only the case where
1159 // the supplied region was the one most recently created. To do this right
1160 // would entail a malloc-like allocator at the page granularity.
1161 if (first_page <= alloc_region->last_page) {
1162 first_page = alloc_region->last_page+1;
1165 last_page=gc_find_freeish_pages(&first_page,nbytes, page_type_flag);
1167 gc_assert(first_page > alloc_region->last_page);
1169 set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 1, last_page);
1171 /* Large objects don't share pages with other objects. */
1172 gc_assert(page_bytes_used(first_page) == 0);
1174 /* Set up the pages. */
1175 page_table[first_page].allocated = page_type_flag;
1176 page_table[first_page].gen = gc_alloc_generation;
1177 page_table[first_page].large_object = 1;
1178 set_page_scan_start_offset(first_page, 0);
1180 byte_cnt = 0;
1182 /* Calc. the number of bytes used in this page. This is not
1183 * always the number of new bytes, unless it was free. */
1184 more = 0;
1185 if ((bytes_used = nbytes) > GENCGC_CARD_BYTES) {
1186 bytes_used = GENCGC_CARD_BYTES;
1187 more = 1;
1189 set_page_bytes_used(first_page, bytes_used);
1190 byte_cnt += bytes_used;
1192 next_page = first_page+1;
1194 /* All the rest of the pages should be free. We need to set their
1195 * scan_start_offset pointer to the start of the region, and set
1196 * the bytes_used. */
1197 while (more) {
1198 gc_assert(page_free_p(next_page));
1199 gc_assert(page_bytes_used(next_page) == 0);
1200 page_table[next_page].allocated = page_type_flag;
1201 page_table[next_page].gen = gc_alloc_generation;
1202 page_table[next_page].large_object = 1;
1204 set_page_scan_start_offset(next_page, npage_bytes(next_page-first_page));
1206 /* Calculate the number of bytes used in this page. */
1207 more = 0;
1208 bytes_used = nbytes - byte_cnt;
1209 if (bytes_used > GENCGC_CARD_BYTES) {
1210 bytes_used = GENCGC_CARD_BYTES;
1211 more = 1;
1213 set_page_bytes_used(next_page, bytes_used);
1214 page_table[next_page].write_protected=0;
1215 page_table[next_page].dont_move=0;
1216 byte_cnt += bytes_used;
1217 next_page++;
1220 gc_assert(byte_cnt == (size_t)nbytes);
1222 bytes_allocated += nbytes;
1223 generations[gc_alloc_generation].bytes_allocated += nbytes;
1225 /* Add the region to the new_areas if requested. */
1226 if (BOXED_PAGE_FLAG & page_type_flag)
1227 add_new_area(first_page, 0, nbytes);
1229 /* Bump up last_free_page */
1230 if (last_page+1 > last_free_page) {
1231 last_free_page = last_page+1;
1232 set_alloc_pointer((lispobj)(page_address(last_free_page)));
1234 ret = thread_mutex_unlock(&free_pages_lock);
1235 gc_assert(ret == 0);
1237 #ifdef READ_PROTECT_FREE_PAGES
1238 os_protect(page_address(first_page),
1239 npage_bytes(1+last_page-first_page),
1240 OS_VM_PROT_ALL);
1241 #endif
1243 zero_dirty_pages(first_page, last_page);
1245 return page_address(first_page);
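/* Illustrative sketch (assumed sizes): after gc_alloc_large() returns for a
 * request of 2.5 * GENCGC_CARD_BYTES starting at page P, the page table reads
 *   P   : bytes_used = GENCGC_CARD_BYTES,            scan_start_offset = 0
 *   P+1 : bytes_used = GENCGC_CARD_BYTES,            scan_start_offset = 1*GENCGC_CARD_BYTES
 *   P+2 : bytes_used = nbytes - 2*GENCGC_CARD_BYTES, scan_start_offset = 2*GENCGC_CARD_BYTES
 * with large_object = 1 and the requested page_type_flag on all three pages,
 * so page_scan_start() on any of them leads back to the object's start on
 * page P. */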
1248 static page_index_t gencgc_alloc_start_page = -1;
1250 void
1251 gc_heap_exhausted_error_or_lose (sword_t available, sword_t requested)
1253 struct thread *thread = arch_os_get_current_thread();
1254 /* Write basic information before doing anything else: if we don't
1255 * call to lisp this is a must, and even if we do there is always
1256 * the danger that we bounce back here before the error has been
1257 * handled, or indeed even printed.
1259 report_heap_exhaustion(available, requested, thread);
1260 if (gc_active_p || (available == 0)) {
1261 /* If we are in GC, or totally out of memory there is no way
1262 * to sanely transfer control to the lisp-side of things.
1264 lose("Heap exhausted, game over.");
1266 else {
1267 /* FIXME: assert free_pages_lock held */
1268 (void)thread_mutex_unlock(&free_pages_lock);
1269 #if !(defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD))
1270 gc_assert(get_pseudo_atomic_atomic(thread));
1271 clear_pseudo_atomic_atomic(thread);
1272 if (get_pseudo_atomic_interrupted(thread))
1273 do_pending_interrupt();
1274 #endif
1275 /* Another issue is that signalling HEAP-EXHAUSTED error leads
1276 * to running user code at arbitrary places, even in a
1277 * WITHOUT-INTERRUPTS which may lead to a deadlock without
1278 * running out of the heap. So at this point all bets are
1279 * off. */
1280 if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL)
1281 corruption_warning_and_maybe_lose
1282 ("Signalling HEAP-EXHAUSTED in a WITHOUT-INTERRUPTS.");
1283 /* available and requested should be double word aligned, thus
1284  they can be passed as fixnums and shifted later. */
1285 funcall2(StaticSymbolFunction(HEAP_EXHAUSTED_ERROR), available, requested);
1286 lose("HEAP-EXHAUSTED-ERROR fell through");
1290 page_index_t
1291 gc_find_freeish_pages(page_index_t *restart_page_ptr, sword_t bytes,
1292 int page_type_flag)
1294 page_index_t most_bytes_found_from = 0, most_bytes_found_to = 0;
1295 page_index_t first_page, last_page, restart_page = *restart_page_ptr;
1296 os_vm_size_t nbytes = bytes;
1297 os_vm_size_t nbytes_goal = nbytes;
1298 os_vm_size_t bytes_found = 0;
1299 os_vm_size_t most_bytes_found = 0;
1300 boolean small_object = nbytes < GENCGC_CARD_BYTES;
1301 /* FIXME: assert(free_pages_lock is held); */
1303 if (nbytes_goal < gencgc_alloc_granularity)
1304 nbytes_goal = gencgc_alloc_granularity;
1306 /* Toggled by gc_and_save for heap compaction, normally -1. */
1307 if (gencgc_alloc_start_page != -1) {
1308 restart_page = gencgc_alloc_start_page;
1311 /* FIXME: This is on bytes instead of nbytes pending cleanup of
1312 * long from the interface. */
1313 gc_assert(bytes>=0);
1314 /* Search for a page with at least nbytes of space. We prefer
1315 * not to split small objects on multiple pages, to reduce the
1316  * number of contiguous allocation regions spanning multiple
1317  * pages: this helps avoid excessive conservatism.
1319 * For other objects, we guarantee that they start on their own
1320 * page boundary.
1322 first_page = restart_page;
1323 while (first_page < page_table_pages) {
1324 bytes_found = 0;
1325 if (page_free_p(first_page)) {
1326 gc_assert(0 == page_bytes_used(first_page));
1327 bytes_found = GENCGC_CARD_BYTES;
1328 } else if (small_object &&
1329 (page_table[first_page].allocated == page_type_flag) &&
1330 (!page_table[first_page].large_object) &&
1331 (page_table[first_page].gen == gc_alloc_generation) &&
1332 (!page_table[first_page].write_protected) &&
1333 (!page_table[first_page].dont_move)) {
1334 bytes_found = GENCGC_CARD_BYTES - page_bytes_used(first_page);
1335 if (bytes_found < nbytes) {
1336 if (bytes_found > most_bytes_found)
1337 most_bytes_found = bytes_found;
1338 first_page++;
1339 continue;
1341 } else {
1342 first_page++;
1343 continue;
1346 gc_assert(!page_table[first_page].write_protected);
1347 for (last_page = first_page+1;
1348 ((last_page < page_table_pages) &&
1349 page_free_p(last_page) &&
1350 (bytes_found < nbytes_goal));
1351 last_page++) {
1352 bytes_found += GENCGC_CARD_BYTES;
1353 gc_assert(0 == page_bytes_used(last_page));
1354 gc_assert(!page_table[last_page].write_protected);
1357 if (bytes_found > most_bytes_found) {
1358 most_bytes_found = bytes_found;
1359 most_bytes_found_from = first_page;
1360 most_bytes_found_to = last_page;
1362 if (bytes_found >= nbytes_goal)
1363 break;
1365 first_page = last_page;
1368 bytes_found = most_bytes_found;
1369 restart_page = first_page + 1;
1371 /* Check for a failure */
1372 if (bytes_found < nbytes) {
1373 gc_assert(restart_page >= page_table_pages);
1374 gc_heap_exhausted_error_or_lose(most_bytes_found, nbytes);
1377 gc_assert(most_bytes_found_to);
1378 *restart_page_ptr = most_bytes_found_from;
1379 return most_bytes_found_to-1;
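/* A minimal sketch (not part of the build) of the calling convention used by
 * gc_alloc_new_region() and gc_alloc_large(): the hint page is passed by
 * reference and updated to the first page found, while the return value is the
 * last page (inclusive). The function name and byte count are made up. */
#if 0
static void example_find_freeish_pages(void)
{
    /* free_pages_lock must be held around this, as in the real callers. */
    page_index_t first = generation_alloc_start_page(gc_alloc_generation,
                                                     BOXED_PAGE_FLAG, 0);
    page_index_t last  = gc_find_freeish_pages(&first, 4096, BOXED_PAGE_FLAG);
    /* On return, pages first..last (inclusive) provide at least 4096 usable
     * bytes; on failure the call does not return here at all but goes through
     * gc_heap_exhausted_error_or_lose(). */
    (void)last;
}
#endif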
1382 /* Allocate bytes. All the rest of the special-purpose allocation
1383 * functions will eventually call this */
1385 void *
1386 gc_alloc_with_region(sword_t nbytes,int page_type_flag, struct alloc_region *my_region,
1387 int quick_p)
1389 void *new_free_pointer;
1391 if (nbytes>=LARGE_OBJECT_SIZE)
1392 return gc_alloc_large(nbytes, page_type_flag, my_region);
1394 /* Check whether there is room in the current alloc region. */
1395 new_free_pointer = (char*)my_region->free_pointer + nbytes;
1397 /* fprintf(stderr, "alloc %d bytes from %p to %p\n", nbytes,
1398 my_region->free_pointer, new_free_pointer); */
1400 if (new_free_pointer <= my_region->end_addr) {
1401 /* If so then allocate from the current alloc region. */
1402 void *new_obj = my_region->free_pointer;
1403 my_region->free_pointer = new_free_pointer;
1405 /* Unless a `quick' alloc was requested, check whether the
1406 alloc region is almost empty. */
1407 if (!quick_p &&
1408 addr_diff(my_region->end_addr,my_region->free_pointer) <= 32) {
1409 /* If so, finished with the current region. */
1410 gc_alloc_update_page_tables(page_type_flag, my_region);
1411 /* Set up a new region. */
1412 gc_alloc_new_region(32 /*bytes*/, page_type_flag, my_region);
1415 return((void *)new_obj);
1418 /* Else not enough free space in the current region: retry with a
1419 * new region. */
1421 gc_alloc_update_page_tables(page_type_flag, my_region);
1422 gc_alloc_new_region(nbytes, page_type_flag, my_region);
1423 return gc_alloc_with_region(nbytes, page_type_flag, my_region,0);
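/* A minimal sketch (not part of the build) of what a caller of
 * gc_alloc_with_region() gets; the function name is hypothetical. */
#if 0
static void example_alloc_with_region(void)
{
    /* Ask for two words from the shared boxed region, non-quick. */
    lispobj *p = gc_alloc_with_region(2 * N_WORD_BYTES, BOXED_PAGE_FLAG,
                                      &boxed_region, 0);
    /* Fast path: just a bump of boxed_region.free_pointer. Slow path (region
     * full): the region is closed via gc_alloc_update_page_tables() and a
     * fresh one is opened before retrying. quick_p = 1 would skip the
     * "almost empty" check, for callers that will allocate again at once. */
    (void)p;
}
#endif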
1426 /* Copy a large object. If the object is in a large object region then
1427 * it is simply promoted, else it is copied. If it's large enough then
1428 * it's copied to a large object region.
1430 * Bignums and vectors may have shrunk. If the object is not copied
1431 * the space needs to be reclaimed, and the page_tables corrected. */
1432 static lispobj
1433 general_copy_large_object(lispobj object, word_t nwords, boolean boxedp)
1435 lispobj *new;
1436 page_index_t first_page;
1438 CHECK_COPY_PRECONDITIONS(object, nwords);
1440 if ((nwords > 1024*1024) && gencgc_verbose) {
1441 FSHOW((stderr, "/general_copy_large_object: %d bytes\n",
1442 nwords*N_WORD_BYTES));
1445 /* Check whether it's a large object. */
1446 first_page = find_page_index((void *)object);
1447 gc_assert(first_page >= 0);
1449 if (page_table[first_page].large_object) {
1450 /* Promote the object. Note: Unboxed objects may have been
1451 * allocated to a BOXED region so it may be necessary to
1452 * change the region to UNBOXED. */
1453 os_vm_size_t remaining_bytes;
1454 os_vm_size_t bytes_freed;
1455 page_index_t next_page;
1456 page_bytes_t old_bytes_used;
1458 /* FIXME: This comment is somewhat stale.
1460 * Note: Any page write-protection must be removed, else a
1461 * later scavenge_newspace may incorrectly not scavenge these
1462 * pages. This would not be necessary if they are added to the
1463 * new areas, but let's do it for them all (they'll probably
1464 * be written anyway?). */
1466 gc_assert(page_starts_contiguous_block_p(first_page));
1467 next_page = first_page;
1468 remaining_bytes = nwords*N_WORD_BYTES;
1470 while (remaining_bytes > GENCGC_CARD_BYTES) {
1471 gc_assert(page_table[next_page].gen == from_space);
1472 gc_assert(page_table[next_page].large_object);
1473 gc_assert(page_scan_start_offset(next_page) ==
1474 npage_bytes(next_page-first_page));
1475 gc_assert(page_bytes_used(next_page) == GENCGC_CARD_BYTES);
1476 /* Should have been unprotected by unprotect_oldspace()
1477 * for boxed objects, and after promotion unboxed ones
1478 * should not be on protected pages at all. */
1479 gc_assert(!page_table[next_page].write_protected);
1481 if (boxedp)
1482 gc_assert(page_boxed_p(next_page));
1483 else {
1484 gc_assert(page_allocated_no_region_p(next_page));
1485 page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
1487 page_table[next_page].gen = new_space;
1489 remaining_bytes -= GENCGC_CARD_BYTES;
1490 next_page++;
1493 /* Now only one page remains, but the object may have shrunk so
1494 * there may be more unused pages which will be freed. */
1496 /* Object may have shrunk but shouldn't have grown - check. */
1497 gc_assert(page_bytes_used(next_page) >= remaining_bytes);
1499 page_table[next_page].gen = new_space;
1501 if (boxedp)
1502 gc_assert(page_boxed_p(next_page));
1503 else
1504 page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
1506 /* Adjust the bytes_used. */
1507 old_bytes_used = page_bytes_used(next_page);
1508 set_page_bytes_used(next_page, remaining_bytes);
1510 bytes_freed = old_bytes_used - remaining_bytes;
1512 /* Free any remaining pages; needs care. */
1513 next_page++;
1514 while ((old_bytes_used == GENCGC_CARD_BYTES) &&
1515 (page_table[next_page].gen == from_space) &&
1516 /* FIXME: It is not obvious to me why this is necessary
1517 * as a loop condition: it seems to me that the
1518 * scan_start_offset test should be sufficient, but
1519 * experimentally that is not the case. --NS
1520 * 2011-11-28 */
1521 (boxedp ?
1522 page_boxed_p(next_page) :
1523 page_allocated_no_region_p(next_page)) &&
1524 page_table[next_page].large_object &&
1525 (page_scan_start_offset(next_page) ==
1526 npage_bytes(next_page - first_page))) {
1527  /* Checks out OK, free the page. Don't need to bother zeroing
1528  * pages as this should have been done before shrinking the
1529  * object. These pages shouldn't be write-protected; even if
1530  * boxed they should be zero filled. */
1531 gc_assert(!page_table[next_page].write_protected);
1533 old_bytes_used = page_bytes_used(next_page);
1534 page_table[next_page].allocated = FREE_PAGE_FLAG;
1535 set_page_bytes_used(next_page, 0);
1536 bytes_freed += old_bytes_used;
1537 next_page++;
1540 if ((bytes_freed > 0) && gencgc_verbose) {
1541 FSHOW((stderr,
1542 "/general_copy_large_object bytes_freed=%"OS_VM_SIZE_FMT"\n",
1543 bytes_freed));
1546 generations[from_space].bytes_allocated -= nwords*N_WORD_BYTES
1547 + bytes_freed;
1548 generations[new_space].bytes_allocated += nwords*N_WORD_BYTES;
1549 bytes_allocated -= bytes_freed;
1551 /* Add the region to the new_areas if requested. */
1552 if (boxedp)
1553 add_new_area(first_page,0,nwords*N_WORD_BYTES);
1555 return(object);
1557 } else {
1558 /* Allocate space. */
1559 new = gc_general_alloc(nwords*N_WORD_BYTES,
1560 (boxedp ? BOXED_PAGE_FLAG : UNBOXED_PAGE_FLAG),
1561 ALLOC_QUICK);
1563 /* Copy the object. */
1564 memcpy(new,native_pointer(object),nwords*N_WORD_BYTES);
1566 /* Return Lisp pointer of new object. */
1567 return make_lispobj(new, lowtag_of(object));
1571 lispobj
1572 copy_large_object(lispobj object, sword_t nwords)
1574 return general_copy_large_object(object, nwords, 1);
1577 lispobj
1578 copy_large_unboxed_object(lispobj object, sword_t nwords)
1580 return general_copy_large_object(object, nwords, 0);
1583 /* to copy unboxed objects */
1584 lispobj
1585 copy_unboxed_object(lispobj object, sword_t nwords)
1587 return gc_general_copy_object(object, nwords, UNBOXED_PAGE_FLAG);
1590 static lispobj
1591 trans_boxed_large(lispobj object)
1593 gc_assert(is_lisp_pointer(object));
1594 return copy_large_object(object,
1595 (HeaderValue(*native_pointer(object)) | 1) + 1);
1599 * weak pointers
1602 /* XX This is a hack adapted from cgc.c. These don't work too
1603 * efficiently with the gencgc as a list of the weak pointers is
1604 * maintained within the objects which causes writes to the pages. A
1605 * limited attempt is made to avoid unnecessary writes, but this needs
1606 * a re-think. */
1607 /* FIXME: now that we have non-Lisp hashtables in the GC, it might make sense
1608 * to stop chaining weak pointers through a slot in the object, as a remedy to
1609 * the above concern. It would also shorten the object by 2 words. */
1610 static sword_t
1611 scav_weak_pointer(lispobj *where, lispobj object)
1613 /* Since we overwrite the 'next' field, we have to make
1614 * sure not to do so for pointers already in the list.
1615 * Instead of searching the list of weak_pointers each
1616 * time, we ensure that next is always NULL when the weak
1617 * pointer isn't in the list, and not NULL otherwise.
1618 * Since we can't use NULL to denote end of list, we
1619 * use a pointer back to the same weak_pointer.
1621 struct weak_pointer * wp = (struct weak_pointer*)where;
1623 if (NULL == wp->next && weak_pointer_breakable_p(wp)) {
1624 wp->next = weak_pointers;
1625 weak_pointers = wp;
1626 if (NULL == wp->next)
1627 wp->next = wp;
1630 /* Do not let GC scavenge the value slot of the weak pointer.
1631 * (That is why it is a weak pointer.) */
1633 return WEAK_POINTER_NWORDS;
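/* A minimal sketch (not part of the build) of the self-pointer convention
 * established above: wp->next == NULL means "not on the list", and the last
 * element points at itself instead of NULL. A traversal therefore looks like
 * the loop below; the function and variable names are hypothetical, and the
 * real consumer of this list lives in the generic GC code. */
#if 0
static void example_walk_weak_pointer_list(void)
{
    struct weak_pointer *wp = weak_pointers, *next;
    while (wp != NULL) {
        next = (wp->next == wp) ? NULL : wp->next;  /* self-link = end of list */
        /* ... examine wp->value here ... */
        wp = next;
    }
}
#endif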
1637 lispobj *
1638 search_read_only_space(void *pointer)
1640 lispobj *start = (lispobj *) READ_ONLY_SPACE_START;
1641 lispobj *end = (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0);
1642 if ((pointer < (void *)start) || (pointer >= (void *)end))
1643 return NULL;
1644 return gc_search_space(start, pointer);
1647 lispobj *
1648 search_static_space(void *pointer)
1650 lispobj *start = (lispobj *)STATIC_SPACE_START;
1651 lispobj *end = (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0);
1652 if ((pointer < (void *)start) || (pointer >= (void *)end))
1653 return NULL;
1654 return gc_search_space(start, pointer);
1657 /* a faster version for searching the dynamic space. This will work even
1658 * if the object is in a current allocation region. */
1659 lispobj *
1660 search_dynamic_space(void *pointer)
1662 page_index_t page_index = find_page_index(pointer);
1663 lispobj *start;
1665 /* The address may be invalid, so do some checks. */
1666 if ((page_index == -1) || page_free_p(page_index))
1667 return NULL;
1668 start = (lispobj *)page_scan_start(page_index);
1669 return gc_search_space(start, pointer);
1672 #ifndef GENCGC_IS_PRECISE
1673 // Return the starting address of the object containing 'addr'
1674 // if and only if the object is one which would be evacuated from 'from_space'
1675 // were it allowed to be either discarded as garbage or moved.
1676 // 'addr_page_index' is the page containing 'addr' and must not be -1.
1677 // Return 0 if there is no such object - that is, if addr is past the
1678 // end of the used bytes, or its pages are not in 'from_space' etc.
1679 static lispobj*
1680 conservative_root_p(void *addr, page_index_t addr_page_index)
1682 /* quick check 1: Address is quite likely to have been invalid. */
1683 struct page* page = &page_table[addr_page_index];
1684 if (page->gen != from_space ||
1685 #ifdef LISP_FEATURE_SEGREGATED_CODE
1686 (!is_lisp_pointer((lispobj)addr) && page->allocated != CODE_PAGE_FLAG) ||
1687 #endif
1688 ((uword_t)addr & (GENCGC_CARD_BYTES - 1)) > page_bytes_used(addr_page_index) ||
1689 (page->large_object && page->dont_move))
1690 return 0;
1691 gc_assert(!(page->allocated & OPEN_REGION_PAGE_FLAG));
1693 #ifdef LISP_FEATURE_SEGREGATED_CODE
1694 /* quick check 2: Unless the page can hold code, the pointer's lowtag must
1695 * correspond to the widetag of the object. The object header can safely
1696 * be read even if it turns out that the pointer is not valid,
1697 * because the pointer was in bounds for the page.
1698 * Note that this can falsely pass if looking at the interior of an unboxed
1699 * array that masquerades as a Lisp object header by pure luck.
1700 * But if this doesn't pass, there's no point in proceeding to the
1701 * definitive test which involves searching for the containing object. */
1703 if (page->allocated != CODE_PAGE_FLAG) {
1704 lispobj* obj = native_pointer((lispobj)addr);
1705 if (lowtag_of((lispobj)addr) == LIST_POINTER_LOWTAG) {
1706 if (!is_cons_half(obj[0]) || !is_cons_half(obj[1]))
1707 return 0;
1708 } else {
1709 unsigned char widetag = widetag_of(*obj);
1710 if (!other_immediate_lowtag_p(widetag) ||
1711 lowtag_of((lispobj)addr) != lowtag_for_widetag[widetag>>2])
1712 return 0;
1715 #endif
1717 /* Filter out anything which can't be a pointer to a Lisp object
1718 * (or, as a special case which also requires dont_move, a return
1719 * address referring to something in a CodeObject). This is
1720 * expensive but important, since it vastly reduces the
1721 * probability that random garbage will be bogusly interpreted as
1722 * a pointer which prevents a page from moving. */
1723 lispobj* object_start = search_dynamic_space(addr);
1724 if (!object_start) return 0;
1726 /* If the containing object is a code object and 'addr' points
1727 * anywhere beyond the boxed words,
1728 * presume it to be a valid unboxed return address. */
1729 if (instruction_ptr_p(addr, object_start))
1730 return object_start;
1732 /* Large object pages only contain ONE object, and it will never
1733 * be a CONS. However, arrays and bignums can be allocated larger
1734 * than necessary and then shrunk to fit, leaving what look like
1735 * (0 . 0) CONSes at the end. These appear valid to
1736 * properly_tagged_descriptor_p(), so pick them off here. */
1737 if (((lowtag_of((lispobj)addr) == LIST_POINTER_LOWTAG) &&
1738 page_table[addr_page_index].large_object)
1739 || !properly_tagged_descriptor_p(addr, object_start))
1740 return 0;
1742 return object_start;
1744 #endif
1746 /* Adjust large bignum and vector objects. This will adjust the
1747 * allocated region if the size has shrunk, and move unboxed objects
1748 * into unboxed pages. The pages are not promoted here, and the
1749 * promoted region is not added to the new_regions; this is really
1750 * only designed to be called from preserve_pointer(). Nothing should
1751 * fail if this step is missed; it merely delays the moving of objects
1752 * to unboxed pages, and the freeing of pages. */
1753 static void
1754 maybe_adjust_large_object(page_index_t first_page)
1756 lispobj* where = (lispobj*)page_address(first_page);
1757 page_index_t next_page;
1759 uword_t remaining_bytes;
1760 uword_t bytes_freed;
1761 uword_t old_bytes_used;
1763 int page_type_flag;
1765 /* Check whether it's a vector or bignum object. */
1766 lispobj widetag = widetag_of(where[0]);
1767 if (widetag == SIMPLE_VECTOR_WIDETAG)
1768 page_type_flag = BOXED_PAGE_FLAG;
1769 else if (specialized_vector_widetag_p(widetag) || widetag == BIGNUM_WIDETAG)
1770 page_type_flag = UNBOXED_PAGE_FLAG;
1771 else
1772 return;
1774 /* Find its current size. */
1775 sword_t nwords = sizetab[widetag](where);
1777 /* Note: Any page write-protection must be removed, else a later
1778 * scavenge_newspace may incorrectly not scavenge these pages.
1779 * This would not be necessary if they are added to the new areas,
1780 * but let's do it for them all (they'll probably be written
1781 * anyway?). */
1783 gc_assert(page_starts_contiguous_block_p(first_page));
1785 next_page = first_page;
1786 remaining_bytes = nwords*N_WORD_BYTES;
1787 while (remaining_bytes > GENCGC_CARD_BYTES) {
1788 gc_assert(page_table[next_page].gen == from_space);
1789 // We can't assert that page_table[next_page].allocated is correct,
1790 // because unboxed objects are initially allocated on boxed pages.
1791 gc_assert(page_allocated_no_region_p(next_page));
1792 gc_assert(page_table[next_page].large_object);
1793 gc_assert(page_scan_start_offset(next_page) ==
1794 npage_bytes(next_page-first_page));
1795 gc_assert(page_bytes_used(next_page) == GENCGC_CARD_BYTES);
1797 // This affects only one object, since large objects don't share pages.
1798 page_table[next_page].allocated = page_type_flag;
1800 /* Shouldn't be write-protected at this stage. Essential that the
1801 * pages aren't. */
1802 gc_assert(!page_table[next_page].write_protected);
1803 remaining_bytes -= GENCGC_CARD_BYTES;
1804 next_page++;
1807 /* Now only one page remains, but the object may have shrunk so
1808 * there may be more unused pages which will be freed. */
1810 /* Object may have shrunk but shouldn't have grown - check. */
1811 gc_assert(page_bytes_used(next_page) >= remaining_bytes);
1813 page_table[next_page].allocated = page_type_flag;
1815 /* Adjust the bytes_used. */
1816 old_bytes_used = page_bytes_used(next_page);
1817 set_page_bytes_used(next_page, remaining_bytes);
1819 bytes_freed = old_bytes_used - remaining_bytes;
1821 /* Free any remaining pages; needs care. */
1822 next_page++;
1823 while ((old_bytes_used == GENCGC_CARD_BYTES) &&
1824 (page_table[next_page].gen == from_space) &&
1825 page_allocated_no_region_p(next_page) &&
1826 page_table[next_page].large_object &&
1827 (page_scan_start_offset(next_page) ==
1828 npage_bytes(next_page - first_page))) {
1829 /* It checks out OK, free the page. We don't need to bother zeroing
1830 * pages as this should have been done before shrinking the
1831 * object. These pages shouldn't be write protected as they
1832 * should be zero filled. */
1833 gc_assert(!page_table[next_page].write_protected);
1835 old_bytes_used = page_bytes_used(next_page);
1836 page_table[next_page].allocated = FREE_PAGE_FLAG;
1837 set_page_bytes_used(next_page, 0);
1838 bytes_freed += old_bytes_used;
1839 next_page++;
1842 if ((bytes_freed > 0) && gencgc_verbose) {
1843 FSHOW((stderr,
1844 "/maybe_adjust_large_object() freed %d\n",
1845 bytes_freed));
1848 generations[from_space].bytes_allocated -= bytes_freed;
1849 bytes_allocated -= bytes_freed;
1851 return;
1854 #ifdef PIN_GRANULARITY_LISPOBJ
1855 /* After scavenging of the roots is done, we go back to the pinned objects
1856 * and look within them for pointers. While heap_scavenge() could certainly
1857 * do this, it would potentially lead to extra work, since we can't know
1858 * whether any given object has been examined at least once (there is
1859 * no telltale forwarding-pointer). The easiest thing to do is defer all
1860 * pinned objects to a subsequent pass, as is done here.
1862 static void
1863 scavenge_pinned_ranges()
1865 int i;
1866 lispobj key;
1867 for_each_hopscotch_key(i, key, pinned_objects) {
1868 lispobj* obj = native_pointer(key);
1869 lispobj header = *obj;
1870 // Never invoke scavenger on a simple-fun, just code components.
1871 if (is_cons_half(header))
1872 scavenge(obj, 2);
1873 else if (widetag_of(header) != SIMPLE_FUN_WIDETAG)
1874 scavtab[widetag_of(header)](obj, header);
1878 /* Create an array of fixnums to consume the space between 'from' and 'to' */
1879 static void deposit_filler(uword_t from, uword_t to)
1881 if (to > from) {
1882 lispobj* where = (lispobj*)from;
1883 sword_t nwords = (to - from) >> WORD_SHIFT;
1884 where[0] = SIMPLE_ARRAY_WORD_WIDETAG;
1885 where[1] = make_fixnum(nwords - 2);
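        /* Worked example, for illustration: filling a 4-word gap at 'from'
         * yields a (simple-array word (2)) covering exactly that range:
         *   where[0] = SIMPLE_ARRAY_WORD_WIDETAG   -- header
         *   where[1] = make_fixnum(2)              -- length = nwords - 2
         *   where[2], where[3]                     -- payload, never scanned
         *     as pointers since the array is unboxed. */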
1889 /* Zero out the byte ranges on small object pages marked dont_move,
1890 * carefully skipping over objects in the pin hashtable.
1891 * TODO: by recording an additional bit per page indicating whether
1892 * there is more than one pinned object on it, we could avoid qsort()
1893 * except in the case where there is more than one. */
1894 static void
1895 wipe_nonpinned_words()
1897 void gc_heapsort_uwords(uword_t*, int);
1898 // Loop over the keys in pinned_objects and pack them densely into
1899 // the same array - pinned_objects.keys[] - but skip any simple-funs.
1900 // Admittedly this is abstraction breakage.
1901 int limit = hopscotch_max_key_index(pinned_objects);
1902 int n_pins = 0, i;
1903 for (i = 0; i <= limit; ++i) {
1904 lispobj key = pinned_objects.keys[i];
1905 if (key) {
1906 lispobj* obj = native_pointer(key);
1907 // No need to check for is_cons_half() - it will be false
1908 // on a simple-fun header, and that's the correct answer.
1909 if (widetag_of(*obj) != SIMPLE_FUN_WIDETAG)
1910 pinned_objects.keys[n_pins++] = (uword_t)obj;
1913 // Store a sentinel at the end. Even if n_pins = table capacity (unlikely),
1914 // it is safe to write one more word, because the hops[] array immediately
1915 // follows the keys[] array in memory. At worst, 2 elements of hops[]
1916 // are clobbered, which is irrelevant since the table has already been
1917 // rendered unusable by stealing its key array for a different purpose.
1918 pinned_objects.keys[n_pins] = 0;
1919 // Don't touch pinned_objects.count in case the reset function uses it
1920 // to decide how to resize for next use (which it doesn't, but could).
1921 gc_n_stack_pins = n_pins;
1922 // Order by ascending address, stopping short of the sentinel.
1923 gc_heapsort_uwords(pinned_objects.keys, n_pins);
1924 #if 0
1925 printf("Sorted pin list:\n");
1926 for (i = 0; i < n_pins; ++i) {
1927 lispobj* obj = (lispobj*)pinned_objects.keys[i];
1928 if (!is_cons_half(*obj))
1929 printf("%p: %5d words\n", obj, (int)sizetab[widetag_of(*obj)](obj));
1930 else printf("%p: CONS\n", obj);
1932 #endif
1933 // Each entry in the pinned objects demarcates two ranges to be cleared:
1934 // - the range preceding it back to either the page start, or prior object.
1935 // - the range after it, up to the lesser of page bytes used or next object.
1936 uword_t preceding_object = 0;
1937 uword_t this_page_end = 0;
1938 #define page_base_address(x) (x&~(GENCGC_CARD_BYTES-1))
1939 for (i = 0; i < n_pins; ++i) {
1940 // Handle the preceding range. If this object is on the same page as
1941 // its predecessor, then intervening bytes were already zeroed.
1942 // If not, then start a new page and do some bookkeeping.
1943 lispobj* obj = (lispobj*)pinned_objects.keys[i];
1944 uword_t this_page_base = page_base_address((uword_t)obj);
1945 /* printf("i=%d obj=%p base=%p\n", i, obj, (void*)this_page_base); */
1946 if (this_page_base > page_base_address(preceding_object)) {
1947 deposit_filler(this_page_base, (lispobj)obj);
1948 // Move the page to newspace
1949 page_index_t page = find_page_index(obj);
1950 int used = page_bytes_used(page);
1951 this_page_end = this_page_base + used;
1952 /* printf(" Clearing %p .. %p (limit=%p)\n",
1953 (void*)this_page_base, obj, (void*)this_page_end); */
1954 generations[new_space].bytes_allocated += used;
1955 generations[page_table[page].gen].bytes_allocated -= used;
1956 page_table[page].gen = new_space;
1957 page_table[page].has_pins = 0;
1959 // Handle the following range.
1960 lispobj word = *obj;
1961 size_t nwords = is_cons_half(word) ? 2 : sizetab[widetag_of(word)](obj);
1962 uword_t range_start = (uword_t)(obj + nwords);
1963 uword_t range_end = this_page_end;
1964 // There is always an i+1'th key due to the sentinel value.
1965 if (page_base_address(pinned_objects.keys[i+1]) == this_page_base)
1966 range_end = pinned_objects.keys[i+1];
1967 /* printf(" Clearing %p .. %p\n", (void*)range_start, (void*)range_end); */
1968 deposit_filler(range_start, range_end);
1969 preceding_object = (uword_t)obj;
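        /* Worked example, for illustration: with two pinned objects A < B on
         * the same card, this loop issues
         *   deposit_filler(card_base, A)           -- gap before A
         *   deposit_filler(A_end,     B)           -- gap between A and B
         *   deposit_filler(B_end,     card_base + page_bytes_used(page))
         * where A_end/B_end denote the first word past each object. */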
1973 /* Add 'object' to the hashtable, and if the object is a code component,
1974 * then also add all of the embedded simple-funs.
1975 * The rationale for the extra work on code components is that without it,
1976 * every test of pinned_p() on an object would have to check if the pointer
1977 * is to a simple-fun - entailing an extra read of the header - and mapping
1978 * to its code component if so. Since more calls to pinned_p occur than to
1979 * pin_object, the extra burden should be on this function.
1980 * Experimentation bears out that this is the better technique.
1981 * Also, we wouldn't often expect code components in the collected generation
1982 * so the extra work here is quite minimal, even if it can generally add to
1983 * the number of keys in the hashtable.
1985 static void
1986 pin_object(lispobj object)
1988 if (!hopscotch_containsp(&pinned_objects, object)) {
1989 hopscotch_insert(&pinned_objects, object, 1);
1990 struct code* maybe_code = (struct code*)native_pointer(object);
1991 if (widetag_of(maybe_code->header) == CODE_HEADER_WIDETAG) {
1992 for_each_simple_fun(i, fun, maybe_code, 0, {
1993 hopscotch_insert(&pinned_objects,
1994 make_lispobj(fun, FUN_POINTER_LOWTAG),
2000 #else
2001 # define scavenge_pinned_ranges()
2002 # define wipe_nonpinned_words()
2003 #endif
2005 /* Take a possible pointer to a Lisp object and mark its page in the
2006 * page_table so that it will not be relocated during a GC.
2008 * This involves locating the page it points to, then backing up to
2009 * the start of its region, then marking all pages dont_move from there
2010 * up to the first page that's not full or has a different generation
2012 * It is assumed that all the page static flags have been cleared at
2013 * the start of a GC.
2015 * It is also assumed that the current gc_alloc() region has been
2016 * flushed and the tables updated. */
2018 // TODO: there's probably a way to be a little more efficient here.
2019 // As things are, we start by finding the object that encloses 'addr',
2020 // then we see if 'addr' was a "valid" Lisp pointer to that object
2021 // - meaning we expect the correct lowtag on the pointer - except
2022 // that for code objects we don't require a correct lowtag
2023 // and we allow a pointer to anywhere in the object.
2025 // It should be possible to avoid calling search_dynamic_space
2026 // more of the time. First, check if the page pointed to might hold code.
2027 // If it does, then we continue regardless of the pointer's lowtag
2028 // (because of the special allowance). If the page definitely does *not*
2029 // hold code, then we require up front that the lowtag make sense,
2030 // by doing the same checks that are in properly_tagged_descriptor_p.
2032 // Problem: when code is allocated from a per-thread region,
2033 // does it ensure that the occupied pages are flagged as having code?
2035 #if defined(__GNUC__) && defined(MEMORY_SANITIZER)
2036 #define NO_SANITIZE_MEMORY __attribute__((no_sanitize_memory))
2037 #else
2038 #define NO_SANITIZE_MEMORY
2039 #endif
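
/* The exemption above is presumably needed because preserve_pointer(),
 * defined below, examines raw words taken from C stacks and signal
 * contexts, which MemorySanitizer may regard as uninitialized reads. */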
2041 static void NO_SANITIZE_MEMORY
2042 preserve_pointer(void *addr)
2044 #ifdef LISP_FEATURE_IMMOBILE_SPACE
2045 /* Immobile space MUST be lower than dynamic space,
2046 or else this test needs to be revised */
2047 if (addr < (void*)IMMOBILE_SPACE_END) {
2048 extern void immobile_space_preserve_pointer(void*);
2049 immobile_space_preserve_pointer(addr);
2050 return;
2052 #endif
2053 page_index_t addr_page_index = find_page_index(addr);
2055 #ifdef GENCGC_IS_PRECISE
2056 /* If we're in precise gencgc (non-x86oid as of this writing) then
2057 * we are only called on valid object pointers in the first place,
2058 * so we just have to do a bounds-check against the heap, a
2059 * generation check, and the already-pinned check. */
2060 if (addr_page_index == -1
2061 || (page_table[addr_page_index].gen != from_space)
2062 || page_table[addr_page_index].dont_move)
2063 return;
2064 #else
2065 lispobj *object_start;
2066 if (addr_page_index == -1
2067 || (object_start = conservative_root_p(addr, addr_page_index)) == 0)
2068 return;
2069 #endif
2071 /* (Now that we know that addr_page_index is in range, it's
2072 * safe to index into page_table[] with it.) */
2073 unsigned int region_allocation = page_table[addr_page_index].allocated;
2075 /* Find the beginning of the region. Note that there may be
2076 * objects in the region preceding the one that we were passed a
2077 * pointer to: if this is the case, we will write-protect all the
2078 * previous objects' pages too. */
2080 #if 0
2081 /* I think this'd work just as well, but without the assertions.
2082 * -dan 2004.01.01 */
2083 page_index_t first_page = find_page_index(page_scan_start(addr_page_index))
2084 #else
2085 page_index_t first_page = addr_page_index;
2086 while (!page_starts_contiguous_block_p(first_page)) {
2087 --first_page;
2088 /* Do some checks. */
2089 gc_assert(page_bytes_used(first_page) == GENCGC_CARD_BYTES);
2090 gc_assert(page_table[first_page].gen == from_space);
2091 gc_assert(page_table[first_page].allocated == region_allocation);
2093 #endif
2095 /* Adjust any large objects before promotion as they won't be
2096 * copied after promotion. */
2097 if (page_table[first_page].large_object) {
2098 maybe_adjust_large_object(first_page);
2099 /* It may have moved to unboxed pages. */
2100 region_allocation = page_table[first_page].allocated;
2103 /* Now work forward until the end of this contiguous area is found,
2104 * marking all pages as dont_move. */
2105 page_index_t i;
2106 for (i = first_page; ;i++) {
2107 gc_assert(page_table[i].allocated == region_allocation);
2109 /* Mark the page static. */
2110 page_table[i].dont_move = 1;
2112 /* It is essential that the pages are not write protected as
2113 * they may have pointers into the old-space which need
2114 * scavenging. They shouldn't be write protected at this
2115 * stage. */
2116 gc_assert(!page_table[i].write_protected);
2118 /* Check whether this is the last page in this contiguous block.. */
2119 if (page_ends_contiguous_block_p(i, from_space))
2120 break;
2123 #ifdef PIN_GRANULARITY_LISPOBJ
2124 /* Do not do this for multi-page objects. Those pages do not need
2125 * object wipeout anyway. */
2126 if (i == first_page) { // single-page object
2127 lispobj word = *object_start;
2128 int lowtag = is_cons_half(word) ?
2129 LIST_POINTER_LOWTAG : lowtag_for_widetag[widetag_of(word)>>2];
2130 pin_object(make_lispobj(object_start, lowtag));
2131 page_table[i].has_pins = 1;
2133 #endif
2135 /* Check that the page is now static. */
2136 gc_assert(page_table[addr_page_index].dont_move != 0);
2140 #define IN_REGION_P(a,kind) (kind##_region.start_addr<=a && a<=kind##_region.free_pointer)
2141 #ifdef LISP_FEATURE_SEGREGATED_CODE
2142 #define IN_BOXED_REGION_P(a) IN_REGION_P(a,boxed)||IN_REGION_P(a,code)
2143 #else
2144 #define IN_BOXED_REGION_P(a) IN_REGION_P(a,boxed)
2145 #endif
2147 /* If the given page is not write-protected, then scan it for pointers
2148 * to younger generations or the top temp. generation; if no
2149 * suspicious pointers are found, the page is write-protected.
2151 * Care is taken to check for pointers to the current gc_alloc()
2152 * region if it is a younger generation or the temp. generation. This
2153 * frees the caller from doing a gc_alloc_update_page_tables(). Actually
2154 * the gc_alloc_generation does not need to be checked as this is only
2155 * called from scavenge_generation() when the gc_alloc generation is
2156 * younger, so it just checks if there is a pointer to the current
2157 * region.
2159 * We return 1 if the page was write-protected, else 0. */
2160 static int
2161 update_page_write_prot(page_index_t page)
2163 generation_index_t gen = page_table[page].gen;
2164 sword_t j;
2165 int wp_it = 1;
2166 void **page_addr = (void **)page_address(page);
2167 sword_t num_words = page_bytes_used(page) / N_WORD_BYTES;
2169 /* Shouldn't be a free page. */
2170 gc_assert(!page_free_p(page));
2171 gc_assert(page_bytes_used(page) != 0);
2173 if (!ENABLE_PAGE_PROTECTION) return 0;
2175 /* Skip if it's already write-protected, pinned, or unboxed */
2176 if (page_table[page].write_protected
2177 /* FIXME: What's the reason for not write-protecting pinned pages? */
2178 || page_table[page].dont_move
2179 || page_unboxed_p(page))
2180 return (0);
2182 /* Scan the page for pointers to younger generations or the
2183 * top temp. generation. */
2185 /* This is conservative: any word satisfying is_lisp_pointer() is
2186 * assumed to be a pointer. To do otherwise would require a family
2187 * of scavenge-like functions. */
2188 for (j = 0; j < num_words; j++) {
2189 void *ptr = *(page_addr+j);
2190 page_index_t index;
2191 lispobj __attribute__((unused)) header;
2193 if (!is_lisp_pointer((lispobj)ptr))
2194 continue;
2195 /* Check that it's in the dynamic space */
2196 if ((index = find_page_index(ptr)) != -1) {
2197 if (/* Does it point to a younger or the temp. generation? */
2198 (!page_free_p(index)
2199 && (page_bytes_used(index) != 0)
2200 && ((page_table[index].gen < gen)
2201 || (page_table[index].gen == SCRATCH_GENERATION)))
2203 /* Or does it point within a current gc_alloc() region? */
2204 || (IN_BOXED_REGION_P(ptr) || IN_REGION_P(ptr,unboxed))) {
2205 wp_it = 0;
2206 break;
2209 #ifdef LISP_FEATURE_IMMOBILE_SPACE
2210 else if ((index = find_immobile_page_index(ptr)) >= 0 &&
2211 other_immediate_lowtag_p(header = *native_pointer((lispobj)ptr))) {
2212 // This is *possibly* a pointer to an object in immobile space,
2213 // given that above two conditions were satisfied.
2214 // But unlike in the dynamic space case, we need to read a byte
2215 // from the object to determine its generation, which requires care.
2216 // Consider an unboxed word that looks like a pointer to a word that
2217 // looks like fun-header-widetag. We can't naively back up to the
2218 // underlying code object since the alleged header might not be one.
2219 int obj_gen = gen; // Make comparison fail if we fall through
2220 if (lowtag_of((lispobj)ptr) != FUN_POINTER_LOWTAG) {
2221 obj_gen = __immobile_obj_generation(native_pointer((lispobj)ptr));
2222 } else if (widetag_of(header) == SIMPLE_FUN_WIDETAG) {
2223 lispobj* code = fun_code_header((lispobj)ptr - FUN_POINTER_LOWTAG);
2224 // This is a heuristic, since we're not actually looking for
2225 // an object boundary. Precise scanning of 'page' would obviate
2226 // the guard conditions here.
2227 if ((lispobj)code >= IMMOBILE_VARYOBJ_SUBSPACE_START
2228 && widetag_of(*code) == CODE_HEADER_WIDETAG)
2229 obj_gen = __immobile_obj_generation(code);
2231 // A bogus generation number implies a not-really-pointer,
2232 // but it won't cause misbehavior.
2233 if (obj_gen < gen || obj_gen == SCRATCH_GENERATION) {
2234 wp_it = 0;
2235 break;
2238 #endif
2241 if (wp_it == 1) {
2242 /* Write-protect the page. */
2243 /*FSHOW((stderr, "/write-protecting page %d gen %d\n", page, gen));*/
2245 os_protect((void *)page_addr,
2246 GENCGC_CARD_BYTES,
2247 OS_VM_PROT_READ|OS_VM_PROT_EXECUTE);
2249 /* Note the page as protected in the page tables. */
2250 page_table[page].write_protected = 1;
2253 return (wp_it);
2256 /* Is this page holding a normal (non-hashtable) large-object
2257 * simple-vector? */
2258 static inline boolean large_simple_vector_p(page_index_t page) {
2259 if (!page_table[page].large_object)
2260 return 0;
2261 lispobj object = *(lispobj *)page_address(page);
2262 return widetag_of(object) == SIMPLE_VECTOR_WIDETAG &&
2263 (HeaderValue(object) & 0xFF) == subtype_VectorNormal;
2267 /* Scavenge all generations from FROM to TO, inclusive, except for
2268 * new_space which needs special handling, as new objects may be
2269 * added which are not checked here - use scavenge_newspace_generation.
2271 * Write-protected pages should not have any pointers to the
2272 * from_space and so do not need scavenging; thus write-protected pages
2273 * are not always scavenged. There is some code to check that these
2274 * pages are not written; but to check this fully, the write-protected
2275 * pages would need to be scavenged with the skipping code disabled.
2277 * Under the current scheme when a generation is GCed the younger
2278 * generations will be empty. So, when a generation is being GCed it
2280 * is only necessary to scavenge the older generations for pointers,
2281 * not the younger ones. So a page that does not have pointers to younger
2281 * generations does not need to be scavenged.
2283 * The write-protection can be used to note pages that don't have
2284 * pointers to younger pages. But pages can be written without having
2285 * pointers to younger generations. After the pages are scavenged here
2286 * they can be scanned for pointers to younger generations and if
2287 * there are none the page can be write-protected.
2289 * One complication is when the newspace is the top temp. generation.
2291 * Enabling SC_GEN_CK scavenges the write-protected pages and checks
2292 * that none were written, which they shouldn't be as they should have
2293 * no pointers to younger generations. This breaks down for weak
2294 * pointers as the objects contain a link to the next and are written
2295 * if a weak pointer is scavenged. Still it's a useful check. */
2296 static void
2297 scavenge_generations(generation_index_t from, generation_index_t to)
2299 page_index_t i;
2300 page_index_t num_wp = 0;
2302 #define SC_GEN_CK 0
2303 #if SC_GEN_CK
2304 /* Clear the write_protected_cleared flags on all pages. */
2305 for (i = 0; i < page_table_pages; i++)
2306 page_table[i].write_protected_cleared = 0;
2307 #endif
2309 for (i = 0; i < last_free_page; i++) {
2310 generation_index_t generation = page_table[i].gen;
2311 if (page_boxed_p(i)
2312 && (page_bytes_used(i) != 0)
2313 && (generation != new_space)
2314 && (generation >= from)
2315 && (generation <= to)) {
2316 page_index_t last_page,j;
2317 int write_protected=1;
2319 /* This should be the start of a region */
2320 gc_assert(page_starts_contiguous_block_p(i));
2322 if (large_simple_vector_p(i)) {
2323 /* Scavenge only the unprotected pages of a
2324 * large-object vector; other large objects could be
2325 * handled as well, but vectors are easier to deal
2326 * with and are more likely to grow to very large
2327 * sizes, where avoiding scavenging the whole thing is
2328 * worthwhile. */
2329 if (!page_table[i].write_protected) {
2330 scavenge((lispobj*)page_address(i) + 2,
2331 GENCGC_CARD_BYTES / N_WORD_BYTES - 2);
2332 update_page_write_prot(i);
2334 for (last_page = i + 1; ; last_page++) {
2335 lispobj* start = (lispobj*)page_address(last_page);
2336 write_protected = page_table[last_page].write_protected;
2337 if (page_ends_contiguous_block_p(last_page, generation)) {
2338 if (!write_protected) {
2339 scavenge(start, page_bytes_used(last_page) / N_WORD_BYTES);
2340 update_page_write_prot(last_page);
2342 break;
2344 if (!write_protected) {
2345 scavenge(start, GENCGC_CARD_BYTES / N_WORD_BYTES);
2346 update_page_write_prot(last_page);
2349 } else {
2350 /* Now work forward until the end of the region */
2351 for (last_page = i; ; last_page++) {
2352 write_protected =
2353 write_protected && page_table[last_page].write_protected;
2354 if (page_ends_contiguous_block_p(last_page, generation))
2355 break;
2357 if (!write_protected) {
2358 heap_scavenge((lispobj*)page_address(i),
2359 (lispobj*)(page_address(last_page)
2360 + page_bytes_used(last_page)));
2362 /* Now scan the pages and write protect those that
2363 * don't have pointers to younger generations. */
2364 if (ENABLE_PAGE_PROTECTION) {
2365 for (j = i; j <= last_page; j++) {
2366 num_wp += update_page_write_prot(j);
2369 if ((gencgc_verbose > 1) && (num_wp != 0)) {
2370 FSHOW((stderr,
2371 "/write protected %d pages within generation %d\n",
2372 num_wp, generation));
2376 i = last_page;
2380 #if SC_GEN_CK
2381 /* Check that none of the write_protected pages in this generation
2382 * have been written to. */
2383 for (i = 0; i < page_table_pages; i++) {
2384 if (!page_free_p(i)
2385 && (page_bytes_used(i) != 0)
2386 && (page_table[i].gen == generation)
2387 && (page_table[i].write_protected_cleared != 0)) {
2388 FSHOW((stderr, "/scavenge_generation() %d\n", generation));
2389 FSHOW((stderr,
2390 "/page bytes_used=%d scan_start_offset=%lu dont_move=%d\n",
2391 page_bytes_used(i),
2392 scan_start_offset(page_table[i]),
2393 page_table[i].dont_move));
2394 lose("write to protected page %d in scavenge_generation()\n", i);
2397 #endif
2401 /* Scavenge a newspace generation. As it is scavenged new objects may
2402 * be allocated to it; these will also need to be scavenged. This
2403 * repeats until there are no more objects unscavenged in the
2404 * newspace generation.
2406 * To help improve the efficiency, areas written are recorded by
2407 * gc_alloc() and only these scavenged. Sometimes a little more will be
2408 * scavenged, but this causes no harm. An easy check is done that the
2409 * scavenged bytes equal the number allocated in the previous
2410 * scavenge.
2412 * Write-protected pages are not scanned except if they are marked
2413 * dont_move, in which case they may have been promoted and still have
2414 * pointers to the from space.
2416 * Write-protected pages could potentially be written by alloc; however,
2417 * to avoid having to handle re-scavenging of write-protected pages,
2418 * gc_alloc() does not write to write-protected pages.
2420 * New areas of objects allocated are recorded alternately in the two
2421 * new_areas arrays below. */
2422 static struct new_area new_areas_1[NUM_NEW_AREAS];
2423 static struct new_area new_areas_2[NUM_NEW_AREAS];
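
/* Each new_area is a (page, offset, size) record of a byte range written by
 * gc_alloc(); scavenge_newspace_generation() below replays those ranges with
 * heap_scavenge(), alternating between the two arrays above. */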
2425 #ifdef LISP_FEATURE_IMMOBILE_SPACE
2426 extern unsigned int immobile_scav_queue_count;
2427 extern void
2428 gc_init_immobile(),
2429 update_immobile_nursery_bits(),
2430 scavenge_immobile_roots(generation_index_t,generation_index_t),
2431 scavenge_immobile_newspace(),
2432 sweep_immobile_space(int raise),
2433 write_protect_immobile_space();
2434 #else
2435 #define immobile_scav_queue_count 0
2436 #endif
2438 /* Do one full scan of the new space generation. This is not enough to
2439 * complete the job, as new objects may be added to the generation in
2440 * the process which are not scavenged. */
2441 static void
2442 scavenge_newspace_generation_one_scan(generation_index_t generation)
2444 page_index_t i;
2446 FSHOW((stderr,
2447 "/starting one full scan of newspace generation %d\n",
2448 generation));
2449 for (i = 0; i < last_free_page; i++) {
2450 /* Note that this skips over open regions when it encounters them. */
2451 if (page_boxed_p(i)
2452 && (page_bytes_used(i) != 0)
2453 && (page_table[i].gen == generation)
2454 && (!page_table[i].write_protected
2455 /* (This may be redundant as write_protected is now
2456 * cleared before promotion.) */
2457 || page_table[i].dont_move)) {
2458 page_index_t last_page;
2459 int all_wp=1;
2461 /* The scavenge will start at the scan_start_offset of
2462 * page i.
2464 * We need to find the full extent of this contiguous
2465 * block in case objects span pages.
2467 * Now work forward until the end of this contiguous area
2468 * is found. A small area is preferred as there is a
2469 * better chance of its pages being write-protected. */
2470 for (last_page = i; ;last_page++) {
2471 /* If all pages are write-protected and movable,
2472 * then no need to scavenge */
2473 all_wp=all_wp && page_table[last_page].write_protected &&
2474 !page_table[last_page].dont_move;
2476 /* Check whether this is the last page in this
2477 * contiguous block */
2478 if (page_ends_contiguous_block_p(last_page, generation))
2479 break;
2482 /* Do a limited check for write-protected pages. */
2483 if (!all_wp) {
2484 new_areas_ignore_page = last_page;
2485 heap_scavenge(page_scan_start(i),
2486 (lispobj*)(page_address(last_page)
2487 + page_bytes_used(last_page)));
2489 i = last_page;
2492 FSHOW((stderr,
2493 "/done with one full scan of newspace generation %d\n",
2494 generation));
2497 /* Do a complete scavenge of the newspace generation. */
2498 static void
2499 scavenge_newspace_generation(generation_index_t generation)
2501 size_t i;
2503 /* the new_areas array currently being written to by gc_alloc() */
2504 struct new_area (*current_new_areas)[] = &new_areas_1;
2505 size_t current_new_areas_index;
2507 /* the new_areas created by the previous scavenge cycle */
2508 struct new_area (*previous_new_areas)[] = NULL;
2509 size_t previous_new_areas_index;
2511 /* Flush the current regions updating the tables. */
2512 gc_alloc_update_all_page_tables(0);
2514 /* Turn on the recording of new areas by gc_alloc(). */
2515 new_areas = current_new_areas;
2516 new_areas_index = 0;
2518 /* Don't need to record new areas that get scavenged anyway during
2519 * scavenge_newspace_generation_one_scan. */
2520 record_new_objects = 1;
2522 /* Start with a full scavenge. */
2523 scavenge_newspace_generation_one_scan(generation);
2525 /* Record all new areas now. */
2526 record_new_objects = 2;
2528 /* Give a chance to weak hash tables to make other objects live.
2529 * FIXME: The algorithm implemented here for weak hash table gcing
2530 * is O(W^2+N) as Bruno Haible warns in
2531 * http://www.haible.de/bruno/papers/cs/weak/WeakDatastructures-writeup.html
2532 * see "Implementation 2". */
2533 scav_weak_hash_tables();
2535 /* Flush the current regions updating the tables. */
2536 gc_alloc_update_all_page_tables(0);
2538 /* Grab new_areas_index. */
2539 current_new_areas_index = new_areas_index;
2541 /*FSHOW((stderr,
2542 "The first scan is finished; current_new_areas_index=%d.\n",
2543 current_new_areas_index));*/
2545 while (current_new_areas_index > 0 || immobile_scav_queue_count) {
2546 /* Move the current to the previous new areas */
2547 previous_new_areas = current_new_areas;
2548 previous_new_areas_index = current_new_areas_index;
2550 /* Scavenge all the areas in previous new areas. Any new areas
2551 * allocated are saved in current_new_areas. */
2553 /* Allocate an array for current_new_areas; alternating between
2554 * new_areas_1 and 2 */
2555 if (previous_new_areas == &new_areas_1)
2556 current_new_areas = &new_areas_2;
2557 else
2558 current_new_areas = &new_areas_1;
2560 /* Set up for gc_alloc(). */
2561 new_areas = current_new_areas;
2562 new_areas_index = 0;
2564 #ifdef LISP_FEATURE_IMMOBILE_SPACE
2565 scavenge_immobile_newspace();
2566 #endif
2567 /* Check whether previous_new_areas had overflowed. */
2568 if (previous_new_areas_index >= NUM_NEW_AREAS) {
2570 /* New areas of allocated objects have been lost, so we need to do a
2571 * full scan to be sure! If this becomes a problem try
2572 * increasing NUM_NEW_AREAS. */
2573 if (gencgc_verbose) {
2574 SHOW("new_areas overflow, doing full scavenge");
2577 /* Don't need to record new areas that get scavenged
2578 * anyway during scavenge_newspace_generation_one_scan. */
2579 record_new_objects = 1;
2581 scavenge_newspace_generation_one_scan(generation);
2583 /* Record all new areas now. */
2584 record_new_objects = 2;
2586 scav_weak_hash_tables();
2588 /* Flush the current regions updating the tables. */
2589 gc_alloc_update_all_page_tables(0);
2591 } else {
2593 /* Work through previous_new_areas. */
2594 for (i = 0; i < previous_new_areas_index; i++) {
2595 page_index_t page = (*previous_new_areas)[i].page;
2596 size_t offset = (*previous_new_areas)[i].offset;
2597 size_t size = (*previous_new_areas)[i].size;
2598 gc_assert(size % N_WORD_BYTES == 0);
2599 lispobj *start = (lispobj*)(page_address(page) + offset);
2600 heap_scavenge(start, (lispobj*)((char*)start + size));
2603 scav_weak_hash_tables();
2605 /* Flush the current regions updating the tables. */
2606 gc_alloc_update_all_page_tables(0);
2609 current_new_areas_index = new_areas_index;
2611 /*FSHOW((stderr,
2612 "The re-scan has finished; current_new_areas_index=%d.\n",
2613 current_new_areas_index));*/
2616 /* Turn off recording of areas allocated by gc_alloc(). */
2617 record_new_objects = 0;
2619 #if SC_NS_GEN_CK
2621 page_index_t i;
2622 /* Check that none of the write_protected pages in this generation
2623 * have been written to. */
2624 for (i = 0; i < page_table_pages; i++) {
2625 if (!page_free_p(i)
2626 && (page_bytes_used(i) != 0)
2627 && (page_table[i].gen == generation)
2628 && (page_table[i].write_protected_cleared != 0)
2629 && (page_table[i].dont_move == 0)) {
2630 lose("write protected page %d written to in scavenge_newspace_generation\ngeneration=%d dont_move=%d\n",
2631 i, generation, page_table[i].dont_move);
2635 #endif
2638 /* Un-write-protect all the pages in from_space. This is done at the
2639 * start of a GC, else there may be many page faults while scavenging
2640 * the newspace (I've seen this drive the system time to 99%). These pages
2641 * would need to be unprotected anyway before unmapping in
2642 * free_oldspace; not sure what effect this has on paging. */
2643 static void
2644 unprotect_oldspace(void)
2646 page_index_t i;
2647 char *region_addr = 0;
2648 char *page_addr = 0;
2649 uword_t region_bytes = 0;
2651 for (i = 0; i < last_free_page; i++) {
2652 if (!page_free_p(i)
2653 && (page_bytes_used(i) != 0)
2654 && (page_table[i].gen == from_space)) {
2656 /* Remove any write-protection. We should be able to rely
2657 * on the write-protect flag to avoid redundant calls. */
2658 if (page_table[i].write_protected) {
2659 page_table[i].write_protected = 0;
2660 page_addr = page_address(i);
2661 if (!region_addr) {
2662 /* First region. */
2663 region_addr = page_addr;
2664 region_bytes = GENCGC_CARD_BYTES;
2665 } else if (region_addr + region_bytes == page_addr) {
2666 /* Region continues. */
2667 region_bytes += GENCGC_CARD_BYTES;
2668 } else {
2669 /* Unprotect previous region. */
2670 os_protect(region_addr, region_bytes, OS_VM_PROT_ALL);
2671 /* First page in new region. */
2672 region_addr = page_addr;
2673 region_bytes = GENCGC_CARD_BYTES;
2678 if (region_addr) {
2679 /* Unprotect last region. */
2680 os_protect(region_addr, region_bytes, OS_VM_PROT_ALL);
2684 /* Work through all the pages and free any in from_space. This
2685 * assumes that all objects have been copied or promoted to an older
2686 * generation. Bytes_allocated and the generation bytes_allocated
2687 * counter are updated. The number of bytes freed is returned. */
2688 static uword_t
2689 free_oldspace(void)
2691 uword_t bytes_freed = 0;
2692 page_index_t first_page, last_page;
2694 first_page = 0;
2696 do {
2697 /* Find a first page for the next region of pages. */
2698 while ((first_page < last_free_page)
2699 && (page_free_p(first_page)
2700 || (page_bytes_used(first_page) == 0)
2701 || (page_table[first_page].gen != from_space)))
2702 first_page++;
2704 if (first_page >= last_free_page)
2705 break;
2707 /* Find the last page of this region. */
2708 last_page = first_page;
2710 do {
2711 /* Free the page. */
2712 bytes_freed += page_bytes_used(last_page);
2713 generations[page_table[last_page].gen].bytes_allocated -=
2714 page_bytes_used(last_page);
2715 page_table[last_page].allocated = FREE_PAGE_FLAG;
2716 set_page_bytes_used(last_page, 0);
2717 /* Should already be unprotected by unprotect_oldspace(). */
2718 gc_assert(!page_table[last_page].write_protected);
2719 last_page++;
2721 while ((last_page < last_free_page)
2722 && !page_free_p(last_page)
2723 && (page_bytes_used(last_page) != 0)
2724 && (page_table[last_page].gen == from_space));
2726 #ifdef READ_PROTECT_FREE_PAGES
2727 os_protect(page_address(first_page),
2728 npage_bytes(last_page-first_page),
2729 OS_VM_PROT_NONE);
2730 #endif
2731 first_page = last_page;
2732 } while (first_page < last_free_page);
2734 bytes_allocated -= bytes_freed;
2735 return bytes_freed;
2738 #if 0
2739 /* Print some information about a pointer at the given address. */
2740 static void
2741 print_ptr(lispobj *addr)
2743 /* If addr is in the dynamic space then print out the page information. */
2744 page_index_t pi1 = find_page_index((void*)addr);
2746 if (pi1 != -1)
2747 fprintf(stderr," %p: page %d alloc %d gen %d bytes_used %d offset %lu dont_move %d\n",
2748 addr,
2749 pi1,
2750 page_table[pi1].allocated,
2751 page_table[pi1].gen,
2752 page_bytes_used(pi1),
2753 scan_start_offset(page_table[pi1]),
2754 page_table[pi1].dont_move);
2755 fprintf(stderr," %x %x %x %x (%x) %x %x %x %x\n",
2756 *(addr-4),
2757 *(addr-3),
2758 *(addr-2),
2759 *(addr-1),
2760 *(addr-0),
2761 *(addr+1),
2762 *(addr+2),
2763 *(addr+3),
2764 *(addr+4));
2766 #endif
2768 static int
2769 is_in_stack_space(lispobj ptr)
2771 /* For space verification: Pointers can be valid if they point
2772 * to a thread stack space. This would be faster if the thread
2773 * structures had page-table entries as if they were part of
2774 * the heap space. */
2775 struct thread *th;
2776 for_each_thread(th) {
2777 if ((th->control_stack_start <= (lispobj *)ptr) &&
2778 (th->control_stack_end >= (lispobj *)ptr)) {
2779 return 1;
2782 return 0;
2785 // NOTE: This function can produce false failure indications,
2786 // usually related to dynamic space pointing to the stack of a
2787 // dead thread, but there may be other reasons as well.
2788 static void
2789 verify_range(lispobj *start, size_t words)
2791 extern int valid_lisp_pointer_p(lispobj);
2792 int is_in_readonly_space =
2793 (READ_ONLY_SPACE_START <= (uword_t)start &&
2794 (uword_t)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
2795 #ifdef LISP_FEATURE_IMMOBILE_SPACE
2796 int is_in_immobile_space =
2797 (IMMOBILE_SPACE_START <= (uword_t)start &&
2798 (uword_t)start < SymbolValue(IMMOBILE_SPACE_FREE_POINTER,0));
2799 #endif
2801 lispobj *end = start + words;
2802 size_t count;
2803 for ( ; start < end ; start += count) {
2804 count = 1;
2805 lispobj thing = *start;
2806 lispobj __attribute__((unused)) pointee;
2808 if (is_lisp_pointer(thing)) {
2809 page_index_t page_index = find_page_index((void*)thing);
2810 sword_t to_readonly_space =
2811 (READ_ONLY_SPACE_START <= thing &&
2812 thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
2813 sword_t to_static_space =
2814 (STATIC_SPACE_START <= thing &&
2815 thing < SymbolValue(STATIC_SPACE_FREE_POINTER,0));
2816 #ifdef LISP_FEATURE_IMMOBILE_SPACE
2817 sword_t to_immobile_space =
2818 (IMMOBILE_SPACE_START <= thing &&
2819 thing < SymbolValue(IMMOBILE_FIXEDOBJ_FREE_POINTER,0)) ||
2820 (IMMOBILE_VARYOBJ_SUBSPACE_START <= thing &&
2821 thing < SymbolValue(IMMOBILE_SPACE_FREE_POINTER,0));
2822 #endif
2824 /* Does it point to the dynamic space? */
2825 if (page_index != -1) {
2826 /* If it's within the dynamic space it should point to a used page. */
2827 if (page_free_p(page_index))
2828 lose ("Ptr %p @ %p sees free page.\n", thing, start);
2829 if ((thing & (GENCGC_CARD_BYTES-1)) >= page_bytes_used(page_index))
2830 lose ("Ptr %p @ %p sees unallocated space.\n", thing, start);
2831 /* Check that it doesn't point to a forwarding pointer! */
2832 if (*native_pointer(thing) == 0x01) {
2833 lose("Ptr %p @ %p sees forwarding ptr.\n", thing, start);
2835 /* Check that it's not in the RO space, as it would then be a
2836 * pointer from the RO to the dynamic space. */
2837 if (is_in_readonly_space) {
2838 lose("ptr to dynamic space %p from RO space %x\n",
2839 thing, start);
2841 #ifdef LISP_FEATURE_IMMOBILE_SPACE
2842 // verify all immobile space -> dynamic space pointers
2843 if (is_in_immobile_space && !valid_lisp_pointer_p(thing)) {
2844 lose("Ptr %p @ %p sees junk.\n", thing, start);
2846 #endif
2847 /* Does it point to a plausible object? This check slows
2848 * it down a lot (so it's commented out).
2850 * "a lot" is serious: it ate 50 minutes cpu time on
2851 * my duron 950 before I came back from lunch and
2852 * killed it.
2854 * FIXME: Add a variable to enable this
2855 * dynamically. */
2857 if (!valid_lisp_pointer_p(thing)) {
2858 lose("ptr %p to invalid object %p\n", thing, start);
2861 #ifdef LISP_FEATURE_IMMOBILE_SPACE
2862 } else if (to_immobile_space) {
2863 // the object pointed to must not have been discarded as garbage
2864 if (!other_immediate_lowtag_p(*native_pointer(thing))
2865 || immobile_filler_p(native_pointer(thing)))
2866 lose("Ptr %p @ %p sees trashed object.\n", (void*)thing, start);
2867 // verify all pointers to immobile space
2868 if (!valid_lisp_pointer_p(thing))
2869 lose("Ptr %p @ %p sees junk.\n", thing, start);
2870 #endif
2871 } else {
2872 extern char __attribute__((unused)) funcallable_instance_tramp;
2873 /* Verify that it points to another valid space. */
2874 if (!to_readonly_space && !to_static_space
2875 && !is_in_stack_space(thing)) {
2876 lose("Ptr %p @ %p sees junk.\n", thing, start);
2879 continue;
2881 int widetag = widetag_of(thing);
2882 if (is_lisp_immediate(thing) || widetag == NO_TLS_VALUE_MARKER_WIDETAG) {
2883 /* skip immediates */
2884 } else if (!(other_immediate_lowtag_p(widetag)
2885 && lowtag_for_widetag[widetag>>2])) {
2886 lose("Unhandled widetag %p at %p\n", widetag, start);
2887 } else if (unboxed_obj_widetag_p(widetag)) {
2888 count = sizetab[widetag](start);
2889 } else switch(widetag) {
2890 /* boxed or partially boxed objects */
2891 // FIXME: x86-64 can have partially unboxed FINs. The raw words
2892 // are at the moment valid fixnums by blind luck.
2893 case INSTANCE_WIDETAG:
2894 if (instance_layout(start)) {
2895 sword_t nslots = instance_length(thing) | 1;
2896 instance_scan(verify_range, start+1, nslots,
2897 ((struct layout*)
2898 native_pointer(instance_layout(start)))->bitmap);
2899 count = 1 + nslots;
2901 break;
2902 case CODE_HEADER_WIDETAG:
2904 struct code *code = (struct code *) start;
2905 sword_t nheader_words = code_header_words(code->header);
2906 /* Scavenge the boxed section of the code data block */
2907 verify_range(start + 1, nheader_words - 1);
2909 /* Scavenge the boxed section of each function
2910 * object in the code data block. */
2911 for_each_simple_fun(i, fheaderp, code, 1, {
2912 verify_range(SIMPLE_FUN_SCAV_START(fheaderp),
2913 SIMPLE_FUN_SCAV_NWORDS(fheaderp)); });
2914 count = nheader_words + code_instruction_words(code->code_size);
2915 break;
2917 #ifdef LISP_FEATURE_IMMOBILE_CODE
2918 case FDEFN_WIDETAG:
2919 verify_range(start + 1, 2);
2920 pointee = fdefn_raw_referent((struct fdefn*)start);
2921 verify_range(&pointee, 1);
2922 count = CEILING(sizeof (struct fdefn)/sizeof(lispobj), 2);
2923 break;
2924 #endif
2928 static uword_t verify_space(lispobj start, lispobj end) {
2929 verify_range((lispobj*)start, (end-start)>>WORD_SHIFT);
2930 return 0;
2933 static void verify_dynamic_space();
2935 static void
2936 verify_gc(void)
2938 #ifdef LISP_FEATURE_IMMOBILE_SPACE
2939 # ifdef __linux__
2940 // Try this verification if marknsweep was compiled with extra debugging.
2941 // But weak symbols don't work on macOS.
2942 extern void __attribute__((weak)) check_varyobj_pages();
2943 if (&check_varyobj_pages) check_varyobj_pages();
2944 # endif
2945 verify_space(IMMOBILE_SPACE_START,
2946 SymbolValue(IMMOBILE_FIXEDOBJ_FREE_POINTER,0));
2947 verify_space(IMMOBILE_VARYOBJ_SUBSPACE_START,
2948 SymbolValue(IMMOBILE_SPACE_FREE_POINTER,0));
2949 #endif
2950 struct thread *th;
2951 for_each_thread(th) {
2952 verify_space((lispobj)th->binding_stack_start,
2953 (lispobj)get_binding_stack_pointer(th));
2955 verify_space(READ_ONLY_SPACE_START,
2956 SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
2957 verify_space(STATIC_SPACE_START,
2958 SymbolValue(STATIC_SPACE_FREE_POINTER,0));
2959 verify_dynamic_space();
2962 /* Call 'proc' with pairs of addresses demarcating ranges in the
2963 * specified generation.
2964 * Stop if any invocation returns non-zero, and return that value */
2965 uword_t
2966 walk_generation(uword_t (*proc)(lispobj*,lispobj*,uword_t),
2967 generation_index_t generation, uword_t extra)
2969 page_index_t i;
2970 int genmask = generation >= 0 ? 1 << generation : ~0;
2972 for (i = 0; i < last_free_page; i++) {
2973 if (!page_free_p(i)
2974 && (page_bytes_used(i) != 0)
2975 && ((1 << page_table[i].gen) & genmask)) {
2976 page_index_t last_page;
2978 /* This should be the start of a contiguous block */
2979 gc_assert(page_starts_contiguous_block_p(i));
2981 /* Need to find the full extent of this contiguous block in case
2982 objects span pages. */
2984 /* Now work forward until the end of this contiguous area is
2985 found. */
2986 for (last_page = i; ;last_page++)
2987 /* Check whether this is the last page in this contiguous
2988 * block. */
2989 if (page_ends_contiguous_block_p(last_page, page_table[i].gen))
2990 break;
2992 uword_t result =
2993 proc((lispobj*)page_address(i),
2994 (lispobj*)(page_bytes_used(last_page) + page_address(last_page)),
2995 extra);
2996 if (result) return result;
2998 i = last_page;
3001 return 0;
3003 static void verify_generation(generation_index_t generation)
3005 walk_generation((uword_t(*)(lispobj*,lispobj*,uword_t))verify_space,
3006 generation, 0);
3009 /* Check that all the free space is zero filled. */
3010 static void
3011 verify_zero_fill(void)
3013 page_index_t page;
3015 for (page = 0; page < last_free_page; page++) {
3016 if (page_free_p(page)) {
3017 /* The whole page should be zero filled. */
3018 sword_t *start_addr = (sword_t *)page_address(page);
3019 sword_t i;
3020 for (i = 0; i < (sword_t)GENCGC_CARD_BYTES/N_WORD_BYTES; i++) {
3021 if (start_addr[i] != 0) {
3022 lose("free page not zero at %p\n", start_addr + i);
3025 } else {
3026 sword_t free_bytes = GENCGC_CARD_BYTES - page_bytes_used(page);
3027 if (free_bytes > 0) {
3028 sword_t *start_addr =
3029 (sword_t *)(page_address(page) + page_bytes_used(page));
3030 sword_t size = free_bytes / N_WORD_BYTES;
3031 sword_t i;
3032 for (i = 0; i < size; i++) {
3033 if (start_addr[i] != 0) {
3034 lose("free region not zero at %p\n", start_addr + i);
3042 /* External entry point for verify_zero_fill */
3043 void
3044 gencgc_verify_zero_fill(void)
3046 /* Flush the alloc regions updating the tables. */
3047 gc_alloc_update_all_page_tables(1);
3048 SHOW("verifying zero fill");
3049 verify_zero_fill();
3052 static void
3053 verify_dynamic_space(void)
3055 verify_generation(-1);
3056 if (gencgc_enable_verify_zero_fill)
3057 verify_zero_fill();
3060 /* Write-protect all the dynamic boxed pages in the given generation. */
3061 static void
3062 write_protect_generation_pages(generation_index_t generation)
3064 page_index_t start;
3066 gc_assert(generation < SCRATCH_GENERATION);
3068 for (start = 0; start < last_free_page; start++) {
3069 if (protect_page_p(start, generation)) {
3070 void *page_start;
3071 page_index_t last;
3073 /* Note the page as protected in the page tables. */
3074 page_table[start].write_protected = 1;
3076 for (last = start + 1; last < last_free_page; last++) {
3077 if (!protect_page_p(last, generation))
3078 break;
3079 page_table[last].write_protected = 1;
3082 page_start = page_address(start);
3084 os_protect(page_start,
3085 npage_bytes(last - start),
3086 OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
3088 start = last;
3092 if (gencgc_verbose > 1) {
3093 FSHOW((stderr,
3094 "/write protected %d of %d pages in generation %d\n",
3095 count_write_protect_generation_pages(generation),
3096 count_generation_pages(generation),
3097 generation));
3101 #ifndef GENCGC_IS_PRECISE
3102 static void
3103 preserve_context_registers (void (*proc)(os_context_register_t), os_context_t *c)
3105 #ifdef LISP_FEATURE_SB_THREAD
3106 void **ptr;
3107 /* On Darwin the signal context isn't a contiguous block of memory,
3108 * so just calling preserve_pointer() on its contents won't be sufficient.
3110 #if defined(LISP_FEATURE_DARWIN)||defined(LISP_FEATURE_WIN32)
3111 #if defined LISP_FEATURE_X86
3112 proc(*os_context_register_addr(c,reg_EAX));
3113 proc(*os_context_register_addr(c,reg_ECX));
3114 proc(*os_context_register_addr(c,reg_EDX));
3115 proc(*os_context_register_addr(c,reg_EBX));
3116 proc(*os_context_register_addr(c,reg_ESI));
3117 proc(*os_context_register_addr(c,reg_EDI));
3118 proc(*os_context_pc_addr(c));
3119 #elif defined LISP_FEATURE_X86_64
3120 proc(*os_context_register_addr(c,reg_RAX));
3121 proc(*os_context_register_addr(c,reg_RCX));
3122 proc(*os_context_register_addr(c,reg_RDX));
3123 proc(*os_context_register_addr(c,reg_RBX));
3124 proc(*os_context_register_addr(c,reg_RSI));
3125 proc(*os_context_register_addr(c,reg_RDI));
3126 proc(*os_context_register_addr(c,reg_R8));
3127 proc(*os_context_register_addr(c,reg_R9));
3128 proc(*os_context_register_addr(c,reg_R10));
3129 proc(*os_context_register_addr(c,reg_R11));
3130 proc(*os_context_register_addr(c,reg_R12));
3131 proc(*os_context_register_addr(c,reg_R13));
3132 proc(*os_context_register_addr(c,reg_R14));
3133 proc(*os_context_register_addr(c,reg_R15));
3134 proc(*os_context_pc_addr(c));
3135 #else
3136 #error "preserve_context_registers needs to be tweaked for non-x86 Darwin"
3137 #endif
3138 #endif
3139 #if !defined(LISP_FEATURE_WIN32)
3140 for(ptr = ((void **)(c+1))-1; ptr>=(void **)c; ptr--) {
3141 proc((os_context_register_t)*ptr);
3143 #endif
3144 #endif // LISP_FEATURE_SB_THREAD
3146 #endif
3148 static void
3149 move_pinned_pages_to_newspace()
3151 page_index_t i;
3153 /* scavenge() will evacuate all oldspace pages, but no newspace
3154 * pages. Pinned pages are precisely those pages which must not
3155 * be evacuated, so move them to newspace directly. */
3157 for (i = 0; i < last_free_page; i++) {
3158 if (page_table[i].dont_move &&
3159 /* dont_move is cleared lazily, so test the 'gen' field as well. */
3160 page_table[i].gen == from_space) {
3161 if (page_table[i].has_pins) {
3162 // do not move to newspace after all, this will be word-wiped
3163 continue;
3165 page_table[i].gen = new_space;
3166 /* And since we're moving the pages wholesale, also adjust
3167 * the generation allocation counters. */
3168 int used = page_bytes_used(i);
3169 generations[new_space].bytes_allocated += used;
3170 generations[from_space].bytes_allocated -= used;
3175 #if defined(__GNUC__) && defined(ADDRESS_SANITIZER)
3176 #define NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
3177 #else
3178 #define NO_SANITIZE_ADDRESS
3179 #endif
3181 /* Garbage collect a generation. If raise is 0 then the remains of the
3182 * generation are not raised to the next generation. */
3183 static void NO_SANITIZE_ADDRESS
3184 garbage_collect_generation(generation_index_t generation, int raise)
3186 page_index_t i;
3187 struct thread *th;
3189 gc_assert(generation <= HIGHEST_NORMAL_GENERATION);
3191 /* The oldest generation can't be raised. */
3192 gc_assert((generation != HIGHEST_NORMAL_GENERATION) || (raise == 0));
3194 /* Check if weak hash tables were processed in the previous GC. */
3195 gc_assert(weak_hash_tables == NULL);
3197 /* Initialize the weak pointer list. */
3198 weak_pointers = NULL;
3200 /* When a generation is not being raised it is transported to a
3201 * temporary generation (SCRATCH_GENERATION), and lowered when
3202 * done. Set up this new generation. There should be no pages
3203 * allocated to it yet. */
3204 if (!raise) {
3205 gc_assert(generations[SCRATCH_GENERATION].bytes_allocated == 0);
3208 /* Set the global src and dest. generations */
3209 from_space = generation;
3210 if (raise)
3211 new_space = generation+1;
3212 else
3213 new_space = SCRATCH_GENERATION;
3215 /* Change to a new space for allocation, resetting the alloc_start_page */
3216 gc_alloc_generation = new_space;
3217 #ifdef LISP_FEATURE_SEGREGATED_CODE
3218 bzero(generations[new_space].alloc_start_page_,
3219 sizeof generations[new_space].alloc_start_page_);
3220 #else
3221 generations[new_space].alloc_start_page = 0;
3222 generations[new_space].alloc_unboxed_start_page = 0;
3223 generations[new_space].alloc_large_start_page = 0;
3224 #endif
3226 #ifdef PIN_GRANULARITY_LISPOBJ
3227 hopscotch_reset(&pinned_objects);
3228 #endif
3229 /* Before any pointers are preserved, the dont_move flags on the
3230 * pages need to be cleared. */
3231 /* FIXME: consider moving this bitmap into its own range of words,
3232 * out of the page table. Then we can just bzero() it.
3233 * This will also obviate the extra test at the comment
3234 * "dont_move is cleared lazily" in move_pinned_pages_to_newspace().
3236 for (i = 0; i < last_free_page; i++)
3237 if(page_table[i].gen==from_space) {
3238 page_table[i].dont_move = 0;
3241 /* Un-write-protect the old-space pages. This is essential for the
3242 * promoted pages as they may contain pointers into the old-space
3243 * which need to be scavenged. It also helps avoid unnecessary page
3244 * faults as forwarding pointers are written into them. They need to
3245 * be un-protected anyway before unmapping later. */
3246 if (ENABLE_PAGE_PROTECTION)
3247 unprotect_oldspace();
3249 /* Scavenge the stacks' conservative roots. */
3251 /* there are potentially two stacks for each thread: the main
3252 * stack, which may contain Lisp pointers, and the alternate stack.
3253 * We don't ever run Lisp code on the altstack, but it may
3254 * host a sigcontext with lisp objects in it */
3256 /* what we need to do: (1) find the stack pointer for the main
3257 * stack; scavenge it (2) find the interrupt context on the
3258 * alternate stack that might contain lisp values, and scavenge
3259 * that */
3261 /* we assume that none of the preceding applies to the thread that
3262 * initiates GC. If you ever call GC from inside an altstack
3263 * handler, you will lose. */
3265 #ifndef GENCGC_IS_PRECISE
3266 /* And if we're saving a core, there's no point in being conservative. */
3267 if (conservative_stack) {
3268 for_each_thread(th) {
3269 void **ptr;
3270 void **esp=(void **)-1;
3271 if (th->state == STATE_DEAD)
3272 continue;
3273 # if defined(LISP_FEATURE_SB_SAFEPOINT)
3274 /* Conservative collect_garbage is always invoked with a
3275 * foreign C call or an interrupt handler on top of every
3276 * existing thread, so the stored SP in each thread
3277 * structure is valid, no matter which thread we are looking
3278 * at. For threads that were running Lisp code, the pitstop
3279 * and edge functions maintain this value within the
3280 * interrupt or exception handler. */
3281 esp = os_get_csp(th);
3282 assert_on_stack(th, esp);
3284 /* In addition to pointers on the stack, also preserve the
3285 * return PC, the only value from the context that we need
3286 * in addition to the SP. The return PC gets saved by the
3287 * foreign call wrapper, and removed from the control stack
3288 * into a register. */
3289 preserve_pointer(th->pc_around_foreign_call);
3291 /* And on platforms with interrupts: scavenge ctx registers. */
3293 /* Disabled on Windows, because it does not have an explicit
3294 * stack of `interrupt_contexts'. The reported CSP has been
3295 * chosen so that the current context on the stack is
3296 * covered by the stack scan. See also set_csp_from_context(). */
3297 # ifndef LISP_FEATURE_WIN32
3298 if (th != arch_os_get_current_thread()) {
3299 long k = fixnum_value(
3300 SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
3301 while (k > 0)
3302 preserve_context_registers((void(*)(os_context_register_t))preserve_pointer,
3303 th->interrupt_contexts[--k]);
3305 # endif
3306 # elif defined(LISP_FEATURE_SB_THREAD)
3307 sword_t i,free;
3308 if(th==arch_os_get_current_thread()) {
3309 /* Somebody is going to burn in hell for this, but casting
3310 * it in two steps shuts gcc up about strict aliasing. */
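/* 'raise' is a parameter of this very function, so its address sits at
 * (or below) the deepest frame in use; scanning from control_stack_end
 * down to it covers everything the interrupted Lisp code pushed, plus
 * the GC's own frames, which is harmless extra conservatism. */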
3311 esp = (void **)((void *)&raise);
3312 } else {
3313 void **esp1;
3314 free=fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
3315 for(i=free-1;i>=0;i--) {
3316 os_context_t *c=th->interrupt_contexts[i];
3317 esp1 = (void **) *os_context_register_addr(c,reg_SP);
3318 if (esp1>=(void **)th->control_stack_start &&
3319 esp1<(void **)th->control_stack_end) {
3320 if(esp1<esp) esp=esp1;
3321 preserve_context_registers((void(*)(os_context_register_t))preserve_pointer,
3326 # else
3327 esp = (void **)((void *)&raise);
3328 # endif
3329 if (!esp || esp == (void*) -1)
3330 lose("garbage_collect: no SP known for thread %x (OS %x)",
3331 th, th->os_thread);
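/* Walk the control stack from its top (control_stack_end) down to the
 * SP determined above, treating every word as a possible Lisp pointer
 * and pinning whatever page it might point into. */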
3332 for (ptr = ((void **)th->control_stack_end)-1; ptr >= esp; ptr--) {
3333 preserve_pointer(*ptr);
3337 #else
3338 /* Non-x86oid systems don't have "conservative roots" as such, but
3339 * the same mechanism is used for objects pinned for use by alien
3340 * code. */
3341 for_each_thread(th) {
3342 lispobj pin_list = SymbolTlValue(PINNED_OBJECTS,th);
3343 while (pin_list != NIL) {
3344 preserve_pointer((void*)(CONS(pin_list)->car));
3345 pin_list = CONS(pin_list)->cdr;
3348 #endif
3350 #if QSHOW
3351 if (gencgc_verbose > 1) {
3352 sword_t num_dont_move_pages = count_dont_move_pages();
3353 fprintf(stderr,
3354 "/non-movable pages due to conservative pointers = %ld (%lu bytes)\n",
3355 num_dont_move_pages,
3356 npage_bytes(num_dont_move_pages));
3358 #endif
3360 /* Now that all of the pinned (dont_move) pages are known, and
3361 * before we start to scavenge (and thus relocate) objects,
3362 * relocate the pinned pages to newspace, so that the scavenger
3363 * will not attempt to relocate their contents. */
3364 move_pinned_pages_to_newspace();
3366 /* Scavenge all the rest of the roots. */
3368 #ifdef GENCGC_IS_PRECISE
3370 * If not x86, we need to scavenge the interrupt context(s) and the
3371 * control stack.
3374 struct thread *th;
3375 for_each_thread(th) {
3376 scavenge_interrupt_contexts(th);
3377 scavenge_control_stack(th);
3380 # ifdef LISP_FEATURE_SB_SAFEPOINT
3381 /* In this case, scrub all stacks right here from the GCing thread
3382 * instead of doing what the comment below says. Suboptimal, but
3383 * easier. */
3384 for_each_thread(th)
3385 scrub_thread_control_stack(th);
3386 # else
3387 /* Scrub the unscavenged control stack space, so that we can't run
3388 * into any stale pointers in a later GC (this is done by the
3389 * stop-for-gc handler in the other threads). */
3390 scrub_control_stack();
3391 # endif
3393 #endif
3395 /* Scavenge the Lisp functions of the interrupt handlers, taking
3396 * care to avoid SIG_DFL and SIG_IGN. */
3397 for (i = 0; i < NSIG; i++) {
3398 union interrupt_handler handler = interrupt_handlers[i];
3399 if (!ARE_SAME_HANDLER(handler.c, SIG_IGN) &&
3400 !ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
3401 scavenge((lispobj *)(interrupt_handlers + i), 1);
3404 /* Scavenge the binding stacks. */
3406 struct thread *th;
3407 for_each_thread(th) {
3408 scav_binding_stack((lispobj*)th->binding_stack_start,
3409 (lispobj*)get_binding_stack_pointer(th));
3410 #ifdef LISP_FEATURE_SB_THREAD
3411 /* do the tls as well */
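/* The TLS area lives immediately after the thread structure (hence the
 * scavenge starting at th+1); its length in words is derived from
 * FREE_TLS_INDEX, shifted down to a word count, minus the words
 * occupied by struct thread itself. */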
3412 sword_t len;
3413 len=(SymbolValue(FREE_TLS_INDEX,0) >> WORD_SHIFT) -
3414 (sizeof (struct thread))/(sizeof (lispobj));
3415 scavenge((lispobj *) (th+1),len);
3416 #endif
3420 /* Scavenge static space. */
3421 if (gencgc_verbose > 1) {
3422 FSHOW((stderr,
3423 "/scavenge static space: %d bytes\n",
3424 SymbolValue(STATIC_SPACE_FREE_POINTER,0) - STATIC_SPACE_START));
3426 heap_scavenge((lispobj*)STATIC_SPACE_START,
3427 (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER,0));
3429 /* All generations but the generation being GCed need to be
3430 * scavenged. The new_space generation needs special handling as
3431 * objects may be moved in - it is handled separately below. */
3432 #ifdef LISP_FEATURE_IMMOBILE_SPACE
3433 scavenge_immobile_roots(generation+1, SCRATCH_GENERATION);
3434 #endif
3435 scavenge_generations(generation+1, PSEUDO_STATIC_GENERATION);
3437 #ifdef LISP_FEATURE_SB_TRACEROOT
3438 if (gc_object_watcher) scavenge(&gc_object_watcher, 1);
3439 #endif
3440 scavenge_pinned_ranges();
3441 /* The Lisp start function is stored in the core header, not a static
3442 * symbol. It is passed to gc_and_save() in this C variable */
3443 if (lisp_init_function) scavenge(&lisp_init_function, 1);
3445 /* Finally scavenge the new_space generation. Keep going until no
3446 * more objects are moved into the new generation */
3447 scavenge_newspace_generation(new_space);
3449 /* FIXME: I tried reenabling this check when debugging unrelated
3450 * GC weirdness ca. sbcl-0.6.12.45, and it failed immediately.
3451 * Since the current GC code seems to work well, I'm guessing that
3452 * this debugging code is just stale, but I haven't tried to
3453 * figure it out. It should be figured out and then either made to
3454 * work or just deleted. */
3456 #define RESCAN_CHECK 0
3457 #if RESCAN_CHECK
3458 /* As a check re-scavenge the newspace once; no new objects should
3459 * be found. */
3461 os_vm_size_t old_bytes_allocated = bytes_allocated;
3462 os_vm_size_t bytes_allocated;
3464 /* Start with a full scavenge. */
3465 scavenge_newspace_generation_one_scan(new_space);
3467 /* Flush the current regions, updating the tables. */
3468 gc_alloc_update_all_page_tables(1);
3470 bytes_allocated = bytes_allocated - old_bytes_allocated;
3472 if (bytes_allocated != 0) {
3473 lose("Rescan of new_space allocated %d more bytes.\n",
3474 bytes_allocated);
3477 #endif
3479 scan_weak_hash_tables();
3480 scan_weak_pointers();
3481 wipe_nonpinned_words();
3482 #ifdef LISP_FEATURE_IMMOBILE_SPACE
3483 // Do this last, because until wipe_nonpinned_words() happens,
3484 // not all page table entries have the 'gen' value updated,
3485 // which we need to correctly find all old->young pointers.
3486 sweep_immobile_space(raise);
3487 #endif
3489 /* Flush the current regions, updating the tables. */
3490 gc_alloc_update_all_page_tables(0);
3491 #ifdef PIN_GRANULARITY_LISPOBJ
3492 hopscotch_log_stats(&pinned_objects, "pins");
3493 #endif
3495 /* Free the pages in oldspace, but not those marked dont_move. */
3496 free_oldspace();
3498 /* If the GC is not raising the age then lower the generation back
3499 * to its normal generation number */
3500 if (!raise) {
3501 for (i = 0; i < last_free_page; i++)
3502 if ((page_bytes_used(i) != 0)
3503 && (page_table[i].gen == SCRATCH_GENERATION))
3504 page_table[i].gen = generation;
3505 gc_assert(generations[generation].bytes_allocated == 0);
3506 generations[generation].bytes_allocated =
3507 generations[SCRATCH_GENERATION].bytes_allocated;
3508 generations[SCRATCH_GENERATION].bytes_allocated = 0;
3511 /* Reset the alloc_start_page for generation. */
3512 #ifdef LISP_FEATURE_SEGREGATED_CODE
3513 bzero(generations[generation].alloc_start_page_,
3514 sizeof generations[generation].alloc_start_page_);
3515 #else
3516 generations[generation].alloc_start_page = 0;
3517 generations[generation].alloc_unboxed_start_page = 0;
3518 generations[generation].alloc_large_start_page = 0;
3519 #endif
3521 if (generation >= verify_gens) {
3522 if (gencgc_verbose) {
3523 SHOW("verifying");
3525 verify_gc();
3528 /* Set the new gc trigger for the GCed generation. */
3529 generations[generation].gc_trigger =
3530 generations[generation].bytes_allocated
3531 + generations[generation].bytes_consed_between_gc;
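/* That is, the generation becomes a collection candidate again once it
 * has consed bytes_consed_between_gc bytes on top of what survived
 * this GC. */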
3533 if (raise)
3534 generations[generation].num_gc = 0;
3535 else
3536 ++generations[generation].num_gc;
3540 /* Update last_free_page, then SymbolValue(ALLOCATION_POINTER). */
3541 sword_t
3542 update_dynamic_space_free_pointer(void)
3544 page_index_t last_page = -1, i;
3546 for (i = 0; i < last_free_page; i++)
3547 if (!page_free_p(i) && (page_bytes_used(i) != 0))
3548 last_page = i;
3550 last_free_page = last_page+1;
3552 set_alloc_pointer((lispobj)(page_address(last_free_page)));
3553 return 0; /* dummy value: return something ... */
3556 static void
3557 remap_page_range (page_index_t from, page_index_t to)
3559 /* There's a mysterious Solaris/x86 problem with using mmap
3560 * tricks for memory zeroing. See sbcl-devel thread
3561 * "Re: patch: standalone executable redux".
3563 #if defined(LISP_FEATURE_SUNOS)
3564 zero_and_mark_pages(from, to);
3565 #else
3566 const page_index_t
3567 release_granularity = gencgc_release_granularity/GENCGC_CARD_BYTES,
3568 release_mask = release_granularity-1,
3569 end = to+1,
3570 aligned_from = (from+release_mask)&~release_mask,
3571 aligned_end = (end&~release_mask);
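/* Illustrative example (hypothetical sizes): with 4 KB cards and a
 * 32 KB release granularity, release_mask is 7; remapping pages 3..40
 * rounds aligned_from up to 8 and aligned_end down to 40, so pages
 * 8..39 go back to the OS via mmap while 3..7 and 40 are zeroed in
 * place. */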
3573 if (aligned_from < aligned_end) {
3574 zero_pages_with_mmap(aligned_from, aligned_end-1);
3575 if (aligned_from != from)
3576 zero_and_mark_pages(from, aligned_from-1);
3577 if (aligned_end != end)
3578 zero_and_mark_pages(aligned_end, end-1);
3579 } else {
3580 zero_and_mark_pages(from, to);
3582 #endif
3585 static void
3586 remap_free_pages (page_index_t from, page_index_t to)
3588 page_index_t first_page, last_page;
3590 for (first_page = from; first_page <= to; first_page++) {
3591 if (!page_free_p(first_page) || !page_need_to_zero(first_page))
3592 continue;
3594 last_page = first_page + 1;
3595 while (page_free_p(last_page) &&
3596 (last_page <= to) &&
3597 (page_need_to_zero(last_page)))
3598 last_page++;
3600 remap_page_range(first_page, last_page-1);
3602 first_page = last_page;
3606 generation_index_t small_generation_limit = 1;
3608 /* GC all generations newer than last_gen, raising the objects in each
3609 * to the next older generation - we finish when all generations below
3610 * last_gen are empty. Then if last_gen is due for a GC, or if
3611 * last_gen==NUM_GENERATIONS (the scratch generation? eh?) we GC that
3612 * too. The valid range for last_gen is: 0,1,...,NUM_GENERATIONS.
3614 * We stop collecting at gencgc_oldest_gen_to_gc, even if this is less than
3615 * last_gen (oh, and note that by default it is NUM_GENERATIONS-1) */
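/* A last_gen of 0 asks for a nursery-only ("level 0") collection;
 * gc_and_save() below passes HIGHEST_NORMAL_GENERATION+1 so that every
 * normal generation is collected before a core is saved. */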
3616 void
3617 collect_garbage(generation_index_t last_gen)
3619 generation_index_t gen = 0, i;
3620 int raise, more = 0;
3621 int gen_to_wp;
3622 /* The largest value of last_free_page seen since the time
3623 * remap_free_pages was called. */
3624 static page_index_t high_water_mark = 0;
3626 FSHOW((stderr, "/entering collect_garbage(%d)\n", last_gen));
3627 log_generation_stats(gc_logfile, "=== GC Start ===");
3629 gc_active_p = 1;
3631 if (last_gen > HIGHEST_NORMAL_GENERATION+1) {
3632 FSHOW((stderr,
3633 "/collect_garbage: last_gen = %d, doing a level 0 GC\n",
3634 last_gen));
3635 last_gen = 0;
3638 /* Flush the alloc regions updating the tables. */
3639 gc_alloc_update_all_page_tables(1);
3641 /* Verify the new objects created by Lisp code. */
3642 if (pre_verify_gen_0) {
3643 FSHOW((stderr, "pre-checking generation 0\n"));
3644 verify_generation(0);
3647 if (gencgc_verbose > 1)
3648 print_generation_stats();
3650 #ifdef LISP_FEATURE_IMMOBILE_SPACE
3651 /* Immobile space generation bits are lazily updated for gen0
3652 (not touched on every object allocation) so do it now */
3653 update_immobile_nursery_bits();
3654 #endif
3656 do {
3657 /* Collect the generation. */
3659 if (more || (gen >= gencgc_oldest_gen_to_gc)) {
3660 /* Never raise the oldest generation. Never raise the extra generation
3661 * collected because the 'more' flag was set. */
3662 raise = 0;
3663 more = 0;
3664 } else {
3665 raise =
3666 (gen < last_gen)
3667 || (generations[gen].num_gc >= generations[gen].number_of_gcs_before_promotion);
3668 /* If we would not normally raise this one, but we're
3669 * running low on space in comparison to the object-sizes
3670 * we've been seeing, raise it and collect the next one
3671 * too. */
3672 if (!raise && gen == last_gen) {
3673 more = (2*large_allocation) >= (dynamic_space_size - bytes_allocated);
3674 raise = more;
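/* Illustrative numbers: with a 1 GB dynamic space, 900 MB already
 * allocated and a recent 60 MB large_allocation, 120 MB >= 100 MB of
 * headroom, so this generation is raised and the next one is
 * collected as well. */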
3678 if (gencgc_verbose > 1) {
3679 FSHOW((stderr,
3680 "starting GC of generation %d with raise=%d alloc=%d trig=%d GCs=%d\n",
3681 gen,
3682 raise,
3683 generations[gen].bytes_allocated,
3684 generations[gen].gc_trigger,
3685 generations[gen].num_gc));
3688 /* If an older generation is being filled, then update its
3689 * memory age. */
3690 if (raise == 1) {
3691 generations[gen+1].cum_sum_bytes_allocated +=
3692 generations[gen+1].bytes_allocated;
3695 garbage_collect_generation(gen, raise);
3697 /* Reset the memory age cum_sum. */
3698 generations[gen].cum_sum_bytes_allocated = 0;
3700 if (gencgc_verbose > 1) {
3701 FSHOW((stderr, "GC of generation %d finished:\n", gen));
3702 print_generation_stats();
3705 gen++;
3706 } while ((gen <= gencgc_oldest_gen_to_gc)
3707 && ((gen < last_gen)
3708 || more
3709 || (raise
3710 && (generations[gen].bytes_allocated
3711 > generations[gen].gc_trigger)
3712 && (generation_average_age(gen)
3713 > generations[gen].minimum_age_before_gc))));
3715 /* Now if gen-1 was raised all generations before gen are empty.
3716 * If it wasn't raised then all generations before gen-1 are empty.
3718 * Now objects within this gen's pages cannot point to younger
3719 * generations unless they are written to. This can be exploited
3720 * by write-protecting the pages of gen; then when younger
3721 * generations are GCed only the pages which have been written
3722 * need scanning. */
3723 if (raise)
3724 gen_to_wp = gen;
3725 else
3726 gen_to_wp = gen - 1;
3728 /* There's not much point in WPing pages in generation 0 as it is
3729 * never scavenged (except promoted pages). */
3730 if ((gen_to_wp > 0) && ENABLE_PAGE_PROTECTION) {
3731 /* Check that they are all empty. */
3732 for (i = 0; i < gen_to_wp; i++) {
3733 if (generations[i].bytes_allocated)
3734 lose("trying to write-protect gen. %d when gen. %d nonempty\n",
3735 gen_to_wp, i);
3737 write_protect_generation_pages(gen_to_wp);
3739 #ifdef LISP_FEATURE_IMMOBILE_SPACE
3740 write_protect_immobile_space();
3741 #endif
3743 /* Set gc_alloc() back to generation 0. The current regions should
3744 * be flushed after the above GCs. */
3745 gc_assert(boxed_region.free_pointer == boxed_region.start_addr);
3746 gc_alloc_generation = 0;
3748 /* Save the high-water mark before updating last_free_page */
3749 if (last_free_page > high_water_mark)
3750 high_water_mark = last_free_page;
3752 update_dynamic_space_free_pointer();
3754 /* Update auto_gc_trigger. Make sure we trigger the next GC before
3755 * running out of heap! */
3756 if (bytes_consed_between_gcs <= (dynamic_space_size - bytes_allocated))
3757 auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
3758 else
3759 auto_gc_trigger = bytes_allocated + (dynamic_space_size - bytes_allocated)/2;
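/* E.g. (illustrative) with bytes_consed_between_gcs = 50 MB and 200 MB
 * allocated in a 1 GB space, the next GC triggers at 250 MB; were only
 * 30 MB of headroom left, the trigger would instead be placed halfway
 * into it, at bytes_allocated + 15 MB. */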
3761 if(gencgc_verbose) {
3762 #define MESSAGE ("Next gc when %"OS_VM_SIZE_FMT" bytes have been consed\n")
3763 char buf[64];
3764 int n;
3765 // fprintf() can - and does - cause deadlock here.
3766 // snprintf() seems to work fine.
3767 n = snprintf(buf, sizeof buf, MESSAGE, auto_gc_trigger);
3768 ignore_value(write(2, buf, n));
3769 #undef MESSAGE
3772 /* If we did a big GC (arbitrarily defined as gen > 1), release memory
3773 * back to the OS.
3775 if (gen > small_generation_limit) {
3776 if (last_free_page > high_water_mark)
3777 high_water_mark = last_free_page;
3778 remap_free_pages(0, high_water_mark);
3779 high_water_mark = 0;
3782 gc_active_p = 0;
3783 large_allocation = 0;
3785 #ifdef LISP_FEATURE_SB_TRACEROOT
3786 if (gc_object_watcher) {
3787 extern void gc_prove_liveness(void(*)(), lispobj, int, uword_t*, int);
3788 gc_prove_liveness(preserve_context_registers,
3789 gc_object_watcher,
3790 gc_n_stack_pins, pinned_objects.keys,
3791 gc_traceroot_criterion);
3793 #endif
3795 log_generation_stats(gc_logfile, "=== GC End ===");
3796 SHOW("returning from collect_garbage");
3799 void
3800 gc_init(void)
3802 page_index_t i;
3804 #if defined(LISP_FEATURE_SB_SAFEPOINT)
3805 alloc_gc_page();
3806 #endif
3808 /* Compute the number of pages needed for the dynamic space.
3809 * Dynamic space size should be aligned on page size. */
3810 page_table_pages = dynamic_space_size/GENCGC_CARD_BYTES;
3811 gc_assert(dynamic_space_size == npage_bytes(page_table_pages));
3813 /* Default nursery size to 5% of the total dynamic space size,
3814 * min 1Mb. */
3815 bytes_consed_between_gcs = dynamic_space_size/(os_vm_size_t)20;
3816 if (bytes_consed_between_gcs < (1024*1024))
3817 bytes_consed_between_gcs = 1024*1024;
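/* E.g. a 1 GB dynamic space yields a nursery of about 51 MB (5%),
 * while anything under 20 MB of dynamic space is clamped to the 1 MB
 * minimum. */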
3819 /* The page_table must be allocated using "calloc" to initialize
3820 * the page structures correctly. There used to be a separate
3821 * initialization loop (now commented out; see below) but that was
3822 * unnecessary and did hurt startup time. */
3823 page_table = calloc(page_table_pages, sizeof(struct page));
3824 gc_assert(page_table);
3825 #ifdef LISP_FEATURE_IMMOBILE_SPACE
3826 gc_init_immobile();
3827 #endif
3829 hopscotch_init();
3830 #ifdef PIN_GRANULARITY_LISPOBJ
3831 hopscotch_create(&pinned_objects, HOPSCOTCH_HASH_FUN_DEFAULT, 0 /* hashset */,
3832 32 /* logical bin count */, 0 /* default range */);
3833 #endif
3835 scavtab[WEAK_POINTER_WIDETAG] = scav_weak_pointer;
3836 transother[SIMPLE_ARRAY_WIDETAG] = trans_boxed_large;
3838 /* The page structures are initialized implicitly when page_table
3839 * is allocated with "calloc" above. Formerly we had the following
3840 * explicit initialization here (comments converted to C99 style
3841 * for readability as C's block comments don't nest):
3843 * // Initialize each page structure.
3844 * for (i = 0; i < page_table_pages; i++) {
3845 * // Initialize all pages as free.
3846 * page_table[i].allocated = FREE_PAGE_FLAG;
3847 * page_table[i].bytes_used = 0;
3849 * // Pages are not write-protected at startup.
3850 * page_table[i].write_protected = 0;
3853 * Without this loop the image starts up much faster when dynamic
3854 * space is large -- which it is on 64-bit platforms already by
3855 * default -- and when "calloc" for large arrays is implemented
3856 * using copy-on-write of a page of zeroes -- which it is at least
3857 * on Linux. In this case the pages that the page_table is stored
3858 * in are not mapped and cleared until the corresponding part of
3859 * dynamic space is used. For example, this saves clearing 16 MB of
3860 * memory at startup if the page size is 4 KB and the size of
3861 * dynamic space is 4 GB.
3862 * FREE_PAGE_FLAG must be 0 for this to work correctly which is
3863 * asserted below: */
3865 /* Compile time assertion: If triggered, declares an array
3866 * of dimension -1 forcing a syntax error. The intent of the
3867 * assignment is to avoid an "unused variable" warning. */
3868 char assert_free_page_flag_0[(FREE_PAGE_FLAG) ? -1 : 1];
3869 assert_free_page_flag_0[0] = assert_free_page_flag_0[0];
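/* (On a C11 compiler the same check could presumably be spelled
 * _Static_assert(FREE_PAGE_FLAG == 0, "FREE_PAGE_FLAG must be 0");
 * the array trick merely avoids requiring C11.) */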
3872 bytes_allocated = 0;
3874 /* Initialize the generations. */
3875 for (i = 0; i < NUM_GENERATIONS; i++) {
3876 generations[i].alloc_start_page = 0;
3877 generations[i].alloc_unboxed_start_page = 0;
3878 generations[i].alloc_large_start_page = 0;
3879 generations[i].bytes_allocated = 0;
3880 generations[i].gc_trigger = 2000000;
3881 generations[i].num_gc = 0;
3882 generations[i].cum_sum_bytes_allocated = 0;
3883 /* the tune-able parameters */
3884 generations[i].bytes_consed_between_gc
3885 = bytes_consed_between_gcs/(os_vm_size_t)HIGHEST_NORMAL_GENERATION;
3886 generations[i].number_of_gcs_before_promotion = 1;
3887 generations[i].minimum_age_before_gc = 0.75;
3890 /* Initialize gc_alloc. */
3891 gc_alloc_generation = 0;
3892 gc_set_region_empty(&boxed_region);
3893 gc_set_region_empty(&unboxed_region);
3894 #ifdef LISP_FEATURE_SEGREGATED_CODE
3895 gc_set_region_empty(&code_region);
3896 #endif
3898 last_free_page = 0;
3901 /* Pick up the dynamic space from after a core load.
3903 * The ALLOCATION_POINTER points to the end of the dynamic space.
3906 static void
3907 gencgc_pickup_dynamic(void)
3909 page_index_t page = 0;
3910 char *alloc_ptr = (char *)get_alloc_pointer();
3911 lispobj *prev=(lispobj *)page_address(page);
3912 generation_index_t gen = PSEUDO_STATIC_GENERATION;
3914 bytes_allocated = 0;
3916 do {
3917 lispobj *first,*ptr= (lispobj *)page_address(page);
3919 if (!gencgc_partial_pickup || !page_free_p(page)) {
3920 /* It is possible, though rare, for the saved page table
3921 * to contain free pages below alloc_ptr. */
3922 page_table[page].gen = gen;
3923 set_page_bytes_used(page, GENCGC_CARD_BYTES);
3924 page_table[page].large_object = 0;
3925 page_table[page].write_protected = 0;
3926 page_table[page].write_protected_cleared = 0;
3927 page_table[page].dont_move = 0;
3928 set_page_need_to_zero(page, 1);
3930 bytes_allocated += GENCGC_CARD_BYTES;
3933 if (!gencgc_partial_pickup) {
3934 #ifdef LISP_FEATURE_SEGREGATED_CODE
3935 // Make the most general assumption: any page *might* contain code.
3936 page_table[page].allocated = CODE_PAGE_FLAG;
3937 #else
3938 page_table[page].allocated = BOXED_PAGE_FLAG;
3939 #endif
3940 first = gc_search_space3(ptr, prev, (ptr+2));
3941 if(ptr == first)
3942 prev=ptr;
3943 set_page_scan_start_offset(page, page_address(page) - (char*)prev);
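/* scan_start_offset records how far before this page the object
 * covering its first byte begins, so a later scan of the page can back
 * up to a real object header instead of starting mid-object. */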
3945 page++;
3946 } while (page_address(page) < alloc_ptr);
3948 last_free_page = page;
3950 generations[gen].bytes_allocated = bytes_allocated;
3952 gc_alloc_update_all_page_tables(1);
3953 if (ENABLE_PAGE_PROTECTION)
3954 write_protect_generation_pages(gen);
3957 void
3958 gc_initialize_pointers(void)
3960 gencgc_pickup_dynamic();
3964 /* alloc(..) is the external interface for memory allocation. It
3965 * allocates to generation 0. It is not called from within the garbage
3966 * collector, since only external callers need the check for heap
3967 * size (GC trigger) and the disabling of interrupts (interrupts are
3968 * always disabled during a GC).
3970 * The vops that call alloc(..) assume that the returned space is zero-filled.
3971 * (E.g. the most significant word of a 2-word bignum in MOVE-FROM-UNSIGNED.)
3973 * The check for a GC trigger is only performed when the current
3974 * region is full, so in most cases it's not needed. */
3976 static inline lispobj *
3977 general_alloc_internal(sword_t nbytes, int page_type_flag, struct alloc_region *region,
3978 struct thread *thread)
3980 #ifndef LISP_FEATURE_WIN32
3981 lispobj alloc_signal;
3982 #endif
3983 void *new_obj;
3984 void *new_free_pointer;
3985 os_vm_size_t trigger_bytes = 0;
3987 gc_assert(nbytes > 0);
3989 /* Check for alignment allocation problems. */
3990 gc_assert((((uword_t)region->free_pointer & LOWTAG_MASK) == 0)
3991 && ((nbytes & LOWTAG_MASK) == 0));
3993 #if !(defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD))
3994 /* Must be inside a PA section. */
3995 gc_assert(get_pseudo_atomic_atomic(thread));
3996 #endif
3998 if ((os_vm_size_t) nbytes > large_allocation)
3999 large_allocation = nbytes;
4001 /* maybe we can do this quickly ... */
4002 new_free_pointer = (char*)region->free_pointer + nbytes;
4003 if (new_free_pointer <= region->end_addr) {
4004 new_obj = (void*)(region->free_pointer);
4005 region->free_pointer = new_free_pointer;
4006 return(new_obj); /* yup */
4009 /* We don't want to count nbytes against auto_gc_trigger unless we
4010 * have to: it speeds up the tenuring of objects and slows down
4011 * allocation. However, unless we do so when allocating _very_
4012 * large objects we are in danger of exhausting the heap without
4013 * running sufficient GCs.
4015 if ((os_vm_size_t) nbytes >= bytes_consed_between_gcs)
4016 trigger_bytes = nbytes;
4018 /* we have to go the long way around, it seems. Check whether we
4019 * should GC in the near future
4021 if (auto_gc_trigger && (bytes_allocated+trigger_bytes > auto_gc_trigger)) {
4022 /* Don't flood the system with interrupts if the need to gc is
4023 * already noted. This can happen for example when SUB-GC
4024 * allocates or after a gc triggered in a WITHOUT-GCING. */
4025 if (SymbolValue(GC_PENDING,thread) == NIL) {
4026 /* set things up so that GC happens when we finish the PA
4027 * section */
4028 SetSymbolValue(GC_PENDING,T,thread);
4029 if (SymbolValue(GC_INHIBIT,thread) == NIL) {
4030 #ifdef LISP_FEATURE_SB_SAFEPOINT
4031 thread_register_gc_trigger();
4032 #else
4033 set_pseudo_atomic_interrupted(thread);
4034 #ifdef GENCGC_IS_PRECISE
4035 /* PPC calls alloc() from a trap;
4036 * look up the most recent context if that is where we came from. */
4038 os_context_t *context =
4039 thread->interrupt_data->allocation_trap_context;
4040 maybe_save_gc_mask_and_block_deferrables
4041 (context ? os_context_sigmask_addr(context) : NULL);
4043 #else
4044 maybe_save_gc_mask_and_block_deferrables(NULL);
4045 #endif
4046 #endif
4050 new_obj = gc_alloc_with_region(nbytes, page_type_flag, region, 0);
4052 #ifndef LISP_FEATURE_WIN32
4053 /* for sb-prof, and not supported on Windows yet */
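/* ALLOC_SIGNAL is a fixnum countdown armed by the profiler: each trip
 * through this slow path decrements it by one, and once it reaches
 * zero we raise SIGPROF and store T to disarm it until Lisp re-arms
 * it. */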
4054 alloc_signal = SymbolValue(ALLOC_SIGNAL,thread);
4055 if ((alloc_signal & FIXNUM_TAG_MASK) == 0) {
4056 if ((sword_t) alloc_signal <= 0) {
4057 SetSymbolValue(ALLOC_SIGNAL, T, thread);
4058 raise(SIGPROF);
4059 } else {
4060 SetSymbolValue(ALLOC_SIGNAL,
4061 alloc_signal - (1 << N_FIXNUM_TAG_BITS),
4062 thread);
4065 #endif
4067 return (new_obj);
4070 lispobj *
4071 general_alloc(sword_t nbytes, int page_type_flag)
4073 struct thread *thread = arch_os_get_current_thread();
4074 /* Select correct region, and call general_alloc_internal with it.
4075 * For other than boxed allocation we must lock first, since the
4076 * region is shared. */
4077 #ifdef LISP_FEATURE_SEGREGATED_CODE
4078 if (page_type_flag == BOXED_PAGE_FLAG) {
4079 #else
4080 if (BOXED_PAGE_FLAG & page_type_flag) {
4081 #endif
4082 #ifdef LISP_FEATURE_SB_THREAD
4083 struct alloc_region *region = (thread ? &(thread->alloc_region) : &boxed_region);
4084 #else
4085 struct alloc_region *region = &boxed_region;
4086 #endif
4087 return general_alloc_internal(nbytes, page_type_flag, region, thread);
4088 #ifdef LISP_FEATURE_SEGREGATED_CODE
4089 } else if (page_type_flag == UNBOXED_PAGE_FLAG ||
4090 page_type_flag == CODE_PAGE_FLAG) {
4091 struct alloc_region *region =
4092 page_type_flag == CODE_PAGE_FLAG ? &code_region : &unboxed_region;
4093 #else
4094 } else if (UNBOXED_PAGE_FLAG == page_type_flag) {
4095 struct alloc_region *region = &unboxed_region;
4096 #endif
4097 lispobj * obj;
4098 int result;
4099 result = thread_mutex_lock(&allocation_lock);
4100 gc_assert(!result);
4101 obj = general_alloc_internal(nbytes, page_type_flag, region, thread);
4102 result = thread_mutex_unlock(&allocation_lock);
4103 gc_assert(!result);
4104 return obj;
4105 } else {
4106 lose("bad page type flag: %d", page_type_flag);
4110 lispobj AMD64_SYSV_ABI *
4111 alloc(sword_t nbytes)
4113 #ifdef LISP_FEATURE_SB_SAFEPOINT_STRICTLY
4114 struct thread *self = arch_os_get_current_thread();
4115 int was_pseudo_atomic = get_pseudo_atomic_atomic(self);
4116 if (!was_pseudo_atomic)
4117 set_pseudo_atomic_atomic(self);
4118 #else
4119 gc_assert(get_pseudo_atomic_atomic(arch_os_get_current_thread()));
4120 #endif
4122 lispobj *result = general_alloc(nbytes, BOXED_PAGE_FLAG);
4124 #ifdef LISP_FEATURE_SB_SAFEPOINT_STRICTLY
4125 if (!was_pseudo_atomic)
4126 clear_pseudo_atomic_atomic(self);
4127 #endif
4129 return result;
4133 * shared support for the OS-dependent signal handlers which
4134 * catch GENCGC-related write-protect violations
4136 void unhandled_sigmemoryfault(void* addr);
4138 /* Depending on which OS we're running under, different signals might
4139 * be raised for a violation of write protection in the heap. This
4140 * function factors out the common generational GC magic which needs
4141 * to be invoked in this case, and should be called from whatever signal
4142 * handler is appropriate for the OS we're running under.
4144 * Return true if this signal is a normal generational GC thing that
4145 * we were able to handle, or false if it was abnormal and control
4146 * should fall through to the general SIGSEGV/SIGBUS/whatever logic.
4148 * We have two control flags for this: one causes us to ignore faults
4149 * on unprotected pages completely, and the second complains to stderr
4150 * but allows us to continue without losing.
4152 extern boolean ignore_memoryfaults_on_unprotected_pages;
4153 boolean ignore_memoryfaults_on_unprotected_pages = 0;
4155 extern boolean continue_after_memoryfault_on_unprotected_pages;
4156 boolean continue_after_memoryfault_on_unprotected_pages = 0;
4159 gencgc_handle_wp_violation(void* fault_addr)
4161 page_index_t page_index = find_page_index(fault_addr);
4163 #if QSHOW_SIGNALS
4164 FSHOW((stderr,
4165 "heap WP violation? fault_addr=%p, page_index=%"PAGE_INDEX_FMT"\n",
4166 fault_addr, page_index));
4167 #endif
4169 /* Check whether the fault is within the dynamic space. */
4170 if (page_index == (-1)) {
4171 #ifdef LISP_FEATURE_IMMOBILE_SPACE
4172 extern int immobile_space_handle_wp_violation(void*);
4173 if (immobile_space_handle_wp_violation(fault_addr))
4174 return 1;
4175 #endif
4177 /* It can be helpful to be able to put a breakpoint on this
4178 * case to help diagnose low-level problems. */
4179 unhandled_sigmemoryfault(fault_addr);
4181 /* not within the dynamic space -- not our responsibility */
4182 return 0;
4184 } else {
4185 int ret;
4186 ret = thread_mutex_lock(&free_pages_lock);
4187 gc_assert(ret == 0);
4188 if (page_table[page_index].write_protected) {
4189 /* Unprotect the page. */
4190 os_protect(page_address(page_index), GENCGC_CARD_BYTES, OS_VM_PROT_ALL);
4191 page_table[page_index].write_protected_cleared = 1;
4192 page_table[page_index].write_protected = 0;
4193 } else if (!ignore_memoryfaults_on_unprotected_pages) {
4194 /* The only acceptable reason for this signal on a heap
4195 * access is that GENCGC write-protected the page.
4196 * However, if two CPUs hit a wp page near-simultaneously,
4197 * we had better not have the second one lose here if it
4198 * does this test after the first one has already set wp=0
4200 if(page_table[page_index].write_protected_cleared != 1) {
4201 void lisp_backtrace(int frames);
4202 lisp_backtrace(10);
4203 fprintf(stderr,
4204 "Fault @ %p, page %"PAGE_INDEX_FMT" not marked as write-protected:\n"
4205 " boxed_region.first_page: %"PAGE_INDEX_FMT","
4206 " boxed_region.last_page %"PAGE_INDEX_FMT"\n"
4207 " page.scan_start_offset: %"OS_VM_SIZE_FMT"\n"
4208 " page.bytes_used: %u\n"
4209 " page.allocated: %d\n"
4210 " page.write_protected: %d\n"
4211 " page.write_protected_cleared: %d\n"
4212 " page.generation: %d\n",
4213 fault_addr,
4214 page_index,
4215 boxed_region.first_page,
4216 boxed_region.last_page,
4217 page_scan_start_offset(page_index),
4218 page_bytes_used(page_index),
4219 page_table[page_index].allocated,
4220 page_table[page_index].write_protected,
4221 page_table[page_index].write_protected_cleared,
4222 page_table[page_index].gen);
4223 if (!continue_after_memoryfault_on_unprotected_pages)
4224 lose("Feh.\n");
4227 ret = thread_mutex_unlock(&free_pages_lock);
4228 gc_assert(ret == 0);
4229 /* Don't worry, we can handle it. */
4230 return 1;
4233 /* This is to be called when we catch a SIGSEGV/SIGBUS, determine that
4234 * it's not just a case of the program hitting the write barrier, and
4235 * are about to let Lisp deal with it. It's basically just a
4236 * convenient place to set a gdb breakpoint. */
4237 void
4238 unhandled_sigmemoryfault(void *addr)
4241 static void
4242 update_thread_page_tables(struct thread *th)
4244 gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->alloc_region);
4245 #if defined(LISP_FEATURE_SB_SAFEPOINT_STRICTLY) && !defined(LISP_FEATURE_WIN32)
4246 gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->sprof_alloc_region);
4247 #endif
4250 /* GC is single-threaded and all memory allocations during a
4251 collection happen in the GC thread, so it is sufficient to update
4252 all the page tables once at the beginning of a collection and
4253 update only page tables of the GC thread during the collection. */
4254 void gc_alloc_update_all_page_tables(int for_all_threads)
4256 /* Flush the alloc regions updating the tables. */
4257 struct thread *th;
4258 if (for_all_threads) {
4259 for_each_thread(th) {
4260 update_thread_page_tables(th);
4263 else {
4264 th = arch_os_get_current_thread();
4265 if (th) {
4266 update_thread_page_tables(th);
4269 #ifdef LISP_FEATURE_SEGREGATED_CODE
4270 gc_alloc_update_page_tables(CODE_PAGE_FLAG, &code_region);
4271 #endif
4272 gc_alloc_update_page_tables(UNBOXED_PAGE_FLAG, &unboxed_region);
4273 gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &boxed_region);
4276 void
4277 gc_set_region_empty(struct alloc_region *region)
4279 region->first_page = 0;
4280 region->last_page = -1;
4281 region->start_addr = page_address(0);
4282 region->free_pointer = page_address(0);
4283 region->end_addr = page_address(0);
4286 static void
4287 zero_all_free_pages()
4289 page_index_t i;
4291 for (i = 0; i < last_free_page; i++) {
4292 if (page_free_p(i)) {
4293 #ifdef READ_PROTECT_FREE_PAGES
4294 os_protect(page_address(i),
4295 GENCGC_CARD_BYTES,
4296 OS_VM_PROT_ALL);
4297 #endif
4298 zero_pages(i, i);
4303 /* Things to do before doing a final GC before saving a core (without
4304 * purify).
4306 * + Objects on large_object pages aren't moved by the GC, so we need to
4307 * unset that flag on all pages.
4308 * + The pseudo-static generation isn't normally collected, but it seems
4309 * reasonable to collect it at least when saving a core. So move the
4310 * pages to a normal generation.
4312 static void
4313 prepare_for_final_gc ()
4315 page_index_t i;
4317 #ifdef LISP_FEATURE_IMMOBILE_SPACE
4318 extern void prepare_immobile_space_for_final_gc();
4319 prepare_immobile_space_for_final_gc ();
4320 #endif
4321 for (i = 0; i < last_free_page; i++) {
4322 page_table[i].large_object = 0;
4323 if (page_table[i].gen == PSEUDO_STATIC_GENERATION) {
4324 int used = page_bytes_used(i);
4325 page_table[i].gen = HIGHEST_NORMAL_GENERATION;
4326 generations[PSEUDO_STATIC_GENERATION].bytes_allocated -= used;
4327 generations[HIGHEST_NORMAL_GENERATION].bytes_allocated += used;
4332 /* Set this switch to 1 for coalescing of strings dumped to fasl,
4333 * or 2 for coalescing of those,
4334 * plus literal strings in code compiled to memory. */
4335 char gc_coalesce_string_literals = 0;
4337 /* Do a non-conservative GC, and then save a core with the initial
4338 * function being set to the value of 'lisp_init_function' */
4339 void
4340 gc_and_save(char *filename, boolean prepend_runtime,
4341 boolean save_runtime_options, boolean compressed,
4342 int compression_level, int application_type)
4344 FILE *file;
4345 void *runtime_bytes = NULL;
4346 size_t runtime_size;
4347 extern void coalesce_similar_objects();
4348 extern struct lisp_startup_options lisp_startup_options;
4349 boolean verbose = !lisp_startup_options.noinform;
4351 file = prepare_to_save(filename, prepend_runtime, &runtime_bytes,
4352 &runtime_size);
4353 if (file == NULL)
4354 return;
4356 conservative_stack = 0;
4358 /* The filename might come from Lisp, and be moved by the now
4359 * non-conservative GC. */
4360 filename = strdup(filename);
4362 /* Collect twice: once into relatively high memory, and then back
4363 * into low memory. This compacts the retained data into the lower
4364 * pages, minimizing the size of the core file.
4366 prepare_for_final_gc();
4367 gencgc_alloc_start_page = last_free_page;
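/* Starting allocation at the current heap top forces the survivors of
 * this first collection into high pages; the second collection below
 * resets gencgc_alloc_start_page to -1 so they are copied back down
 * into low pages. */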
4368 collect_garbage(HIGHEST_NORMAL_GENERATION+1);
4370 // We always coalesce copyable numbers. Additional coalescing is done
4371 // only on request, in which case a message is shown (unless verbose=0).
4372 if (gc_coalesce_string_literals && verbose) {
4373 printf("[coalescing similar vectors... ");
4374 fflush(stdout);
4376 coalesce_similar_objects();
4377 if (gc_coalesce_string_literals && verbose)
4378 printf("done]\n");
4380 prepare_for_final_gc();
4381 gencgc_alloc_start_page = -1;
4382 collect_garbage(HIGHEST_NORMAL_GENERATION+1);
4384 if (prepend_runtime)
4385 save_runtime_to_filehandle(file, runtime_bytes, runtime_size,
4386 application_type);
4388 /* The dumper doesn't know that pages need to be zeroed before use. */
4389 zero_all_free_pages();
4390 save_to_filehandle(file, filename, lisp_init_function,
4391 prepend_runtime, save_runtime_options,
4392 compressed ? compression_level : COMPRESSION_LEVEL_NONE);
4393 /* Oops. Save still managed to fail. Since we've mangled the stack
4394 * beyond hope, there's not much we can do.
4395 * (beyond FUNCALLing lisp_init_function, but I suspect that's
4396 * going to be rather unsatisfactory too...) */
4397 lose("Attempt to save core after non-conservative GC failed.\n");