/*
 * GENerational Conservative Garbage Collector for SBCL x86
 */

/*
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 */

/*
 * For a review of garbage collection techniques (e.g. generational
 * GC) and terminology (e.g. "scavenging") see Paul R. Wilson,
 * "Uniprocessor Garbage Collection Techniques". As of 20000618, this
 * had been accepted for _ACM Computing Surveys_ and was available
 * as a PostScript preprint through
 *   <http://www.cs.utexas.edu/users/oops/papers.html>
 * as
 *   <ftp://ftp.cs.utexas.edu/pub/garbage/bigsurv.ps>.
 */
#include "interrupt.h"
#include "gc-internal.h"
#include "genesis/vector.h"
#include "genesis/weak-pointer.h"
#include "genesis/simple-fun.h"
#include "genesis/hash-table.h"
/* forward declarations */
long gc_find_freeish_pages(long *restart_page_ptr, long nbytes, int unboxed);
static void gencgc_pickup_dynamic(void);
/* the number of actual generations. (The number of 'struct
 * generation' objects is one more than this, because one object
 * serves as scratch when GC'ing.) */
#define NUM_GENERATIONS 6

/* Should we use page protection to help avoid the scavenging of pages
 * that don't have pointers to younger generations? */
boolean enable_page_protection = 1;
/* Should we unmap a page and re-mmap it to have it zero filled? */
#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
/* comment from cmucl-2.4.8: This can waste a lot of swap on FreeBSD
 * so don't unmap there.
 *
 * The CMU CL comment didn't specify a version, but was probably an
 * old version of FreeBSD (pre-4.0), so this might no longer be true.
 * OTOH, if it is true, this behavior might exist on OpenBSD too, so
 * for now we don't unmap there either. -- WHN 2001-04-07 */
boolean gencgc_unmap_zero = 0;
#else
boolean gencgc_unmap_zero = 1;
#endif
/* the minimum size (in bytes) for a large object */
unsigned large_object_size = 4 * PAGE_BYTES;
/* the verbosity level. All non-error messages are disabled at level 0,
 * and only a few rare messages are printed at level 1. */
#ifdef QSHOW
unsigned gencgc_verbose = 1;
#else
unsigned gencgc_verbose = 0;
#endif
/* FIXME: At some point enable the various error-checking things below
 * and see what they say. */

/* We hunt for pointers to old-space, when GCing generations >= verify_gens.
 * Set verify_gens to NUM_GENERATIONS to disable this kind of check. */
int verify_gens = NUM_GENERATIONS;
/* Should we do a pre-scan verify of generation 0 before it's GCed? */
boolean pre_verify_gen_0 = 0;

/* Should we check for bad pointers after gc_free_heap is called
 * from Lisp PURIFY? */
boolean verify_after_free_heap = 0;

/* Should we print a note when code objects are found in the dynamic space
 * during a heap verify? */
boolean verify_dynamic_code_check = 0;

/* Should we check code objects for fixup errors after they are transported? */
boolean check_code_fixups = 0;

/* Should we check that newly allocated regions are zero filled? */
boolean gencgc_zero_check = 0;

/* Should we check that the free space is zero filled? */
boolean gencgc_enable_verify_zero_fill = 0;

/* Should we check that free pages are zero filled during gc_free_heap
 * called after Lisp PURIFY? */
boolean gencgc_zero_check_during_free_heap = 0;
/*
 * GC structures and variables
 */

/* the total bytes allocated. These are seen by Lisp DYNAMIC-USAGE. */
unsigned long bytes_allocated = 0;
extern unsigned long bytes_consed_between_gcs; /* gc-common.c */
unsigned long auto_gc_trigger = 0;
/* the source and destination generations. These are set before a GC starts
 * scavenging. */
long from_space;
long new_space;
/* An array of page structures is statically allocated.
 * This helps quickly map between an address and its page structure.
 * NUM_PAGES is set from the size of the dynamic space. */
struct page page_table[NUM_PAGES];
/* To map addresses to page structures the address of the first page
 * is needed. */
static void *heap_base = NULL;
#if N_WORD_BITS == 32
 #define SIMPLE_ARRAY_WORD_WIDETAG SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG
#elif N_WORD_BITS == 64
 #define SIMPLE_ARRAY_WORD_WIDETAG SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
#endif
/* Calculate the start address for the given page number. */
inline void *
page_address(long page_num)
{
    return (heap_base + (page_num * PAGE_BYTES));
}
/* Find the page index within the page_table for the given
 * address. Return -1 on failure. */
inline long
find_page_index(void *addr)
{
    long index = addr-heap_base;

    if (index >= 0) {
        index = ((unsigned long)index)/PAGE_BYTES;
        if (index < NUM_PAGES)
            return (index);
    }

    return (-1);
}
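
/* Worked example (not part of the original source): assuming PAGE_BYTES
 * is 4096, as on x86, and heap_base is 0x09000000, the address
 * 0x09003010 yields index (0x09003010 - 0x09000000)/4096 = 3, so
 * find_page_index() returns 3 and page_address(3) recovers the page
 * start 0x09003000. */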
/* a structure to hold the state of a generation */
struct generation {

    /* the first page that gc_alloc() checks on its next call */
    long alloc_start_page;

    /* the first page that gc_alloc_unboxed() checks on its next call */
    long alloc_unboxed_start_page;

    /* the first page that gc_alloc_large (boxed) considers on its next
     * call. (Although it always allocates after the boxed_region.) */
    long alloc_large_start_page;

    /* the first page that gc_alloc_large (unboxed) considers on its
     * next call. (Although it always allocates after the
     * current_unboxed_region.) */
    long alloc_large_unboxed_start_page;

    /* the bytes allocated to this generation */
    long bytes_allocated;

    /* the number of bytes at which to trigger a GC */
    long gc_trigger;

    /* to calculate a new level for gc_trigger */
    long bytes_consed_between_gc;

    /* the number of GCs since the last raise */
    int num_gc;

    /* the average age after which a GC will raise objects to the
     * next generation */
    int trigger_age;

    /* the cumulative sum of the bytes allocated to this generation. It is
     * cleared after a GC of this generation, and updated before new
     * objects are added from a GC of a younger generation. Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC. */
    long cum_sum_bytes_allocated;

    /* a minimum average memory age before a GC will occur helps
     * prevent a GC when a large number of new live objects have been
     * added, in which case a GC could be a waste of time */
    double min_av_mem_age;
};
/* the number of actual generations. (The number of 'struct
 * generation' objects is one more than this, because one object
 * serves as scratch when GC'ing.) */
#define NUM_GENERATIONS 6

/* an array of generation structures. There needs to be one more
 * generation structure than actual generations as the oldest
 * generation is temporarily raised then lowered. */
struct generation generations[NUM_GENERATIONS+1];
/* the oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... (NUM_GENERATIONS-1)
 *
 * The default of (NUM_GENERATIONS-1) enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided. */
unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS-1;
/* The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the Lisp code. */
static long last_free_page;

/* This lock is to prevent multiple threads from simultaneously
 * allocating new regions which overlap each other. Note that the
 * majority of GC is single-threaded, but alloc() may be called from
 * >1 thread at a time and must be thread-safe. This lock must be
 * seized before all accesses to generations[] or to parts of
 * page_table[] that other threads may want to see */
static lispobj free_pages_lock = 0;
/*
 * miscellaneous heap functions
 */

/* Count the number of pages which are write-protected within the
 * given generation. */
static long
count_write_protect_generation_pages(int generation)
{
    long i;
    long count = 0;

    for (i = 0; i < last_free_page; i++)
        if ((page_table[i].allocated != FREE_PAGE_FLAG)
            && (page_table[i].gen == generation)
            && (page_table[i].write_protected == 1))
            count++;
    return count;
}
/* Count the number of pages within the given generation. */
static long
count_generation_pages(int generation)
{
    long i;
    long count = 0;

    for (i = 0; i < last_free_page; i++)
        if ((page_table[i].allocated != 0)
            && (page_table[i].gen == generation))
            count++;
    return count;
}
static long
count_dont_move_pages(void)
{
    long i;
    long count = 0;
    for (i = 0; i < last_free_page; i++) {
        if ((page_table[i].allocated != 0) && (page_table[i].dont_move != 0)) {
            ++count;
        }
    }
    return count;
}
/* Work through the pages and add up the number of bytes used for the
 * given generation. */
static long
count_generation_bytes_allocated (int gen)
{
    long i;
    long result = 0;
    for (i = 0; i < last_free_page; i++) {
        if ((page_table[i].allocated != 0) && (page_table[i].gen == gen))
            result += page_table[i].bytes_used;
    }
    return result;
}
/* Return the average age of the memory in a generation. */
static double
gen_av_mem_age(int gen)
{
    if (generations[gen].bytes_allocated == 0)
        return 0.0;

    return
        ((double)generations[gen].cum_sum_bytes_allocated)
        / ((double)generations[gen].bytes_allocated);
}
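
/* Worked example (not part of the original source): if a generation has
 * accumulated cum_sum_bytes_allocated = 3000000 since its last GC and
 * currently holds bytes_allocated = 1000000, gen_av_mem_age() returns
 * 3.0; with min_av_mem_age set above that value, this generation would
 * not yet be considered worth GCing. */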
void fpu_save(int *);    /* defined in x86-assem.S */
void fpu_restore(int *); /* defined in x86-assem.S */
/* The verbose argument controls how much to print: 0 for normal
 * level of detail; 1 for debugging. */
void
print_generation_stats(int verbose) /* FIXME: should take FILE argument */
{
    int i, gens;
    int fpu_state[27];

    /* This code uses the FP instructions which may be set up for Lisp
     * so they need to be saved and reset for C. */
    fpu_save(fpu_state);

    /* number of generations to print */
    if (verbose)
        gens = NUM_GENERATIONS+1;
    else
        gens = NUM_GENERATIONS;

    /* Print the heap stats. */
    fprintf(stderr,
            " Gen Boxed Unboxed LB LUB !move Alloc Waste Trig WP GCs Mem-age\n");

    for (i = 0; i < gens; i++) {
        int j;
        int boxed_cnt = 0;
        int unboxed_cnt = 0;
        int large_boxed_cnt = 0;
        int large_unboxed_cnt = 0;
        int pinned_cnt = 0;

        for (j = 0; j < last_free_page; j++)
            if (page_table[j].gen == i) {

                /* Count the number of boxed pages within the given
                 * generation. */
                if (page_table[j].allocated & BOXED_PAGE_FLAG) {
                    if (page_table[j].large_object)
                        large_boxed_cnt++;
                    else
                        boxed_cnt++;
                }
                if(page_table[j].dont_move) pinned_cnt++;
                /* Count the number of unboxed pages within the given
                 * generation. */
                if (page_table[j].allocated & UNBOXED_PAGE_FLAG) {
                    if (page_table[j].large_object)
                        large_unboxed_cnt++;
                    else
                        unboxed_cnt++;
                }
            }

        gc_assert(generations[i].bytes_allocated
                  == count_generation_bytes_allocated(i));
        fprintf(stderr,
                " %1d: %5d %5d %5d %5d %5d %8ld %5ld %8ld %4ld %3d %7.4f\n",
                i,
                boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
                pinned_cnt,
                generations[i].bytes_allocated,
                (count_generation_pages(i)*PAGE_BYTES
                 - generations[i].bytes_allocated),
                generations[i].gc_trigger,
                count_write_protect_generation_pages(i),
                generations[i].num_gc,
                gen_av_mem_age(i));
    }
    fprintf(stderr," Total bytes allocated=%ld\n", bytes_allocated);

    fpu_restore(fpu_state);
}
/*
 * allocation routines
 */

/*
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties
 * e.g. boxed/unboxed, generation, ages; there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page. Many
 * features of memory use are noted on a page-wise basis, e.g. the
 * generation; so if a region starts within an existing allocated page
 * it must be consistent with this page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region. It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list which jumps all over the place to clean up the list. It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g. to detect pointers to the oldspace.
 * Thus it's important that the allocation regions have the correct
 * properties set when allocated, and not just set when closed. The
 * region allocation routines return regions with the specified
 * properties, and grab all the pages, setting their properties
 * appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer. The actual space used by the region is not reflected
 * in the page tables until it is closed. It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used, returning any
 * unused space. Further, the region may be noted in the new areas,
 * which is necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region; the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects and so
 * don't need scavenging. Further they can't contain pointers to
 * younger generations so WP is not needed. By allocating pages to
 * unboxed objects the whole page never needs scavenging or
 * write-protecting. */
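
/* A minimal sketch of the fast path described above (illustrative, not
 * part of the original source; the real version is gc_alloc_with_region()
 * below): allocation is a pointer bump plus an end-address check, and
 * only the slow path touches the page tables.
 *
 *     void *try_inline_alloc(struct alloc_region *r, long nbytes)
 *     {
 *         void *new_free = r->free_pointer + nbytes;
 *         if (new_free <= r->end_addr) {  // fits in the open region
 *             void *obj = r->free_pointer;
 *             r->free_pointer = new_free; // bump the free pointer
 *             return obj;
 *         }
 *         return NULL; // caller must close this region and open another
 *     }
 */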
/* We are only using two regions at present. Both are for the current
 * newspace generation. */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

/* The generation currently being allocated to. */
static int gc_alloc_generation;
/* Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page. So
 * may pick up from the previous region if there is enough space. This
 * keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables(), and will thus be in an empty state.
 *
 * To assist the scavenging functions write-protected pages are not
 * used. Free pages should not be write-protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found to within the current
 * region and the page generation must be set so that pointers to the
 * from space can be recognized. Therefore the generation of pages in
 * the region is set to gc_alloc_generation. To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void
gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region)
{
    long first_page;
    long last_page;
    long bytes_found;
    long i;

    FSHOW((stderr,
           "/alloc_new_region for %d bytes from gen %d\n",
           nbytes, gc_alloc_generation));

    /* Check that the region is in a reset state. */
    gc_assert((alloc_region->first_page == 0)
              && (alloc_region->last_page == -1)
              && (alloc_region->free_pointer == alloc_region->end_addr));
    get_spinlock(&free_pages_lock,(long) alloc_region);
    if (unboxed) {
        first_page =
            generations[gc_alloc_generation].alloc_unboxed_start_page;
    } else {
        first_page =
            generations[gc_alloc_generation].alloc_start_page;
    }
    last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed);
    bytes_found=(PAGE_BYTES - page_table[first_page].bytes_used)
            + PAGE_BYTES*(last_page-first_page);

    /* Set up the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
        + page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;

    /* Set up the pages. */

    /* The first page may have already been in use. */
    if (page_table[first_page].bytes_used == 0) {
        if (unboxed)
            page_table[first_page].allocated = UNBOXED_PAGE_FLAG;
        else
            page_table[first_page].allocated = BOXED_PAGE_FLAG;
        page_table[first_page].gen = gc_alloc_generation;
        page_table[first_page].large_object = 0;
        page_table[first_page].first_object_offset = 0;
    }

    if (unboxed)
        gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG);
    else
        gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG);
    page_table[first_page].allocated |= OPEN_REGION_PAGE_FLAG;

    gc_assert(page_table[first_page].gen == gc_alloc_generation);
    gc_assert(page_table[first_page].large_object == 0);

    for (i = first_page+1; i <= last_page; i++) {
        if (unboxed)
            page_table[i].allocated = UNBOXED_PAGE_FLAG;
        else
            page_table[i].allocated = BOXED_PAGE_FLAG;
        page_table[i].gen = gc_alloc_generation;
        page_table[i].large_object = 0;
        /* This may not be necessary for unboxed regions (think it was
         * broken before!) */
        page_table[i].first_object_offset =
            alloc_region->start_addr - page_address(i);
        page_table[i].allocated |= OPEN_REGION_PAGE_FLAG;
    }
    /* Bump up last_free_page. */
    if (last_page+1 > last_free_page) {
        last_free_page = last_page+1;
        SetSymbolValue(ALLOCATION_POINTER,
                       (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),
                       0);
    }
    release_spinlock(&free_pages_lock);

    /* we can do this after releasing free_pages_lock */
    if (gencgc_zero_check) {
        long *p;
        for (p = (long *)alloc_region->start_addr;
             p < (long *)alloc_region->end_addr; p++) {
            if (*p != 0) {
                /* KLUDGE: It would be nice to use %lx and explicit casts
                 * (long) in code like this, so that it is less likely to
                 * break randomly when running on a machine with different
                 * word sizes. -- WHN 19991129 */
                lose("The new region at %x is not zero.", p);
            }
        }
    }
}
/* If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then it is only recorded if the first page of the
 * current region is <= new_areas_ignore_page. This helps avoid
 * unnecessary recording when doing a full scavenge pass.
 *
 * The new_area structure holds the page, byte offset, and size of
 * new regions of objects. Each new area is placed in the array of
 * these structures pointed to by new_areas. new_areas_index holds the
 * offset into new_areas.
 *
 * If new_areas overflows NUM_NEW_AREAS then it stops adding them. The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation. */
#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static long new_areas_ignore_page;
struct new_area {
    long page;
    long offset;
    long size;
};
static struct new_area (*new_areas)[];
static long new_areas_index;
long max_new_areas;
/* Add a new area to new_areas. */
static void
add_new_area(long first_page, long offset, long size)
{
    unsigned new_area_start,c;
    long i;

    /* Ignore if full. */
    if (new_areas_index >= NUM_NEW_AREAS)
        return;

    switch (record_new_objects) {
    case 0:
        return;
    case 1:
        if (first_page > new_areas_ignore_page)
            return;
        break;
    case 2:
        break;
    default:
        gc_abort();
    }

    new_area_start = PAGE_BYTES*first_page + offset;

    /* Search backwards for a prior area that this follows from. If
       found this will save adding a new area. */
    for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) {
        unsigned area_end =
            PAGE_BYTES*((*new_areas)[i].page)
            + (*new_areas)[i].offset
            + (*new_areas)[i].size;
        /*FSHOW((stderr,
               "/add_new_area S1 %d %d %d %d\n",
               i, c, new_area_start, area_end));*/
        if (new_area_start == area_end) {
            /*FSHOW((stderr,
                   "/adding to [%d] %d %d %d with %d %d %d:\n",
                   i,
                   (*new_areas)[i].page,
                   (*new_areas)[i].offset,
                   (*new_areas)[i].size,
                   first_page,
                   offset,
                   size);*/
            (*new_areas)[i].size += size;
            return;
        }
    }

    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
    /*FSHOW((stderr,
           "/new_area %d page %d offset %d size %d\n",
           new_areas_index, first_page, offset, size));*/
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
        max_new_areas = new_areas_index;
}
/* Update the tables for the alloc_region. The region may be added to
 * the new_areas.
 *
 * When done the alloc_region is set up so that the next quick alloc
 * will fail safely and thus a new region will be allocated. Further
 * it is safe to try to re-update the page table of this reset
 * alloc_region. */
void
gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
{
    long more;
    long first_page;
    long next_page;
    long bytes_used;
    long orig_first_page_bytes_used;
    long region_size;
    long byte_cnt;

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if ((first_page == 0) && (alloc_region->last_page == -1))
        return;

    next_page = first_page+1;

    get_spinlock(&free_pages_lock,(long) alloc_region);
    if (alloc_region->free_pointer != alloc_region->start_addr) {
        /* some bytes were allocated in the region */
        orig_first_page_bytes_used = page_table[first_page].bytes_used;

        gc_assert(alloc_region->start_addr == (page_address(first_page) + page_table[first_page].bytes_used));

        /* All the pages used need to be updated */

        /* Update the first page. */

        /* If the page was free then set up the gen, and
         * first_object_offset. */
        if (page_table[first_page].bytes_used == 0)
            gc_assert(page_table[first_page].first_object_offset == 0);
        page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);

        if (unboxed)
            gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG);
        else
            gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG);
        gc_assert(page_table[first_page].gen == gc_alloc_generation);
        gc_assert(page_table[first_page].large_object == 0);

        byte_cnt = 0;

        /* Calculate the number of bytes used in this page. This is not
         * always the number of new bytes, unless it was free. */
        more = 0;
        if ((bytes_used = (alloc_region->free_pointer - page_address(first_page)))>PAGE_BYTES) {
            bytes_used = PAGE_BYTES;
            more = 1;
        }
        page_table[first_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        /* All the rest of the pages should be free. We need to set their
         * first_object_offset pointer to the start of the region, and set
         * the bytes_used. */
        while (more) {
            page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
            if (unboxed)
                gc_assert(page_table[next_page].allocated==UNBOXED_PAGE_FLAG);
            else
                gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
            gc_assert(page_table[next_page].bytes_used == 0);
            gc_assert(page_table[next_page].gen == gc_alloc_generation);
            gc_assert(page_table[next_page].large_object == 0);

            gc_assert(page_table[next_page].first_object_offset ==
                      alloc_region->start_addr - page_address(next_page));

            /* Calculate the number of bytes used in this page. */
            more = 0;
            if ((bytes_used = (alloc_region->free_pointer
                               - page_address(next_page)))>PAGE_BYTES) {
                bytes_used = PAGE_BYTES;
                more = 1;
            }
            page_table[next_page].bytes_used = bytes_used;
            byte_cnt += bytes_used;

            next_page++;
        }

        region_size = alloc_region->free_pointer - alloc_region->start_addr;
        bytes_allocated += region_size;
        generations[gc_alloc_generation].bytes_allocated += region_size;

        gc_assert((byte_cnt - orig_first_page_bytes_used) == region_size);

        /* Set the generation's alloc restart page to the last page of
         * the region. */
        if (unboxed)
            generations[gc_alloc_generation].alloc_unboxed_start_page =
                next_page-1;
        else
            generations[gc_alloc_generation].alloc_start_page = next_page-1;

        /* Add the region to the new_areas if requested. */
        if (!unboxed)
            add_new_area(first_page,orig_first_page_bytes_used, region_size);

        FSHOW((stderr,
               "/gc_alloc_update_page_tables update %d bytes to gen %d\n",
               region_size,
               gc_alloc_generation));
    } else {
        /* There are no bytes allocated. Unallocate the first_page if
         * there are 0 bytes_used. */
        page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
        if (page_table[first_page].bytes_used == 0)
            page_table[first_page].allocated = FREE_PAGE_FLAG;
    }

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
        gc_assert(page_table[next_page].bytes_used == 0);
        page_table[next_page].allocated = FREE_PAGE_FLAG;
        next_page++;
    }
    release_spinlock(&free_pages_lock);
    /* alloc_region is per-thread, we're ok to do this unlocked */
    gc_set_region_empty(alloc_region);
}
static inline void *gc_quick_alloc(long nbytes);
/* Allocate a possibly large object. */
void *
gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region)
{
    long first_page;
    long last_page;
    long orig_first_page_bytes_used;
    long byte_cnt;
    long more;
    long bytes_used;
    long next_page;

    get_spinlock(&free_pages_lock,(long) alloc_region);

    if (unboxed) {
        first_page =
            generations[gc_alloc_generation].alloc_large_unboxed_start_page;
    } else {
        first_page = generations[gc_alloc_generation].alloc_large_start_page;
    }
    if (first_page <= alloc_region->last_page) {
        first_page = alloc_region->last_page+1;
    }

    last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed);

    gc_assert(first_page > alloc_region->last_page);
    if (unboxed)
        generations[gc_alloc_generation].alloc_large_unboxed_start_page =
            last_page;
    else
        generations[gc_alloc_generation].alloc_large_start_page = last_page;

    /* Set up the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /* If the first page was free then set up the gen, and
     * first_object_offset. */
    if (page_table[first_page].bytes_used == 0) {
        if (unboxed)
            page_table[first_page].allocated = UNBOXED_PAGE_FLAG;
        else
            page_table[first_page].allocated = BOXED_PAGE_FLAG;
        page_table[first_page].gen = gc_alloc_generation;
        page_table[first_page].first_object_offset = 0;
        page_table[first_page].large_object = 1;
    }

    if (unboxed)
        gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG);
    else
        gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG);
    gc_assert(page_table[first_page].gen == gc_alloc_generation);
    gc_assert(page_table[first_page].large_object == 1);

    byte_cnt = 0;

    /* Calc. the number of bytes used in this page. This is not
     * always the number of new bytes, unless it was free. */
    more = 0;
    if ((bytes_used = nbytes+orig_first_page_bytes_used) > PAGE_BYTES) {
        bytes_used = PAGE_BYTES;
        more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page+1;

    /* All the rest of the pages should be free. We need to set their
     * first_object_offset pointer to the start of the region, and
     * set the bytes_used. */
    while (more) {
        gc_assert(page_table[next_page].allocated == FREE_PAGE_FLAG);
        gc_assert(page_table[next_page].bytes_used == 0);
        if (unboxed)
            page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
        else
            page_table[next_page].allocated = BOXED_PAGE_FLAG;
        page_table[next_page].gen = gc_alloc_generation;
        page_table[next_page].large_object = 1;

        page_table[next_page].first_object_offset =
            orig_first_page_bytes_used - PAGE_BYTES*(next_page-first_page);

        /* Calculate the number of bytes used in this page. */
        more = 0;
        if ((bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt) > PAGE_BYTES) {
            bytes_used = PAGE_BYTES;
            more = 1;
        }
        page_table[next_page].bytes_used = bytes_used;
        page_table[next_page].write_protected=0;
        page_table[next_page].dont_move=0;
        byte_cnt += bytes_used;
        next_page++;
    }

    gc_assert((byte_cnt-orig_first_page_bytes_used) == nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (!unboxed)
        add_new_area(first_page,orig_first_page_bytes_used,nbytes);

    /* Bump up last_free_page */
    if (last_page+1 > last_free_page) {
        last_free_page = last_page+1;
        SetSymbolValue(ALLOCATION_POINTER,
                       (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),0);
    }
    release_spinlock(&free_pages_lock);

    return((void *)(page_address(first_page)+orig_first_page_bytes_used));
}
long
gc_find_freeish_pages(long *restart_page_ptr, long nbytes, int unboxed)
{
    long first_page;
    long last_page;
    long region_size;
    long restart_page=*restart_page_ptr;
    long bytes_found;
    long num_pages;
    long large_p=(nbytes>=large_object_size);
    gc_assert(free_pages_lock);

    /* Search for a contiguous free space of at least nbytes. If it's
     * a large object then align it on a page boundary by searching
     * for a free page. */

    do {
        first_page = restart_page;
        if (large_p)
            while ((first_page < NUM_PAGES)
                   && (page_table[first_page].allocated != FREE_PAGE_FLAG))
                first_page++;
        else
            while (first_page < NUM_PAGES) {
                if(page_table[first_page].allocated == FREE_PAGE_FLAG)
                    break;
                if((page_table[first_page].allocated ==
                    (unboxed ? UNBOXED_PAGE_FLAG : BOXED_PAGE_FLAG)) &&
                   (page_table[first_page].large_object == 0) &&
                   (page_table[first_page].gen == gc_alloc_generation) &&
                   (page_table[first_page].bytes_used < (PAGE_BYTES-32)) &&
                   (page_table[first_page].write_protected == 0) &&
                   (page_table[first_page].dont_move == 0)) {
                    break;
                }
                first_page++;
            }

        if (first_page >= NUM_PAGES) {
            fprintf(stderr,
                    "Argh! gc_find_free_space failed (first_page), nbytes=%ld.\n",
                    nbytes);
            print_generation_stats(1);
            lose(NULL);
        }

        gc_assert(page_table[first_page].write_protected == 0);

        last_page = first_page;
        bytes_found = PAGE_BYTES - page_table[first_page].bytes_used;
        num_pages = 1;
        while (((bytes_found < nbytes)
                || (!large_p && (num_pages < 2)))
               && (last_page < (NUM_PAGES-1))
               && (page_table[last_page+1].allocated == FREE_PAGE_FLAG)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_BYTES;
            gc_assert(page_table[last_page].write_protected == 0);
        }

        region_size = (PAGE_BYTES - page_table[first_page].bytes_used)
            + PAGE_BYTES*(last_page-first_page);

        gc_assert(bytes_found == region_size);
        restart_page = last_page + 1;
    } while ((restart_page < NUM_PAGES) && (bytes_found < nbytes));

    /* Check for a failure */
    if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes)) {
        fprintf(stderr,
                "Argh! gc_find_freeish_pages failed (restart_page), nbytes=%ld.\n",
                nbytes);
        print_generation_stats(1);
        lose(NULL);
    }

    *restart_page_ptr=first_page;

    return last_page;
}
/* Allocate bytes. All the rest of the special-purpose allocation
 * functions will eventually call this. */
void *
gc_alloc_with_region(long nbytes,int unboxed_p, struct alloc_region *my_region,
                     int quick_p)
{
    void *new_free_pointer;

    if(nbytes>=large_object_size)
        return gc_alloc_large(nbytes,unboxed_p,my_region);

    /* Check whether there is room in the current alloc region. */
    new_free_pointer = my_region->free_pointer + nbytes;

    /* fprintf(stderr, "alloc %d bytes from %p to %p\n", nbytes,
       my_region->free_pointer, new_free_pointer); */

    if (new_free_pointer <= my_region->end_addr) {
        /* If so then allocate from the current alloc region. */
        void *new_obj = my_region->free_pointer;
        my_region->free_pointer = new_free_pointer;

        /* Unless a `quick' alloc was requested, check whether the
           alloc region is almost empty. */
        if (!quick_p &&
            (my_region->end_addr - my_region->free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(unboxed_p, my_region);
            /* Set up a new region. */
            gc_alloc_new_region(32 /*bytes*/, unboxed_p, my_region);
        }

        return((void *)new_obj);
    }

    /* Else not enough free space in the current region: retry with a
     * new region. */

    gc_alloc_update_page_tables(unboxed_p, my_region);
    gc_alloc_new_region(nbytes, unboxed_p, my_region);
    return gc_alloc_with_region(nbytes,unboxed_p,my_region,0);
}
/* these are only used during GC: all allocation from the mutator calls
 * alloc() -> gc_alloc_with_region() with the appropriate per-thread
 * region */

void *
gc_general_alloc(long nbytes,int unboxed_p,int quick_p)
{
    struct alloc_region *my_region =
        unboxed_p ? &unboxed_region : &boxed_region;
    return gc_alloc_with_region(nbytes,unboxed_p, my_region,quick_p);
}

static inline void *
gc_quick_alloc(long nbytes)
{
    return gc_general_alloc(nbytes,ALLOC_BOXED,ALLOC_QUICK);
}

static inline void *
gc_quick_alloc_large(long nbytes)
{
    return gc_general_alloc(nbytes,ALLOC_BOXED,ALLOC_QUICK);
}

static inline void *
gc_alloc_unboxed(long nbytes)
{
    return gc_general_alloc(nbytes,ALLOC_UNBOXED,0);
}

static inline void *
gc_quick_alloc_unboxed(long nbytes)
{
    return gc_general_alloc(nbytes,ALLOC_UNBOXED,ALLOC_QUICK);
}

static inline void *
gc_quick_alloc_large_unboxed(long nbytes)
{
    return gc_general_alloc(nbytes,ALLOC_UNBOXED,ALLOC_QUICK);
}
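
/* Usage sketch (illustrative, not part of the original source): a
 * transport routine that needs room for a two-word boxed object during
 * GC can call
 *
 *     lispobj *new = gc_quick_alloc(2*N_WORD_BYTES);
 *
 * ALLOC_QUICK skips the "region almost empty" check in
 * gc_alloc_with_region(), so the fast path is a single pointer bump;
 * the unboxed variants differ only in which region the bytes come from. */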
/*
 * scavenging/transporting routines derived from gc.c in CMU CL ca. 18b
 */

extern long (*scavtab[256])(lispobj *where, lispobj object);
extern lispobj (*transother[256])(lispobj object);
extern long (*sizetab[256])(lispobj *where);
/* Copy a large boxed object. If the object is in a large object
 * region then it is simply promoted, else it is copied. If it's large
 * enough then it's copied to a large object region.
 *
 * Vectors may have shrunk. If the object is not copied the space
 * needs to be reclaimed, and the page_tables corrected. */
lispobj
copy_large_object(lispobj object, long nwords)
{
    int tag;
    lispobj *new;
    long first_page;

    gc_assert(is_lisp_pointer(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    /* Check whether it's in a large object region. */
    first_page = find_page_index((void *)object);
    gc_assert(first_page >= 0);

    if (page_table[first_page].large_object) {

        /* Promote the object. */

        long remaining_bytes;
        long next_page;
        long bytes_freed;
        long old_bytes_used;

        /* Note: Any page write-protection must be removed, else a
         * later scavenge_newspace may incorrectly not scavenge these
         * pages. This would not be necessary if they are added to the
         * new areas, but let's do it for them all (they'll probably
         * be written anyway?). */

        gc_assert(page_table[first_page].first_object_offset == 0);

        next_page = first_page;
        remaining_bytes = nwords*N_WORD_BYTES;
        while (remaining_bytes > PAGE_BYTES) {
            gc_assert(page_table[next_page].gen == from_space);
            gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
            gc_assert(page_table[next_page].large_object);
            gc_assert(page_table[next_page].first_object_offset ==
                      -PAGE_BYTES*(next_page-first_page));
            gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);

            page_table[next_page].gen = new_space;

            /* Remove any write-protection. We should be able to rely
             * on the write-protect flag to avoid redundant calls. */
            if (page_table[next_page].write_protected) {
                os_protect(page_address(next_page), PAGE_BYTES, OS_VM_PROT_ALL);
                page_table[next_page].write_protected = 0;
            }
            remaining_bytes -= PAGE_BYTES;
            next_page++;
        }

        /* Now only one page remains, but the object may have shrunk
         * so there may be more unused pages which will be freed. */

        /* The object may have shrunk but shouldn't have grown. */
        gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

        page_table[next_page].gen = new_space;
        gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);

        /* Adjust the bytes_used. */
        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].bytes_used = remaining_bytes;

        bytes_freed = old_bytes_used - remaining_bytes;

        /* Free any remaining pages; needs care. */
        next_page++;
        while ((old_bytes_used == PAGE_BYTES) &&
               (page_table[next_page].gen == from_space) &&
               (page_table[next_page].allocated == BOXED_PAGE_FLAG) &&
               page_table[next_page].large_object &&
               (page_table[next_page].first_object_offset ==
                -(next_page - first_page)*PAGE_BYTES)) {
            /* Checks out OK, free the page. Don't need to bother zeroing
             * pages as this should have been done before shrinking the
             * object. These pages shouldn't be write-protected as they
             * should be zero filled. */
            gc_assert(page_table[next_page].write_protected == 0);

            old_bytes_used = page_table[next_page].bytes_used;
            page_table[next_page].allocated = FREE_PAGE_FLAG;
            page_table[next_page].bytes_used = 0;
            bytes_freed += old_bytes_used;
            next_page++;
        }

        generations[from_space].bytes_allocated -= N_WORD_BYTES*nwords +
            bytes_freed;
        generations[new_space].bytes_allocated += N_WORD_BYTES*nwords;
        bytes_allocated -= bytes_freed;

        /* Add the region to the new_areas if requested. */
        add_new_area(first_page,0,nwords*N_WORD_BYTES);

        return(object);
    } else {
        /* Get tag of object. */
        tag = lowtag_of(object);

        /* Allocate space. */
        new = gc_quick_alloc_large(nwords*N_WORD_BYTES);

        memcpy(new,native_pointer(object),nwords*N_WORD_BYTES);

        /* Return Lisp pointer of new object. */
        return ((lispobj) new) | tag;
    }
}
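
/* Worked example (not part of the original source): promoting a large
 * boxed object of 14336 bytes with PAGE_BYTES = 4096 walks three full
 * pages (remaining_bytes: 14336 -> 10240 -> 6144 -> 2048), then trims
 * the fourth page's bytes_used to 2048. If the object had shrunk since
 * allocation, the loop that follows frees any wholly unused trailing
 * pages and subtracts bytes_freed from the byte counts. */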
/* to copy unboxed objects */
lispobj
copy_unboxed_object(lispobj object, long nwords)
{
    long tag;
    lispobj *new;

    gc_assert(is_lisp_pointer(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    /* Get tag of object. */
    tag = lowtag_of(object);

    /* Allocate space. */
    new = gc_quick_alloc_unboxed(nwords*N_WORD_BYTES);

    memcpy(new,native_pointer(object),nwords*N_WORD_BYTES);

    /* Return Lisp pointer of new object. */
    return ((lispobj) new) | tag;
}
/* to copy large unboxed objects
 *
 * If the object is in a large object region then it is simply
 * promoted, else it is copied. If it's large enough then it's copied
 * to a large object region.
 *
 * Bignums and vectors may have shrunk. If the object is not copied
 * the space needs to be reclaimed, and the page_tables corrected.
 *
 * KLUDGE: There's a lot of cut-and-paste duplication between this
 * function and copy_large_object(..). -- WHN 20000619 */
lispobj
copy_large_unboxed_object(lispobj object, long nwords)
{
    long tag;
    lispobj *new;
    long first_page;

    gc_assert(is_lisp_pointer(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    if ((nwords > 1024*1024) && gencgc_verbose)
        FSHOW((stderr, "/copy_large_unboxed_object: %d bytes\n", nwords*N_WORD_BYTES));

    /* Check whether it's a large object. */
    first_page = find_page_index((void *)object);
    gc_assert(first_page >= 0);

    if (page_table[first_page].large_object) {
        /* Promote the object. Note: Unboxed objects may have been
         * allocated to a BOXED region so it may be necessary to
         * change the region to UNBOXED. */
        long remaining_bytes;
        long next_page;
        long bytes_freed;
        long old_bytes_used;

        gc_assert(page_table[first_page].first_object_offset == 0);

        next_page = first_page;
        remaining_bytes = nwords*N_WORD_BYTES;
        while (remaining_bytes > PAGE_BYTES) {
            gc_assert(page_table[next_page].gen == from_space);
            gc_assert((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
                      || (page_table[next_page].allocated == BOXED_PAGE_FLAG));
            gc_assert(page_table[next_page].large_object);
            gc_assert(page_table[next_page].first_object_offset ==
                      -PAGE_BYTES*(next_page-first_page));
            gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);

            page_table[next_page].gen = new_space;
            page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
            remaining_bytes -= PAGE_BYTES;
            next_page++;
        }

        /* Now only one page remains, but the object may have shrunk so
         * there may be more unused pages which will be freed. */

        /* Object may have shrunk but shouldn't have grown - check. */
        gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

        page_table[next_page].gen = new_space;
        page_table[next_page].allocated = UNBOXED_PAGE_FLAG;

        /* Adjust the bytes_used. */
        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].bytes_used = remaining_bytes;

        bytes_freed = old_bytes_used - remaining_bytes;

        /* Free any remaining pages; needs care. */
        next_page++;
        while ((old_bytes_used == PAGE_BYTES) &&
               (page_table[next_page].gen == from_space) &&
               ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
                || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) &&
               page_table[next_page].large_object &&
               (page_table[next_page].first_object_offset ==
                -(next_page - first_page)*PAGE_BYTES)) {
            /* Checks out OK, free the page. Don't need to bother zeroing
             * pages as this should have been done before shrinking the
             * object. These pages shouldn't be write-protected, even if
             * boxed they should be zero filled. */
            gc_assert(page_table[next_page].write_protected == 0);

            old_bytes_used = page_table[next_page].bytes_used;
            page_table[next_page].allocated = FREE_PAGE_FLAG;
            page_table[next_page].bytes_used = 0;
            bytes_freed += old_bytes_used;
            next_page++;
        }

        if ((bytes_freed > 0) && gencgc_verbose)
            FSHOW((stderr,
                   "/copy_large_unboxed bytes_freed=%d\n",
                   bytes_freed));

        generations[from_space].bytes_allocated -= nwords*N_WORD_BYTES + bytes_freed;
        generations[new_space].bytes_allocated += nwords*N_WORD_BYTES;
        bytes_allocated -= bytes_freed;

        return(object);
    } else {
        /* Get tag of object. */
        tag = lowtag_of(object);

        /* Allocate space. */
        new = gc_quick_alloc_large_unboxed(nwords*N_WORD_BYTES);

        /* Copy the object. */
        memcpy(new,native_pointer(object),nwords*N_WORD_BYTES);

        /* Return Lisp pointer of new object. */
        return ((lispobj) new) | tag;
    }
}
/*
 * code and code-related objects
 */

static lispobj trans_fun_header(lispobj object);
static lispobj trans_boxed(lispobj object);
/* Scan an x86 compiled code object, looking for possible fixups that
 * have been missed after a move.
 *
 * Two types of fixups are needed:
 * 1. Absolute fixups to within the code object.
 * 2. Relative fixups to outside the code object.
 *
 * Currently only absolute fixups to the constant vector, or to the
 * code area are checked. */
static void
sniff_code_object(struct code *code, unsigned displacement)
{
    long nheader_words, ncode_words, nwords;
    void *p;
    void *constants_start_addr, *constants_end_addr;
    void *code_start_addr, *code_end_addr;
    int fixup_found = 0;

    if (!check_code_fixups)
        return;

    ncode_words = fixnum_value(code->code_size);
    nheader_words = HeaderValue(*(lispobj *)code);
    nwords = ncode_words + nheader_words;

    constants_start_addr = (void *)code + 5*N_WORD_BYTES;
    constants_end_addr = (void *)code + nheader_words*N_WORD_BYTES;
    code_start_addr = (void *)code + nheader_words*N_WORD_BYTES;
    code_end_addr = (void *)code + nwords*N_WORD_BYTES;

    /* Work through the unboxed code. */
    for (p = code_start_addr; p < code_end_addr; p++) {
        void *data = *(void **)p;
        unsigned d1 = *((unsigned char *)p - 1);
        unsigned d2 = *((unsigned char *)p - 2);
        unsigned d3 = *((unsigned char *)p - 3);
        unsigned d4 = *((unsigned char *)p - 4);
#ifdef QSHOW
        unsigned d5 = *((unsigned char *)p - 5);
        unsigned d6 = *((unsigned char *)p - 6);
#endif

        /* Check for code references. */
        /* Check for a 32 bit word that looks like an absolute
           reference to within the code area of the code object. */
        if ((data >= (code_start_addr-displacement))
            && (data < (code_end_addr-displacement))) {
            /* function header */
            if ((d4 == 0x5e)
                && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) == (unsigned)code)) {
                /* Skip the function header */
                p += 6*4 - 4 - 1;
                continue;
            }
            /* the case of PUSH imm32 */
            if (d1 == 0x68) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/PUSH $0x%.8x\n", data));
            }
            /* the case of MOV [reg-8],imm32 */
            if ((d3 == 0xc7)
                && (d2==0x40 || d2==0x41 || d2==0x42 || d2==0x43
                    || d2==0x45 || d2==0x46 || d2==0x47)
                && (d1 == 0xf8)) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/MOV [reg-8],$0x%.8x\n", data));
            }
            /* the case of LEA reg,[disp32] */
            if ((d2 == 0x8d) && ((d1 & 0xc7) == 5)) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr,"/LEA reg,[$0x%.8x]\n", data));
            }
        }

        /* Check for constant references. */
        /* Check for a 32 bit word that looks like an absolute
           reference to within the constant vector. Constant references
           will be aligned. */
        if ((data >= (constants_start_addr-displacement))
            && (data < (constants_end_addr-displacement))
            && (((unsigned)data & 0x3) == 0)) {
            /* the case of MOV eax,m32 */
            if (d1 == 0xa1) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr,"/MOV eax,0x%.8x\n", data));
            }

            /* the case of MOV m32,EAX */
            if (d1 == 0xa3) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/MOV 0x%.8x,eax\n", data));
            }

            /* the case of CMP m32,imm32 */
            if ((d1 == 0x3d) && (d2 == 0x81)) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/CMP 0x%.8x,immed32\n", data));
            }

            /* Check for a mod=00, r/m=101 byte. */
            if ((d1 & 0xc7) == 5) {
                /* the case of CMP m32,reg */
                if (d2 == 0x39) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr,"/CMP 0x%.8x,reg\n", data));
                }
                /* the case of CMP reg32,m32 */
                if (d2 == 0x3b) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/CMP reg32,0x%.8x\n", data));
                }
                /* the case of MOV m32,reg32 */
                if (d2 == 0x89) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/MOV 0x%.8x,reg32\n", data));
                }
                /* the case of MOV reg32,m32 */
                if (d2 == 0x8b) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/MOV reg32,0x%.8x\n", data));
                }
                /* the case of LEA reg32,m32 */
                if (d2 == 0x8d) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/LEA reg32,0x%.8x\n", data));
                }
            }
        }
    }

    /* If anything was found, print some information on the code
     * object. */
    if (fixup_found) {
        FSHOW((stderr,
               "/compiled code object at %x: header words = %d, code words = %d\n",
               code, nheader_words, ncode_words));
        FSHOW((stderr,
               "/const start = %x, end = %x\n",
               constants_start_addr, constants_end_addr));
        FSHOW((stderr,
               "/code start = %x, end = %x\n",
               code_start_addr, code_end_addr));
    }
}
void
gencgc_apply_code_fixups(struct code *old_code, struct code *new_code)
{
    long nheader_words, ncode_words, nwords;
    void *constants_start_addr, *constants_end_addr;
    void *code_start_addr, *code_end_addr;
    lispobj fixups = NIL;
    unsigned displacement = (unsigned)new_code - (unsigned)old_code;
    struct vector *fixups_vector;

    ncode_words = fixnum_value(new_code->code_size);
    nheader_words = HeaderValue(*(lispobj *)new_code);
    nwords = ncode_words + nheader_words;
    /* FSHOW((stderr,
             "/compiled code object at %x: header words = %d, code words = %d\n",
             new_code, nheader_words, ncode_words)); */
    constants_start_addr = (void *)new_code + 5*N_WORD_BYTES;
    constants_end_addr = (void *)new_code + nheader_words*N_WORD_BYTES;
    code_start_addr = (void *)new_code + nheader_words*N_WORD_BYTES;
    code_end_addr = (void *)new_code + nwords*N_WORD_BYTES;
    /*
    FSHOW((stderr,
           "/const start = %x, end = %x\n",
           constants_start_addr,constants_end_addr));
    FSHOW((stderr,
           "/code start = %x; end = %x\n",
           code_start_addr,code_end_addr));
    */

    /* The first constant should be a pointer to the fixups for this
       code object. Check. */
    fixups = new_code->constants[0];

    /* It will be 0 or the unbound-marker if there are no fixups (as
     * will be the case if the code object has been purified, for
     * example) and will be an other pointer if it is valid. */
    if ((fixups == 0) || (fixups == UNBOUND_MARKER_WIDETAG) ||
        !is_lisp_pointer(fixups)) {
        /* Check for possible errors. */
        if (check_code_fixups)
            sniff_code_object(new_code, displacement);

        return;
    }

    fixups_vector = (struct vector *)native_pointer(fixups);

    /* Could be pointing to a forwarding pointer. */
    /* FIXME is this always in from_space? if so, could replace this code with
     * forwarding_pointer_p/forwarding_pointer_value */
    if (is_lisp_pointer(fixups) &&
        (find_page_index((void*)fixups_vector) != -1) &&
        (fixups_vector->header == 0x01)) {
        /* If so, then follow it. */
        /*SHOW("following pointer to a forwarding pointer");*/
        fixups_vector = (struct vector *)native_pointer((lispobj)fixups_vector->length);
    }

    /*SHOW("got fixups");*/

    if (widetag_of(fixups_vector->header) == SIMPLE_ARRAY_WORD_WIDETAG) {
        /* Got the fixups for the code block. Now work through the vector,
           and apply a fixup at each address. */
        long length = fixnum_value(fixups_vector->length);
        long i;
        for (i = 0; i < length; i++) {
            unsigned offset = fixups_vector->data[i];
            /* Now check the current value of offset. */
            unsigned old_value =
                *(unsigned *)((unsigned)code_start_addr + offset);

            /* If it's within the old_code object then it must be an
             * absolute fixup (relative ones are not saved) */
            if ((old_value >= (unsigned)old_code)
                && (old_value < ((unsigned)old_code + nwords*N_WORD_BYTES)))
                /* So add the displacement. */
                *(unsigned *)((unsigned)code_start_addr + offset) =
                    old_value + displacement;
            else
                /* It is outside the old code object so it must be a
                 * relative fixup (absolute fixups are not saved). So
                 * subtract the displacement. */
                *(unsigned *)((unsigned)code_start_addr + offset) =
                    old_value - displacement;
        }
    } else {
        fprintf(stderr, "widetag of fixup vector is %d\n", widetag_of(fixups_vector->header));
    }

    /* Check for possible errors. */
    if (check_code_fixups) {
        sniff_code_object(new_code,displacement);
    }
}
static lispobj
trans_boxed_large(lispobj object)
{
    lispobj header;
    unsigned long length;

    gc_assert(is_lisp_pointer(object));

    header = *((lispobj *) native_pointer(object));
    length = HeaderValue(header) + 1;
    length = CEILING(length, 2);

    return copy_large_object(object, length);
}
static lispobj
trans_unboxed_large(lispobj object)
{
    lispobj header;
    unsigned long length;

    gc_assert(is_lisp_pointer(object));

    header = *((lispobj *) native_pointer(object));
    length = HeaderValue(header) + 1;
    length = CEILING(length, 2);

    return copy_large_unboxed_object(object, length);
}
/*
 * vector-like objects
 */

/* FIXME: What does this mean? */
int gencgc_hash = 1;
1711 scav_vector(lispobj
*where
, lispobj object
)
1713 unsigned long kv_length
;
1715 unsigned long length
= 0; /* (0 = dummy to stop GCC warning) */
1716 struct hash_table
*hash_table
;
1717 lispobj empty_symbol
;
1718 unsigned long *index_vector
= NULL
; /* (NULL = dummy to stop GCC warning) */
1719 unsigned long *next_vector
= NULL
; /* (NULL = dummy to stop GCC warning) */
1720 unsigned long *hash_vector
= NULL
; /* (NULL = dummy to stop GCC warning) */
1722 unsigned next_vector_length
= 0;
1724 /* FIXME: A comment explaining this would be nice. It looks as
1725 * though SB-VM:VECTOR-VALID-HASHING-SUBTYPE is set for EQ-based
1726 * hash tables in the Lisp HASH-TABLE code, and nowhere else. */
1727 if (HeaderValue(object
) != subtype_VectorValidHashing
)
1731 /* This is set for backward compatibility. FIXME: Do we need
1734 (subtype_VectorMustRehash
<<N_WIDETAG_BITS
) | SIMPLE_VECTOR_WIDETAG
;
1738 kv_length
= fixnum_value(where
[1]);
1739 kv_vector
= where
+ 2; /* Skip the header and length. */
1740 /*FSHOW((stderr,"/kv_length = %d\n", kv_length));*/
1742 /* Scavenge element 0, which may be a hash-table structure. */
1743 scavenge(where
+2, 1);
1744 if (!is_lisp_pointer(where
[2])) {
1745 lose("no pointer at %x in hash table", where
[2]);
1747 hash_table
= (lispobj
*)native_pointer(where
[2]);
1748 /*FSHOW((stderr,"/hash_table = %x\n", hash_table));*/
1749 if (widetag_of(hash_table
->header
) != INSTANCE_HEADER_WIDETAG
) {
1750 lose("hash table not instance (%x at %x)",
1755 /* Scavenge element 1, which should be some internal symbol that
1756 * the hash table code reserves for marking empty slots. */
1757 scavenge(where
+3, 1);
1758 if (!is_lisp_pointer(where
[3])) {
1759 lose("not empty-hash-table-slot symbol pointer: %x", where
[3]);
1761 empty_symbol
= where
[3];
1762 /* fprintf(stderr,"* empty_symbol = %x\n", empty_symbol);*/
1763 if (widetag_of(*(lispobj
*)native_pointer(empty_symbol
)) !=
1764 SYMBOL_HEADER_WIDETAG
) {
1765 lose("not a symbol where empty-hash-table-slot symbol expected: %x",
1766 *(lispobj
*)native_pointer(empty_symbol
));
1769 /* Scavenge hash table, which will fix the positions of the other
1770 * needed objects. */
1771 scavenge(hash_table
, sizeof(struct hash_table
) / sizeof(lispobj
));
1773 /* Cross-check the kv_vector. */
1774 if (where
!= (lispobj
*)native_pointer(hash_table
->table
)) {
1775 lose("hash_table table!=this table %x", hash_table
->table
);
1779 weak_p_obj
= hash_table
->weak_p
;
1783 lispobj index_vector_obj
= hash_table
->index_vector
;
1785 if (is_lisp_pointer(index_vector_obj
) &&
1786 (widetag_of(*(lispobj
*)native_pointer(index_vector_obj)) ==
             SIMPLE_ARRAY_WORD_WIDETAG)) {
            index_vector = ((lispobj *)native_pointer(index_vector_obj)) + 2;
            /*FSHOW((stderr, "/index_vector = %x\n",index_vector));*/
            length = fixnum_value(((lispobj *)native_pointer(index_vector_obj))[1]);
            /*FSHOW((stderr, "/length = %d\n", length));*/
        } else {
            lose("invalid index_vector %x", index_vector_obj);
        }
    }

    /* next vector */
    {
        lispobj next_vector_obj = hash_table->next_vector;

        if (is_lisp_pointer(next_vector_obj) &&
            (widetag_of(*(lispobj *)native_pointer(next_vector_obj)) ==
             SIMPLE_ARRAY_WORD_WIDETAG)) {
            next_vector = ((lispobj *)native_pointer(next_vector_obj)) + 2;
            /*FSHOW((stderr, "/next_vector = %x\n", next_vector));*/
            next_vector_length =
                fixnum_value(((lispobj *)native_pointer(next_vector_obj))[1]);
            /*FSHOW((stderr, "/next_vector_length = %d\n", next_vector_length));*/
        } else {
            lose("invalid next_vector %x", next_vector_obj);
        }
    }

    /* maybe hash vector */
    {
        lispobj hash_vector_obj = hash_table->hash_vector;

        if (is_lisp_pointer(hash_vector_obj) &&
            (widetag_of(*(lispobj *)native_pointer(hash_vector_obj)) ==
             SIMPLE_ARRAY_WORD_WIDETAG)) {
            hash_vector = ((lispobj *)native_pointer(hash_vector_obj)) + 2;
            /*FSHOW((stderr, "/hash_vector = %x\n", hash_vector));*/
            gc_assert(fixnum_value(((lispobj *)native_pointer(hash_vector_obj))[1])
                      == next_vector_length);
        } else {
            hash_vector = NULL;
            /*FSHOW((stderr, "/no hash_vector: %x\n", hash_vector_obj));*/
        }
    }

    /* These lengths could be different as the index_vector can be a
     * different length from the others, a larger index_vector could help
     * reduce collisions. */
    gc_assert(next_vector_length*2 == kv_length);

    /* now all set up.. */

    /* Work through the KV vector. */
    {
        long i;
        for (i = 1; i < next_vector_length; i++) {
            lispobj old_key = kv_vector[2*i];

#if N_WORD_BITS == 32
            unsigned long old_index = (old_key & 0x1fffffff)%length;
#elif N_WORD_BITS == 64
            unsigned long old_index = (old_key & 0x1fffffffffffffff)%length;
#endif

            /* Scavenge the key and value. */
            scavenge(&kv_vector[2*i],2);

            /* Check whether the key has moved and is EQ based. */
            {
                lispobj new_key = kv_vector[2*i];
#if N_WORD_BITS == 32
                unsigned long new_index = (new_key & 0x1fffffff)%length;
#elif N_WORD_BITS == 64
                unsigned long new_index = (new_key & 0x1fffffffffffffff)%length;
#endif

                if ((old_index != new_index) &&
                    ((!hash_vector) || (hash_vector[i] == 0x80000000)) &&
                    ((new_key != empty_symbol) ||
                     (kv_vector[2*i] != empty_symbol))) {

                    /*FSHOW((stderr,
                           "* EQ key %d moved from %x to %x; index %d to %d\n",
                           i, old_key, new_key, old_index, new_index));*/

                    if (index_vector[old_index] != 0) {
                        /*FSHOW((stderr, "/P1 %d\n", index_vector[old_index]));*/

                        /* Unlink the key from the old_index chain. */
                        if (index_vector[old_index] == i) {
                            /*FSHOW((stderr, "/P2a %d\n", next_vector[i]));*/
                            index_vector[old_index] = next_vector[i];
                            /* Link it into the needing rehash chain. */
                            next_vector[i] =
                                fixnum_value(hash_table->needing_rehash);
                            hash_table->needing_rehash = make_fixnum(i);
                        } else {
                            unsigned prior = index_vector[old_index];
                            unsigned next = next_vector[prior];

                            /*FSHOW((stderr, "/P3a %d %d\n", prior, next));*/
                            while (next != 0) {
                                /*FSHOW((stderr, "/P3b %d %d\n", prior, next));*/
                                if (next == i) {
                                    /* Unlink it. */
                                    next_vector[prior] = next_vector[next];
                                    /* Link it into the needing rehash
                                     * chain. */
                                    next_vector[next] =
                                        fixnum_value(hash_table->needing_rehash);
                                    hash_table->needing_rehash =
                                        make_fixnum(next);
                                    break;
                                }
                                prior = next;
                                next = next_vector[next];
                            }
                        }
                    }
                }
            }
        }
    }
    return (CEILING(kv_length + 2, 2));
}
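
/* The unlink/relink dance above is easier to see on plain C arrays,
 * without the fixnum tagging. A compiled-out sketch (the helper name and
 * argument layout are illustrative only, not used by the collector): move
 * entry i from its hash bucket chain onto the "needing rehash" chain,
 * mirroring the two cases above - i at the head of the chain, or further
 * down it. */
#if 0
static void
move_to_rehash_chain(unsigned long *index_vector, unsigned long *next_vector,
                     unsigned long *needing_rehash,
                     unsigned long old_index, unsigned long i)
{
    if (index_vector[old_index] == i) {
        /* i heads the bucket chain: the bucket now starts at i's successor. */
        index_vector[old_index] = next_vector[i];
        next_vector[i] = *needing_rehash;
        *needing_rehash = i;
    } else {
        /* Walk the chain looking for i, remembering the previous entry. */
        unsigned long prior = index_vector[old_index];
        unsigned long next = next_vector[prior];
        while (next != 0) {
            if (next == i) {
                next_vector[prior] = next_vector[next];
                next_vector[next] = *needing_rehash;
                *needing_rehash = next;
                break;
            }
            prior = next;
            next = next_vector[next];
        }
    }
}
#endif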

/*
 * weak pointers
 */

/* XX This is a hack adapted from cgc.c. These don't work too
 * efficiently with the gencgc as a list of the weak pointers is
 * maintained within the objects which causes writes to the pages. A
 * limited attempt is made to avoid unnecessary writes, but this needs
 * a re-think. */
#define WEAK_POINTER_NWORDS \
    CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)

static long
scav_weak_pointer(lispobj *where, lispobj object)
{
    struct weak_pointer *wp = weak_pointers;
    /* Push the weak pointer onto the list of weak pointers.
     * Do I have to watch for duplicates? Originally this was
     * part of trans_weak_pointer but that didn't work in the
     * case where the WP was in a promoted region.
     */

    /* Check whether it's already in the list. */
    while (wp != NULL) {
        if (wp == (struct weak_pointer*)where) {
            break;
        }
        wp = wp->next;
    }
    if (wp == NULL) {
        /* Add it to the start of the list. */
        wp = (struct weak_pointer*)where;
        if (wp->next != weak_pointers) {
            wp->next = weak_pointers;
        } else {
            /*SHOW("avoided write to weak pointer");*/
        }
        weak_pointers = wp;
    }

    /* Do not let GC scavenge the value slot of the weak pointer.
     * (That is why it is a weak pointer.) */

    return WEAK_POINTER_NWORDS;
}
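
/* A compiled-out sketch of the list discipline above, on a generic
 * intrusive node type (names illustrative). The point of the extra
 * compares is that pushing an already-linked node, or rewriting a ->next
 * slot that already holds the right value, would dirty a GC page for
 * nothing. */
#if 0
struct node { struct node *next; };
static struct node *list_head;

static void
push_if_absent(struct node *n)
{
    struct node *p;
    for (p = list_head; p != NULL; p = p->next)
        if (p == n)
            return;              /* already linked: no write at all */
    if (n->next != list_head)
        n->next = list_head;     /* only write when the slot would change */
    list_head = n;
}
#endif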

lispobj *
search_read_only_space(void *pointer)
{
    lispobj *start = (lispobj *) READ_ONLY_SPACE_START;
    lispobj *end = (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0);
    if ((pointer < (void *)start) || (pointer >= (void *)end))
        return NULL;
    return (gc_search_space(start,
                            (((lispobj *)pointer)+2)-start,
                            (lispobj *) pointer));
}

lispobj *
search_static_space(void *pointer)
{
    lispobj *start = (lispobj *)STATIC_SPACE_START;
    lispobj *end = (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0);
    if ((pointer < (void *)start) || (pointer >= (void *)end))
        return NULL;
    return (gc_search_space(start,
                            (((lispobj *)pointer)+2)-start,
                            (lispobj *) pointer));
}

/* a faster version for searching the dynamic space. This will work even
 * if the object is in a current allocation region. */
lispobj *
search_dynamic_space(void *pointer)
{
    long page_index = find_page_index(pointer);
    lispobj *start;

    /* The address may be invalid, so do some checks. */
    if ((page_index == -1) ||
        (page_table[page_index].allocated == FREE_PAGE_FLAG))
        return NULL;
    start = (lispobj *)((void *)page_address(page_index)
                        + page_table[page_index].first_object_offset);
    return (gc_search_space(start,
                            (((lispobj *)pointer)+2)-start,
                            (lispobj *)pointer));
}
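
/* search_dynamic_space() leans on find_page_index() plus the per-page
 * first_object_offset to get from an arbitrary interior address to the
 * first object of that page's contiguous block. A compiled-out sketch of
 * the page-index arithmetic (the helper name is illustrative; the real
 * find_page_index() lives elsewhere in the runtime):
 */
#if 0
static long
sketch_find_page_index(void *addr, void *heap_base_, long num_pages)
{
    long index = ((char *)addr - (char *)heap_base_) / PAGE_BYTES;
    return (index >= 0 && index < num_pages) ? index : -1;
}
/* page_address(i) is then heap_base + i*PAGE_BYTES, and
 * page_address(i) + page_table[i].first_object_offset is where a
 * linear object search can safely start. */
#endif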

/* Is there any possibility that pointer is a valid Lisp object
 * reference, and/or something else (e.g. subroutine call return
 * address) which should prevent us from moving the referred-to thing?
 * This is called from preserve_pointers() */
static int
possibly_valid_dynamic_space_pointer(lispobj *pointer)
{
    lispobj *start_addr;

    /* Find the object start address. */
    if ((start_addr = search_dynamic_space(pointer)) == NULL) {
        return 0;
    }

    /* We need to allow raw pointers into Code objects for return
     * addresses. This will also pick up pointers to functions in code
     * objects. */
    if (widetag_of(*start_addr) == CODE_HEADER_WIDETAG) {
        /* XXX could do some further checks here */
        return 1;
    }

    /* If it's not a return address then it needs to be a valid Lisp
     * pointer. */
    if (!is_lisp_pointer((lispobj)pointer)) {
        return 0;
    }

    /* Check that the object pointed to is consistent with the pointer
     * low tag.
     */
    switch (lowtag_of((lispobj)pointer)) {
    case FUN_POINTER_LOWTAG:
        /* Start_addr should be the enclosing code object, or a closure
         * header. */
        switch (widetag_of(*start_addr)) {
        case CODE_HEADER_WIDETAG:
            /* This case is probably caught above. */
            break;
        case CLOSURE_HEADER_WIDETAG:
        case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
            if ((unsigned)pointer !=
                ((unsigned)start_addr+FUN_POINTER_LOWTAG)) {
                if (gencgc_verbose)
                    FSHOW((stderr,
                           "/Wf2: %x %x %x\n",
                           pointer, start_addr, *start_addr));
                return 0;
            }
            break;
        default:
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wf3: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
        break;
    case LIST_POINTER_LOWTAG:
        if ((unsigned)pointer !=
            ((unsigned)start_addr+LIST_POINTER_LOWTAG)) {
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wl1: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
        /* Is it a plausible cons? */
        if ((is_lisp_pointer(start_addr[0])
             || (fixnump(start_addr[0]))
             || (widetag_of(start_addr[0]) == CHARACTER_WIDETAG)
#if N_WORD_BITS == 64
             || (widetag_of(start_addr[0]) == SINGLE_FLOAT_WIDETAG)
#endif
             || (widetag_of(start_addr[0]) == UNBOUND_MARKER_WIDETAG))
            && (is_lisp_pointer(start_addr[1])
                || (fixnump(start_addr[1]))
                || (widetag_of(start_addr[1]) == CHARACTER_WIDETAG)
#if N_WORD_BITS == 64
                || (widetag_of(start_addr[1]) == SINGLE_FLOAT_WIDETAG)
#endif
                || (widetag_of(start_addr[1]) == UNBOUND_MARKER_WIDETAG)))
            break;
        else {
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wl2: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
    case INSTANCE_POINTER_LOWTAG:
        if ((unsigned)pointer !=
            ((unsigned)start_addr+INSTANCE_POINTER_LOWTAG)) {
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wi1: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
        if (widetag_of(start_addr[0]) != INSTANCE_HEADER_WIDETAG) {
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wi2: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
        break;
    case OTHER_POINTER_LOWTAG:
        if ((unsigned)pointer !=
            ((int)start_addr+OTHER_POINTER_LOWTAG)) {
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wo1: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
        /* Is it plausible? Not a cons. XXX should check the headers. */
        if (is_lisp_pointer(start_addr[0]) || ((start_addr[0] & 3) == 0)) {
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wo2: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
        switch (widetag_of(start_addr[0])) {
        case UNBOUND_MARKER_WIDETAG:
        case CHARACTER_WIDETAG:
#if N_WORD_BITS == 64
        case SINGLE_FLOAT_WIDETAG:
#endif
            if (gencgc_verbose)
                FSHOW((stderr,
                       "*Wo3: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;

            /* only pointed to by function pointers? */
        case CLOSURE_HEADER_WIDETAG:
        case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
            if (gencgc_verbose)
                FSHOW((stderr,
                       "*Wo4: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;

        case INSTANCE_HEADER_WIDETAG:
            if (gencgc_verbose)
                FSHOW((stderr,
                       "*Wo5: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;

            /* the valid other immediate pointer objects */
        case SIMPLE_VECTOR_WIDETAG:
        case RATIO_WIDETAG:
        case COMPLEX_WIDETAG:
#ifdef COMPLEX_SINGLE_FLOAT_WIDETAG
        case COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_DOUBLE_FLOAT_WIDETAG
        case COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_LONG_FLOAT_WIDETAG
        case COMPLEX_LONG_FLOAT_WIDETAG:
#endif
        case SIMPLE_ARRAY_WIDETAG:
        case COMPLEX_BASE_STRING_WIDETAG:
#ifdef COMPLEX_CHARACTER_STRING_WIDETAG
        case COMPLEX_CHARACTER_STRING_WIDETAG:
#endif
        case COMPLEX_VECTOR_NIL_WIDETAG:
        case COMPLEX_BIT_VECTOR_WIDETAG:
        case COMPLEX_VECTOR_WIDETAG:
        case COMPLEX_ARRAY_WIDETAG:
        case VALUE_CELL_HEADER_WIDETAG:
        case SYMBOL_HEADER_WIDETAG:
        case FDEFN_WIDETAG:
        case CODE_HEADER_WIDETAG:
        case BIGNUM_WIDETAG:
#if N_WORD_BITS != 64
        case SINGLE_FLOAT_WIDETAG:
#endif
        case DOUBLE_FLOAT_WIDETAG:
#ifdef LONG_FLOAT_WIDETAG
        case LONG_FLOAT_WIDETAG:
#endif
        case SIMPLE_BASE_STRING_WIDETAG:
#ifdef SIMPLE_CHARACTER_STRING_WIDETAG
        case SIMPLE_CHARACTER_STRING_WIDETAG:
#endif
        case SIMPLE_BIT_VECTOR_WIDETAG:
        case SIMPLE_ARRAY_NIL_WIDETAG:
        case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
        case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
        case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG:
        case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
        case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
        case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG
        case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
#endif
        case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
        case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG
        case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
        case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
        case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
        case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
        case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG
        case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
        case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG
        case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
        case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
#endif
        case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
        case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
#ifdef SIMPLE_ARRAY_LONG_FLOAT_WIDETAG
        case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
        case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
        case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
        case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
#endif
        case SAP_WIDETAG:
        case WEAK_POINTER_WIDETAG:
            break;

        default:
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wo6: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
        break;
    default:
        if (gencgc_verbose)
            FSHOW((stderr,
                   "*W?: %x %x %x\n",
                   pointer, start_addr, *start_addr));
        return 0;
    }

    /* looks good */
    return 1;
}
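
/* All the lowtag checks above follow one pattern: a tagged pointer is
 * accepted only if removing the tag lands exactly on the object start
 * returned by search_dynamic_space(). A compiled-out sketch of that one
 * comparison (illustrative name):
 */
#if 0
static int
pointer_matches_object_start(lispobj candidate, lispobj *start_addr,
                             unsigned long lowtag)
{
    /* e.g. for LIST_POINTER_LOWTAG: candidate must equal the cons's
     * start address plus the cons lowtag, bit for bit. */
    return candidate == ((lispobj)start_addr + lowtag);
}
#endif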

/* Adjust large bignum and vector objects. This will adjust the
 * allocated region if the size has shrunk, and move unboxed objects
 * into unboxed pages. The pages are not promoted here, and the
 * promoted region is not added to the new_regions; this is really
 * only designed to be called from preserve_pointer(). Shouldn't fail
 * if this is missed, just may delay the moving of objects to unboxed
 * pages, and the freeing of pages. */
static void
maybe_adjust_large_object(lispobj *where)
{
    long first_page;
    long nwords;

    long remaining_bytes;
    long next_page;
    long bytes_freed;
    long old_bytes_used;

    int boxed;

    /* Check whether it's a vector or bignum object. */
    switch (widetag_of(where[0])) {
    case SIMPLE_VECTOR_WIDETAG:
        boxed = BOXED_PAGE_FLAG;
        break;
    case BIGNUM_WIDETAG:
    case SIMPLE_BASE_STRING_WIDETAG:
#ifdef SIMPLE_CHARACTER_STRING_WIDETAG
    case SIMPLE_CHARACTER_STRING_WIDETAG:
#endif
    case SIMPLE_BIT_VECTOR_WIDETAG:
    case SIMPLE_ARRAY_NIL_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG
    case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
#endif
    case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG
    case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
    case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
    case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
#endif
    case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
    case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
#ifdef SIMPLE_ARRAY_LONG_FLOAT_WIDETAG
    case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
    case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
    case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
    case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
#endif
        boxed = UNBOXED_PAGE_FLAG;
        break;
    default:
        return;
    }

    /* Find its current size. */
    nwords = (sizetab[widetag_of(where[0])])(where);

    first_page = find_page_index((void *)where);
    gc_assert(first_page >= 0);

    /* Note: Any page write-protection must be removed, else a later
     * scavenge_newspace may incorrectly not scavenge these pages.
     * This would not be necessary if they are added to the new areas,
     * but let's do it for them all (they'll probably be written
     * anyway?). */

    gc_assert(page_table[first_page].first_object_offset == 0);

    next_page = first_page;
    remaining_bytes = nwords*N_WORD_BYTES;
    while (remaining_bytes > PAGE_BYTES) {
        gc_assert(page_table[next_page].gen == from_space);
        gc_assert((page_table[next_page].allocated == BOXED_PAGE_FLAG)
                  || (page_table[next_page].allocated == UNBOXED_PAGE_FLAG));
        gc_assert(page_table[next_page].large_object);
        gc_assert(page_table[next_page].first_object_offset ==
                  -PAGE_BYTES*(next_page-first_page));
        gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);

        page_table[next_page].allocated = boxed;

        /* Shouldn't be write-protected at this stage. Essential that the
         * pages aren't. */
        gc_assert(!page_table[next_page].write_protected);
        remaining_bytes -= PAGE_BYTES;
        next_page++;
    }

    /* Now only one page remains, but the object may have shrunk so
     * there may be more unused pages which will be freed. */

    /* Object may have shrunk but shouldn't have grown - check. */
    gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

    page_table[next_page].allocated = boxed;
    gc_assert(page_table[next_page].allocated ==
              page_table[first_page].allocated);

    /* Adjust the bytes_used. */
    old_bytes_used = page_table[next_page].bytes_used;
    page_table[next_page].bytes_used = remaining_bytes;

    bytes_freed = old_bytes_used - remaining_bytes;

    /* Free any remaining pages; needs care. */
    next_page++;
    while ((old_bytes_used == PAGE_BYTES) &&
           (page_table[next_page].gen == from_space) &&
           ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
            || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) &&
           page_table[next_page].large_object &&
           (page_table[next_page].first_object_offset ==
            -(next_page - first_page)*PAGE_BYTES)) {
        /* It checks out OK, free the page. We don't need to bother
         * zeroing pages as this should have been done before shrinking
         * the object. These pages shouldn't be write protected as they
         * should be zero filled. */
        gc_assert(page_table[next_page].write_protected == 0);

        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].allocated = FREE_PAGE_FLAG;
        page_table[next_page].bytes_used = 0;
        bytes_freed += old_bytes_used;
        next_page++;
    }

    if ((bytes_freed > 0) && gencgc_verbose) {
        FSHOW((stderr,
               "/maybe_adjust_large_object() freed %d\n",
               bytes_freed));
    }

    generations[from_space].bytes_allocated -= bytes_freed;
    bytes_allocated -= bytes_freed;

    return;
}
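
/* The loops above walk whole pages while more than PAGE_BYTES of the
 * object remain, then trim the final page and free any tail. The page
 * arithmetic, compiled out and with an illustrative name:
 */
#if 0
static long
sketch_pages_spanned(long nwords)
{
    long nbytes = nwords * N_WORD_BYTES;
    /* CEILING(nbytes, PAGE_BYTES): all pages but the last show
     * bytes_used == PAGE_BYTES; the last holds the remainder. An object
     * that shrank from m pages leaves m - sketch_pages_spanned(nwords)
     * trailing pages to be freed, as above. */
    return (nbytes + PAGE_BYTES - 1) / PAGE_BYTES;
}
#endif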

/* Take a possible pointer to a Lisp object and mark its page in the
 * page_table so that it will not be relocated during a GC.
 *
 * This involves locating the page it points to, then backing up to
 * the start of its region, then marking all pages dont_move from there
 * up to the first page that's not full or has a different generation.
 *
 * It is assumed that all the page static flags have been cleared at
 * the start of a GC.
 *
 * It is also assumed that the current gc_alloc() region has been
 * flushed and the tables updated. */
static void
preserve_pointer(void *addr)
{
    long addr_page_index = find_page_index(addr);
    long first_page;
    long i;
    unsigned region_allocation;

    /* quick check 1: Address is quite likely to have been invalid. */
    if ((addr_page_index == -1)
        || (page_table[addr_page_index].allocated == FREE_PAGE_FLAG)
        || (page_table[addr_page_index].bytes_used == 0)
        || (page_table[addr_page_index].gen != from_space)
        /* Skip if already marked dont_move. */
        || (page_table[addr_page_index].dont_move != 0))
        return;
    gc_assert(!(page_table[addr_page_index].allocated&OPEN_REGION_PAGE_FLAG));
    /* (Now that we know that addr_page_index is in range, it's
     * safe to index into page_table[] with it.) */
    region_allocation = page_table[addr_page_index].allocated;

    /* quick check 2: Check the offset within the page. */
    if (((unsigned)addr & (PAGE_BYTES - 1)) > page_table[addr_page_index].bytes_used)
        return;

    /* Filter out anything which can't be a pointer to a Lisp object
     * (or, as a special case which also requires dont_move, a return
     * address referring to something in a CodeObject). This is
     * expensive but important, since it vastly reduces the
     * probability that random garbage will be bogusly interpreted as
     * a pointer which prevents a page from moving. */
    if (!(possibly_valid_dynamic_space_pointer(addr)))
        return;

    /* Find the beginning of the region. Note that there may be
     * objects in the region preceding the one that we were passed a
     * pointer to: if this is the case, we will write-protect all the
     * previous objects' pages too. */

#if 0
    /* I think this'd work just as well, but without the assertions.
     * -dan 2004.01.01 */
    first_page =
        find_page_index(page_address(addr_page_index)+
                        page_table[addr_page_index].first_object_offset);
#else
    first_page = addr_page_index;
    while (page_table[first_page].first_object_offset != 0) {
        --first_page;
        /* Do some checks. */
        gc_assert(page_table[first_page].bytes_used == PAGE_BYTES);
        gc_assert(page_table[first_page].gen == from_space);
        gc_assert(page_table[first_page].allocated == region_allocation);
    }
#endif

    /* Adjust any large objects before promotion as they won't be
     * copied after promotion. */
    if (page_table[first_page].large_object) {
        maybe_adjust_large_object(page_address(first_page));
        /* If a large object has shrunk then addr may now point to a
         * free area in which case it's ignored here. Note it gets
         * through the valid pointer test above because the tail looks
         * like conses. */
        if ((page_table[addr_page_index].allocated == FREE_PAGE_FLAG)
            || (page_table[addr_page_index].bytes_used == 0)
            /* Check the offset within the page. */
            || (((unsigned)addr & (PAGE_BYTES - 1))
                > page_table[addr_page_index].bytes_used)) {
            FSHOW((stderr,
                   "weird? ignore ptr 0x%x to freed area of large object\n",
                   addr));
            return;
        }
        /* It may have moved to unboxed pages. */
        region_allocation = page_table[first_page].allocated;
    }

    /* Now work forward until the end of this contiguous area is found,
     * marking all pages as dont_move. */
    for (i = first_page; ;i++) {
        gc_assert(page_table[i].allocated == region_allocation);

        /* Mark the page static. */
        page_table[i].dont_move = 1;

        /* Move the page to the new_space. XX I'd rather not do this
         * but the GC logic is not quite able to cope with the static
         * pages remaining in the from space. This also requires the
         * generation bytes_allocated counters be updated. */
        page_table[i].gen = new_space;
        generations[new_space].bytes_allocated += page_table[i].bytes_used;
        generations[from_space].bytes_allocated -= page_table[i].bytes_used;

        /* It is essential that the pages are not write protected as
         * they may have pointers into the old-space which need
         * scavenging. They shouldn't be write protected at this
         * stage. */
        gc_assert(!page_table[i].write_protected);

        /* Check whether this is the last page in this contiguous block.. */
        if ((page_table[i].bytes_used < PAGE_BYTES)
            /* ..or it is PAGE_BYTES and is the last in the block */
            || (page_table[i+1].allocated == FREE_PAGE_FLAG)
            || (page_table[i+1].bytes_used == 0) /* next page free */
            || (page_table[i+1].gen != from_space) /* diff. gen */
            || (page_table[i+1].first_object_offset == 0))
            break;
    }

    /* Check that the page is now static. */
    gc_assert(page_table[addr_page_index].dont_move != 0);
}
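
/* The backup loop above relies on the invariant that a contiguous block's
 * first page has first_object_offset == 0, while later pages record the
 * (negative) byte offset back to it. A compiled-out sketch showing that
 * both ways of finding the block start should agree (illustrative name):
 */
#if 0
static long
sketch_block_first_page(long page)
{
    /* Direct: one step via the recorded offset, as in dan's alternative. */
    long direct = find_page_index(page_address(page)
                                  + page_table[page].first_object_offset);
    /* Iterative: walk back to the marker page, as the live code does. */
    long p = page;
    while (page_table[p].first_object_offset != 0)
        --p;
    return (direct == p) ? p : -1; /* -1 would indicate a broken invariant */
}
#endif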

/* If the given page is not write-protected, then scan it for pointers
 * to younger generations or the top temp. generation; if no
 * suspicious pointers are found then the page is write-protected.
 *
 * Care is taken to check for pointers to the current gc_alloc()
 * region if it is a younger generation or the temp. generation. This
 * frees the caller from doing a gc_alloc_update_page_tables(). Actually
 * the gc_alloc_generation does not need to be checked as this is only
 * called from scavenge_generation() when the gc_alloc generation is
 * younger, so it just checks if there is a pointer to the current
 * region.
 *
 * We return 1 if the page was write-protected, else 0. */
static int
update_page_write_prot(long page)
{
    int gen = page_table[page].gen;
    long j;
    int wp_it = 1;
    void **page_addr = (void **)page_address(page);
    long num_words = page_table[page].bytes_used / N_WORD_BYTES;

    /* Shouldn't be a free page. */
    gc_assert(page_table[page].allocated != FREE_PAGE_FLAG);
    gc_assert(page_table[page].bytes_used != 0);

    /* Skip if it's already write-protected, pinned, or unboxed */
    if (page_table[page].write_protected
        || page_table[page].dont_move
        || (page_table[page].allocated & UNBOXED_PAGE_FLAG))
        return (0);

    /* Scan the page for pointers to younger generations or the
     * top temp. generation. */

    for (j = 0; j < num_words; j++) {
        void *ptr = *(page_addr+j);
        long index = find_page_index(ptr);

        /* Check that it's in the dynamic space */
        if (index != -1)
            if (/* Does it point to a younger or the temp. generation? */
                ((page_table[index].allocated != FREE_PAGE_FLAG)
                 && (page_table[index].bytes_used != 0)
                 && ((page_table[index].gen < gen)
                     || (page_table[index].gen == NUM_GENERATIONS)))

                /* Or does it point within a current gc_alloc() region? */
                || ((boxed_region.start_addr <= ptr)
                    && (ptr <= boxed_region.free_pointer))
                || ((unboxed_region.start_addr <= ptr)
                    && (ptr <= unboxed_region.free_pointer))) {
                wp_it = 0;
                break;
            }
    }

    if (wp_it == 1) {
        /* Write-protect the page. */
        /*FSHOW((stderr, "/write-protecting page %d gen %d\n", page, gen));*/

        os_protect((void *)page_addr,
                   PAGE_BYTES,
                   OS_VM_PROT_READ|OS_VM_PROT_EXECUTE);

        /* Note the page as protected in the page tables. */
        page_table[page].write_protected = 1;
    }

    return (wp_it);
}

/* Scavenge a generation.
 *
 * This will not resolve all pointers when generation is the new
 * space, as new objects may be added which are not checked here - use
 * scavenge_newspace generation.
 *
 * Write-protected pages should not have any pointers to the
 * from_space so do not need scavenging; thus write-protected pages are
 * not always scavenged. There is some code to check that these pages
 * are not written; but to check fully the write-protected pages need
 * to be scavenged by disabling the code to skip them.
 *
 * Under the current scheme when a generation is GCed the younger
 * generations will be empty. So, when a generation is being GCed it
 * is only necessary to scavenge the older generations for pointers,
 * not the younger. So a page that does not have pointers to younger
 * generations does not need to be scavenged.
 *
 * The write-protection can be used to note pages that don't have
 * pointers to younger pages. But pages can be written without having
 * pointers to younger generations. After the pages are scavenged here
 * they can be scanned for pointers to younger generations and if
 * there are none the page can be write-protected.
 *
 * One complication is when the newspace is the top temp. generation.
 *
 * Enabling SC_GEN_CK scavenges the write-protected pages and checks
 * that none were written, which they shouldn't be as they should have
 * no pointers to younger generations. This breaks down for weak
 * pointers as the objects contain a link to the next and are written
 * if a weak pointer is scavenged. Still it's a useful check. */
static void
scavenge_generation(int generation)
{
    long i;
    int num_wp = 0;

#define SC_GEN_CK 0
#if SC_GEN_CK
    /* Clear the write_protected_cleared flags on all pages. */
    for (i = 0; i < NUM_PAGES; i++)
        page_table[i].write_protected_cleared = 0;
#endif

    for (i = 0; i < last_free_page; i++) {
        if ((page_table[i].allocated & BOXED_PAGE_FLAG)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)) {
            long last_page,j;
            int write_protected=1;

            /* This should be the start of a region */
            gc_assert(page_table[i].first_object_offset == 0);

            /* Now work forward until the end of the region */
            for (last_page = i; ; last_page++) {
                write_protected =
                    write_protected && page_table[last_page].write_protected;
                if ((page_table[last_page].bytes_used < PAGE_BYTES)
                    /* Or it is PAGE_BYTES and is the last in the block */
                    || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG))
                    || (page_table[last_page+1].bytes_used == 0)
                    || (page_table[last_page+1].gen != generation)
                    || (page_table[last_page+1].first_object_offset == 0))
                    break;
            }
            if (!write_protected) {
                scavenge(page_address(i),
                         (page_table[last_page].bytes_used +
                          (last_page-i)*PAGE_BYTES)/N_WORD_BYTES);

                /* Now scan the pages and write protect those that
                 * don't have pointers to younger generations. */
                if (enable_page_protection) {
                    for (j = i; j <= last_page; j++) {
                        num_wp += update_page_write_prot(j);
                    }
                }
            }
            i = last_page;
        }
    }
    if ((gencgc_verbose > 1) && (num_wp != 0)) {
        FSHOW((stderr,
               "/write protected %d pages within generation %d\n",
               num_wp, generation));
    }

#if SC_GEN_CK
    /* Check that none of the write_protected pages in this generation
     * have been written to. */
    for (i = 0; i < NUM_PAGES; i++) {
        if ((page_table[i].allocated != FREE_PAGE_FLAG)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)
            && (page_table[i].write_protected_cleared != 0)) {
            FSHOW((stderr, "/scavenge_generation() %d\n", generation));
            FSHOW((stderr,
                   "/page bytes_used=%d first_object_offset=%d dont_move=%d\n",
                   page_table[i].bytes_used,
                   page_table[i].first_object_offset,
                   page_table[i].dont_move));
            lose("write to protected page %d in scavenge_generation()", i);
        }
    }
#endif
}

/* Scavenge a newspace generation. As it is scavenged new objects may
 * be allocated to it; these will also need to be scavenged. This
 * repeats until there are no more objects unscavenged in the
 * newspace generation.
 *
 * To help improve the efficiency, areas written are recorded by
 * gc_alloc() and only these scavenged. Sometimes a little more will be
 * scavenged, but this causes no harm. An easy check is done that the
 * scavenged bytes equal the number allocated in the previous
 * scavenge.
 *
 * Write-protected pages are not scanned except if they are marked
 * dont_move in which case they may have been promoted and still have
 * pointers to the from space.
 *
 * Write-protected pages could potentially be written by alloc however
 * to avoid having to handle re-scavenging of write-protected pages
 * gc_alloc() does not write to write-protected pages.
 *
 * New areas of objects allocated are recorded alternately in the two
 * new_areas arrays below. */
static struct new_area new_areas_1[NUM_NEW_AREAS];
static struct new_area new_areas_2[NUM_NEW_AREAS];
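
/* new_areas_1/new_areas_2 form a classic double buffer: gc_alloc()
 * appends to one array while the scavenger drains the other, and the
 * roles swap each cycle. A compiled-out sketch of the swap (illustrative
 * name; the live code does this inline below):
 */
#if 0
static void
sketch_swap_new_areas(struct new_area (**current)[NUM_NEW_AREAS],
                      struct new_area (**previous)[NUM_NEW_AREAS])
{
    *previous = *current;
    *current = (*current == &new_areas_1) ? &new_areas_2 : &new_areas_1;
    /* gc_alloc() is then pointed at *current (new_areas = *current;
     * new_areas_index = 0) while *previous is scavenged. */
}
#endif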

/* Do one full scan of the new space generation. This is not enough to
 * complete the job as new objects may be added to the generation in
 * the process which are not scavenged. */
static void
scavenge_newspace_generation_one_scan(int generation)
{
    long i;

    FSHOW((stderr,
           "/starting one full scan of newspace generation %d\n",
           generation));
    for (i = 0; i < last_free_page; i++) {
        /* Note that this skips over open regions when it encounters them. */
        if ((page_table[i].allocated & BOXED_PAGE_FLAG)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)
            && ((page_table[i].write_protected == 0)
                /* (This may be redundant as write_protected is now
                 * cleared before promotion.) */
                || (page_table[i].dont_move == 1))) {
            long last_page;
            int all_wp=1;

            /* The scavenge will start at the first_object_offset of page i.
             *
             * We need to find the full extent of this contiguous
             * block in case objects span pages.
             *
             * Now work forward until the end of this contiguous area
             * is found. A small area is preferred as there is a
             * better chance of its pages being write-protected. */
            for (last_page = i; ;last_page++) {
                /* If all pages are write-protected and movable,
                 * then no need to scavenge */
                all_wp=all_wp && page_table[last_page].write_protected &&
                    !page_table[last_page].dont_move;

                /* Check whether this is the last page in this
                 * contiguous block */
                if ((page_table[last_page].bytes_used < PAGE_BYTES)
                    /* Or it is PAGE_BYTES and is the last in the block */
                    || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG))
                    || (page_table[last_page+1].bytes_used == 0)
                    || (page_table[last_page+1].gen != generation)
                    || (page_table[last_page+1].first_object_offset == 0))
                    break;
            }

            /* Do a limited check for write-protected pages. */
            if (!all_wp) {
                long size;

                size = (page_table[last_page].bytes_used
                        + (last_page-i)*PAGE_BYTES
                        - page_table[i].first_object_offset)/N_WORD_BYTES;
                new_areas_ignore_page = last_page;

                scavenge(page_address(i) +
                         page_table[i].first_object_offset,
                         size);
            }
            i = last_page;
        }
    }
    FSHOW((stderr,
           "/done with one full scan of newspace generation %d\n",
           generation));
}

/* Do a complete scavenge of the newspace generation. */
static void
scavenge_newspace_generation(int generation)
{
    long i;

    /* the new_areas array currently being written to by gc_alloc() */
    struct new_area (*current_new_areas)[] = &new_areas_1;
    long current_new_areas_index;

    /* the new_areas created by the previous scavenge cycle */
    struct new_area (*previous_new_areas)[] = NULL;
    long previous_new_areas_index;

    /* Flush the current regions updating the tables. */
    gc_alloc_update_all_page_tables();

    /* Turn on the recording of new areas by gc_alloc(). */
    new_areas = current_new_areas;
    new_areas_index = 0;

    /* Don't need to record new areas that get scavenged anyway during
     * scavenge_newspace_generation_one_scan. */
    record_new_objects = 1;

    /* Start with a full scavenge. */
    scavenge_newspace_generation_one_scan(generation);

    /* Record all new areas now. */
    record_new_objects = 2;

    /* Flush the current regions updating the tables. */
    gc_alloc_update_all_page_tables();

    /* Grab new_areas_index. */
    current_new_areas_index = new_areas_index;

    /*FSHOW((stderr,
             "The first scan is finished; current_new_areas_index=%d.\n",
             current_new_areas_index));*/

    while (current_new_areas_index > 0) {
        /* Move the current to the previous new areas */
        previous_new_areas = current_new_areas;
        previous_new_areas_index = current_new_areas_index;

        /* Scavenge all the areas in previous new areas. Any new areas
         * allocated are saved in current_new_areas. */

        /* Allocate an array for current_new_areas; alternating between
         * new_areas_1 and 2 */
        if (previous_new_areas == &new_areas_1)
            current_new_areas = &new_areas_2;
        else
            current_new_areas = &new_areas_1;

        /* Set up for gc_alloc(). */
        new_areas = current_new_areas;
        new_areas_index = 0;

        /* Check whether previous_new_areas had overflowed. */
        if (previous_new_areas_index >= NUM_NEW_AREAS) {

            /* New areas of objects allocated have been lost so need to do a
             * full scan to be sure! If this becomes a problem try
             * increasing NUM_NEW_AREAS. */
            if (gencgc_verbose)
                SHOW("new_areas overflow, doing full scavenge");

            /* Don't need to record new areas that get scavenged anyway
             * during scavenge_newspace_generation_one_scan. */
            record_new_objects = 1;

            scavenge_newspace_generation_one_scan(generation);

            /* Record all new areas now. */
            record_new_objects = 2;

            /* Flush the current regions updating the tables. */
            gc_alloc_update_all_page_tables();
        } else {
            /* Work through previous_new_areas. */
            for (i = 0; i < previous_new_areas_index; i++) {
                long page = (*previous_new_areas)[i].page;
                long offset = (*previous_new_areas)[i].offset;
                long size = (*previous_new_areas)[i].size / N_WORD_BYTES;
                gc_assert((*previous_new_areas)[i].size % N_WORD_BYTES == 0);
                scavenge(page_address(page)+offset, size);
            }

            /* Flush the current regions updating the tables. */
            gc_alloc_update_all_page_tables();
        }

        current_new_areas_index = new_areas_index;

        /*FSHOW((stderr,
                 "The re-scan has finished; current_new_areas_index=%d.\n",
                 current_new_areas_index));*/
    }

    /* Turn off recording of areas allocated by gc_alloc(). */
    record_new_objects = 0;

#define SC_NS_GEN_CK 0
#if SC_NS_GEN_CK
    /* Check that none of the write_protected pages in this generation
     * have been written to. */
    for (i = 0; i < NUM_PAGES; i++) {
        if ((page_table[i].allocated != FREE_PAGE_FLAG)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)
            && (page_table[i].write_protected_cleared != 0)
            && (page_table[i].dont_move == 0)) {
            lose("write protected page %d written to in scavenge_newspace_generation\ngeneration=%d dont_move=%d",
                 i, generation, page_table[i].dont_move);
        }
    }
#endif
}
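
/* Stripped of the recording machinery, the loop above is a worklist
 * fixed point: scavenging may allocate, allocation produces new areas,
 * and the loop repeats until a pass produces none. A compiled-out
 * skeleton (both helper names are illustrative, not real runtime
 * functions):
 */
#if 0
static long initial_scan(void);             /* one full pass; returns areas produced */
static long scavenge_recorded_areas(void);  /* drain worklist; may add to it */

static void
sketch_scavenge_to_fixed_point(void)
{
    long pending = initial_scan();
    while (pending > 0)
        pending = scavenge_recorded_areas();
}
#endif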

/* Un-write-protect all the pages in from_space. This is done at the
 * start of a GC, else there may be many page faults while scavenging
 * the newspace (I've seen it drive the system time to 99%). These pages
 * would need to be unprotected anyway before unmapping in
 * free_oldspace; not sure what effect this has on paging.. */
static void
unprotect_oldspace(void)
{
    long i;

    for (i = 0; i < last_free_page; i++) {
        if ((page_table[i].allocated != FREE_PAGE_FLAG)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == from_space)) {
            void *page_start;

            page_start = (void *)page_address(i);

            /* Remove any write-protection. We should be able to rely
             * on the write-protect flag to avoid redundant calls. */
            if (page_table[i].write_protected) {
                os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
                page_table[i].write_protected = 0;
            }
        }
    }
}

/* Work through all the pages and free any in from_space. This
 * assumes that all objects have been copied or promoted to an older
 * generation. Bytes_allocated and the generation bytes_allocated
 * counter are updated. The number of bytes freed is returned. */
static long
free_oldspace(void)
{
    long bytes_freed = 0;
    long first_page, last_page;

    first_page = 0;

    do {
        /* Find a first page for the next region of pages. */
        while ((first_page < last_free_page)
               && ((page_table[first_page].allocated == FREE_PAGE_FLAG)
                   || (page_table[first_page].bytes_used == 0)
                   || (page_table[first_page].gen != from_space)))
            first_page++;

        if (first_page >= last_free_page)
            break;

        /* Find the last page of this region. */
        last_page = first_page;

        do {
            /* Free the page. */
            bytes_freed += page_table[last_page].bytes_used;
            generations[page_table[last_page].gen].bytes_allocated -=
                page_table[last_page].bytes_used;
            page_table[last_page].allocated = FREE_PAGE_FLAG;
            page_table[last_page].bytes_used = 0;

            /* Remove any write-protection. We should be able to rely
             * on the write-protect flag to avoid redundant calls. */
            {
                void *page_start = (void *)page_address(last_page);

                if (page_table[last_page].write_protected) {
                    os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
                    page_table[last_page].write_protected = 0;
                }
            }
            last_page++;
        }
        while ((last_page < last_free_page)
               && (page_table[last_page].allocated != FREE_PAGE_FLAG)
               && (page_table[last_page].bytes_used != 0)
               && (page_table[last_page].gen == from_space));

        /* Zero pages from first_page to (last_page-1).
         *
         * FIXME: Why not use os_zero(..) function instead of
         * hand-coding this again? (Check other gencgc_unmap_zero
         * stuff too.) */
        if (gencgc_unmap_zero) {
            void *page_start, *addr;

            page_start = (void *)page_address(first_page);

            os_invalidate(page_start, PAGE_BYTES*(last_page-first_page));
            addr = os_validate(page_start, PAGE_BYTES*(last_page-first_page));
            if (addr == NULL || addr != page_start) {
                lose("free_oldspace: page moved, 0x%08x ==> 0x%08x",page_start,
                     addr);
            }
        } else {
            long *page_start;

            page_start = (long *)page_address(first_page);
            memset(page_start, 0,PAGE_BYTES*(last_page-first_page));
        }

        first_page = last_page;

    } while (first_page < last_free_page);

    bytes_allocated -= bytes_freed;
    return bytes_freed;
}
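
/* The gencgc_unmap_zero branch above trades a memset() against giving the
 * pages back to the OS and taking fresh zero-filled ones. A compiled-out
 * sketch of the two strategies side by side (mmap flags as on a typical
 * Unix; illustrative only - the real code goes through os_invalidate()
 * and os_validate()):
 */
#if 0
#include <string.h>
#include <sys/mman.h>

static void
zero_pages(void *start, long nbytes, int by_unmap)
{
    if (by_unmap) {
        /* Drop the pages and map fresh ones; the kernel hands back
         * zero-fill-on-demand memory, so nothing is touched eagerly. */
        munmap(start, nbytes);
        mmap(start, nbytes, PROT_READ|PROT_WRITE,
             MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
    } else {
        /* Touch every byte now; typically cheaper for short runs. */
        memset(start, 0, nbytes);
    }
}
#endif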

/* Print some information about a pointer at the given address. */
static void
print_ptr(lispobj *addr)
{
    /* If addr is in the dynamic space then print out the page information. */
    long pi1 = find_page_index((void*)addr);

    if (pi1 != -1)
        fprintf(stderr,"  %x: page %d  alloc %d  gen %d  bytes_used %d  offset %d  dont_move %d\n",
                (unsigned long) addr,
                pi1,
                page_table[pi1].allocated,
                page_table[pi1].gen,
                page_table[pi1].bytes_used,
                page_table[pi1].first_object_offset,
                page_table[pi1].dont_move);
    fprintf(stderr,"  %x %x %x %x (%x) %x %x %x %x\n",
            *(addr-4),
            *(addr-3),
            *(addr-2),
            *(addr-1),
            *(addr-0),
            *(addr+1),
            *(addr+2),
            *(addr+3),
            *(addr+4));
}

extern long undefined_tramp;

static void
verify_space(lispobj *start, size_t words)
{
    int is_in_dynamic_space = (find_page_index((void*)start) != -1);
    int is_in_readonly_space =
        (READ_ONLY_SPACE_START <= (unsigned)start &&
         (unsigned)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));

    while (words > 0) {
        size_t count = 1;
        lispobj thing = *(lispobj*)start;

        if (is_lisp_pointer(thing)) {
            long page_index = find_page_index((void*)thing);
            long to_readonly_space =
                (READ_ONLY_SPACE_START <= thing &&
                 thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
            long to_static_space =
                (STATIC_SPACE_START <= thing &&
                 thing < SymbolValue(STATIC_SPACE_FREE_POINTER,0));

            /* Does it point to the dynamic space? */
            if (page_index != -1) {
                /* If it's within the dynamic space it should point to a used
                 * page. XX Could check the offset too. */
                if ((page_table[page_index].allocated != FREE_PAGE_FLAG)
                    && (page_table[page_index].bytes_used == 0))
                    lose ("Ptr %x @ %x sees free page.", thing, start);
                /* Check that it doesn't point to a forwarding pointer! */
                if (*((lispobj *)native_pointer(thing)) == 0x01) {
                    lose("Ptr %x @ %x sees forwarding ptr.", thing, start);
                }
                /* Check that it's not in the RO space as it would then be a
                 * pointer from the RO to the dynamic space. */
                if (is_in_readonly_space) {
                    lose("ptr to dynamic space %x from RO space %x",
                         thing, start);
                }
                /* Does it point to a plausible object? This check slows
                 * it down a lot (so it's commented out).
                 *
                 * "a lot" is serious: it ate 50 minutes cpu time on
                 * my duron 950 before I came back from lunch and
                 * killed it.
                 *
                 * FIXME: Add a variable to enable this
                 * dynamically. */
                /*
                if (!possibly_valid_dynamic_space_pointer((lispobj *)thing)) {
                    lose("ptr %x to invalid object %x", thing, start);
                }
                */
            } else {
                /* Verify that it points to another valid space. */
                if (!to_readonly_space && !to_static_space
                    && (thing != (unsigned)&undefined_tramp)) {
                    lose("Ptr %x @ %x sees junk.", thing, start);
                }
            }
        } else {
            if (!(fixnump(thing))) {
                /* skip fixnums */
                switch(widetag_of(*start)) {

                    /* boxed objects */
                case SIMPLE_VECTOR_WIDETAG:
                case RATIO_WIDETAG:
                case COMPLEX_WIDETAG:
                case SIMPLE_ARRAY_WIDETAG:
                case COMPLEX_BASE_STRING_WIDETAG:
#ifdef COMPLEX_CHARACTER_STRING_WIDETAG
                case COMPLEX_CHARACTER_STRING_WIDETAG:
#endif
                case COMPLEX_VECTOR_NIL_WIDETAG:
                case COMPLEX_BIT_VECTOR_WIDETAG:
                case COMPLEX_VECTOR_WIDETAG:
                case COMPLEX_ARRAY_WIDETAG:
                case CLOSURE_HEADER_WIDETAG:
                case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
                case VALUE_CELL_HEADER_WIDETAG:
                case SYMBOL_HEADER_WIDETAG:
                case CHARACTER_WIDETAG:
#if N_WORD_BITS == 64
                case SINGLE_FLOAT_WIDETAG:
#endif
                case UNBOUND_MARKER_WIDETAG:
                case INSTANCE_HEADER_WIDETAG:
                case FDEFN_WIDETAG:
                    count = 1;
                    break;

                case CODE_HEADER_WIDETAG:
                    {
                        lispobj object = *start;
                        struct code *code;
                        long nheader_words, ncode_words, nwords;
                        lispobj fheaderl;
                        struct simple_fun *fheaderp;

                        code = (struct code *) start;

                        /* Check that it's not in the dynamic space.
                         * FIXME: Isn't it supposed to be OK for code
                         * objects to be in the dynamic space these days? */
                        if (is_in_dynamic_space
                            /* It's ok if it's byte compiled code. The trace
                             * table offset will be a fixnum if it's x86
                             * compiled code - check.
                             *
                             * FIXME: #^#@@! lack of abstraction here..
                             * This line can probably go away now that
                             * there's no byte compiler, but I've got
                             * too much to worry about right now to try
                             * to make sure. -- WHN 2001-10-06 */
                            && fixnump(code->trace_table_offset)
                            /* Only when enabled */
                            && verify_dynamic_code_check) {
                            FSHOW((stderr,
                                   "/code object at %x in the dynamic space\n",
                                   start));
                        }

                        ncode_words = fixnum_value(code->code_size);
                        nheader_words = HeaderValue(object);
                        nwords = ncode_words + nheader_words;
                        nwords = CEILING(nwords, 2);
                        /* Scavenge the boxed section of the code data block */
                        verify_space(start + 1, nheader_words - 1);

                        /* Scavenge the boxed section of each function
                         * object in the code data block. */
                        fheaderl = code->entry_points;
                        while (fheaderl != NIL) {
                            fheaderp =
                                (struct simple_fun *) native_pointer(fheaderl);
                            gc_assert(widetag_of(fheaderp->header) == SIMPLE_FUN_HEADER_WIDETAG);
                            verify_space(&fheaderp->name, 1);
                            verify_space(&fheaderp->arglist, 1);
                            verify_space(&fheaderp->type, 1);
                            fheaderl = fheaderp->next;
                        }
                        count = nwords;
                        break;
                    }

                    /* unboxed objects */
                case BIGNUM_WIDETAG:
#if N_WORD_BITS != 64
                case SINGLE_FLOAT_WIDETAG:
#endif
                case DOUBLE_FLOAT_WIDETAG:
#ifdef LONG_FLOAT_WIDETAG
                case LONG_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_SINGLE_FLOAT_WIDETAG
                case COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_DOUBLE_FLOAT_WIDETAG
                case COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_LONG_FLOAT_WIDETAG
                case COMPLEX_LONG_FLOAT_WIDETAG:
#endif
                case SIMPLE_BASE_STRING_WIDETAG:
#ifdef SIMPLE_CHARACTER_STRING_WIDETAG
                case SIMPLE_CHARACTER_STRING_WIDETAG:
#endif
                case SIMPLE_BIT_VECTOR_WIDETAG:
                case SIMPLE_ARRAY_NIL_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG
                case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
#endif
                case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG
                case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
                case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
                case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
#endif
                case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
                case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
#ifdef SIMPLE_ARRAY_LONG_FLOAT_WIDETAG
                case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
                case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
                case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
                case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
#endif
                case SAP_WIDETAG:
                case WEAK_POINTER_WIDETAG:
                    count = (sizetab[widetag_of(*start)])(start);
                    break;

                default:
                    gc_abort();
                }
            }
        }
        start += count;
        words -= count;
    }
}

static void
verify_gc(void)
{
    /* FIXME: It would be nice to make names consistent so that
     * foo_size meant size *in* *bytes* instead of size in some
     * arbitrary units. (Yes, this caused a bug, how did you guess?:-)
     * Some counts of lispobjs are called foo_count; it might be good
     * to grep for all foo_size and rename the appropriate ones to
     * foo_count. */
    long read_only_space_size =
        (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0)
        - (lispobj*)READ_ONLY_SPACE_START;
    long static_space_size =
        (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER,0)
        - (lispobj*)STATIC_SPACE_START;
    struct thread *th;
    for_each_thread(th) {
        long binding_stack_size =
            (lispobj*)SymbolValue(BINDING_STACK_POINTER,th)
            - (lispobj*)th->binding_stack_start;
        verify_space(th->binding_stack_start, binding_stack_size);
    }
    verify_space((lispobj*)READ_ONLY_SPACE_START, read_only_space_size);
    verify_space((lispobj*)STATIC_SPACE_START, static_space_size);
}

static void
verify_generation(int generation)
{
    long i;

    for (i = 0; i < last_free_page; i++) {
        if ((page_table[i].allocated != FREE_PAGE_FLAG)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)) {
            long last_page;
            int region_allocation = page_table[i].allocated;

            /* This should be the start of a contiguous block */
            gc_assert(page_table[i].first_object_offset == 0);

            /* Need to find the full extent of this contiguous block in case
               objects span pages. */

            /* Now work forward until the end of this contiguous area is
               found. */
            for (last_page = i; ;last_page++)
                /* Check whether this is the last page in this contiguous
                 * block. */
                if ((page_table[last_page].bytes_used < PAGE_BYTES)
                    /* Or it is PAGE_BYTES and is the last in the block */
                    || (page_table[last_page+1].allocated != region_allocation)
                    || (page_table[last_page+1].bytes_used == 0)
                    || (page_table[last_page+1].gen != generation)
                    || (page_table[last_page+1].first_object_offset == 0))
                    break;

            verify_space(page_address(i), (page_table[last_page].bytes_used
                                           + (last_page-i)*PAGE_BYTES)/N_WORD_BYTES);
            i = last_page;
        }
    }
}

/* Check that all the free space is zero filled. */
static void
verify_zero_fill(void)
{
    long page;

    for (page = 0; page < last_free_page; page++) {
        if (page_table[page].allocated == FREE_PAGE_FLAG) {
            /* The whole page should be zero filled. */
            long *start_addr = (long *)page_address(page);
            long size = PAGE_BYTES / N_WORD_BYTES;
            long i;
            for (i = 0; i < size; i++) {
                if (start_addr[i] != 0) {
                    lose("free page not zero at %x", start_addr + i);
                }
            }
        } else {
            long free_bytes = PAGE_BYTES - page_table[page].bytes_used;
            if (free_bytes > 0) {
                long *start_addr = (long *)((unsigned)page_address(page)
                                            + page_table[page].bytes_used);
                long size = free_bytes / N_WORD_BYTES;
                long i;
                for (i = 0; i < size; i++) {
                    if (start_addr[i] != 0) {
                        lose("free region not zero at %x", start_addr + i);
                    }
                }
            }
        }
    }
}

/* External entry point for verify_zero_fill */
void
gencgc_verify_zero_fill(void)
{
    /* Flush the alloc regions updating the tables. */
    gc_alloc_update_all_page_tables();
    SHOW("verifying zero fill");
    verify_zero_fill();
}

static void
verify_dynamic_space(void)
{
    long i;

    for (i = 0; i < NUM_GENERATIONS; i++)
        verify_generation(i);

    if (gencgc_enable_verify_zero_fill)
        verify_zero_fill();
}

/* Write-protect all the dynamic boxed pages in the given generation. */
static void
write_protect_generation_pages(int generation)
{
    long i;

    gc_assert(generation < NUM_GENERATIONS);

    for (i = 0; i < last_free_page; i++)
        if ((page_table[i].allocated == BOXED_PAGE_FLAG)
            && (page_table[i].bytes_used != 0)
            && !page_table[i].dont_move
            && (page_table[i].gen == generation)) {
            void *page_start;

            page_start = (void *)page_address(i);

            os_protect(page_start,
                       PAGE_BYTES,
                       OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);

            /* Note the page as protected in the page tables. */
            page_table[i].write_protected = 1;
        }

    if (gencgc_verbose > 1) {
        FSHOW((stderr,
               "/write protected %d of %d pages in generation %d\n",
               count_write_protect_generation_pages(generation),
               count_generation_pages(generation),
               generation));
    }
}

/* Garbage collect a generation. If raise is 0 then the remains of the
 * generation are not raised to the next generation. */
static void
garbage_collect_generation(int generation, int raise)
{
    unsigned long bytes_freed;
    long i;
    unsigned long static_space_size;
    struct thread *th;
    gc_assert(generation <= (NUM_GENERATIONS-1));

    /* The oldest generation can't be raised. */
    gc_assert((generation != (NUM_GENERATIONS-1)) || (raise == 0));

    /* Initialize the weak pointer list. */
    weak_pointers = NULL;

    /* When a generation is not being raised it is transported to a
     * temporary generation (NUM_GENERATIONS), and lowered when
     * done. Set up this new generation. There should be no pages
     * allocated to it yet. */
    if (!raise)
        gc_assert(generations[NUM_GENERATIONS].bytes_allocated == 0);

    /* Set the global src and dest. generations */
    from_space = generation;
    if (raise)
        new_space = generation+1;
    else
        new_space = NUM_GENERATIONS;

    /* Change to a new space for allocation, resetting the alloc_start_page */
    gc_alloc_generation = new_space;
    generations[new_space].alloc_start_page = 0;
    generations[new_space].alloc_unboxed_start_page = 0;
    generations[new_space].alloc_large_start_page = 0;
    generations[new_space].alloc_large_unboxed_start_page = 0;

    /* Before any pointers are preserved, the dont_move flags on the
     * pages need to be cleared. */
    for (i = 0; i < last_free_page; i++)
        if(page_table[i].gen==from_space)
            page_table[i].dont_move = 0;

    /* Un-write-protect the old-space pages. This is essential for the
     * promoted pages as they may contain pointers into the old-space
     * which need to be scavenged. It also helps avoid unnecessary page
     * faults as forwarding pointers are written into them. They need to
     * be un-protected anyway before unmapping later. */
    unprotect_oldspace();

    /* Scavenge the stacks' conservative roots. */

    /* there are potentially two stacks for each thread: the main
     * stack, which may contain Lisp pointers, and the alternate stack.
     * We don't ever run Lisp code on the altstack, but it may
     * host a sigcontext with lisp objects in it */

    /* what we need to do: (1) find the stack pointer for the main
     * stack; scavenge it (2) find the interrupt context on the
     * alternate stack that might contain lisp values, and scavenge
     * that */

    /* we assume that none of the preceding applies to the thread that
     * initiates GC. If you ever call GC from inside an altstack
     * handler, you will lose. */
    for_each_thread(th) {
        void **ptr;
        void **esp=(void **)-1;
#ifdef LISP_FEATURE_SB_THREAD
        long i,free;
        if(th==arch_os_get_current_thread()) {
            esp = (void **) &raise;
        } else {
            void **esp1;
            free=fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
            for(i=free-1;i>=0;i--) {
                os_context_t *c=th->interrupt_contexts[i];
                esp1 = (void **) *os_context_register_addr(c,reg_SP);
                if(esp1>=th->control_stack_start && esp1<th->control_stack_end){
                    if(esp1<esp) esp=esp1;
                    for(ptr = (void **)(c+1); ptr>=(void **)c; ptr--) {
                        preserve_pointer(*ptr);
                    }
                }
            }
        }
#else
        esp = (void **) &raise;
#endif
        for (ptr = (void **)th->control_stack_end; ptr > esp;  ptr--) {
            preserve_pointer(*ptr);
        }
    }

#ifdef QSHOW
    if (gencgc_verbose > 1) {
        long num_dont_move_pages = count_dont_move_pages();
        fprintf(stderr,
                "/non-movable pages due to conservative pointers = %d (%d bytes)\n",
                num_dont_move_pages,
                num_dont_move_pages * PAGE_BYTES);
    }
#endif

    /* Scavenge all the rest of the roots. */

    /* Scavenge the Lisp functions of the interrupt handlers, taking
     * care to avoid SIG_DFL and SIG_IGN. */
    for_each_thread(th) {
        struct interrupt_data *data=th->interrupt_data;
        for (i = 0; i < NSIG; i++) {
            union interrupt_handler handler = data->interrupt_handlers[i];
            if (!ARE_SAME_HANDLER(handler.c, SIG_IGN) &&
                !ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
                scavenge((lispobj *)(data->interrupt_handlers + i), 1);
            }
        }
    }
    /* Scavenge the binding stacks. */
    {
        struct thread *th;
        for_each_thread(th) {
            long len = (lispobj *)SymbolValue(BINDING_STACK_POINTER,th) -
                th->binding_stack_start;
            scavenge((lispobj *) th->binding_stack_start,len);
#ifdef LISP_FEATURE_SB_THREAD
            /* do the tls as well */
            len=fixnum_value(SymbolValue(FREE_TLS_INDEX,0)) -
                (sizeof (struct thread))/(sizeof (lispobj));
            scavenge((lispobj *) (th+1),len);
#endif
        }
    }

    /* The original CMU CL code had scavenge-read-only-space code
     * controlled by the Lisp-level variable
     * *SCAVENGE-READ-ONLY-SPACE*. It was disabled by default, and it
     * wasn't documented under what circumstances it was useful or
     * safe to turn it on, so it's been turned off in SBCL. If you
     * want/need this functionality, and can test and document it,
     * please submit a patch. */
#if 0
    if (SymbolValue(SCAVENGE_READ_ONLY_SPACE) != NIL) {
        unsigned long read_only_space_size =
            (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER) -
            (lispobj*)READ_ONLY_SPACE_START;
        FSHOW((stderr,
               "/scavenge read only space: %d bytes\n",
               read_only_space_size * sizeof(lispobj)));
        scavenge( (lispobj *) READ_ONLY_SPACE_START, read_only_space_size);
    }
#endif

    /* Scavenge static space. */
    static_space_size =
        (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0) -
        (lispobj *)STATIC_SPACE_START;
    if (gencgc_verbose > 1) {
        FSHOW((stderr,
               "/scavenge static space: %d bytes\n",
               static_space_size * sizeof(lispobj)));
    }
    scavenge( (lispobj *) STATIC_SPACE_START, static_space_size);

    /* All generations but the generation being GCed need to be
     * scavenged. The new_space generation needs special handling as
     * objects may be moved in - it is handled separately below. */
    for (i = 0; i < NUM_GENERATIONS; i++) {
        if ((i != generation) && (i != new_space)) {
            scavenge_generation(i);
        }
    }

    /* Finally scavenge the new_space generation. Keep going until no
     * more objects are moved into the new generation */
    scavenge_newspace_generation(new_space);

    /* FIXME: I tried reenabling this check when debugging unrelated
     * GC weirdness ca. sbcl-0.6.12.45, and it failed immediately.
     * Since the current GC code seems to work well, I'm guessing that
     * this debugging code is just stale, but I haven't tried to
     * figure it out. It should be figured out and then either made to
     * work or just deleted. */
#define RESCAN_CHECK 0
#if RESCAN_CHECK
    /* As a check re-scavenge the newspace once; no new objects should
     * be found. */
    {
        long old_bytes_allocated = bytes_allocated;
        long bytes_allocated;

        /* Start with a full scavenge. */
        scavenge_newspace_generation_one_scan(new_space);

        /* Flush the current regions, updating the tables. */
        gc_alloc_update_all_page_tables();

        bytes_allocated = bytes_allocated - old_bytes_allocated;

        if (bytes_allocated != 0) {
            lose("Rescan of new_space allocated %d more bytes.",
                 bytes_allocated);
        }
    }
#endif

    scan_weak_pointers();

    /* Flush the current regions, updating the tables. */
    gc_alloc_update_all_page_tables();

    /* Free the pages in oldspace, but not those marked dont_move. */
    bytes_freed = free_oldspace();

    /* If the GC is not raising the age then lower the generation back
     * to its normal generation number */
    if (!raise) {
        for (i = 0; i < last_free_page; i++)
            if ((page_table[i].bytes_used != 0)
                && (page_table[i].gen == NUM_GENERATIONS))
                page_table[i].gen = generation;
        gc_assert(generations[generation].bytes_allocated == 0);
        generations[generation].bytes_allocated =
            generations[NUM_GENERATIONS].bytes_allocated;
        generations[NUM_GENERATIONS].bytes_allocated = 0;
    }

    /* Reset the alloc_start_page for generation. */
    generations[generation].alloc_start_page = 0;
    generations[generation].alloc_unboxed_start_page = 0;
    generations[generation].alloc_large_start_page = 0;
    generations[generation].alloc_large_unboxed_start_page = 0;

    if (generation >= verify_gens) {
        if (gencgc_verbose)
            SHOW("verifying");
        verify_gc();
        verify_dynamic_space();
    }

    /* Set the new gc trigger for the GCed generation. */
    generations[generation].gc_trigger =
        generations[generation].bytes_allocated
        + generations[generation].bytes_consed_between_gc;

    if (raise)
        generations[generation].num_gc = 0;
    else
        ++generations[generation].num_gc;
}

/* Update last_free_page, then SymbolValue(ALLOCATION_POINTER). */
long
update_x86_dynamic_space_free_pointer(void)
{
    long last_page = -1;
    long i;

    for (i = 0; i < last_free_page; i++)
        if ((page_table[i].allocated != FREE_PAGE_FLAG)
            && (page_table[i].bytes_used != 0))
            last_page = i;

    last_free_page = last_page+1;

    SetSymbolValue(ALLOCATION_POINTER,
                   (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),0);
    return 0; /* dummy value: return something ... */
}
/* GC all generations newer than last_gen, raising the objects in each
 * to the next older generation - we finish when all generations below
 * last_gen are empty. Then if last_gen is due for a GC, or if
 * last_gen==NUM_GENERATIONS (the scratch generation? eh?) we GC that
 * too. The valid range for last_gen is: 0,1,...,NUM_GENERATIONS.
 *
 * We stop collecting at gencgc_oldest_gen_to_gc, even if this is less than
 * last_gen (oh, and note that by default it is NUM_GENERATIONS-1). */
void
collect_garbage(unsigned last_gen)
{
    unsigned gen = 0;
    int raise;
    int gen_to_wp;
    long i;

    FSHOW((stderr, "/entering collect_garbage(%d)\n", last_gen));

    if (last_gen > NUM_GENERATIONS) {
        FSHOW((stderr,
               "/collect_garbage: last_gen = %d, doing a level 0 GC\n",
               last_gen));
        last_gen = 0;
    }
    /* Flush the alloc regions, updating the tables. */
    gc_alloc_update_all_page_tables();

    /* Verify the new objects created by Lisp code. */
    if (pre_verify_gen_0) {
        FSHOW((stderr, "pre-checking generation 0\n"));
        verify_generation(0);
    }

    if (gencgc_verbose > 1)
        print_generation_stats(0);
    do {
        /* Collect the generation. */

        if (gen >= gencgc_oldest_gen_to_gc) {
            /* Never raise the oldest generation. */
            raise = 0;
        } else {
            raise =
                (gen < last_gen)
                || (generations[gen].num_gc >= generations[gen].trigger_age);
        }

        if (gencgc_verbose > 1) {
            FSHOW((stderr,
                   "starting GC of generation %d with raise=%d alloc=%d trig=%d GCs=%d\n",
                   gen,
                   raise,
                   generations[gen].bytes_allocated,
                   generations[gen].gc_trigger,
                   generations[gen].num_gc));
        }

        /* If an older generation is being filled, then update its
         * memory age. */
        if (raise == 1) {
            generations[gen+1].cum_sum_bytes_allocated +=
                generations[gen+1].bytes_allocated;
        }

        garbage_collect_generation(gen, raise);

        /* Reset the memory age cum_sum. */
        generations[gen].cum_sum_bytes_allocated = 0;

        if (gencgc_verbose > 1) {
            FSHOW((stderr, "GC of generation %d finished:\n", gen));
            print_generation_stats(0);
        }

        gen++;
    } while ((gen <= gencgc_oldest_gen_to_gc)
             && ((gen < last_gen)
                 || ((gen <= gencgc_oldest_gen_to_gc)
                     && raise
                     && (generations[gen].bytes_allocated
                         > generations[gen].gc_trigger)
                     && (gen_av_mem_age(gen)
                         > generations[gen].min_av_mem_age))));
    /* Now if gen-1 was raised all generations before gen are empty.
     * If it wasn't raised then all generations before gen-1 are empty.
     *
     * Now objects within this gen's pages cannot point to younger
     * generations unless they are written to. This can be exploited
     * by write-protecting the pages of gen; then when younger
     * generations are GCed only the pages which have been written
     * need scanning. */
    if (raise)
        gen_to_wp = gen;
    else
        gen_to_wp = gen - 1;

    /* There's not much point in WPing pages in generation 0 as it is
     * never scavenged (except promoted pages). */
    if ((gen_to_wp > 0) && enable_page_protection) {
        /* Check that they are all empty. */
        for (i = 0; i < gen_to_wp; i++) {
            if (generations[i].bytes_allocated)
                lose("trying to write-protect gen. %d when gen. %d nonempty",
                     gen_to_wp, i);
        }
        write_protect_generation_pages(gen_to_wp);
    }
    /* Set gc_alloc() back to generation 0. The current regions should
     * be flushed after the above GCs. */
    gc_assert((boxed_region.free_pointer - boxed_region.start_addr) == 0);
    gc_alloc_generation = 0;

    update_x86_dynamic_space_free_pointer();
    auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
    if (gencgc_verbose)
        fprintf(stderr, "Next gc when %ld bytes have been consed\n",
                auto_gc_trigger);
    SHOW("returning from collect_garbage");
}
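
/* Illustrative sketch (not part of the original source; the helper name
 * is hypothetical): callers pick the depth of collection through
 * last_gen, so a nursery-only collection and a collection of all
 * normally-collected generations look like this. */
#if 0
static void
example_collect(void)
{
    collect_garbage(0);                        /* nursery only */
    collect_garbage(gencgc_oldest_gen_to_gc);  /* all collected gens */
}
#endif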
/* This is called by Lisp PURIFY when it is finished. All live objects
 * will have been moved to the RO and Static heaps. The dynamic space
 * will need a full re-initialization. We don't bother having Lisp
 * PURIFY flush the current gc_alloc() region, as the page_tables are
 * re-initialized, and every page is zeroed to be sure. */
void
gc_free_heap(void)
{
    long page;

    if (gencgc_verbose > 1)
        SHOW("entering gc_free_heap");

    for (page = 0; page < NUM_PAGES; page++) {
        /* Skip free pages which should already be zero filled. */
        if (page_table[page].allocated != FREE_PAGE_FLAG) {
            void *page_start, *addr;

            /* Mark the page free. The other slots are assumed invalid
             * when it is a FREE_PAGE_FLAG and bytes_used is 0 and it
             * should not be write-protected -- except that the
             * generation is used for the current region but it sets
             * that to 0. */
            page_table[page].allocated = FREE_PAGE_FLAG;
            page_table[page].bytes_used = 0;

            /* Zero the page. */
            page_start = (void *)page_address(page);

            /* First, remove any write-protection. */
            os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
            page_table[page].write_protected = 0;

            os_invalidate(page_start, PAGE_BYTES);
            addr = os_validate(page_start, PAGE_BYTES);
            if (addr == NULL || addr != page_start) {
                lose("gc_free_heap: page moved, 0x%08x ==> 0x%08x",
                     page_start, addr);
            }
        } else if (gencgc_zero_check_during_free_heap) {
            /* Double-check that the page is zero filled. */
            long *page_start, i;
            gc_assert(page_table[page].allocated == FREE_PAGE_FLAG);
            gc_assert(page_table[page].bytes_used == 0);
            page_start = (long *)page_address(page);
            for (i = 0; i < 1024; i++) {
                if (page_start[i] != 0) {
                    lose("free region not zero at %x", page_start + i);
                }
            }
        }
    }

    bytes_allocated = 0;
    /* Initialize the generations. */
    for (page = 0; page < NUM_GENERATIONS; page++) {
        generations[page].alloc_start_page = 0;
        generations[page].alloc_unboxed_start_page = 0;
        generations[page].alloc_large_start_page = 0;
        generations[page].alloc_large_unboxed_start_page = 0;
        generations[page].bytes_allocated = 0;
        generations[page].gc_trigger = 2000000;
        generations[page].num_gc = 0;
        generations[page].cum_sum_bytes_allocated = 0;
    }

    if (gencgc_verbose > 1)
        print_generation_stats(0);

    /* Initialize gc_alloc(). */
    gc_alloc_generation = 0;

    gc_set_region_empty(&boxed_region);
    gc_set_region_empty(&unboxed_region);

    last_free_page = 0;
    SetSymbolValue(ALLOCATION_POINTER, (lispobj)((char *)heap_base), 0);

    if (verify_after_free_heap) {
        /* Check whether purify has left any bad pointers. */
        if (gencgc_verbose)
            SHOW("checking after free_heap\n");
        verify_gc();
    }
}
void
gc_init(void)
{
    long i;

    gc_init_tables();
    scavtab[SIMPLE_VECTOR_WIDETAG] = scav_vector;
    scavtab[WEAK_POINTER_WIDETAG] = scav_weak_pointer;
    transother[SIMPLE_ARRAY_WIDETAG] = trans_boxed_large;

    heap_base = (void*)DYNAMIC_SPACE_START;
    /* Initialize each page structure. */
    for (i = 0; i < NUM_PAGES; i++) {
        /* Initialize all pages as free. */
        page_table[i].allocated = FREE_PAGE_FLAG;
        page_table[i].bytes_used = 0;

        /* Pages are not write-protected at startup. */
        page_table[i].write_protected = 0;
    }

    bytes_allocated = 0;
    /* Initialize the generations.
     *
     * FIXME: very similar to code in gc_free_heap(), should be shared */
    for (i = 0; i < NUM_GENERATIONS; i++) {
        generations[i].alloc_start_page = 0;
        generations[i].alloc_unboxed_start_page = 0;
        generations[i].alloc_large_start_page = 0;
        generations[i].alloc_large_unboxed_start_page = 0;
        generations[i].bytes_allocated = 0;
        generations[i].gc_trigger = 2000000;
        generations[i].num_gc = 0;
        generations[i].cum_sum_bytes_allocated = 0;
        /* the tune-able parameters */
        generations[i].bytes_consed_between_gc = 2000000;
        generations[i].trigger_age = 1;
        generations[i].min_av_mem_age = 0.75;
    }

    /* Initialize gc_alloc. */
    gc_alloc_generation = 0;
    gc_set_region_empty(&boxed_region);
    gc_set_region_empty(&unboxed_region);
}
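
/* Illustrative sketch (not part of the original source; the numbers and
 * the helper name are hypothetical): the "tune-able parameters" set
 * above can be adjusted per generation after gc_init(), e.g. to let
 * generation 1 survive two GCs before being raised and to tolerate a
 * younger average memory age. */
#if 0
static void
example_tune_generation_1(void)
{
    generations[1].bytes_consed_between_gc = 4000000;
    generations[1].trigger_age = 2;
    generations[1].min_av_mem_age = 0.5;
}
#endif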
/* Pick up the dynamic space from after a core load.
 *
 * The ALLOCATION_POINTER points to the end of the dynamic space. */
static void
gencgc_pickup_dynamic(void)
{
    long page = 0;
    long alloc_ptr = SymbolValue(ALLOCATION_POINTER, 0);
    lispobj *prev = (lispobj *)page_address(page);

    do {
        lispobj *first, *ptr = (lispobj *)page_address(page);
        page_table[page].allocated = BOXED_PAGE_FLAG;
        page_table[page].gen = 0;
        page_table[page].bytes_used = PAGE_BYTES;
        page_table[page].large_object = 0;

        first = gc_search_space(prev, (ptr+2)-prev, ptr);
        if (ptr == first)
            prev = ptr;
        page_table[page].first_object_offset =
            (void *)prev - page_address(page);
        page++;
    } while ((long)page_address(page) < alloc_ptr);

    generations[0].bytes_allocated = PAGE_BYTES*page;
    bytes_allocated = PAGE_BYTES*page;
}

void
gc_initialize_pointers(void)
{
    gencgc_pickup_dynamic();
}
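
/* Illustrative sketch (not part of the original source; the helper name
 * is hypothetical): the first_object_offset recorded by the loop above
 * is what lets later code recover, from a page index alone, the start
 * of the object whose storage covers the beginning of that page. */
#if 0
static lispobj *
example_object_covering_page_start(long page)
{
    return (lispobj *)((char *)page_address(page)
                       + page_table[page].first_object_offset);
}
#endif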
/* alloc(..) is the external interface for memory allocation. It
 * allocates to generation 0. It is not called from within the garbage
 * collector: only external callers need the heap-size check (the GC
 * trigger) and the interrupt disabling (interrupts are always disabled
 * during a GC).
 *
 * The vops that call alloc(..) assume that the returned space is zero-filled.
 * (E.g. the most significant word of a 2-word bignum in MOVE-FROM-UNSIGNED.)
 *
 * The check for a GC trigger is only performed when the current
 * region is full, so in most cases it's not needed. */
char *
alloc(long nbytes)
{
    struct thread *th = arch_os_get_current_thread();
    struct alloc_region *region =
#ifdef LISP_FEATURE_SB_THREAD
        th ? &(th->alloc_region) : &boxed_region;
#else
        &boxed_region;
#endif
    void *new_obj;
    void *new_free_pointer;

    gc_assert(nbytes > 0);
    /* Check for alignment allocation problems. */
    gc_assert((((unsigned)region->free_pointer & LOWTAG_MASK) == 0)
              && ((nbytes & LOWTAG_MASK) == 0));

#if 0
    /* there are a few places in the C code that allocate data in the
     * heap before Lisp starts. This is before interrupts are enabled,
     * so we don't need to check for pseudo-atomic */
#ifdef LISP_FEATURE_SB_THREAD
    if (!SymbolValue(PSEUDO_ATOMIC_ATOMIC, th)) {
        register u32 fs;
        fprintf(stderr, "fatal error in thread 0x%x, tid=%ld\n",
                th, th->os_thread);
        __asm__("movl %fs,%0" : "=r" (fs) : );
        fprintf(stderr, "fs is %x, th->tls_cookie=%x \n",
                debug_get_fs(), th->tls_cookie);
        lose("If you see this message before 2004.01.31, mail details to sbcl-devel\n");
    }
#else
    gc_assert(SymbolValue(PSEUDO_ATOMIC_ATOMIC, th));
#endif
#endif
    /* maybe we can do this quickly ... */
    new_free_pointer = region->free_pointer + nbytes;
    if (new_free_pointer <= region->end_addr) {
        new_obj = (void*)(region->free_pointer);
        region->free_pointer = new_free_pointer;
        return (new_obj);       /* yup */
    }
    /* we have to go the long way around, it seems. Check whether
     * we should GC in the near future. */
    if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) {
        struct thread *thread = arch_os_get_current_thread();
        /* Don't flood the system with interrupts if the need to gc is
         * already noted. This can happen for example when SUB-GC
         * allocates or after a gc triggered in a WITHOUT-GCING. */
        if (SymbolValue(NEED_TO_COLLECT_GARBAGE, thread) == NIL) {
            /* set things up so that GC happens when we finish the PA
             * section. We only do this if there wasn't a pending
             * handler already, in case it was a gc. If it wasn't a
             * GC, the next allocation will get us back to this point
             * anyway, so no harm done. */
            struct interrupt_data *data = th->interrupt_data;
            sigset_t new_mask, old_mask;
            sigemptyset(&new_mask);
            sigaddset_blockable(&new_mask);
            thread_sigmask(SIG_BLOCK, &new_mask, &old_mask);

            if (!data->pending_handler) {
                if (!maybe_defer_handler(interrupt_maybe_gc_int, data, 0, 0, 0))
                    lose("Not in atomic: %d.\n",
                         SymbolValue(PSEUDO_ATOMIC_ATOMIC, thread));
                /* Leave the signals blocked just as if it was
                 * deferred the normal way and set the
                 * pending_mask. */
                sigcopyset(&(data->pending_mask), &old_mask);
                SetSymbolValue(NEED_TO_COLLECT_GARBAGE, T, thread);
            } else {
                thread_sigmask(SIG_SETMASK, &old_mask, 0);
            }
        }
    }
    new_obj = gc_alloc_with_region(nbytes, 0, region, 0);
    return (new_obj);
}
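
/* Illustrative sketch (not part of the original source; the helper name
 * is hypothetical): a C-side caller of alloc() must supply a
 * lowtag-aligned byte count, and may rely on the returned space being
 * zero-filled, just as the allocation vops do. */
#if 0
static lispobj *
example_alloc_two_words(void)
{
    /* Two 32-bit words; 8 satisfies the LOWTAG_MASK alignment check
     * on x86. */
    return (lispobj *)alloc(8);
}
#endif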
/*
 * shared support for the OS-dependent signal handlers which
 * catch GENCGC-related write-protect violations
 */

void unhandled_sigmemoryfault(void);

/* Depending on which OS we're running under, different signals might
 * be raised for a violation of write protection in the heap. This
 * function factors out the common generational GC magic which needs
 * to be invoked in this case, and should be called from whatever signal
 * handler is appropriate for the OS we're running under.
 *
 * Return true if this signal is a normal generational GC thing that
 * we were able to handle, or false if it was abnormal and control
 * should fall through to the general SIGSEGV/SIGBUS/whatever logic. */
int
gencgc_handle_wp_violation(void* fault_addr)
{
    long page_index = find_page_index(fault_addr);

#ifdef QSHOW_SIGNALS
    FSHOW((stderr, "heap WP violation? fault_addr=%x, page_index=%d\n",
           fault_addr, page_index));
#endif

    /* Check whether the fault is within the dynamic space. */
    if (page_index == (-1)) {

        /* It can be helpful to be able to put a breakpoint on this
         * case to help diagnose low-level problems. */
        unhandled_sigmemoryfault();

        /* not within the dynamic space -- not our responsibility */
        return 0;

    } else {
        if (page_table[page_index].write_protected) {
            /* Unprotect the page. */
            os_protect(page_address(page_index), PAGE_BYTES, OS_VM_PROT_ALL);
            page_table[page_index].write_protected_cleared = 1;
            page_table[page_index].write_protected = 0;
        } else {
            /* The only acceptable reason for this signal on a heap
             * access is that GENCGC write-protected the page.
             * However, if two CPUs hit a wp page near-simultaneously,
             * we had better not have the second one lose here if it
             * does this test after the first one has already set wp=0. */
            if (page_table[page_index].write_protected_cleared != 1)
                lose("fault in heap page not marked as write-protected");
        }
        /* Don't worry, we can handle it. */
        return 1;
    }
}
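
/* Illustrative sketch (not part of the original source; the handler
 * name and the fall-through call are hypothetical): an OS-specific
 * SIGSEGV handler would try the write-barrier case first and only
 * then hand the fault to the general machinery. */
#if 0
static void
example_sigsegv_handler(int signal, siginfo_t *info, void *context)
{
    if (!gencgc_handle_wp_violation(info->si_addr)) {
        /* Not a GENCGC write-protect fault: let the generic
         * SIGSEGV/SIGBUS logic (whatever the OS port uses) handle it. */
        handle_general_memory_fault(signal, info, context);
    }
}
#endif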
/* This is to be called when we catch a SIGSEGV/SIGBUS, determine that
 * it's not just a case of the program hitting the write barrier, and
 * are about to let Lisp deal with it. It's basically just a
 * convenient place to set a gdb breakpoint. */
void
unhandled_sigmemoryfault()
{}
void
gc_alloc_update_all_page_tables(void)
{
    /* Flush the alloc regions, updating the tables. */
    struct thread *th;
    for_each_thread(th)
        gc_alloc_update_page_tables(0, &th->alloc_region);
    gc_alloc_update_page_tables(1, &unboxed_region);
    gc_alloc_update_page_tables(0, &boxed_region);
}
void
gc_set_region_empty(struct alloc_region *region)
{
    region->first_page = 0;
    region->last_page = -1;
    region->start_addr = page_address(0);
    region->free_pointer = page_address(0);
    region->end_addr = page_address(0);
}