/*
 * GENerational Conservative Garbage Collector for SBCL x86
 */

/*
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 */

/*
 * For a review of garbage collection techniques (e.g. generational
 * GC) and terminology (e.g. "scavenging") see Paul R. Wilson,
 * "Uniprocessor Garbage Collection Techniques". As of 20000618, this
 * had been accepted for _ACM Computing Surveys_ and was available
 * as a PostScript preprint through
 *   <http://www.cs.utexas.edu/users/oops/papers.html>
 * as
 *   <ftp://ftp.cs.utexas.edu/pub/garbage/bigsurv.ps>.
 */
#include <sys/ptrace.h>
#include <linux/user.h>

#include "interrupt.h"
#include "gc-internal.h"
#include "genesis/vector.h"
#include "genesis/weak-pointer.h"
#include "genesis/simple-fun.h"
#include "genesis/static-symbols.h"
#include "genesis/symbol.h"

/* assembly language stub that executes trap_PendingInterrupt */
void do_pending_interrupt(void);
/* the number of actual generations. (The number of 'struct
 * generation' objects is one more than this, because one object
 * serves as scratch when GC'ing.) */
#define NUM_GENERATIONS 6

/* Should we use page protection to help avoid the scavenging of pages
 * that don't have pointers to younger generations? */
boolean enable_page_protection = 1;
/* Should we unmap a page and re-mmap it to have it zero filled? */
#if defined(__FreeBSD__) || defined(__OpenBSD__)
/* comment from cmucl-2.4.8: This can waste a lot of swap on FreeBSD
 * so don't unmap there.
 *
 * The CMU CL comment didn't specify a version, but was probably an
 * old version of FreeBSD (pre-4.0), so this might no longer be true.
 * OTOH, if it is true, this behavior might exist on OpenBSD too, so
 * for now we don't unmap there either. -- WHN 2001-04-07 */
boolean gencgc_unmap_zero = 0;
#else
boolean gencgc_unmap_zero = 1;
#endif
/* the minimum size (in bytes) for a large object */
unsigned large_object_size = 4 * 4096;

/* the verbosity level. All non-error messages are disabled at level 0;
 * and only a few rare messages are printed at level 1. */
unsigned gencgc_verbose = (QSHOW ? 1 : 0);
/* FIXME: At some point enable the various error-checking things below
 * and see what they say. */

/* We hunt for pointers to old-space, when GCing generations >= verify_gens.
 * Set verify_gens to NUM_GENERATIONS to disable this kind of check. */
int verify_gens = NUM_GENERATIONS;

/* Should we do a pre-scan verify of generation 0 before it's GCed? */
boolean pre_verify_gen_0 = 0;

/* Should we check for bad pointers after gc_free_heap is called
 * from Lisp PURIFY? */
boolean verify_after_free_heap = 0;

/* Should we print a note when code objects are found in the dynamic space
 * during a heap verify? */
boolean verify_dynamic_code_check = 0;

/* Should we check code objects for fixup errors after they are transported? */
boolean check_code_fixups = 0;

/* Should we check that newly allocated regions are zero filled? */
boolean gencgc_zero_check = 0;

/* Should we check that the free space is zero filled? */
boolean gencgc_enable_verify_zero_fill = 0;

/* Should we check that free pages are zero filled during gc_free_heap
 * called after Lisp PURIFY? */
boolean gencgc_zero_check_during_free_heap = 0;
/*
 * GC structures and variables
 */

/* the total bytes allocated. These are seen by Lisp DYNAMIC-USAGE. */
unsigned long bytes_allocated = 0;
static unsigned long auto_gc_trigger = 0;
/* the source and destination generations. These are set before a GC starts
 * scavenging. */
static int from_space;
static int new_space;
/* FIXME: It would be nice to use this symbolic constant instead of
 * bare 4096 almost everywhere. We could also use an assertion that
 * it's equal to getpagesize(). */
#define PAGE_BYTES 4096
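
/* A minimal sketch of the assertion suggested in the FIXME above,
 * kept out of the build for now; getpagesize() comes from <unistd.h>,
 * and the helper name is made up for illustration: */
#if 0
#include <unistd.h>
static void
check_page_bytes(void)
{
    gc_assert(PAGE_BYTES == getpagesize());
}
#endif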
/* An array of page structures is statically allocated.
 * This helps quickly map between an address and its page structure.
 * NUM_PAGES is set from the size of the dynamic space. */
struct page page_table[NUM_PAGES];

/* To map addresses to page structures the address of the first page
 * is needed. */
static void *heap_base = NULL;
/* Calculate the start address for the given page number. */
inline void *
page_address(int page_num)
{
    return (heap_base + (page_num*4096));
}
/* Find the page index within the page_table for the given
 * address. Return -1 on failure. */
inline int
find_page_index(void *addr)
{
    int index = addr-heap_base;

    if (index >= 0) {
        index = ((unsigned int)index)/4096;
        if (index < NUM_PAGES)
            return (index);
    }

    return (-1);
}
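
/* Illustration only (not part of the collector): the two helpers
 * above are inverses for any address inside the dynamic space, so a
 * successful lookup brackets the address within its page: */
#if 0
static void
example_page_round_trip(void *addr)  /* hypothetical helper */
{
    int index = find_page_index(addr);
    if (index != -1)
        gc_assert((page_address(index) <= addr)
                  && (addr < page_address(index) + 4096));
}
#endif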
/* a structure to hold the state of a generation */
struct generation {

    /* the first page that gc_alloc() checks on its next call */
    int alloc_start_page;

    /* the first page that gc_alloc_unboxed() checks on its next call */
    int alloc_unboxed_start_page;

    /* the first page that gc_alloc_large (boxed) considers on its next
     * call. (Although it always allocates after the boxed_region.) */
    int alloc_large_start_page;

    /* the first page that gc_alloc_large (unboxed) considers on its
     * next call. (Although it always allocates after the
     * current_unboxed_region.) */
    int alloc_large_unboxed_start_page;

    /* the bytes allocated to this generation */
    int bytes_allocated;

    /* the number of bytes at which to trigger a GC */
    int gc_trigger;

    /* to calculate a new level for gc_trigger */
    int bytes_consed_between_gc;

    /* the number of GCs since the last raise */
    int num_gc;

    /* the average age after which a GC will raise objects to the
     * next generation */
    int trigger_age;

    /* the cumulative sum of the bytes allocated to this generation. It
     * is cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation. Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC. */
    int cum_sum_bytes_allocated;

    /* a minimum average memory age before a GC will occur helps
     * prevent a GC when a large number of new live objects have been
     * added, in which case a GC could be a waste of time */
    double min_av_mem_age;
};
/* an array of generation structures. There needs to be one more
 * generation structure than actual generations as the oldest
 * generation is temporarily raised then lowered. */
struct generation generations[NUM_GENERATIONS+1];
/* the oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... (NUM_GENERATIONS-1)
 *
 * The default of (NUM_GENERATIONS-1) enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided. */
unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS-1;
/* The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the Lisp code. */
static int last_free_page;
/*
 * miscellaneous heap functions
 */

/* Count the number of pages which are write-protected within the
 * given generation. */
static int
count_write_protect_generation_pages(int generation)
{
    int i;
    int count = 0;

    for (i = 0; i < last_free_page; i++)
        if ((page_table[i].allocated != FREE_PAGE)
            && (page_table[i].gen == generation)
            && (page_table[i].write_protected == 1))
            count++;
    return count;
}
/* Count the number of pages within the given generation. */
static int
count_generation_pages(int generation)
{
    int i;
    int count = 0;

    for (i = 0; i < last_free_page; i++)
        if ((page_table[i].allocated != 0)
            && (page_table[i].gen == generation))
            count++;
    return count;
}
/* Count the number of dont_move pages. */
static int
count_dont_move_pages(void)
{
    int i;
    int count = 0;

    for (i = 0; i < last_free_page; i++) {
        if ((page_table[i].allocated != 0) && (page_table[i].dont_move != 0)) {
            ++count;
        }
    }
    return count;
}
/* Work through the pages and add up the number of bytes used for the
 * given generation. */
static int
count_generation_bytes_allocated (int gen)
{
    int i;
    int result = 0;

    for (i = 0; i < last_free_page; i++) {
        if ((page_table[i].allocated != 0) && (page_table[i].gen == gen))
            result += page_table[i].bytes_used;
    }
    return result;
}
/* Return the average age of the memory in a generation. */
static double
gen_av_mem_age(int gen)
{
    if (generations[gen].bytes_allocated == 0)
        return 0.0;

    return
        ((double)generations[gen].cum_sum_bytes_allocated)
        / ((double)generations[gen].bytes_allocated);
}
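
/* For example, if a generation has accumulated 6M bytes over its
 * lifetime (cum_sum_bytes_allocated) and currently holds 2M bytes
 * (bytes_allocated), gen_av_mem_age() returns 3.0; GCs of this
 * generation are triggered only once this exceeds min_av_mem_age. */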
/* The verbose argument controls how much to print: 0 for normal
 * level of detail; 1 for debugging. */
static void
print_generation_stats(int verbose) /* FIXME: should take FILE argument */
{
    int i, gens;
    int fpu_state[27];

    /* This code uses the FP instructions which may be set up for Lisp
     * so they need to be saved and reset for C. */
    fpu_save(fpu_state);

    /* number of generations to print */
    if (verbose)
        gens = NUM_GENERATIONS+1;
    else
        gens = NUM_GENERATIONS;

    /* Print the heap stats. */
    fprintf(stderr,
            "  Generation Boxed Unboxed LB LUB Alloc Waste Trig WP GCs Mem-age\n");

    for (i = 0; i < gens; i++) {
        int j;
        int boxed_cnt = 0;
        int unboxed_cnt = 0;
        int large_boxed_cnt = 0;
        int large_unboxed_cnt = 0;

        for (j = 0; j < last_free_page; j++)
            if (page_table[j].gen == i) {

                /* Count the number of boxed pages within the given
                 * generation. */
                if (page_table[j].allocated & BOXED_PAGE) {
                    if (page_table[j].large_object)
                        large_boxed_cnt++;
                    else
                        boxed_cnt++;
                }

                /* Count the number of unboxed pages within the given
                 * generation. */
                if (page_table[j].allocated & UNBOXED_PAGE) {
                    if (page_table[j].large_object)
                        large_unboxed_cnt++;
                    else
                        unboxed_cnt++;
                }
            }

        gc_assert(generations[i].bytes_allocated
                  == count_generation_bytes_allocated(i));
        fprintf(stderr,
                " %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4f\n",
                i,
                boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
                generations[i].bytes_allocated,
                (count_generation_pages(i)*4096
                 - generations[i].bytes_allocated),
                generations[i].gc_trigger,
                count_write_protect_generation_pages(i),
                generations[i].num_gc,
                gen_av_mem_age(i));
    }
    fprintf(stderr," Total bytes allocated=%ld\n", bytes_allocated);

    fpu_restore(fpu_state);
}
/*
 * allocation routines
 */

/*
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties
 * e.g. boxed/unboxed, generation, ages; there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page. Many
 * features of memory use are noted on a page-wise basis, e.g. the
 * generation; so if a region starts within an existing allocated page
 * it must be consistent with this page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region. It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list which jumps all over the place to clean up the list. It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g. to detect pointers to the oldspace.
 * Thus it's important that the allocation regions have the correct
 * properties set when allocated, and not just set when closed. The
 * region allocation routines return regions with the specified
 * properties, and grab all the pages, setting their properties
 * appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer. The actual space used by the region is not reflected
 * in the page tables until it is closed. It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used, returning unused
 * space. Further, it may be noted in the new regions, which is
 * necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region; the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects and so
 * don't need scavenging. Further they can't contain pointers to
 * younger generations so WP is not needed. By allocating pages to
 * unboxed objects the whole page never needs scavenging or
 * write-protecting. */
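
/* A sketch (not the collector's own code) of the quick allocation the
 * comment above describes: one pointer bump plus one end-address
 * check, with everything else deferred; slow_path_alloc stands in for
 * the region machinery defined below. */
#if 0
static void *
inline_alloc_sketch(struct alloc_region *region, int nbytes)
{
    void *new_free_pointer = region->free_pointer + nbytes;
    if (new_free_pointer <= region->end_addr) {
        void *new_obj = region->free_pointer; /* fits: just bump */
        region->free_pointer = new_free_pointer;
        return new_obj;
    }
    return slow_path_alloc(region, nbytes); /* hypothetical fallback */
}
#endif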
/* We are only using two regions at present. Both are for the current
 * newspace generation. */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

/* The generation currently being allocated to. */
static int gc_alloc_generation;
/* Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page. So
 * may pick up from the previous region if there is enough space. This
 * keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables(), and will thus be in an empty state.
 *
 * To assist the scavenging functions write-protected pages are not
 * used. Free pages should not be write-protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found to within the current
 * region and the page generation must be set so that pointers to the
 * from space can be recognized. Therefore the generation of pages in
 * the region is set to gc_alloc_generation. To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty. */
static void
gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int bytes_found;
    int i;

    FSHOW((stderr,
           "/alloc_new_region for %d bytes from gen %d\n",
           nbytes, gc_alloc_generation));

    /* Check that the region is in a reset state. */
    gc_assert((alloc_region->first_page == 0)
              && (alloc_region->last_page == -1)
              && (alloc_region->free_pointer == alloc_region->end_addr));

    if (unboxed) {
        first_page =
            generations[gc_alloc_generation].alloc_unboxed_start_page;
    } else {
        first_page =
            generations[gc_alloc_generation].alloc_start_page;
    }
    last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed,alloc_region);
    bytes_found=(4096 - page_table[first_page].bytes_used)
        + 4096*(last_page-first_page);

    /* Set up the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
        + page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;

    if (gencgc_zero_check) {
        int *p;
        for (p = (int *)alloc_region->start_addr;
             p < (int *)alloc_region->end_addr; p++) {
            if (*p != 0) {
                /* KLUDGE: It would be nice to use %lx and explicit casts
                 * (long) in code like this, so that it is less likely to
                 * break randomly when running on a machine with different
                 * word sizes. -- WHN 19991129 */
                lose("The new region at %x is not zero.", p);
            }
        }
    }

    /* Set up the pages. */

    /* The first page may have already been in use. */
    if (page_table[first_page].bytes_used == 0) {
        if (unboxed)
            page_table[first_page].allocated = UNBOXED_PAGE;
        else
            page_table[first_page].allocated = BOXED_PAGE;
        page_table[first_page].gen = gc_alloc_generation;
        page_table[first_page].large_object = 0;
        page_table[first_page].first_object_offset = 0;
    }

    if (unboxed)
        gc_assert(page_table[first_page].allocated == UNBOXED_PAGE);
    else
        gc_assert(page_table[first_page].allocated == BOXED_PAGE);
    page_table[first_page].allocated |= OPEN_REGION_PAGE;

    gc_assert(page_table[first_page].gen == gc_alloc_generation);
    gc_assert(page_table[first_page].large_object == 0);

    for (i = first_page+1; i <= last_page; i++) {
        if (unboxed)
            page_table[i].allocated = UNBOXED_PAGE;
        else
            page_table[i].allocated = BOXED_PAGE;
        page_table[i].gen = gc_alloc_generation;
        page_table[i].large_object = 0;
        /* This may not be necessary for unboxed regions (think it was
         * broken before!) */
        page_table[i].first_object_offset =
            alloc_region->start_addr - page_address(i);
        page_table[i].allocated |= OPEN_REGION_PAGE;
    }

    /* Bump up last_free_page. */
    if (last_page+1 > last_free_page) {
        last_free_page = last_page+1;
        SetSymbolValue(ALLOCATION_POINTER,
                       (lispobj)(((char *)heap_base) + last_free_page*4096));
    }
}
/* If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then it is only recorded if the first page of the
 * current region is <= new_areas_ignore_page. This helps avoid
 * unnecessary recording when doing a full scavenge pass.
 *
 * The new_area structure holds the page, byte offset, and size of
 * new regions of objects. Each new area is placed in the array of
 * these structures pointed to by new_areas. new_areas_index holds the
 * offset into new_areas.
 *
 * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation. */
#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static int new_areas_ignore_page;
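/* one recorded area of new objects: its page, byte offset within the
 * page, and size in bytes, as stored by add_new_area() below */
struct new_area {
    int page;
    int offset;
    int size;
};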
static struct new_area (*new_areas)[];
static int new_areas_index;
int max_new_areas;
/* Add a new area to new_areas. */
static void
add_new_area(int first_page, int offset, int size)
{
    unsigned new_area_start,c;
    int i;

    /* Ignore if full. */
    if (new_areas_index >= NUM_NEW_AREAS)
        return;

    switch (record_new_objects) {
    case 0:
        return;
    case 1:
        if (first_page > new_areas_ignore_page)
            return;
        break;
    case 2:
        break;
    default:
        gc_abort();
    }

    new_area_start = 4096*first_page + offset;

    /* Search backwards for a prior area that this follows from. If
       found this will save adding a new area. */
    for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) {
        unsigned area_end =
            4096*((*new_areas)[i].page)
            + (*new_areas)[i].offset
            + (*new_areas)[i].size;
        /*FSHOW((stderr,
               "/add_new_area S1 %d %d %d %d\n",
               i, c, new_area_start, area_end));*/
        if (new_area_start == area_end) {
            /*FSHOW((stderr,
                   "/adding to [%d] %d %d %d with %d %d %d:\n",
                   i,
                   (*new_areas)[i].page,
                   (*new_areas)[i].offset,
                   (*new_areas)[i].size,
                   first_page,
                   offset,
                   size));*/
            (*new_areas)[i].size += size;
            return;
        }
    }

    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
    /*FSHOW((stderr,
           "/new_area %d page %d offset %d size %d\n",
           new_areas_index, first_page, offset, size));*/
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
        max_new_areas = new_areas_index;
}
/* Update the tables for the alloc_region. The region may be added to
 * the new_areas.
 *
 * When done the alloc_region is set up so that the next quick alloc
 * will fail safely and thus a new region will be allocated. Further
 * it is safe to try to re-update the page table of this reset
 * region. */
void
gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
{
    int more;
    int first_page;
    int next_page;
    int bytes_used;
    int orig_first_page_bytes_used;
    int region_size;
    int byte_cnt;

    FSHOW((stderr,
           "/gc_alloc_update_page_tables() to gen %d:\n",
           gc_alloc_generation));

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if ((first_page == 0) && (alloc_region->last_page == -1))
        return;

    next_page = first_page+1;

    /* Skip if no bytes were allocated. */
    if (alloc_region->free_pointer != alloc_region->start_addr) {
        orig_first_page_bytes_used = page_table[first_page].bytes_used;

        gc_assert(alloc_region->start_addr == (page_address(first_page) + page_table[first_page].bytes_used));

        /* All the pages used need to be updated */

        /* Update the first page. */

        /* If the page was free then set up the gen, and
         * first_object_offset. */
        if (page_table[first_page].bytes_used == 0)
            gc_assert(page_table[first_page].first_object_offset == 0);
        page_table[first_page].allocated &= ~(OPEN_REGION_PAGE);

        if (unboxed)
            gc_assert(page_table[first_page].allocated == UNBOXED_PAGE);
        else
            gc_assert(page_table[first_page].allocated == BOXED_PAGE);
        gc_assert(page_table[first_page].gen == gc_alloc_generation);
        gc_assert(page_table[first_page].large_object == 0);

        byte_cnt = 0;

        /* Calculate the number of bytes used in this page. This is not
         * always the number of new bytes, unless it was free. */
        more = 0;
        if ((bytes_used = (alloc_region->free_pointer - page_address(first_page)))>4096) {
            bytes_used = 4096;
            more = 1;
        }
        page_table[first_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        /* All the rest of the pages should be free. We need to set their
         * first_object_offset pointer to the start of the region, and set
         * the bytes_used. */
        while (more) {
            page_table[next_page].allocated &= ~(OPEN_REGION_PAGE);
            if (unboxed)
                gc_assert(page_table[next_page].allocated == UNBOXED_PAGE);
            else
                gc_assert(page_table[next_page].allocated == BOXED_PAGE);
            gc_assert(page_table[next_page].bytes_used == 0);
            gc_assert(page_table[next_page].gen == gc_alloc_generation);
            gc_assert(page_table[next_page].large_object == 0);

            gc_assert(page_table[next_page].first_object_offset ==
                      alloc_region->start_addr - page_address(next_page));

            /* Calculate the number of bytes used in this page. */
            more = 0;
            if ((bytes_used = (alloc_region->free_pointer
                               - page_address(next_page)))>4096) {
                bytes_used = 4096;
                more = 1;
            }
            page_table[next_page].bytes_used = bytes_used;
            byte_cnt += bytes_used;

            next_page++;
        }

        region_size = alloc_region->free_pointer - alloc_region->start_addr;
        bytes_allocated += region_size;
        generations[gc_alloc_generation].bytes_allocated += region_size;

        gc_assert((byte_cnt - orig_first_page_bytes_used) == region_size);

        /* Set the generations alloc restart page to the last page of
         * the region. */
        if (unboxed)
            generations[gc_alloc_generation].alloc_unboxed_start_page =
                next_page-1;
        else
            generations[gc_alloc_generation].alloc_start_page = next_page-1;

        /* Add the region to the new_areas if requested. */
        if (!unboxed)
            add_new_area(first_page,orig_first_page_bytes_used, region_size);

        FSHOW((stderr,
               "/gc_alloc_update_page_tables update %d bytes to gen %d\n",
               region_size,
               gc_alloc_generation));
    } else {
        /* There are no bytes allocated. Unallocate the first_page if
         * there are 0 bytes_used. */
        page_table[first_page].allocated &= ~(OPEN_REGION_PAGE);
        if (page_table[first_page].bytes_used == 0)
            page_table[first_page].allocated = FREE_PAGE;
    }

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
        gc_assert(page_table[next_page].bytes_used == 0);
        page_table[next_page].allocated = FREE_PAGE;
        next_page++;
    }

    /* Reset the alloc_region. */
    gc_set_region_empty(alloc_region);
}
static inline void *gc_quick_alloc(int nbytes);
/* Allocate a possibly large object. */
void *
gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int orig_first_page_bytes_used;
    int byte_cnt;
    int more;
    int bytes_used;
    int next_page;
    int large = (nbytes >= large_object_size);

    FSHOW((stderr, "/alloc_large %d\n", nbytes));

    FSHOW((stderr,
           "/gc_alloc_large() for %d bytes from gen %d\n",
           nbytes, gc_alloc_generation));

    /* If the object is small, and there is room in the current region
       then allocate it in the current region. */
    if (!large
        && ((alloc_region->end_addr-alloc_region->free_pointer) >= nbytes))
        return gc_quick_alloc(nbytes);

    /* To allow the allocation of small objects without the danger of
       using a page in the current boxed region, the search starts after
       the current boxed free region. XX could probably keep a page
       index ahead of the current region and bumped up here to save a
       lot of re-scanning. */
    if (unboxed) {
        first_page =
            generations[gc_alloc_generation].alloc_large_unboxed_start_page;
    } else {
        first_page = generations[gc_alloc_generation].alloc_large_start_page;
    }
    if (first_page <= alloc_region->last_page) {
        first_page = alloc_region->last_page+1;
    }

    last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed,0);

    gc_assert(first_page > alloc_region->last_page);
    if (unboxed)
        generations[gc_alloc_generation].alloc_large_unboxed_start_page =
            last_page;
    else
        generations[gc_alloc_generation].alloc_large_start_page = last_page;

    /* Set up the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /* If the first page was free then set up the gen, and
     * first_object_offset. */
    if (page_table[first_page].bytes_used == 0) {
        if (unboxed)
            page_table[first_page].allocated = UNBOXED_PAGE;
        else
            page_table[first_page].allocated = BOXED_PAGE;
        page_table[first_page].gen = gc_alloc_generation;
        page_table[first_page].first_object_offset = 0;
        page_table[first_page].large_object = large;
    }

    if (unboxed)
        gc_assert(page_table[first_page].allocated == UNBOXED_PAGE);
    else
        gc_assert(page_table[first_page].allocated == BOXED_PAGE);
    gc_assert(page_table[first_page].gen == gc_alloc_generation);
    gc_assert(page_table[first_page].large_object == large);

    byte_cnt = 0;

    /* Calc. the number of bytes used in this page. This is not
     * always the number of new bytes, unless it was free. */
    more = 0;
    if ((bytes_used = nbytes+orig_first_page_bytes_used) > 4096) {
        bytes_used = 4096;
        more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page+1;

    /* All the rest of the pages should be free. We need to set their
     * first_object_offset pointer to the start of the region, and
     * set the bytes_used. */
    while (more) {
        gc_assert(page_table[next_page].allocated == FREE_PAGE);
        gc_assert(page_table[next_page].bytes_used == 0);
        if (unboxed)
            page_table[next_page].allocated = UNBOXED_PAGE;
        else
            page_table[next_page].allocated = BOXED_PAGE;
        page_table[next_page].gen = gc_alloc_generation;
        page_table[next_page].large_object = large;

        page_table[next_page].first_object_offset =
            orig_first_page_bytes_used - 4096*(next_page-first_page);

        /* Calculate the number of bytes used in this page. */
        more = 0;
        if ((bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt) > 4096) {
            bytes_used = 4096;
            more = 1;
        }
        page_table[next_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        next_page++;
    }

    gc_assert((byte_cnt-orig_first_page_bytes_used) == nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (!unboxed)
        add_new_area(first_page,orig_first_page_bytes_used,nbytes);

    /* Bump up last_free_page */
    if (last_page+1 > last_free_page) {
        last_free_page = last_page+1;
        SetSymbolValue(ALLOCATION_POINTER,
                       (lispobj)(((char *)heap_base) + last_free_page*4096));
    }

    return((void *)(page_address(first_page)+orig_first_page_bytes_used));
}
int
gc_find_freeish_pages(int *restart_page_ptr, int nbytes, int unboxed,
                      struct alloc_region *alloc_region)
{
    /* if alloc_region is 0, we assume this is for a potentially large
       object */
    int first_page;
    int last_page;
    int region_size;
    int restart_page=*restart_page_ptr;
    int bytes_found;
    int num_pages;
    int large = !alloc_region && (nbytes >= large_object_size);

    /* Search for a contiguous free space of at least nbytes. If it's a
       large object then align it on a page boundary by searching for a
       free page. */

    /* To allow the allocation of small objects without the danger of
       using a page in the current boxed region, the search starts after
       the current boxed free region. XX could probably keep a page
       index ahead of the current region and bumped up here to save a
       lot of re-scanning. */

    do {
        first_page = restart_page;
        if (large)
            while ((first_page < NUM_PAGES)
                   && (page_table[first_page].allocated != FREE_PAGE))
                first_page++;
        else
            while (first_page < NUM_PAGES) {
                if(page_table[first_page].allocated == FREE_PAGE)
                    break;
                /* I don't know why we need the gen=0 test, but it
                 * breaks randomly if that's omitted -dan 2003.02.26 */
                if((page_table[first_page].allocated ==
                    (unboxed ? UNBOXED_PAGE : BOXED_PAGE)) &&
                   (page_table[first_page].large_object == 0) &&
                   (gc_alloc_generation == 0) &&
                   (page_table[first_page].gen == gc_alloc_generation) &&
                   (page_table[first_page].bytes_used < (4096-32)) &&
                   (page_table[first_page].write_protected == 0) &&
                   (page_table[first_page].dont_move == 0))
                    break;
                first_page++;
            }

        if (first_page >= NUM_PAGES) {
            fprintf(stderr,
                    "Argh! gc_find_free_space failed (first_page), nbytes=%d.\n",
                    nbytes);
            print_generation_stats(1);
            lose(NULL);
        }

        gc_assert(page_table[first_page].write_protected == 0);

        last_page = first_page;
        bytes_found = 4096 - page_table[first_page].bytes_used;
        num_pages = 1;
        while (((bytes_found < nbytes)
                || (alloc_region && (num_pages < 2)))
               && (last_page < (NUM_PAGES-1))
               && (page_table[last_page+1].allocated == FREE_PAGE)) {
            last_page++;
            num_pages++;
            bytes_found += 4096;
            gc_assert(page_table[last_page].write_protected == 0);
        }

        region_size = (4096 - page_table[first_page].bytes_used)
            + 4096*(last_page-first_page);

        gc_assert(bytes_found == region_size);
        restart_page = last_page + 1;
    } while ((restart_page < NUM_PAGES) && (bytes_found < nbytes));

    /* Check for a failure */
    if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes)) {
        fprintf(stderr,
                "Argh! gc_find_freeish_pages failed (restart_page), nbytes=%d.\n",
                nbytes);
        print_generation_stats(1);
        lose(NULL);
    }
    *restart_page_ptr=first_page;
    return last_page;
}
/* Allocate bytes. All the rest of the special-purpose allocation
 * functions will eventually call this (instead of just duplicating
 * parts of its code) */
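
/* The call funnel, for reference: gc_alloc(), gc_quick_alloc() and
 * the other wrappers below all pass through gc_general_alloc(), which
 * picks boxed_region or unboxed_region and calls
 * gc_alloc_with_region(); that in turn falls back to gc_alloc_large()
 * or gc_alloc_new_region() when the region's free pointer cannot
 * satisfy the request. */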
void *
gc_alloc_with_region(int nbytes,int unboxed_p, struct alloc_region *my_region,
                     int quick_p)
{
    void *new_free_pointer;

    /* FSHOW((stderr, "/gc_alloc %d\n", nbytes)); */

    /* Check whether there is room in the current alloc region. */
    new_free_pointer = my_region->free_pointer + nbytes;

    if (new_free_pointer <= my_region->end_addr) {
        /* If so then allocate from the current alloc region. */
        void *new_obj = my_region->free_pointer;
        my_region->free_pointer = new_free_pointer;

        /* Unless a `quick' alloc was requested, check whether the
           alloc region is almost empty. */
        if (!quick_p &&
            (my_region->end_addr - my_region->free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(unboxed_p, my_region);
            /* Set up a new region. */
            gc_alloc_new_region(32 /*bytes*/, unboxed_p, my_region);
        }
        return((void *)new_obj);
    }

    /* Else not enough free space in the current region. */

    /* If there is some room left in the current region, enough to be
     * worth saving, then allocate a large object. */
    /* FIXME: "32" should be a named parameter. */
    if ((my_region->end_addr-my_region->free_pointer) > 32)
        return gc_alloc_large(nbytes, unboxed_p, my_region);

    /* Else find a new region. */

    /* Finished with the current region. */
    gc_alloc_update_page_tables(unboxed_p, my_region);

    /* Set up a new region. */
    gc_alloc_new_region(nbytes, unboxed_p, my_region);

    /* Should now be enough room. */

    /* Check whether there is room in the current region. */
    new_free_pointer = my_region->free_pointer + nbytes;

    if (new_free_pointer <= my_region->end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = my_region->free_pointer;
        my_region->free_pointer = new_free_pointer;
        /* Check whether the current region is almost empty. */
        if ((my_region->end_addr - my_region->free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(unboxed_p, my_region);

            /* Set up a new region. */
            gc_alloc_new_region(32, unboxed_p, my_region);
        }

        return((void *)new_obj);
    }

    /* shouldn't happen */
    gc_assert(0);
    return((void *) NIL); /* dummy value: return something ... */
}
void *
gc_general_alloc(int nbytes,int unboxed_p,int quick_p)
{
    struct alloc_region *my_region =
        unboxed_p ? &unboxed_region : &boxed_region;
    return gc_alloc_with_region(nbytes,unboxed_p, my_region,quick_p);
}
static void *
gc_alloc(int nbytes,int unboxed_p)
{
    /* this is the only function that the external interface to
     * allocation presently knows how to call: Lisp code will never
     * allocate large objects, or to unboxed space, or `quick'ly.
     * Any of that stuff will only ever happen inside of GC */
    return gc_general_alloc(nbytes,unboxed_p,0);
}
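
/* Illustration only: allocating the two words of a cons cell (8 bytes
 * on x86) to boxed space would look like this hypothetical call: */
#if 0
static void *
example_cons_alloc(void)
{
    return gc_alloc(8, 0); /* 8 bytes, unboxed_p = 0 */
}
#endif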
/* Allocate space from the boxed_region. If there is not enough free
 * space then call gc_alloc to do the job. A pointer to the start of
 * the object is returned. */
static inline void *
gc_quick_alloc(int nbytes)
{
    return gc_general_alloc(nbytes,ALLOC_BOXED,ALLOC_QUICK);
}
/* Allocate space for the possibly large boxed object. If it is a
 * large object then do a large alloc else use gc_quick_alloc. Note
 * that gc_quick_alloc will eventually fall through to
 * gc_general_alloc which may allocate the object in a large way
 * anyway, but based on decisions about the free space in the current
 * region, not the object size itself */
static inline void *
gc_quick_alloc_large(int nbytes)
{
    if (nbytes >= large_object_size)
        return gc_alloc_large(nbytes, ALLOC_BOXED, &boxed_region);
    else
        return gc_general_alloc(nbytes,ALLOC_BOXED,ALLOC_QUICK);
}
static inline void *
gc_alloc_unboxed(int nbytes)
{
    return gc_general_alloc(nbytes,ALLOC_UNBOXED,0);
}

static inline void *
gc_quick_alloc_unboxed(int nbytes)
{
    return gc_general_alloc(nbytes,ALLOC_UNBOXED,ALLOC_QUICK);
}
/* Allocate space for the object. If it is a large object then do a
 * large alloc else allocate from the current region. If there is not
 * enough free space then call general gc_alloc_unboxed() to do the job.
 *
 * A pointer to the start of the object is returned. */
static inline void *
gc_quick_alloc_large_unboxed(int nbytes)
{
    if (nbytes >= large_object_size)
        return gc_alloc_large(nbytes,ALLOC_UNBOXED,&unboxed_region);
    else
        return gc_quick_alloc_unboxed(nbytes);
}
/*
 * scavenging/transporting routines derived from gc.c in CMU CL ca. 18b
 */

extern int (*scavtab[256])(lispobj *where, lispobj object);
extern lispobj (*transother[256])(lispobj object);
extern int (*sizetab[256])(lispobj *where);
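
/* A sketch (an assumption, not the actual dispatch loop) of how these
 * 256-entry tables are used: the widetag, the low eight bits of a
 * header word, indexes the handler for that object type. */
#if 0
static int
scavenge_one_sketch(lispobj *where)
{
    lispobj object = *where;
    return (scavtab[widetag_of(object)])(where, object);
}
#endif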
/* Copy a large boxed object. If the object is in a large object
 * region then it is simply promoted, else it is copied. If it's large
 * enough then it's copied to a large object region.
 *
 * Vectors may have shrunk. If the object is not copied the space
 * needs to be reclaimed, and the page_tables corrected. */
lispobj
copy_large_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;
    int first_page;

    gc_assert(is_lisp_pointer(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    /* Check whether it's a large object. */
    first_page = find_page_index((void *)object);
    gc_assert(first_page >= 0);

    if (page_table[first_page].large_object) {

        /* Promote the object. */

        int remaining_bytes;
        int next_page;
        int bytes_freed;
        int old_bytes_used;

        /* Note: Any page write-protection must be removed, else a
         * later scavenge_newspace may incorrectly not scavenge these
         * pages. This would not be necessary if they are added to the
         * new areas, but let's do it for them all (they'll probably
         * be written anyway?). */

        gc_assert(page_table[first_page].first_object_offset == 0);

        next_page = first_page;
        remaining_bytes = nwords*4;
        while (remaining_bytes > 4096) {
            gc_assert(page_table[next_page].gen == from_space);
            gc_assert(page_table[next_page].allocated == BOXED_PAGE);
            gc_assert(page_table[next_page].large_object);
            gc_assert(page_table[next_page].first_object_offset ==
                      -4096*(next_page-first_page));
            gc_assert(page_table[next_page].bytes_used == 4096);

            page_table[next_page].gen = new_space;

            /* Remove any write-protection. We should be able to rely
             * on the write-protect flag to avoid redundant calls. */
            if (page_table[next_page].write_protected) {
                os_protect(page_address(next_page), 4096, OS_VM_PROT_ALL);
                page_table[next_page].write_protected = 0;
            }
            remaining_bytes -= 4096;
            next_page++;
        }

        /* Now only one page remains, but the object may have shrunk
         * so there may be more unused pages which will be freed. */

        /* The object may have shrunk but shouldn't have grown. */
        gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

        page_table[next_page].gen = new_space;
        gc_assert(page_table[next_page].allocated == BOXED_PAGE);

        /* Adjust the bytes_used. */
        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].bytes_used = remaining_bytes;

        bytes_freed = old_bytes_used - remaining_bytes;

        /* Free any remaining pages; needs care. */
        next_page++;
        while ((old_bytes_used == 4096) &&
               (page_table[next_page].gen == from_space) &&
               (page_table[next_page].allocated == BOXED_PAGE) &&
               page_table[next_page].large_object &&
               (page_table[next_page].first_object_offset ==
                -(next_page - first_page)*4096)) {
            /* Checks out OK, free the page. Don't need to bother zeroing
             * pages as this should have been done before shrinking the
             * object. These pages shouldn't be write-protected as they
             * should be zero filled. */
            gc_assert(page_table[next_page].write_protected == 0);

            old_bytes_used = page_table[next_page].bytes_used;
            page_table[next_page].allocated = FREE_PAGE;
            page_table[next_page].bytes_used = 0;
            bytes_freed += old_bytes_used;
            next_page++;
        }

        generations[from_space].bytes_allocated -= 4*nwords + bytes_freed;
        generations[new_space].bytes_allocated += 4*nwords;
        bytes_allocated -= bytes_freed;

        /* Add the region to the new_areas if requested. */
        add_new_area(first_page,0,nwords*4);

        return(object);
    } else {
        /* Get tag of object. */
        tag = lowtag_of(object);

        /* Allocate space. */
        new = gc_quick_alloc_large(nwords*4);

        dest = new;
        source = (lispobj *) native_pointer(object);

        /* Copy the object. */
        while (nwords > 0) {
            dest[0] = source[0];
            dest[1] = source[1];
            dest += 2;
            source += 2;
            nwords -= 2;
        }

        /* Return Lisp pointer of new object. */
        return ((lispobj) new) | tag;
    }
}
/* to copy unboxed objects */
lispobj
copy_unboxed_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;

    gc_assert(is_lisp_pointer(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    /* Get tag of object. */
    tag = lowtag_of(object);

    /* Allocate space. */
    new = gc_quick_alloc_unboxed(nwords*4);

    dest = new;
    source = (lispobj *) native_pointer(object);

    /* Copy the object. */
    while (nwords > 0) {
        dest[0] = source[0];
        dest[1] = source[1];
        dest += 2;
        source += 2;
        nwords -= 2;
    }

    /* Return Lisp pointer of new object. */
    return ((lispobj) new) | tag;
}
/* to copy large unboxed objects
 *
 * If the object is in a large object region then it is simply
 * promoted, else it is copied. If it's large enough then it's copied
 * to a large object region.
 *
 * Bignums and vectors may have shrunk. If the object is not copied
 * the space needs to be reclaimed, and the page_tables corrected.
 *
 * KLUDGE: There's a lot of cut-and-paste duplication between this
 * function and copy_large_object(..). -- WHN 20000619 */
lispobj
copy_large_unboxed_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;
    int first_page;

    gc_assert(is_lisp_pointer(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    if ((nwords > 1024*1024) && gencgc_verbose)
        FSHOW((stderr, "/copy_large_unboxed_object: %d bytes\n", nwords*4));

    /* Check whether it's a large object. */
    first_page = find_page_index((void *)object);
    gc_assert(first_page >= 0);

    if (page_table[first_page].large_object) {
        /* Promote the object. Note: Unboxed objects may have been
         * allocated to a BOXED region so it may be necessary to
         * change the region to UNBOXED. */
        int remaining_bytes;
        int next_page;
        int bytes_freed;
        int old_bytes_used;

        gc_assert(page_table[first_page].first_object_offset == 0);

        next_page = first_page;
        remaining_bytes = nwords*4;
        while (remaining_bytes > 4096) {
            gc_assert(page_table[next_page].gen == from_space);
            gc_assert((page_table[next_page].allocated == UNBOXED_PAGE)
                      || (page_table[next_page].allocated == BOXED_PAGE));
            gc_assert(page_table[next_page].large_object);
            gc_assert(page_table[next_page].first_object_offset ==
                      -4096*(next_page-first_page));
            gc_assert(page_table[next_page].bytes_used == 4096);

            page_table[next_page].gen = new_space;
            page_table[next_page].allocated = UNBOXED_PAGE;
            remaining_bytes -= 4096;
            next_page++;
        }

        /* Now only one page remains, but the object may have shrunk so
         * there may be more unused pages which will be freed. */

        /* Object may have shrunk but shouldn't have grown - check. */
        gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

        page_table[next_page].gen = new_space;
        page_table[next_page].allocated = UNBOXED_PAGE;

        /* Adjust the bytes_used. */
        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].bytes_used = remaining_bytes;

        bytes_freed = old_bytes_used - remaining_bytes;

        /* Free any remaining pages; needs care. */
        next_page++;
        while ((old_bytes_used == 4096) &&
               (page_table[next_page].gen == from_space) &&
               ((page_table[next_page].allocated == UNBOXED_PAGE)
                || (page_table[next_page].allocated == BOXED_PAGE)) &&
               page_table[next_page].large_object &&
               (page_table[next_page].first_object_offset ==
                -(next_page - first_page)*4096)) {
            /* Checks out OK, free the page. Don't need to bother zeroing
             * pages as this should have been done before shrinking the
             * object. These pages shouldn't be write-protected, even if
             * boxed they should be zero filled. */
            gc_assert(page_table[next_page].write_protected == 0);

            old_bytes_used = page_table[next_page].bytes_used;
            page_table[next_page].allocated = FREE_PAGE;
            page_table[next_page].bytes_used = 0;
            bytes_freed += old_bytes_used;
            next_page++;
        }

        if ((bytes_freed > 0) && gencgc_verbose)
            FSHOW((stderr,
                   "/copy_large_unboxed bytes_freed=%d\n",
                   bytes_freed));

        generations[from_space].bytes_allocated -= 4*nwords + bytes_freed;
        generations[new_space].bytes_allocated += 4*nwords;
        bytes_allocated -= bytes_freed;

        return(object);
    } else {
        /* Get tag of object. */
        tag = lowtag_of(object);

        /* Allocate space. */
        new = gc_quick_alloc_large_unboxed(nwords*4);

        dest = new;
        source = (lispobj *) native_pointer(object);

        /* Copy the object. */
        while (nwords > 0) {
            dest[0] = source[0];
            dest[1] = source[1];
            dest += 2;
            source += 2;
            nwords -= 2;
        }

        /* Return Lisp pointer of new object. */
        return ((lispobj) new) | tag;
    }
}
/*
 * code and code-related objects
 */

static lispobj trans_fun_header(lispobj object);
static lispobj trans_boxed(lispobj object);

/* Scan an x86 compiled code object, looking for possible fixups that
 * have been missed after a move.
 *
 * Two types of fixups are needed:
 * 1. Absolute fixups to within the code object.
 * 2. Relative fixups to outside the code object.
 *
 * Currently only absolute fixups to the constant vector, or to the
 * code area are checked. */
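
/* The arithmetic, for reference (applied in gencgc_apply_code_fixups()
 * below): when a code object moves by `displacement' bytes, an
 * absolute fixup whose target lies inside the object must be adjusted
 * by +displacement (the target moved with the code), while a relative
 * fixup to a target outside the object must be adjusted by
 * -displacement (the instruction moved but its target did not). */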
static void
sniff_code_object(struct code *code, unsigned displacement)
{
    int nheader_words, ncode_words, nwords;
    void *p;
    void *constants_start_addr, *constants_end_addr;
    void *code_start_addr, *code_end_addr;
    int fixup_found = 0;

    if (!check_code_fixups)
        return;

    ncode_words = fixnum_value(code->code_size);
    nheader_words = HeaderValue(*(lispobj *)code);
    nwords = ncode_words + nheader_words;

    constants_start_addr = (void *)code + 5*4;
    constants_end_addr = (void *)code + nheader_words*4;
    code_start_addr = (void *)code + nheader_words*4;
    code_end_addr = (void *)code + nwords*4;

    /* Work through the unboxed code. */
    for (p = code_start_addr; p < code_end_addr; p++) {
        void *data = *(void **)p;
        unsigned d1 = *((unsigned char *)p - 1);
        unsigned d2 = *((unsigned char *)p - 2);
        unsigned d3 = *((unsigned char *)p - 3);
        unsigned d4 = *((unsigned char *)p - 4);
        unsigned d5 = *((unsigned char *)p - 5);
        unsigned d6 = *((unsigned char *)p - 6);

        /* Check for code references. */
        /* Check for a 32 bit word that looks like an absolute
           reference to within the code area of the code object. */
        if ((data >= (code_start_addr-displacement))
            && (data < (code_end_addr-displacement))) {
            /* function header */
            if ((d4 == 0x5e)
                && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) == (unsigned)code)) {
                /* Skip the function header */
                p += 6*4 - 4 - 1;
                continue;
            }
            /* the case of PUSH imm32 */
            if (d1 == 0x68) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/PUSH $0x%.8x\n", data));
            }
            /* the case of MOV [reg-8],imm32 */
            if ((d3 == 0xc7)
                && (d2==0x40 || d2==0x41 || d2==0x42 || d2==0x43
                    || d2==0x45 || d2==0x46 || d2==0x47)
                && (d1 == 0xf8)) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/MOV [reg-8],$0x%.8x\n", data));
            }
            /* the case of LEA reg,[disp32] */
            if ((d2 == 0x8d) && ((d1 & 0xc7) == 5)) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/LEA reg,[$0x%.8x]\n", data));
            }
        }

        /* Check for constant references. */
        /* Check for a 32 bit word that looks like an absolute
           reference to within the constant vector. Constant references
           will be aligned. */
        if ((data >= (constants_start_addr-displacement))
            && (data < (constants_end_addr-displacement))
            && (((unsigned)data & 0x3) == 0)) {
            /* the case of MOV eax,m32 */
            if (d1 == 0xa1) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/MOV eax,0x%.8x\n", data));
            }
            /* the case of MOV m32,EAX */
            if (d1 == 0xa3) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/MOV 0x%.8x,eax\n", data));
            }
            /* the case of CMP m32,imm32 */
            if ((d1 == 0x3d) && (d2 == 0x81)) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/CMP 0x%.8x,immed32\n", data));
            }
            /* Check for a mod=00, r/m=101 byte. */
            if ((d1 & 0xc7) == 5) {
                /* the case of CMP m32,reg */
                if (d2 == 0x39) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/CMP 0x%.8x,reg\n", data));
                }
                /* the case of CMP reg32,m32 */
                if (d2 == 0x3b) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/CMP reg32,0x%.8x\n", data));
                }
                /* the case of MOV m32,reg32 */
                if (d2 == 0x89) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/MOV 0x%.8x,reg32\n", data));
                }
                /* the case of MOV reg32,m32 */
                if (d2 == 0x8b) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/MOV reg32,0x%.8x\n", data));
                }
                /* the case of LEA reg32,m32 */
                if (d2 == 0x8d) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/LEA reg32,0x%.8x\n", data));
                }
            }
        }
    }

    /* If anything was found, print some information on the code
     * object. */
    if (fixup_found) {
        FSHOW((stderr,
               "/compiled code object at %x: header words = %d, code words = %d\n",
               code, nheader_words, ncode_words));
        FSHOW((stderr,
               "/const start = %x, end = %x\n",
               constants_start_addr, constants_end_addr));
        FSHOW((stderr,
               "/code start = %x, end = %x\n",
               code_start_addr, code_end_addr));
    }
}
void
gencgc_apply_code_fixups(struct code *old_code, struct code *new_code)
{
    int nheader_words, ncode_words, nwords;
    void *constants_start_addr, *constants_end_addr;
    void *code_start_addr, *code_end_addr;
    lispobj fixups = NIL;
    unsigned displacement = (unsigned)new_code - (unsigned)old_code;
    struct vector *fixups_vector;

    ncode_words = fixnum_value(new_code->code_size);
    nheader_words = HeaderValue(*(lispobj *)new_code);
    nwords = ncode_words + nheader_words;
    /* FSHOW((stderr,
           "/compiled code object at %x: header words = %d, code words = %d\n",
           new_code, nheader_words, ncode_words)); */
    constants_start_addr = (void *)new_code + 5*4;
    constants_end_addr = (void *)new_code + nheader_words*4;
    code_start_addr = (void *)new_code + nheader_words*4;
    code_end_addr = (void *)new_code + nwords*4;
    /* FSHOW((stderr,
           "/const start = %x, end = %x\n",
           constants_start_addr,constants_end_addr));
       FSHOW((stderr,
           "/code start = %x; end = %x\n",
           code_start_addr,code_end_addr)); */

    /* The first constant should be a pointer to the fixups for this
       code object. Check. */
    fixups = new_code->constants[0];

    /* It will be 0 or the unbound-marker if there are no fixups, and
     * will be an other pointer if it is valid. */
    if ((fixups == 0) || (fixups == UNBOUND_MARKER_WIDETAG) ||
        !is_lisp_pointer(fixups)) {
        /* Check for possible errors. */
        if (check_code_fixups)
            sniff_code_object(new_code, displacement);

        /*fprintf(stderr,"Fixups for code object not found!?\n");
          fprintf(stderr,"*** Compiled code object at %x: header_words=%d code_words=%d .\n",
          new_code, nheader_words, ncode_words);
          fprintf(stderr,"*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
          constants_start_addr,constants_end_addr,
          code_start_addr,code_end_addr);*/
        return;
    }

    fixups_vector = (struct vector *)native_pointer(fixups);

    /* Could be pointing to a forwarding pointer. */
    if (is_lisp_pointer(fixups) &&
        (find_page_index((void*)fixups_vector) != -1) &&
        (fixups_vector->header == 0x01)) {
        /* If so, then follow it. */
        /*SHOW("following pointer to a forwarding pointer");*/
        fixups_vector =
            (struct vector *)native_pointer((lispobj)fixups_vector->length);
    }

    /*SHOW("got fixups");*/
    if (widetag_of(fixups_vector->header) ==
        SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG) {
        /* Got the fixups for the code block. Now work through the
           vector, and apply a fixup at each address. */
        int length = fixnum_value(fixups_vector->length);
        int i;
        for (i = 0; i < length; i++) {
            unsigned offset = fixups_vector->data[i];
            /* Now check the current value of offset. */
            unsigned old_value =
                *(unsigned *)((unsigned)code_start_addr + offset);

            /* If it's within the old_code object then it must be an
             * absolute fixup (relative ones are not saved) */
            if ((old_value >= (unsigned)old_code)
                && (old_value < ((unsigned)old_code + nwords*4)))
                /* So add the displacement. */
                *(unsigned *)((unsigned)code_start_addr + offset) =
                    old_value + displacement;
            else
                /* It is outside the old code object so it must be a
                 * relative fixup (absolute fixups are not saved). So
                 * subtract the displacement. */
                *(unsigned *)((unsigned)code_start_addr + offset) =
                    old_value - displacement;
        }
    }

    /* Check for possible errors. */
    if (check_code_fixups) {
        sniff_code_object(new_code,displacement);
    }
}
static lispobj
trans_boxed_large(lispobj object)
{
    lispobj header;
    unsigned long length;

    gc_assert(is_lisp_pointer(object));

    header = *((lispobj *) native_pointer(object));
    length = HeaderValue(header) + 1;
    length = CEILING(length, 2);

    return copy_large_object(object, length);
}
static lispobj
trans_unboxed_large(lispobj object)
{
    lispobj header;
    unsigned long length;

    gc_assert(is_lisp_pointer(object));

    header = *((lispobj *) native_pointer(object));
    length = HeaderValue(header) + 1;
    length = CEILING(length, 2);

    return copy_large_unboxed_object(object, length);
}
/*
 * vector-like objects
 */

/* FIXME: What does this mean? */
int gencgc_hash = 1;
*where
, lispobj object
)
1814 unsigned int kv_length
;
1816 unsigned int length
= 0; /* (0 = dummy to stop GCC warning) */
1817 lispobj
*hash_table
;
1818 lispobj empty_symbol
;
1819 unsigned int *index_vector
= NULL
; /* (NULL = dummy to stop GCC warning) */
1820 unsigned int *next_vector
= NULL
; /* (NULL = dummy to stop GCC warning) */
1821 unsigned int *hash_vector
= NULL
; /* (NULL = dummy to stop GCC warning) */
1823 unsigned next_vector_length
= 0;
1825 /* FIXME: A comment explaining this would be nice. It looks as
1826 * though SB-VM:VECTOR-VALID-HASHING-SUBTYPE is set for EQ-based
 * hash tables in the Lisp HASH-TABLE code, and nowhere else. */
    if (HeaderValue(object) != subtype_VectorValidHashing)
        return 1;

    /* This is set for backward compatibility. FIXME: Do we need
     * this any more? */
    where[0] =
        (subtype_VectorMustRehash<<N_WIDETAG_BITS) | SIMPLE_VECTOR_WIDETAG;

    kv_length = fixnum_value(where[1]);
    kv_vector = where + 2; /* Skip the header and length. */
    /*FSHOW((stderr,"/kv_length = %d\n", kv_length));*/

    /* Scavenge element 0, which may be a hash-table structure. */
    scavenge(where+2, 1);
    if (!is_lisp_pointer(where[2])) {
        lose("no pointer at %x in hash table", where[2]);
    }
    hash_table = (lispobj *)native_pointer(where[2]);
    /*FSHOW((stderr,"/hash_table = %x\n", hash_table));*/
    if (widetag_of(hash_table[0]) != INSTANCE_HEADER_WIDETAG) {
        lose("hash table not instance (%x at %x)", hash_table[0], hash_table);
    }

    /* Scavenge element 1, which should be some internal symbol that
     * the hash table code reserves for marking empty slots. */
    scavenge(where+3, 1);
    if (!is_lisp_pointer(where[3])) {
        lose("not empty-hash-table-slot symbol pointer: %x", where[3]);
    }
    empty_symbol = where[3];
    /* fprintf(stderr,"* empty_symbol = %x\n", empty_symbol);*/
    if (widetag_of(*(lispobj *)native_pointer(empty_symbol)) !=
        SYMBOL_HEADER_WIDETAG) {
        lose("not a symbol where empty-hash-table-slot symbol expected: %x",
             *(lispobj *)native_pointer(empty_symbol));
    }

    /* Scavenge hash table, which will fix the positions of the other
     * needed objects. */
    scavenge(hash_table, 16);

    /* Cross-check the kv_vector. */
    if (where != (lispobj *)native_pointer(hash_table[9])) {
        lose("hash_table table!=this table %x", hash_table[9]);
    }

    weak_p_obj = hash_table[10];

    /* index vector */
    {
        lispobj index_vector_obj = hash_table[13];

        if (is_lisp_pointer(index_vector_obj) &&
            (widetag_of(*(lispobj *)native_pointer(index_vector_obj)) ==
             SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG)) {
            index_vector = ((unsigned int *)native_pointer(index_vector_obj)) + 2;
            /*FSHOW((stderr, "/index_vector = %x\n",index_vector));*/
            length = fixnum_value(((unsigned int *)native_pointer(index_vector_obj))[1]);
            /*FSHOW((stderr, "/length = %d\n", length));*/
        } else {
            lose("invalid index_vector %x", index_vector_obj);
        }
    }

    /* next vector */
    {
        lispobj next_vector_obj = hash_table[14];

        if (is_lisp_pointer(next_vector_obj) &&
            (widetag_of(*(lispobj *)native_pointer(next_vector_obj)) ==
             SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG)) {
            next_vector = ((unsigned int *)native_pointer(next_vector_obj)) + 2;
            /*FSHOW((stderr, "/next_vector = %x\n", next_vector));*/
            next_vector_length = fixnum_value(((unsigned int *)native_pointer(next_vector_obj))[1]);
            /*FSHOW((stderr, "/next_vector_length = %d\n", next_vector_length));*/
        } else {
            lose("invalid next_vector %x", next_vector_obj);
        }
    }

    /* maybe hash vector */
    {
        /* FIXME: This bare "15" offset should become a symbolic
         * expression of some sort. And all the other bare offsets
         * too. And the bare "16" in scavenge(hash_table, 16). And
         * probably other stuff too. Ugh.. */
        lispobj hash_vector_obj = hash_table[15];

        if (is_lisp_pointer(hash_vector_obj) &&
            (widetag_of(*(lispobj *)native_pointer(hash_vector_obj))
             == SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG)) {
            hash_vector = ((unsigned int *)native_pointer(hash_vector_obj)) + 2;
            /*FSHOW((stderr, "/hash_vector = %x\n", hash_vector));*/
            gc_assert(fixnum_value(((unsigned int *)native_pointer(hash_vector_obj))[1])
                      == next_vector_length);
        } else {
            hash_vector = NULL;
            /*FSHOW((stderr, "/no hash_vector: %x\n", hash_vector_obj));*/
        }
    }

    /* These lengths could be different as the index_vector can be a
     * different length from the others, a larger index_vector could help
     * reduce collisions. */
    gc_assert(next_vector_length*2 == kv_length);

    /* now all set up.. */

    /* Work through the KV vector. */
    {
        int i;
        for (i = 1; i < next_vector_length; i++) {
            lispobj old_key = kv_vector[2*i];
            unsigned int old_index = (old_key & 0x1fffffff)%length;

            /* Scavenge the key and value. */
            scavenge(&kv_vector[2*i],2);

            /* Check whether the key has moved and is EQ based. */
            {
                lispobj new_key = kv_vector[2*i];
                unsigned int new_index = (new_key & 0x1fffffff)%length;

                if ((old_index != new_index) &&
                    ((!hash_vector) || (hash_vector[i] == 0x80000000)) &&
                    ((new_key != empty_symbol) ||
                     (kv_vector[2*i] != empty_symbol))) {

                    /*FSHOW((stderr,
                           "* EQ key %d moved from %x to %x; index %d to %d\n",
                           i, old_key, new_key, old_index, new_index));*/

                    if (index_vector[old_index] != 0) {
                        /*FSHOW((stderr, "/P1 %d\n", index_vector[old_index]));*/

                        /* Unlink the key from the old_index chain. */
                        if (index_vector[old_index] == i) {
                            /*FSHOW((stderr, "/P2a %d\n", next_vector[i]));*/
                            index_vector[old_index] = next_vector[i];
                            /* Link it into the needing rehash chain. */
                            next_vector[i] = fixnum_value(hash_table[11]);
                            hash_table[11] = make_fixnum(i);
                        } else {
                            unsigned prior = index_vector[old_index];
                            unsigned next = next_vector[prior];

                            /*FSHOW((stderr, "/P3a %d %d\n", prior, next));*/

                            while (next != 0) {
                                /*FSHOW((stderr, "/P3b %d %d\n", prior, next));*/
                                if (next == i) {
                                    /* Unlink it. */
                                    next_vector[prior] = next_vector[next];
                                    /* Link it into the needing rehash
                                     * chain. */
                                    next_vector[next] =
                                        fixnum_value(hash_table[11]);
                                    hash_table[11] = make_fixnum(next);
                                    break;
                                }
                                prior = next;
                                next = next_vector[next];
                            }
                        }
                    }
                }
            }
        }
    }
    return (CEILING(kv_length + 2, 2));
}
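
/* Illustrative sketch, not compiled: how the index_vector/next_vector
 * pair scavenged above encodes the hash-table buckets. The helper name
 * is hypothetical and the mask-and-mod hash simply mirrors the
 * EQ-rehash check in scav_vector(); the real lookup lives in the
 * Lisp-side HASH-TABLE code. */
#if 0
static unsigned int
example_chain_lookup(unsigned int *index_vector, unsigned int *next_vector,
                     lispobj *kv_vector, lispobj key, unsigned int length)
{
    /* Hash the key the same way the EQ-rehash check above does. */
    unsigned int bucket = (key & 0x1fffffff) % length;
    /* index_vector[bucket] heads a chain of KV indices linked through
     * next_vector[]; index 0 terminates a chain. */
    unsigned int i = index_vector[bucket];
    while (i != 0) {
        if (kv_vector[2*i] == key)
            return i; /* found; the value is kv_vector[2*i+1] */
        i = next_vector[i];
    }
    return 0; /* not found */
}
#endif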
/* XX This is a hack adapted from cgc.c. These don't work too
 * efficiently with the gencgc as a list of the weak pointers is
 * maintained within the objects which causes writes to the pages. A
 * limited attempt is made to avoid unnecessary writes, but this needs
 * a re-think. */
#define WEAK_POINTER_NWORDS \
    CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)

static int
scav_weak_pointer(lispobj *where, lispobj object)
{
    struct weak_pointer *wp = weak_pointers;
    /* Push the weak pointer onto the list of weak pointers.
     * Do I have to watch for duplicates? Originally this was
     * part of trans_weak_pointer but that didn't work in the
     * case where the WP was in a promoted region.
     */

    /* Check whether it's already in the list. */
    while (wp != NULL) {
        if (wp == (struct weak_pointer*)where) {
            break;
        }
        wp = wp->next;
    }
    if (wp == NULL) {
        /* Add it to the start of the list. */
        wp = (struct weak_pointer*)where;
        if (wp->next != weak_pointers) {
            wp->next = weak_pointers;
        } else {
            /*SHOW("avoided write to weak pointer");*/
        }
        weak_pointers = wp;
    }

    /* Do not let GC scavenge the value slot of the weak pointer.
     * (That is why it is a weak pointer.) */

    return WEAK_POINTER_NWORDS;
}
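
/* Illustrative sketch, not compiled: what eventually happens to the
 * list built by scav_weak_pointer(). After all scavenging is done,
 * scan_weak_pointers() (defined elsewhere in this file) walks the list
 * and breaks any weak pointer whose target did not survive. This is a
 * simplified rendering of that idea, not the actual implementation. */
#if 0
static void
example_scan_weak_pointers(void)
{
    struct weak_pointer *wp;
    for (wp = weak_pointers; wp != NULL; wp = wp->next) {
        lispobj value = wp->value;
        if (is_lisp_pointer(value) && from_space_p(value)) {
            lispobj *first_pointer = (lispobj *)native_pointer(value);
            if (*first_pointer == 0x01) {
                /* The target was transported; follow the forwarding
                 * pointer (the same 0x01 marker verify_space checks). */
                wp->value = first_pointer[1];
            } else {
                /* The target is garbage; break the weak reference. */
                wp->value = NIL;
                wp->broken = T;
            }
        }
    }
}
#endif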
/* Scan an area looking for an object which encloses the given pointer.
 * Return the object start on success or NULL on failure. */
static lispobj *
search_space(lispobj *start, size_t words, lispobj *pointer)
{
    while (words > 0) {
        size_t count = 1;
        lispobj thing = *start;

        /* If thing is an immediate then this is a cons. */
        if (is_lisp_pointer(thing)
            || ((thing & 3) == 0) /* fixnum */
            || (widetag_of(thing) == BASE_CHAR_WIDETAG)
            || (widetag_of(thing) == UNBOUND_MARKER_WIDETAG))
            count = 2;
        else
            count = (sizetab[widetag_of(thing)])(start);

        /* Check whether the pointer is within this object. */
        if ((pointer >= start) && (pointer < (start+count))) {
            /* found it! */
            /*FSHOW((stderr,"/found %x in %x %x\n", pointer, start, thing));*/
            return (start);
        }

        /* Round up the count. */
        count = CEILING(count,2);

        start += count;
        words -= count;
    }
    return (NULL);
}
lispobj *
search_read_only_space(lispobj *pointer)
{
    lispobj *start = (lispobj*)READ_ONLY_SPACE_START;
    lispobj *end = (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
    if ((pointer < start) || (pointer >= end))
        return NULL;
    return (search_space(start, (pointer+2)-start, pointer));
}

lispobj *
search_static_space(lispobj *pointer)
{
    lispobj *start = (lispobj*)STATIC_SPACE_START;
    lispobj *end = (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER);
    if ((pointer < start) || (pointer >= end))
        return NULL;
    return (search_space(start, (pointer+2)-start, pointer));
}

/* a faster version for searching the dynamic space. This will work even
 * if the object is in a current allocation region. */
lispobj *
search_dynamic_space(lispobj *pointer)
{
    int page_index = find_page_index(pointer);
    lispobj *start;

    /* The address may be invalid, so do some checks. */
    if ((page_index == -1) || (page_table[page_index].allocated == FREE_PAGE))
        return NULL;
    start = (lispobj *)((void *)page_address(page_index)
                        + page_table[page_index].first_object_offset);
    return (search_space(start, (pointer+2)-start, pointer));
}
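
/* Illustrative usage sketch, not compiled: the three search_*_space()
 * routines share one calling pattern. The helper name is hypothetical. */
#if 0
static lispobj *
example_find_enclosing_object(lispobj *pointer)
{
    lispobj *start = search_dynamic_space(pointer);
    if (start == NULL)
        start = search_read_only_space(pointer);
    if (start == NULL)
        start = search_static_space(pointer);
    return start; /* NULL if pointer lies outside all three spaces */
}
#endif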
/* Is there any possibility that pointer is a valid Lisp object
 * reference, and/or something else (e.g. subroutine call return
 * address) which should prevent us from moving the referred-to thing? */
static int
possibly_valid_dynamic_space_pointer(lispobj *pointer)
{
    lispobj *start_addr;

    /* Find the object start address. */
    if ((start_addr = search_dynamic_space(pointer)) == NULL) {
        return 0;
    }

    /* We need to allow raw pointers into Code objects for return
     * addresses. This will also pick up pointers to functions in code
     * objects. */
    if (widetag_of(*start_addr) == CODE_HEADER_WIDETAG) {
        /* XXX could do some further checks here */
        return 1;
    }

    /* If it's not a return address then it needs to be a valid Lisp
     * pointer. */
    if (!is_lisp_pointer((lispobj)pointer)) {
        return 0;
    }

    /* Check that the object pointed to is consistent with the pointer
     * low tag.
     *
     * FIXME: It's not safe to rely on the result from this check
     * before an object is initialized. Thus, if we were interrupted
     * just as an object had been allocated but not initialized, the
     * GC relying on this result could bogusly reclaim the memory.
     * However, we can't really afford to do without this check. So
     * we should make it safe somehow.
     *   (1) Perhaps just review the code to make sure
     *       that WITHOUT-GCING or WITHOUT-INTERRUPTS or some such
     *       thing is wrapped around critical sections where allocated
     *       memory type bits haven't been set.
     *   (2) Perhaps find some other hack to protect against this, e.g.
     *       recording the result of the last call to allocate-lisp-memory,
     *       and returning true from this function when *pointer is
     *       a reference to that result. */
    switch (lowtag_of((lispobj)pointer)) {
    case FUN_POINTER_LOWTAG:
        /* Start_addr should be the enclosing code object, or a closure
         * header. */
        switch (widetag_of(*start_addr)) {
        case CODE_HEADER_WIDETAG:
            /* This case is probably caught above. */
            break;
        case CLOSURE_HEADER_WIDETAG:
        case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
            if ((unsigned)pointer !=
                ((unsigned)start_addr+FUN_POINTER_LOWTAG)) {
                if (gencgc_verbose)
                    FSHOW((stderr,
                           "/Wf2: %x %x %x\n",
                           pointer, start_addr, *start_addr));
                return 0;
            }
            break;
        default:
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wf3: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
        break;
    case LIST_POINTER_LOWTAG:
        if ((unsigned)pointer !=
            ((unsigned)start_addr+LIST_POINTER_LOWTAG)) {
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wl1: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
        /* Is it a plausible cons? */
        if ((is_lisp_pointer(start_addr[0])
             || ((start_addr[0] & 3) == 0) /* fixnum */
             || (widetag_of(start_addr[0]) == BASE_CHAR_WIDETAG)
             || (widetag_of(start_addr[0]) == UNBOUND_MARKER_WIDETAG))
            && (is_lisp_pointer(start_addr[1])
                || ((start_addr[1] & 3) == 0) /* fixnum */
                || (widetag_of(start_addr[1]) == BASE_CHAR_WIDETAG)
                || (widetag_of(start_addr[1]) == UNBOUND_MARKER_WIDETAG)))
            break;
        else {
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wl2: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
    case INSTANCE_POINTER_LOWTAG:
        if ((unsigned)pointer !=
            ((unsigned)start_addr+INSTANCE_POINTER_LOWTAG)) {
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wi1: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
        if (widetag_of(start_addr[0]) != INSTANCE_HEADER_WIDETAG) {
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wi2: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
        break;
    case OTHER_POINTER_LOWTAG:
        if ((unsigned)pointer !=
            ((unsigned)start_addr+OTHER_POINTER_LOWTAG)) {
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wo1: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
        /* Is it plausible? Not a cons. XXX should check the headers. */
        if (is_lisp_pointer(start_addr[0]) || ((start_addr[0] & 3) == 0)) {
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wo2: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
        switch (widetag_of(start_addr[0])) {
        case UNBOUND_MARKER_WIDETAG:
        case BASE_CHAR_WIDETAG:
            if (gencgc_verbose)
                FSHOW((stderr,
                       "*Wo3: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;

            /* only pointed to by function pointers? */
        case CLOSURE_HEADER_WIDETAG:
        case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
            if (gencgc_verbose)
                FSHOW((stderr,
                       "*Wo4: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;

        case INSTANCE_HEADER_WIDETAG:
            if (gencgc_verbose)
                FSHOW((stderr,
                       "*Wo5: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;

            /* the valid other immediate pointer objects */
        case SIMPLE_VECTOR_WIDETAG:
        case RATIO_WIDETAG:
        case COMPLEX_WIDETAG:
#ifdef COMPLEX_SINGLE_FLOAT_WIDETAG
        case COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_DOUBLE_FLOAT_WIDETAG
        case COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_LONG_FLOAT_WIDETAG
        case COMPLEX_LONG_FLOAT_WIDETAG:
#endif
        case SIMPLE_ARRAY_WIDETAG:
        case COMPLEX_STRING_WIDETAG:
        case COMPLEX_BIT_VECTOR_WIDETAG:
        case COMPLEX_VECTOR_WIDETAG:
        case COMPLEX_ARRAY_WIDETAG:
        case VALUE_CELL_HEADER_WIDETAG:
        case SYMBOL_HEADER_WIDETAG:
        case FDEFN_WIDETAG:
        case CODE_HEADER_WIDETAG:
        case BIGNUM_WIDETAG:
        case SINGLE_FLOAT_WIDETAG:
        case DOUBLE_FLOAT_WIDETAG:
#ifdef LONG_FLOAT_WIDETAG
        case LONG_FLOAT_WIDETAG:
#endif
        case SIMPLE_STRING_WIDETAG:
        case SIMPLE_BIT_VECTOR_WIDETAG:
        case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
        case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
        case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
        case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
        case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
        case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
        case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG
        case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
        case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
        case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
        case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
#ifdef SIMPLE_ARRAY_LONG_FLOAT_WIDETAG
        case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
        case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
        case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
        case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
#endif
        case SAP_WIDETAG:
        case WEAK_POINTER_WIDETAG:
            break;

        default:
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wo6: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
        break;
    default:
        if (gencgc_verbose)
            FSHOW((stderr,
                   "*W?: %x %x %x\n",
                   pointer, start_addr, *start_addr));
        return 0;
    }

    /* looks good */
    return 1;
}
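
/* Illustrative sketch, not compiled: the consistency test applied in
 * each case above, pulled out for clarity. A tagged pointer is only
 * plausible if stripping its low tag lands exactly on the object start
 * that search_dynamic_space() found. The helper name is hypothetical. */
#if 0
static int
example_lowtag_consistent_p(lispobj *pointer, lispobj *start_addr, int lowtag)
{
    return ((unsigned)pointer == ((unsigned)start_addr + lowtag));
}
#endif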
/* Adjust large bignum and vector objects. This will adjust the
 * allocated region if the size has shrunk, and move unboxed objects
 * into unboxed pages. The pages are not promoted here, and the
 * promoted region is not added to the new_regions; this is really
 * only designed to be called from preserve_pointer(). Shouldn't fail
 * if this is missed, just may delay the moving of objects to unboxed
 * pages, and the freeing of pages. */
static void
maybe_adjust_large_object(lispobj *where)
{
    int first_page;
    int nwords;

    int remaining_bytes;
    int next_page;
    int bytes_freed;
    int old_bytes_used;

    int boxed;

    /* Check whether it's a vector or bignum object. */
    switch (widetag_of(where[0])) {
    case SIMPLE_VECTOR_WIDETAG:
        boxed = BOXED_PAGE;
        break;
    case BIGNUM_WIDETAG:
    case SIMPLE_STRING_WIDETAG:
    case SIMPLE_BIT_VECTOR_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
    case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
    case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
#ifdef SIMPLE_ARRAY_LONG_FLOAT_WIDETAG
    case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
    case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
    case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
    case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
#endif
        boxed = UNBOXED_PAGE;
        break;
    default:
        return;
    }

    /* Find its current size. */
    nwords = (sizetab[widetag_of(where[0])])(where);

    first_page = find_page_index((void *)where);
    gc_assert(first_page >= 0);

    /* Note: Any page write-protection must be removed, else a later
     * scavenge_newspace may incorrectly not scavenge these pages.
     * This would not be necessary if they are added to the new areas,
     * but lets do it for them all (they'll probably be written
     * anyway?). */

    gc_assert(page_table[first_page].first_object_offset == 0);

    next_page = first_page;
    remaining_bytes = nwords*4;
    while (remaining_bytes > 4096) {
        gc_assert(page_table[next_page].gen == from_space);
        gc_assert((page_table[next_page].allocated == BOXED_PAGE)
                  || (page_table[next_page].allocated == UNBOXED_PAGE));
        gc_assert(page_table[next_page].large_object);
        gc_assert(page_table[next_page].first_object_offset ==
                  -4096*(next_page-first_page));
        gc_assert(page_table[next_page].bytes_used == 4096);

        page_table[next_page].allocated = boxed;

        /* Shouldn't be write-protected at this stage. Essential that the
         * pages aren't. */
        gc_assert(!page_table[next_page].write_protected);
        remaining_bytes -= 4096;
        next_page++;
    }

    /* Now only one page remains, but the object may have shrunk so
     * there may be more unused pages which will be freed. */

    /* Object may have shrunk but shouldn't have grown - check. */
    gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

    page_table[next_page].allocated = boxed;
    gc_assert(page_table[next_page].allocated ==
              page_table[first_page].allocated);

    /* Adjust the bytes_used. */
    old_bytes_used = page_table[next_page].bytes_used;
    page_table[next_page].bytes_used = remaining_bytes;

    bytes_freed = old_bytes_used - remaining_bytes;

    /* Free any remaining pages; needs care. */
    next_page++;
    while ((old_bytes_used == 4096) &&
           (page_table[next_page].gen == from_space) &&
           ((page_table[next_page].allocated == UNBOXED_PAGE)
            || (page_table[next_page].allocated == BOXED_PAGE)) &&
           page_table[next_page].large_object &&
           (page_table[next_page].first_object_offset ==
            -(next_page - first_page)*4096)) {
        /* It checks out OK, free the page. We don't need to bother
         * zeroing pages as this should have been done before shrinking
         * the object. These pages shouldn't be write protected as they
         * should be zero filled. */
        gc_assert(page_table[next_page].write_protected == 0);

        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].allocated = FREE_PAGE;
        page_table[next_page].bytes_used = 0;
        bytes_freed += old_bytes_used;
        next_page++;
    }

    if ((bytes_freed > 0) && gencgc_verbose) {
        FSHOW((stderr,
               "/maybe_adjust_large_object() freed %d\n",
               bytes_freed));
    }

    generations[from_space].bytes_allocated -= bytes_freed;
    bytes_allocated -= bytes_freed;

    return;
}
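
/* Illustrative sketch, not compiled: the page-table invariant the
 * asserts above rely on. A large object starting on first_page spans
 * CEILING(nwords*4, 4096)/4096 pages; page k of it carries
 * first_object_offset == -4096*k, and every page but the last has
 * bytes_used == 4096. The helper name is hypothetical. */
#if 0
static int
example_large_object_pages(int nwords)
{
    int nbytes = nwords * 4; /* 4 bytes per word on x86 */
    return CEILING(nbytes, 4096) / 4096;
}
#endif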
/* Take a possible pointer to a Lisp object and mark its page in the
 * page_table so that it will not be relocated during a GC.
 *
 * This involves locating the page it points to, then backing up to
 * the first page that has its first object start at offset 0, and
 * then marking all pages dont_move from the first until a page that
 * ends by being full, or having free gen.
 *
 * This ensures that objects spanning pages are not broken.
 *
 * It is assumed that all the page static flags have been cleared at
 * the start of a GC.
 *
 * It is also assumed that the current gc_alloc() region has been
 * flushed and the tables updated. */
static void
preserve_pointer(void *addr)
{
    int addr_page_index = find_page_index(addr);
    int first_page;
    int i;
    unsigned region_allocation;

    /* quick check 1: Address is quite likely to have been invalid. */
    if ((addr_page_index == -1)
        || (page_table[addr_page_index].allocated == FREE_PAGE)
        || (page_table[addr_page_index].bytes_used == 0)
        || (page_table[addr_page_index].gen != from_space)
        /* Skip if already marked dont_move. */
        || (page_table[addr_page_index].dont_move != 0))
        return;

    gc_assert(!(page_table[addr_page_index].allocated & OPEN_REGION_PAGE));
    /* (Now that we know that addr_page_index is in range, it's
     * safe to index into page_table[] with it.) */
    region_allocation = page_table[addr_page_index].allocated;

    /* quick check 2: Check the offset within the page.
     *
     * FIXME: The mask should have a symbolic name, and ideally should
     * be derived from page size instead of hardwired to 0xfff.
     * (Also fix other uses of 0xfff, elsewhere.) */
    if (((unsigned)addr & 0xfff) > page_table[addr_page_index].bytes_used)
        return;

    /* Filter out anything which can't be a pointer to a Lisp object
     * (or, as a special case which also requires dont_move, a return
     * address referring to something in a CodeObject). This is
     * expensive but important, since it vastly reduces the
     * probability that random garbage will be bogusly interpreted as
     * a pointer which prevents a page from moving. */
    if (!(possibly_valid_dynamic_space_pointer(addr)))
        return;

    first_page = addr_page_index;

    /* Work backwards to find a page with a first_object_offset of 0.
     * The pages should be contiguous with all bytes used in the same
     * gen. Assumes the first_object_offset is negative or zero. */

    /* this is probably needlessly conservative. The first object in
     * the page may not even be the one we were passed a pointer to:
     * if this is the case, we will write-protect all the previous
     * object's pages too. */

    while (page_table[first_page].first_object_offset != 0) {
        first_page--;
        /* Do some checks. */
        gc_assert(page_table[first_page].bytes_used == 4096);
        gc_assert(page_table[first_page].gen == from_space);
        gc_assert(page_table[first_page].allocated == region_allocation);
    }

    /* Adjust any large objects before promotion as they won't be
     * copied after promotion. */
    if (page_table[first_page].large_object) {
        maybe_adjust_large_object(page_address(first_page));
        /* If a large object has shrunk then addr may now point to a
         * free area in which case it's ignored here. Note it gets
         * through the valid pointer test above because the tail looks
         * like fixnums. */
        if ((page_table[addr_page_index].allocated == FREE_PAGE)
            || (page_table[addr_page_index].bytes_used == 0)
            /* Check the offset within the page. */
            || (((unsigned)addr & 0xfff)
                > page_table[addr_page_index].bytes_used)) {
            FSHOW((stderr,
                   "weird? ignore ptr 0x%x to freed area of large object\n",
                   addr));
            return;
        }
        /* It may have moved to unboxed pages. */
        region_allocation = page_table[first_page].allocated;
    }

    /* Now work forward until the end of this contiguous area is found,
     * marking all pages as dont_move. */
    for (i = first_page; ;i++) {
        gc_assert(page_table[i].allocated == region_allocation);

        /* Mark the page static. */
        page_table[i].dont_move = 1;

        /* Move the page to the new_space. XX I'd rather not do this
         * but the GC logic is not quite able to cope with the static
         * pages remaining in the from space. This also requires the
         * generation bytes_allocated counters be updated. */
        page_table[i].gen = new_space;
        generations[new_space].bytes_allocated += page_table[i].bytes_used;
        generations[from_space].bytes_allocated -= page_table[i].bytes_used;

        /* It is essential that the pages are not write protected as
         * they may have pointers into the old-space which need
         * scavenging. They shouldn't be write protected at this
         * stage. */
        gc_assert(!page_table[i].write_protected);

        /* Check whether this is the last page in this contiguous block.. */
        if ((page_table[i].bytes_used < 4096)
            /* ..or it is 4096 and is the last in the block */
            || (page_table[i+1].allocated == FREE_PAGE)
            || (page_table[i+1].bytes_used == 0) /* next page free */
            || (page_table[i+1].gen != from_space) /* diff. gen */
            || (page_table[i+1].first_object_offset == 0))
            break;
    }

    /* Check that the page is now static. */
    gc_assert(page_table[addr_page_index].dont_move != 0);
}
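
/* Illustrative usage sketch, not compiled: during a collection every
 * word on the C stack is fed to preserve_pointer(); the real call site
 * is in garbage_collect_generation() below. */
#if 0
    {
        void **ptr;
        for (ptr = (void **)CONTROL_STACK_END - 1;
             ptr > (void **)&ptr; /* scan down to the current frame */
             ptr--)
            preserve_pointer(*ptr);
    }
#endif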
/* If the given page is not write-protected, then scan it for pointers
 * to younger generations or the top temp. generation, if no
 * suspicious pointers are found then the page is write-protected.
 *
 * Care is taken to check for pointers to the current gc_alloc()
 * region if it is a younger generation or the temp. generation. This
 * frees the caller from doing a gc_alloc_update_page_tables(). Actually
 * the gc_alloc_generation does not need to be checked as this is only
 * called from scavenge_generation() when the gc_alloc generation is
 * younger, so it just checks if there is a pointer to the current
 * region.
 *
 * We return 1 if the page was write-protected, else 0. */
static int
update_page_write_prot(int page)
{
    int gen = page_table[page].gen;
    int j;
    int wp_it = 1;
    void **page_addr = (void **)page_address(page);
    int num_words = page_table[page].bytes_used / 4;

    /* Shouldn't be a free page. */
    gc_assert(page_table[page].allocated != FREE_PAGE);
    gc_assert(page_table[page].bytes_used != 0);

    /* Skip if it's already write-protected or an unboxed page. */
    if (page_table[page].write_protected
        || (page_table[page].allocated & UNBOXED_PAGE))
        return (0);

    /* Scan the page for pointers to younger generations or the
     * top temp. generation. */

    for (j = 0; j < num_words; j++) {
        void *ptr = *(page_addr+j);
        int index = find_page_index(ptr);

        /* Check that it's in the dynamic space */
        if (index != -1)
            if (/* Does it point to a younger or the temp. generation? */
                ((page_table[index].allocated != FREE_PAGE)
                 && (page_table[index].bytes_used != 0)
                 && ((page_table[index].gen < gen)
                     || (page_table[index].gen == NUM_GENERATIONS)))

                /* Or does it point within a current gc_alloc() region? */
                || ((boxed_region.start_addr <= ptr)
                    && (ptr <= boxed_region.free_pointer))
                || ((unboxed_region.start_addr <= ptr)
                    && (ptr <= unboxed_region.free_pointer))) {
                wp_it = 0;
                break;
            }
    }

    if (wp_it == 1) {
        /* Write-protect the page. */
        /*FSHOW((stderr, "/write-protecting page %d gen %d\n", page, gen));*/

        os_protect((void *)page_addr,
                   4096,
                   OS_VM_PROT_READ|OS_VM_PROT_EXECUTE);

        /* Note the page as protected in the page tables. */
        page_table[page].write_protected = 1;
    }

    return (wp_it);
}
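
/* Illustrative sketch, not compiled: the predicate at the heart of the
 * scan above. A word is "suspicious" - and so blocks write-protection -
 * if it might point into a younger generation or the temp. generation.
 * The helper name is hypothetical. */
#if 0
static int
example_word_is_suspicious(void *ptr, int gen)
{
    int index = find_page_index(ptr);
    return (index != -1)
        && (page_table[index].allocated != FREE_PAGE)
        && (page_table[index].bytes_used != 0)
        && ((page_table[index].gen < gen)
            || (page_table[index].gen == NUM_GENERATIONS));
}
#endif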
/* Scavenge a generation.
 *
 * This will not resolve all pointers when generation is the new
 * space, as new objects may be added which are not checked here - use
 * scavenge_newspace_generation().
 *
 * Write-protected pages should not have any pointers to the
 * from_space so do not need scavenging; thus write-protected pages are
 * not always scavenged. There is some code to check that these pages
 * are not written; but to check fully the write-protected pages need
 * to be scavenged by disabling the code to skip them.
 *
 * Under the current scheme when a generation is GCed the younger
 * generations will be empty. So, when a generation is being GCed it
 * is only necessary to scavenge the older generations for pointers
 * not the younger. So a page that does not have pointers to younger
 * generations does not need to be scavenged.
 *
 * The write-protection can be used to note pages that don't have
 * pointers to younger pages. But pages can be written without having
 * pointers to younger generations. After the pages are scavenged here
 * they can be scanned for pointers to younger generations and if
 * there are none the page can be write-protected.
 *
 * One complication is when the newspace is the top temp. generation.
 *
 * Enabling SC_GEN_CK scavenges the write-protected pages and checks
 * that none were written, which they shouldn't be as they should have
 * no pointers to younger generations. This breaks down for weak
 * pointers as the objects contain a link to the next and are written
 * if a weak pointer is scavenged. Still it's a useful check. */
#define SC_GEN_CK 0
static void
scavenge_generation(int generation)
{
    int i;
    int num_wp = 0;

#if SC_GEN_CK
    /* Clear the write_protected_cleared flags on all pages. */
    for (i = 0; i < NUM_PAGES; i++)
        page_table[i].write_protected_cleared = 0;
#endif

    for (i = 0; i < last_free_page; i++) {
        if ((page_table[i].allocated & BOXED_PAGE)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)) {
            int last_page;

            /* This should be the start of a contiguous block. */
            gc_assert(page_table[i].first_object_offset == 0);

            /* We need to find the full extent of this contiguous
             * block in case objects span pages. */

            /* Now work forward until the end of this contiguous area
             * is found. A small area is preferred as there is a
             * better chance of its pages being write-protected. */
            for (last_page = i; ; last_page++)
                /* Check whether this is the last page in this contiguous
                 * block. */
                if ((page_table[last_page].bytes_used < 4096)
                    /* Or it is 4096 and is the last in the block */
                    || (!(page_table[last_page+1].allocated & BOXED_PAGE))
                    || (page_table[last_page+1].bytes_used == 0)
                    || (page_table[last_page+1].gen != generation)
                    || (page_table[last_page+1].first_object_offset == 0))
                    break;

            /* Do a limited check for write_protected pages. If all pages
             * are write_protected then there is no need to scavenge. */
            {
                int j, all_wp = 1;
                for (j = i; j <= last_page; j++)
                    if (page_table[j].write_protected == 0) {
                        all_wp = 0;
                        break;
                    }
#if !SC_GEN_CK
                if (all_wp == 0)
#endif
                {
                    scavenge(page_address(i), (page_table[last_page].bytes_used
                                               + (last_page-i)*4096)/4);

                    /* Now scan the pages and write protect those
                     * that don't have pointers to younger
                     * generations. */
                    if (enable_page_protection) {
                        for (j = i; j <= last_page; j++) {
                            num_wp += update_page_write_prot(j);
                        }
                    }
                }
            }
            i = last_page;
        }
    }

    if ((gencgc_verbose > 1) && (num_wp != 0)) {
        FSHOW((stderr,
               "/write protected %d pages within generation %d\n",
               num_wp, generation));
    }

#if SC_GEN_CK
    /* Check that none of the write_protected pages in this generation
     * have been written to. */
    for (i = 0; i < NUM_PAGES; i++) {
        if ((page_table[i].allocated != FREE_PAGE)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)
            && (page_table[i].write_protected_cleared != 0)) {
            FSHOW((stderr, "/scavenge_generation() %d\n", generation));
            FSHOW((stderr,
                   "/page bytes_used=%d first_object_offset=%d dont_move=%d\n",
                   page_table[i].bytes_used,
                   page_table[i].first_object_offset,
                   page_table[i].dont_move));
            lose("write to protected page %d in scavenge_generation()", i);
        }
    }
#endif
}
/* Scavenge a newspace generation. As it is scavenged new objects may
 * be allocated to it; these will also need to be scavenged. This
 * repeats until there are no more objects unscavenged in the
 * newspace generation.
 *
 * To help improve the efficiency, areas written are recorded by
 * gc_alloc() and only these scavenged. Sometimes a little more will be
 * scavenged, but this causes no harm. An easy check is done that the
 * scavenged bytes equal the number allocated in the previous
 * scavenge.
 *
 * Write-protected pages are not scanned except if they are marked
 * dont_move in which case they may have been promoted and still have
 * pointers to the from space.
 *
 * Write-protected pages could potentially be written by alloc however
 * to avoid having to handle re-scavenging of write-protected pages
 * gc_alloc() does not write to write-protected pages.
 *
 * New areas of objects allocated are recorded alternately in the two
 * new_areas arrays below. */
static struct new_area new_areas_1[NUM_NEW_AREAS];
static struct new_area new_areas_2[NUM_NEW_AREAS];

/* Do one full scan of the new space generation. This is not enough to
 * complete the job as new objects may be added to the generation in
 * the process which are not scavenged. */
static void
scavenge_newspace_generation_one_scan(int generation)
{
    int i;

    FSHOW((stderr,
           "/starting one full scan of newspace generation %d\n",
           generation));

    for (i = 0; i < last_free_page; i++) {
        /* note that this skips over open regions when it encounters them */
        if ((page_table[i].allocated == BOXED_PAGE)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)
            && ((page_table[i].write_protected == 0)
                /* (This may be redundant as write_protected is now
                 * cleared before promotion.) */
                || (page_table[i].dont_move == 1))) {
            int last_page;

            /* The scavenge will start at the first_object_offset of page i.
             *
             * We need to find the full extent of this contiguous
             * block in case objects span pages.
             *
             * Now work forward until the end of this contiguous area
             * is found. A small area is preferred as there is a
             * better chance of its pages being write-protected. */
            for (last_page = i; ;last_page++) {
                /* Check whether this is the last page in this
                 * contiguous block */
                if ((page_table[last_page].bytes_used < 4096)
                    /* Or it is 4096 and is the last in the block */
                    || (!(page_table[last_page+1].allocated & BOXED_PAGE))
                    || (page_table[last_page+1].bytes_used == 0)
                    || (page_table[last_page+1].gen != generation)
                    || (page_table[last_page+1].first_object_offset == 0))
                    break;
            }

            /* Do a limited check for write-protected pages. If all
             * pages are write-protected then no need to scavenge,
             * except if the pages are marked dont_move. */
            {
                int j, all_wp = 1;
                for (j = i; j <= last_page; j++)
                    if ((page_table[j].write_protected == 0)
                        || (page_table[j].dont_move != 0)) {
                        all_wp = 0;
                        break;
                    }

                if (!all_wp) {
                    int size;

                    /* Calculate the size. */
                    if (last_page == i)
                        size = (page_table[last_page].bytes_used
                                - page_table[i].first_object_offset)/4;
                    else
                        size = (page_table[last_page].bytes_used
                                + (last_page-i)*4096
                                - page_table[i].first_object_offset)/4;

                    new_areas_ignore_page = last_page;

                    scavenge(page_address(i) +
                             page_table[i].first_object_offset,
                             size);
                }
            }

            i = last_page;
        }
    }

    FSHOW((stderr,
           "/done with one full scan of newspace generation %d\n",
           generation));
}
/* Do a complete scavenge of the newspace generation. */
#define SC_NS_GEN_CK 0
static void
scavenge_newspace_generation(int generation)
{
    int i;

    /* the new_areas array currently being written to by gc_alloc() */
    struct new_area (*current_new_areas)[] = &new_areas_1;
    int current_new_areas_index;

    /* the new_areas created by the previous scavenge cycle */
    struct new_area (*previous_new_areas)[] = NULL;
    int previous_new_areas_index;

    /* Flush the current regions updating the tables. */
    gc_alloc_update_all_page_tables();

    /* Turn on the recording of new areas by gc_alloc(). */
    new_areas = current_new_areas;
    new_areas_index = 0;

    /* Don't need to record new areas that get scavenged anyway during
     * scavenge_newspace_generation_one_scan. */
    record_new_objects = 1;

    /* Start with a full scavenge. */
    scavenge_newspace_generation_one_scan(generation);

    /* Record all new areas now. */
    record_new_objects = 2;

    /* Flush the current regions updating the tables. */
    gc_alloc_update_all_page_tables();

    /* Grab new_areas_index. */
    current_new_areas_index = new_areas_index;

    /*FSHOW((stderr,
             "The first scan is finished; current_new_areas_index=%d.\n",
             current_new_areas_index));*/

    while (current_new_areas_index > 0) {
        /* Move the current to the previous new areas */
        previous_new_areas = current_new_areas;
        previous_new_areas_index = current_new_areas_index;

        /* Scavenge all the areas in previous new areas. Any new areas
         * allocated are saved in current_new_areas. */

        /* Allocate an array for current_new_areas; alternating between
         * new_areas_1 and 2 */
        if (previous_new_areas == &new_areas_1)
            current_new_areas = &new_areas_2;
        else
            current_new_areas = &new_areas_1;

        /* Set up for gc_alloc(). */
        new_areas = current_new_areas;
        new_areas_index = 0;

        /* Check whether previous_new_areas had overflowed. */
        if (previous_new_areas_index >= NUM_NEW_AREAS) {

            /* New areas of objects allocated have been lost so need to do a
             * full scan to be sure! If this becomes a problem try
             * increasing NUM_NEW_AREAS. */
            if (gencgc_verbose)
                SHOW("new_areas overflow, doing full scavenge");

            /* Don't need to record new areas that get scavenged anyway
             * during scavenge_newspace_generation_one_scan. */
            record_new_objects = 1;

            scavenge_newspace_generation_one_scan(generation);

            /* Record all new areas now. */
            record_new_objects = 2;

            /* Flush the current regions updating the tables. */
            gc_alloc_update_all_page_tables();
        } else {

            /* Work through previous_new_areas. */
            for (i = 0; i < previous_new_areas_index; i++) {
                /* FIXME: All these bare *4 and /4 should be something
                 * like BYTES_PER_WORD or WBYTES. */
                int page = (*previous_new_areas)[i].page;
                int offset = (*previous_new_areas)[i].offset;
                int size = (*previous_new_areas)[i].size / 4;
                gc_assert((*previous_new_areas)[i].size % 4 == 0);

                scavenge(page_address(page)+offset, size);
            }

            /* Flush the current regions updating the tables. */
            gc_alloc_update_all_page_tables();
        }

        current_new_areas_index = new_areas_index;

        /*FSHOW((stderr,
                 "The re-scan has finished; current_new_areas_index=%d.\n",
                 current_new_areas_index));*/
    }

    /* Turn off recording of areas allocated by gc_alloc(). */
    record_new_objects = 0;

#if SC_NS_GEN_CK
    /* Check that none of the write_protected pages in this generation
     * have been written to. */
    for (i = 0; i < NUM_PAGES; i++) {
        if ((page_table[i].allocated != FREE_PAGE)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)
            && (page_table[i].write_protected_cleared != 0)
            && (page_table[i].dont_move == 0)) {
            lose("write protected page %d written to in scavenge_newspace_generation\ngeneration=%d dont_move=%d",
                 i, generation, page_table[i].dont_move);
        }
    }
#endif
}
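
/* Design-note sketch, not compiled: the skeleton of the double
 * buffering above. While the areas recorded in one array are being
 * scavenged, gc_alloc() records fresh allocations into the other, so a
 * cycle never consumes areas produced during that same cycle. */
#if 0
    struct new_area (*cur)[] = &new_areas_1;
    struct new_area (*prev)[] = NULL;

    prev = cur;                                          /* consume these */
    cur = (prev == &new_areas_1) ? &new_areas_2 : &new_areas_1;
    new_areas = cur;                                     /* gc_alloc() fills these */
    /* ... scavenge everything recorded in *prev ... */
#endif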
/* Un-write-protect all the pages in from_space. This is done at the
 * start of a GC else there may be many page faults while scavenging
 * the newspace (I've seen this drive the system time to 99%). These
 * pages would need to be unprotected anyway before unmapping in
 * free_oldspace; not sure what effect this has on paging.. */
static void
unprotect_oldspace(void)
{
    int i;

    for (i = 0; i < last_free_page; i++) {
        if ((page_table[i].allocated != FREE_PAGE)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == from_space)) {
            void *page_start;

            page_start = (void *)page_address(i);

            /* Remove any write-protection. We should be able to rely
             * on the write-protect flag to avoid redundant calls. */
            if (page_table[i].write_protected) {
                os_protect(page_start, 4096, OS_VM_PROT_ALL);
                page_table[i].write_protected = 0;
            }
        }
    }
}
/* Work through all the pages and free any in from_space. This
 * assumes that all objects have been copied or promoted to an older
 * generation. Bytes_allocated and the generation bytes_allocated
 * counter are updated. The number of bytes freed is returned. */
extern void i586_bzero(void *addr, int nbytes);
static int
free_oldspace(void)
{
    int bytes_freed = 0;
    int first_page, last_page;

    first_page = 0;

    do {
        /* Find a first page for the next region of pages. */
        while ((first_page < last_free_page)
               && ((page_table[first_page].allocated == FREE_PAGE)
                   || (page_table[first_page].bytes_used == 0)
                   || (page_table[first_page].gen != from_space)))
            first_page++;

        if (first_page >= last_free_page)
            break;

        /* Find the last page of this region. */
        last_page = first_page;

        do {
            /* Free the page. */
            bytes_freed += page_table[last_page].bytes_used;
            generations[page_table[last_page].gen].bytes_allocated -=
                page_table[last_page].bytes_used;
            page_table[last_page].allocated = FREE_PAGE;
            page_table[last_page].bytes_used = 0;

            /* Remove any write-protection. We should be able to rely
             * on the write-protect flag to avoid redundant calls. */
            {
                void *page_start = (void *)page_address(last_page);

                if (page_table[last_page].write_protected) {
                    os_protect(page_start, 4096, OS_VM_PROT_ALL);
                    page_table[last_page].write_protected = 0;
                }
            }
            last_page++;
        }
        while ((last_page < last_free_page)
               && (page_table[last_page].allocated != FREE_PAGE)
               && (page_table[last_page].bytes_used != 0)
               && (page_table[last_page].gen == from_space));

        /* Zero pages from first_page to (last_page-1).
         *
         * FIXME: Why not use os_zero(..) function instead of
         * hand-coding this again? (Check other gencgc_unmap_zero
         * stuff too.) */
        if (gencgc_unmap_zero) {
            void *page_start, *addr;

            page_start = (void *)page_address(first_page);

            os_invalidate(page_start, 4096*(last_page-first_page));
            addr = os_validate(page_start, 4096*(last_page-first_page));
            if (addr == NULL || addr != page_start) {
                /* Is this an error condition? I couldn't really tell from
                 * the old CMU CL code, which fprintf'ed a message with
                 * an exclamation point at the end. But I've never seen the
                 * message, so it must at least be unusual..
                 *
                 * (The same condition is also tested for in gc_free_heap.)
                 *
                 * -- WHN 19991129 */
                lose("i586_bzero: page moved, 0x%08x ==> 0x%08x",
                     page_start,
                     addr);
            }
        } else {
            int *page_start;

            page_start = (int *)page_address(first_page);
            i586_bzero(page_start, 4096*(last_page-first_page));
        }

        first_page = last_page;

    } while (first_page < last_free_page);

    bytes_allocated -= bytes_freed;
    return bytes_freed;
}
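
/* Sketch of the simplification the FIXME above suggests, not compiled:
 * replacing the unmap/remap-or-bzero pair with a single call, assuming
 * an os_zero(addr, length) primitive is available in this runtime's OS
 * layer. */
#if 0
    os_zero((os_vm_address_t)page_address(first_page),
            (os_vm_size_t)(4096*(last_page-first_page)));
#endif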
/* Print some information about a pointer at the given address. */
static void
print_ptr(lispobj *addr)
{
    /* If addr is in the dynamic space then print the page information. */
    int pi1 = find_page_index((void*)addr);

    if (pi1 != -1)
        fprintf(stderr,
                "  %x: page %d  alloc %d  gen %d  bytes_used %d  offset %d  dont_move %d\n",
                (unsigned int) addr,
                pi1,
                page_table[pi1].allocated,
                page_table[pi1].gen,
                page_table[pi1].bytes_used,
                page_table[pi1].first_object_offset,
                page_table[pi1].dont_move);
    fprintf(stderr,
            "  %x %x %x %x (%x) %x %x %x %x\n",
            *(addr-4),
            *(addr-3),
            *(addr-2),
            *(addr-1),
            *(addr-0),
            *(addr+1),
            *(addr+2),
            *(addr+3),
            *(addr+4));
}
extern int undefined_tramp;

static void
verify_space(lispobj *start, size_t words)
{
    int is_in_dynamic_space = (find_page_index((void*)start) != -1);
    int is_in_readonly_space =
        (READ_ONLY_SPACE_START <= (unsigned)start &&
         (unsigned)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));

    while (words > 0) {
        size_t count = 1;
        lispobj thing = *(lispobj*)start;

        if (is_lisp_pointer(thing)) {
            int page_index = find_page_index((void*)thing);
            int to_readonly_space =
                (READ_ONLY_SPACE_START <= thing &&
                 thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
            int to_static_space =
                (STATIC_SPACE_START <= thing &&
                 thing < SymbolValue(STATIC_SPACE_FREE_POINTER));

            /* Does it point to the dynamic space? */
            if (page_index != -1) {
                /* If it's within the dynamic space it should point to a used
                 * page. XX Could check the offset too. */
                if ((page_table[page_index].allocated != FREE_PAGE)
                    && (page_table[page_index].bytes_used == 0))
                    lose ("Ptr %x @ %x sees free page.", thing, start);
                /* Check that it doesn't point to a forwarding pointer! */
                if (*((lispobj *)native_pointer(thing)) == 0x01) {
                    lose("Ptr %x @ %x sees forwarding ptr.", thing, start);
                }
                /* Check that it's not in the RO space as it would then be a
                 * pointer from the RO to the dynamic space. */
                if (is_in_readonly_space) {
                    lose("ptr to dynamic space %x from RO space %x",
                         thing, start);
                }
                /* Does it point to a plausible object? This check slows
                 * it down a lot (so it's commented out).
                 *
                 * "a lot" is serious: it ate 50 minutes cpu time on
                 * my duron 950 before I came back from lunch and
                 * killed it.
                 *
                 * FIXME: Add a variable to enable this
                 * dynamically. */
                /*
                if (!possibly_valid_dynamic_space_pointer((lispobj *)thing)) {
                    lose("ptr %x to invalid object %x", thing, start);
                }
                */
            } else {
                /* Verify that it points to another valid space. */
                if (!to_readonly_space && !to_static_space
                    && (thing != (unsigned)&undefined_tramp)) {
                    lose("Ptr %x @ %x sees junk.", thing, start);
                }
            }
        } else {
            if (thing & 0x3) { /* Skip fixnums. FIXME: There should be an
                                * is_fixnum for this. */

                switch(widetag_of(*start)) {

                    /* boxed objects */
                case SIMPLE_VECTOR_WIDETAG:
                case RATIO_WIDETAG:
                case COMPLEX_WIDETAG:
                case SIMPLE_ARRAY_WIDETAG:
                case COMPLEX_STRING_WIDETAG:
                case COMPLEX_BIT_VECTOR_WIDETAG:
                case COMPLEX_VECTOR_WIDETAG:
                case COMPLEX_ARRAY_WIDETAG:
                case CLOSURE_HEADER_WIDETAG:
                case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
                case VALUE_CELL_HEADER_WIDETAG:
                case SYMBOL_HEADER_WIDETAG:
                case BASE_CHAR_WIDETAG:
                case UNBOUND_MARKER_WIDETAG:
                case INSTANCE_HEADER_WIDETAG:
                    count = 1;
                    break;

                case CODE_HEADER_WIDETAG:
                    {
                        lispobj object = *start;
                        struct code *code;
                        int nheader_words, ncode_words, nwords;
                        lispobj fheaderl;
                        struct simple_fun *fheaderp;

                        code = (struct code *) start;

                        /* Check that it's not in the dynamic space.
                         * FIXME: Isn't it supposed to be OK for code
                         * objects to be in the dynamic space these days? */
                        if (is_in_dynamic_space
                            /* It's ok if it's byte compiled code. The trace
                             * table offset will be a fixnum if it's x86
                             * compiled code - check.
                             *
                             * FIXME: #^#@@! lack of abstraction here..
                             * This line can probably go away now that
                             * there's no byte compiler, but I've got
                             * too much to worry about right now to try
                             * to make sure. -- WHN 2001-10-06 */
                            && !(code->trace_table_offset & 0x3)
                            /* Only when enabled */
                            && verify_dynamic_code_check) {
                            FSHOW((stderr,
                                   "/code object at %x in the dynamic space\n",
                                   start));
                        }

                        ncode_words = fixnum_value(code->code_size);
                        nheader_words = HeaderValue(object);
                        nwords = ncode_words + nheader_words;
                        nwords = CEILING(nwords, 2);
                        /* Scavenge the boxed section of the code data block */
                        verify_space(start + 1, nheader_words - 1);

                        /* Scavenge the boxed section of each function
                         * object in the code data block. */
                        fheaderl = code->entry_points;
                        while (fheaderl != NIL) {
                            fheaderp =
                                (struct simple_fun *) native_pointer(fheaderl);
                            gc_assert(widetag_of(fheaderp->header) ==
                                      SIMPLE_FUN_HEADER_WIDETAG);
                            verify_space(&fheaderp->name, 1);
                            verify_space(&fheaderp->arglist, 1);
                            verify_space(&fheaderp->type, 1);
                            fheaderl = fheaderp->next;
                        }
                        count = nwords;
                        break;
                    }

                    /* unboxed objects */
                case BIGNUM_WIDETAG:
                case SINGLE_FLOAT_WIDETAG:
                case DOUBLE_FLOAT_WIDETAG:
#ifdef LONG_FLOAT_WIDETAG
                case LONG_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_SINGLE_FLOAT_WIDETAG
                case COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_DOUBLE_FLOAT_WIDETAG
                case COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_LONG_FLOAT_WIDETAG
                case COMPLEX_LONG_FLOAT_WIDETAG:
#endif
                case SIMPLE_STRING_WIDETAG:
                case SIMPLE_BIT_VECTOR_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
                case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
                case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
#ifdef SIMPLE_ARRAY_LONG_FLOAT_WIDETAG
                case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
                case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
                case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
                case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
#endif
                case WEAK_POINTER_WIDETAG:
                    count = (sizetab[widetag_of(*start)])(start);
                    break;

                default:
                    gc_abort();
                }
            }
        }
        start += count;
        words -= count;
    }
}
static void
verify_gc(void)
{
    /* FIXME: It would be nice to make names consistent so that
     * foo_size meant size *in* *bytes* instead of size in some
     * arbitrary units. (Yes, this caused a bug, how did you guess?:-)
     * Some counts of lispobjs are called foo_count; it might be good
     * to grep for all foo_size and rename the appropriate ones to
     * foo_count. */
    int read_only_space_size =
        (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER)
        - (lispobj*)READ_ONLY_SPACE_START;
    int static_space_size =
        (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER)
        - (lispobj*)STATIC_SPACE_START;
    int binding_stack_size =
        (lispobj*)SymbolValue(BINDING_STACK_POINTER)
        - (lispobj*)BINDING_STACK_START;

    verify_space((lispobj*)READ_ONLY_SPACE_START, read_only_space_size);
    verify_space((lispobj*)STATIC_SPACE_START, static_space_size);
    verify_space((lispobj*)BINDING_STACK_START, binding_stack_size);
}
static void
verify_generation(int generation)
{
    int i;

    for (i = 0; i < last_free_page; i++) {
        if ((page_table[i].allocated != FREE_PAGE)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)) {
            int last_page;
            int region_allocation = page_table[i].allocated;

            /* This should be the start of a contiguous block */
            gc_assert(page_table[i].first_object_offset == 0);

            /* Need to find the full extent of this contiguous block in case
               objects span pages. */

            /* Now work forward until the end of this contiguous area is
               found. */
            for (last_page = i; ;last_page++)
                /* Check whether this is the last page in this contiguous
                 * block. */
                if ((page_table[last_page].bytes_used < 4096)
                    /* Or it is 4096 and is the last in the block */
                    || (page_table[last_page+1].allocated != region_allocation)
                    || (page_table[last_page+1].bytes_used == 0)
                    || (page_table[last_page+1].gen != generation)
                    || (page_table[last_page+1].first_object_offset == 0))
                    break;

            verify_space(page_address(i), (page_table[last_page].bytes_used
                                           + (last_page-i)*4096)/4);
            i = last_page;
        }
    }
}
/* Check that all the free space is zero filled. */
static void
verify_zero_fill(void)
{
    int page;

    for (page = 0; page < last_free_page; page++) {
        if (page_table[page].allocated == FREE_PAGE) {
            /* The whole page should be zero filled. */
            int *start_addr = (int *)page_address(page);
            int size = 1024; /* 4096 bytes / 4 bytes per word */
            int i;
            for (i = 0; i < size; i++) {
                if (start_addr[i] != 0) {
                    lose("free page not zero at %x", start_addr + i);
                }
            }
        } else {
            int free_bytes = 4096 - page_table[page].bytes_used;
            if (free_bytes > 0) {
                int *start_addr = (int *)((unsigned)page_address(page)
                                          + page_table[page].bytes_used);
                int size = free_bytes / 4;
                int i;
                for (i = 0; i < size; i++) {
                    if (start_addr[i] != 0) {
                        lose("free region not zero at %x", start_addr + i);
                    }
                }
            }
        }
    }
}

/* External entry point for verify_zero_fill */
void
gencgc_verify_zero_fill(void)
{
    /* Flush the alloc regions updating the tables. */
    gc_alloc_update_all_page_tables();
    SHOW("verifying zero fill");
    verify_zero_fill();
}
static void
verify_dynamic_space(void)
{
    int i;

    for (i = 0; i < NUM_GENERATIONS; i++)
        verify_generation(i);

    if (gencgc_enable_verify_zero_fill)
        verify_zero_fill();
}
/* Write-protect all the dynamic boxed pages in the given generation. */
static void
write_protect_generation_pages(int generation)
{
    int i;

    gc_assert(generation < NUM_GENERATIONS);

    for (i = 0; i < last_free_page; i++)
        if ((page_table[i].allocated == BOXED_PAGE)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)) {
            void *page_start;

            page_start = (void *)page_address(i);

            os_protect(page_start,
                       4096,
                       OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);

            /* Note the page as protected in the page tables. */
            page_table[i].write_protected = 1;
        }

    if (gencgc_verbose > 1) {
        FSHOW((stderr,
               "/write protected %d of %d pages in generation %d\n",
               count_write_protect_generation_pages(generation),
               count_generation_pages(generation),
               generation));
    }
}
/* Garbage collect a generation. If raise is 0 then the remains of the
 * generation are not raised to the next generation. */
static void
garbage_collect_generation(int generation, int raise)
{
    unsigned long bytes_freed;
    int i;
    unsigned long static_space_size;

    gc_assert(generation <= (NUM_GENERATIONS-1));

    /* The oldest generation can't be raised. */
    gc_assert((generation != (NUM_GENERATIONS-1)) || (raise == 0));

    /* Initialize the weak pointer list. */
    weak_pointers = NULL;

    /* When a generation is not being raised it is transported to a
     * temporary generation (NUM_GENERATIONS), and lowered when
     * done. Set up this new generation. There should be no pages
     * allocated to it yet. */
    if (!raise)
        gc_assert(generations[NUM_GENERATIONS].bytes_allocated == 0);

    /* Set the global src and dest. generations */
    from_space = generation;
    if (raise)
        new_space = generation+1;
    else
        new_space = NUM_GENERATIONS;

    /* Change to a new space for allocation, resetting the alloc_start_page */
    gc_alloc_generation = new_space;
    generations[new_space].alloc_start_page = 0;
    generations[new_space].alloc_unboxed_start_page = 0;
    generations[new_space].alloc_large_start_page = 0;
    generations[new_space].alloc_large_unboxed_start_page = 0;

    /* Before any pointers are preserved, the dont_move flags on the
     * pages need to be cleared. */
    for (i = 0; i < last_free_page; i++)
        page_table[i].dont_move = 0;

    /* Un-write-protect the old-space pages. This is essential for the
     * promoted pages as they may contain pointers into the old-space
     * which need to be scavenged. It also helps avoid unnecessary page
     * faults as forwarding pointers are written into them. They need to
     * be un-protected anyway before unmapping later. */
    unprotect_oldspace();

    /* Scavenge the stack's conservative roots. */
    {
        void **ptr;
        for (ptr = (void **)CONTROL_STACK_END - 1;
             ptr > (void **)&raise;
             ptr--) {
            preserve_pointer(*ptr);
        }
    }

#if QSHOW
    if (gencgc_verbose > 1) {
        int num_dont_move_pages = count_dont_move_pages();
        fprintf(stderr,
                "/non-movable pages due to conservative pointers = %d (%d bytes)\n",
                num_dont_move_pages,
                /* FIXME: 4096 should be symbolic constant here and
                 * prob'ly elsewhere too. */
                num_dont_move_pages * 4096);
    }
#endif

    /* Scavenge all the rest of the roots. */

    /* Scavenge the Lisp functions of the interrupt handlers, taking
     * care to avoid SIG_DFL and SIG_IGN. */
    for (i = 0; i < NSIG; i++) {
        union interrupt_handler handler = interrupt_handlers[i];
        if (!ARE_SAME_HANDLER(handler.c, SIG_IGN) &&
            !ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
            scavenge((lispobj *)(interrupt_handlers + i), 1);
        }
    }

    /* Scavenge the binding stack. */
    scavenge((lispobj *) BINDING_STACK_START,
             (lispobj *)SymbolValue(BINDING_STACK_POINTER) -
             (lispobj *)BINDING_STACK_START);

    /* The original CMU CL code had scavenge-read-only-space code
     * controlled by the Lisp-level variable
     * *SCAVENGE-READ-ONLY-SPACE*. It was disabled by default, and it
     * wasn't documented under what circumstances it was useful or
     * safe to turn it on, so it's been turned off in SBCL. If you
     * want/need this functionality, and can test and document it,
     * please submit a patch. */
#if 0
    if (SymbolValue(SCAVENGE_READ_ONLY_SPACE) != NIL) {
        unsigned long read_only_space_size =
            (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER) -
            (lispobj*)READ_ONLY_SPACE_START;
        FSHOW((stderr,
               "/scavenge read only space: %d bytes\n",
               read_only_space_size * sizeof(lispobj)));
        scavenge( (lispobj *) READ_ONLY_SPACE_START, read_only_space_size);
    }
#endif

    /* Scavenge static space. */
    static_space_size =
        (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER) -
        (lispobj *)STATIC_SPACE_START;
    if (gencgc_verbose > 1) {
        FSHOW((stderr,
               "/scavenge static space: %d bytes\n",
               static_space_size * sizeof(lispobj)));
    }
    scavenge( (lispobj *) STATIC_SPACE_START, static_space_size);

    /* All generations but the generation being GCed need to be
     * scavenged. The new_space generation needs special handling as
     * objects may be moved in - it is handled separately below. */
    for (i = 0; i < NUM_GENERATIONS; i++) {
        if ((i != generation) && (i != new_space)) {
            scavenge_generation(i);
        }
    }

    /* Finally scavenge the new_space generation. Keep going until no
     * more objects are moved into the new generation */
    scavenge_newspace_generation(new_space);

    /* FIXME: I tried reenabling this check when debugging unrelated
     * GC weirdness ca. sbcl-0.6.12.45, and it failed immediately.
     * Since the current GC code seems to work well, I'm guessing that
     * this debugging code is just stale, but I haven't tried to
     * figure it out. It should be figured out and then either made to
     * work or just deleted. */
#define RESCAN_CHECK 0
#if RESCAN_CHECK
    /* As a check re-scavenge the newspace once; no new objects should
     * be found. */
    {
        int old_bytes_allocated = bytes_allocated;
        /* (This local must not shadow the global bytes_allocated, or
         * the comparison below is meaningless.) */
        int rescan_bytes_allocated;

        /* Start with a full scavenge. */
        scavenge_newspace_generation_one_scan(new_space);

        /* Flush the current regions, updating the tables. */
        gc_alloc_update_all_page_tables();

        rescan_bytes_allocated = bytes_allocated - old_bytes_allocated;

        if (rescan_bytes_allocated != 0) {
            lose("Rescan of new_space allocated %d more bytes.",
                 rescan_bytes_allocated);
        }
    }
#endif

    scan_weak_pointers();

    /* Flush the current regions, updating the tables. */
    gc_alloc_update_all_page_tables();

    /* Free the pages in oldspace, but not those marked dont_move. */
    bytes_freed = free_oldspace();

    /* If the GC is not raising the age then lower the generation back
     * to its normal generation number */
    if (!raise) {
        for (i = 0; i < last_free_page; i++)
            if ((page_table[i].bytes_used != 0)
                && (page_table[i].gen == NUM_GENERATIONS))
                page_table[i].gen = generation;
        gc_assert(generations[generation].bytes_allocated == 0);
        generations[generation].bytes_allocated =
            generations[NUM_GENERATIONS].bytes_allocated;
        generations[NUM_GENERATIONS].bytes_allocated = 0;
    }

    /* Reset the alloc_start_page for generation. */
    generations[generation].alloc_start_page = 0;
    generations[generation].alloc_unboxed_start_page = 0;
    generations[generation].alloc_large_start_page = 0;
    generations[generation].alloc_large_unboxed_start_page = 0;

    if (generation >= verify_gens) {
        if (gencgc_verbose)
            SHOW("verifying");
        verify_gc();
        verify_dynamic_space();
    }

    /* Set the new gc trigger for the GCed generation. */
    generations[generation].gc_trigger =
        generations[generation].bytes_allocated
        + generations[generation].bytes_consed_between_gc;

    if (raise)
        generations[generation].num_gc = 0;
    else
        ++generations[generation].num_gc;
}
/* Update last_free_page, then SymbolValue(ALLOCATION_POINTER). */
int
update_x86_dynamic_space_free_pointer(void)
{
    int last_page = -1;
    int i;

    for (i = 0; i < NUM_PAGES; i++)
        if ((page_table[i].allocated != FREE_PAGE)
            && (page_table[i].bytes_used != 0))
            last_page = i;

    last_free_page = last_page+1;

    SetSymbolValue(ALLOCATION_POINTER,
                   (lispobj)(((char *)heap_base) + last_free_page*4096));
    return 0; /* dummy value: return something ... */
}
/* GC all generations newer than last_gen, raising the objects in each
 * to the next older generation - we finish when all generations below
 * last_gen are empty. Then if last_gen is due for a GC, or if
 * last_gen==NUM_GENERATIONS (the scratch generation? eh?) we GC that
 * too. The valid range for last_gen is: 0,1,...,NUM_GENERATIONS.
 *
 * We stop collecting at gencgc_oldest_gen_to_gc, even if this is less than
 * last_gen (oh, and note that by default it is NUM_GENERATIONS-1). */
void
collect_garbage(unsigned last_gen)
{
    int gen = 0;
    int raise;
    int gen_to_wp;
    int i;

    FSHOW((stderr, "/entering collect_garbage(%d)\n", last_gen));

    if (last_gen > NUM_GENERATIONS) {
        FSHOW((stderr,
               "/collect_garbage: last_gen = %d, doing a level 0 GC\n",
               last_gen));
        last_gen = 0;
    }
    /* Flush the alloc regions updating the tables. */
    gc_alloc_update_all_page_tables();

    /* Verify the new objects created by Lisp code. */
    if (pre_verify_gen_0) {
        FSHOW((stderr, "pre-checking generation 0\n"));
        verify_generation(0);
    }

    if (gencgc_verbose > 1)
        print_generation_stats(0);
    do {
        /* Collect the generation. */

        if (gen >= gencgc_oldest_gen_to_gc) {
            /* Never raise the oldest generation. */
            raise = 0;
        } else {
            raise =
                (gen < last_gen)
                || (generations[gen].num_gc >= generations[gen].trigger_age);
        }

        if (gencgc_verbose > 1) {
            FSHOW((stderr,
                   "starting GC of generation %d with raise=%d alloc=%d trig=%d GCs=%d\n",
                   gen,
                   raise,
                   generations[gen].bytes_allocated,
                   generations[gen].gc_trigger,
                   generations[gen].num_gc));
        }
        /* If an older generation is being filled, then update its
         * memory age. */
        if (raise == 1) {
            generations[gen+1].cum_sum_bytes_allocated +=
                generations[gen+1].bytes_allocated;
        }
        garbage_collect_generation(gen, raise);

        /* Reset the memory age cum_sum. */
        generations[gen].cum_sum_bytes_allocated = 0;

        if (gencgc_verbose > 1) {
            FSHOW((stderr, "GC of generation %d finished:\n", gen));
            print_generation_stats(0);
        }

        gen++;
    } while ((gen <= gencgc_oldest_gen_to_gc)
             && ((gen < last_gen)
                 || ((gen <= gencgc_oldest_gen_to_gc)
                     && raise
                     && (generations[gen].bytes_allocated
                         > generations[gen].gc_trigger)
                     && (gen_av_mem_age(gen)
                         > generations[gen].min_av_mem_age))));
    /* Now if gen-1 was raised all generations before gen are empty.
     * If it wasn't raised then all generations before gen-1 are empty.
     *
     * Now objects within this gen's pages cannot point to younger
     * generations unless they are written to. This can be exploited
     * by write-protecting the pages of gen; then when younger
     * generations are GCed only the pages which have been written
     * need scanning. */
    if (raise)
        gen_to_wp = gen;
    else
        gen_to_wp = gen - 1;
    /* There's not much point in WPing pages in generation 0 as it is
     * never scavenged (except promoted pages). */
    if ((gen_to_wp > 0) && enable_page_protection) {
        /* Check that they are all empty. */
        for (i = 0; i < gen_to_wp; i++) {
            if (generations[i].bytes_allocated)
                lose("trying to write-protect gen. %d when gen. %d nonempty",
                     gen_to_wp, i);
        }
        write_protect_generation_pages(gen_to_wp);
    }
    /* Set gc_alloc() back to generation 0. The current regions should
     * be flushed after the above GCs. */
    gc_assert((boxed_region.free_pointer - boxed_region.start_addr) == 0);
    gc_alloc_generation = 0;

    update_x86_dynamic_space_free_pointer();

    SHOW("returning from collect_garbage");
}
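
/* Usage sketch (disabled): illustrative calls, not taken from this
 * file. last_gen selects how deep the collection may go; per the
 * comment above collect_garbage(), the valid values are
 * 0..NUM_GENERATIONS. */
#if 0
static void
collect_garbage_usage_sketch(void)
{
    collect_garbage(0);               /* collect only the nursery */
    collect_garbage(NUM_GENERATIONS); /* as deep a GC as gencgc_oldest_gen_to_gc allows */
}
#endif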
/* This is called by Lisp PURIFY when it is finished. All live objects
 * will have been moved to the RO and Static heaps. The dynamic space
 * will need a full re-initialization. We don't bother having Lisp
 * PURIFY flush the current gc_alloc() region, as the page_tables are
 * re-initialized, and every page is zeroed to be sure. */
void
gc_free_heap(void)
{
    int page;

    if (gencgc_verbose > 1)
        SHOW("entering gc_free_heap");
    for (page = 0; page < NUM_PAGES; page++) {
        /* Skip free pages which should already be zero filled. */
        if (page_table[page].allocated != FREE_PAGE) {
            void *page_start, *addr;

            /* Mark the page free. The other slots are assumed invalid
             * when it is a FREE_PAGE and bytes_used is 0, and it
             * should not be write-protected -- except that the
             * generation field is also used for the current region,
             * but gc_alloc() sets that up itself anyway. */
            page_table[page].allocated = FREE_PAGE;
            page_table[page].bytes_used = 0;

            /* Zero the page. */
            page_start = (void *)page_address(page);

            /* First, remove any write-protection. */
            os_protect(page_start, 4096, OS_VM_PROT_ALL);
            page_table[page].write_protected = 0;

            os_invalidate(page_start,4096);
            addr = os_validate(page_start,4096);
            if (addr == NULL || addr != page_start) {
                lose("gc_free_heap: page moved, 0x%08x ==> 0x%08x",
                     page_start, addr);
            }
        } else if (gencgc_zero_check_during_free_heap) {
            /* Double-check that the page is zero filled. */
            int *page_start, i;
            gc_assert(page_table[page].allocated == FREE_PAGE);
            gc_assert(page_table[page].bytes_used == 0);
            page_start = (int *)page_address(page);
            for (i=0; i<1024; i++) {
                if (page_start[i] != 0) {
                    lose("free region not zero at %x", page_start + i);
                }
            }
        }
    }

    bytes_allocated = 0;
    /* Initialize the generations. */
    for (page = 0; page < NUM_GENERATIONS; page++) {
        generations[page].alloc_start_page = 0;
        generations[page].alloc_unboxed_start_page = 0;
        generations[page].alloc_large_start_page = 0;
        generations[page].alloc_large_unboxed_start_page = 0;
        generations[page].bytes_allocated = 0;
        generations[page].gc_trigger = 2000000;
        generations[page].num_gc = 0;
        generations[page].cum_sum_bytes_allocated = 0;
    }
    if (gencgc_verbose > 1)
        print_generation_stats(0);

    /* Initialize gc_alloc(). */
    gc_alloc_generation = 0;

    gc_set_region_empty(&boxed_region);
    gc_set_region_empty(&unboxed_region);

    SetSymbolValue(ALLOCATION_POINTER, (lispobj)((char *)heap_base));

    if (verify_after_free_heap) {
        /* Check whether purify has left any bad pointers. */
        SHOW("checking after free_heap\n");
        verify_gc();
    }
}
void
gc_init(void)
{
    int i;

    scavtab[SIMPLE_VECTOR_WIDETAG] = scav_vector;
    scavtab[WEAK_POINTER_WIDETAG] = scav_weak_pointer;
    transother[SIMPLE_ARRAY_WIDETAG] = trans_boxed_large;

    heap_base = (void*)DYNAMIC_SPACE_START;
    /* Initialize each page structure. */
    for (i = 0; i < NUM_PAGES; i++) {
        /* Initialize all pages as free. */
        page_table[i].allocated = FREE_PAGE;
        page_table[i].bytes_used = 0;

        /* Pages are not write-protected at startup. */
        page_table[i].write_protected = 0;
    }

    bytes_allocated = 0;
    /* Initialize the generations.
     *
     * FIXME: very similar to code in gc_free_heap(), should be shared */
    for (i = 0; i < NUM_GENERATIONS; i++) {
        generations[i].alloc_start_page = 0;
        generations[i].alloc_unboxed_start_page = 0;
        generations[i].alloc_large_start_page = 0;
        generations[i].alloc_large_unboxed_start_page = 0;
        generations[i].bytes_allocated = 0;
        generations[i].gc_trigger = 2000000;
        generations[i].num_gc = 0;
        generations[i].cum_sum_bytes_allocated = 0;
        /* the tune-able parameters */
        generations[i].bytes_consed_between_gc = 2000000;
        generations[i].trigger_age = 1;
        generations[i].min_av_mem_age = 0.75;
    }
    /* Initialize gc_alloc. */
    gc_alloc_generation = 0;
    gc_set_region_empty(&boxed_region);
    gc_set_region_empty(&unboxed_region);
}
/* Pick up the dynamic space from after a core load.
 *
 * The ALLOCATION_POINTER points to the end of the dynamic space.
 *
 * XX A scan is needed to identify the closest first objects for pages. */
static void
gencgc_pickup_dynamic(void)
{
    int page = 0;
    int addr = DYNAMIC_SPACE_START;
    int alloc_ptr = SymbolValue(ALLOCATION_POINTER);

    /* Initialize the first region. */
    do {
        page_table[page].allocated = BOXED_PAGE;
        page_table[page].gen = 0;
        page_table[page].bytes_used = 4096;
        page_table[page].large_object = 0;
        page_table[page].first_object_offset =
            (void *)DYNAMIC_SPACE_START - page_address(page);
        addr += 4096;
        page++;
    } while (addr < alloc_ptr);

    generations[0].bytes_allocated = 4096*page;
    bytes_allocated = 4096*page;
}
void
gc_initialize_pointers(void)
{
    gencgc_pickup_dynamic();
}
extern boolean maybe_gc_pending;

/* alloc(..) is the external interface for memory allocation. It
 * allocates to generation 0. It is not called from within the garbage
 * collector, as only external callers need the heap-size check (the
 * GC trigger) and the disabling of interrupts (interrupts are always
 * disabled during a GC).
 *
 * The vops that call alloc(..) assume that the returned space is zero-filled.
 * (E.g. the most significant word of a 2-word bignum in MOVE-FROM-UNSIGNED.)
 *
 * The check for a GC trigger is only performed when the current
 * region is full, so in most cases it's not needed. */
char *
alloc(int nbytes)
{
    struct alloc_region *region = &boxed_region;
    void *new_obj;
    void *new_free_pointer;

    /* Check for alignment allocation problems. */
    gc_assert((((unsigned)region->free_pointer & 0x7) == 0)
              && ((nbytes & 0x7) == 0));

    /* At this point we should either be in pseudo-atomic, or early
     * enough in cold init that interrupts are not yet enabled anyway.
     * It would be nice to assert the same. */
    gc_assert(SymbolValue(PSEUDO_ATOMIC_ATOMIC));

    /* maybe we can do this quickly ... */
    new_free_pointer = region->free_pointer + nbytes;
    if (new_free_pointer <= region->end_addr) {
        new_obj = (void*)(region->free_pointer);
        region->free_pointer = new_free_pointer;
        return (new_obj); /* yup */
    }
    /* we have to go the long way around, it seems. Check whether we
     * should GC in the near future. */
    if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) {
        auto_gc_trigger *= 2;
        /* Set things up so that GC happens when we finish the
         * pseudo-atomic section. */
        maybe_gc_pending = 1;
        SetSymbolValue(PSEUDO_ATOMIC_INTERRUPTED, make_fixnum(1));
    }
    new_obj = gc_alloc_with_region(nbytes,0,region,0);
    return (new_obj);
}
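
/* Usage sketch (disabled): alloc() must be called inside pseudo-atomic,
 * as the assertion above demands. The way pseudo-atomic is entered and
 * left below is a schematic assumption -- the real callers are the
 * compiler's allocation VOPs, not C code. */
#if 0
static void
alloc_usage_sketch(void)
{
    char *p;
    SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(1)); /* assumed entry */
    p = alloc(16); /* 16 zero-filled, 8-byte-aligned bytes */
    /* ... initialize the object's header and slots here ... */
    SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(0)); /* assumed exit */
}
#endif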
/*
 * noise to manipulate the gc trigger stuff
 */

void
set_auto_gc_trigger(os_vm_size_t dynamic_usage)
{
    auto_gc_trigger += dynamic_usage;
}

void
clear_auto_gc_trigger(void)
{
    auto_gc_trigger = 0;
}
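
/* Usage sketch (disabled): re-arming the trigger after a collection.
 * Clearing first makes set_auto_gc_trigger() an absolute setting;
 * alloc()'s slow path then fires once bytes_allocated exceeds current
 * usage plus the headroom. The 2000000-byte headroom figure is an
 * illustrative assumption. */
#if 0
static void
auto_gc_trigger_usage_sketch(void)
{
    clear_auto_gc_trigger();
    set_auto_gc_trigger(bytes_allocated + 2000000);
}
#endif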
/* Find the code object for the given pc, or return NULL on failure.
 *
 * FIXME: PC shouldn't be lispobj*, should it? Maybe void*? */
lispobj *
component_ptr_from_pc(lispobj *pc)
{
    lispobj *object = NULL;

    if ( (object = search_read_only_space(pc)) )
        ;
    else if ( (object = search_static_space(pc)) )
        ;
    else
        object = search_dynamic_space(pc);

    if (object) /* if we found something */
        if (widetag_of(*object) == CODE_HEADER_WIDETAG) /* if it's a code object */
            return (object);

    return (NULL);
}
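
/* Usage sketch (disabled): mapping a program counter back to its code
 * component, e.g. while handling a trap. The pc parameter here is an
 * illustrative stand-in for a program counter pulled from a signal
 * context. */
#if 0
static void
component_ptr_from_pc_usage_sketch(lispobj *pc)
{
    lispobj *code = component_ptr_from_pc(pc);
    if (code != NULL) {
        /* pc lies within this code object's component */
    }
}
#endif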
/*
 * shared support for the OS-dependent signal handlers which
 * catch GENCGC-related write-protect violations
 */

void unhandled_sigmemoryfault(void);

/* Depending on which OS we're running under, different signals might
 * be raised for a violation of write protection in the heap. This
 * function factors out the common generational GC magic which needs
 * to be invoked in this case, and should be called from whatever
 * signal handler is appropriate for the OS we're running under.
 *
 * Return true if this signal is a normal generational GC thing that
 * we were able to handle, or false if it was abnormal and control
 * should fall through to the general SIGSEGV/SIGBUS/whatever logic. */
int
gencgc_handle_wp_violation(void* fault_addr)
{
    int page_index = find_page_index(fault_addr);

#if defined QSHOW_SIGNALS
    FSHOW((stderr, "heap WP violation? fault_addr=%x, page_index=%d\n",
           fault_addr, page_index));
#endif

    /* Check whether the fault is within the dynamic space. */
    if (page_index == (-1)) {

        /* It can be helpful to be able to put a breakpoint on this
         * case to help diagnose low-level problems. */
        unhandled_sigmemoryfault();

        /* not within the dynamic space -- not our responsibility */
        return 0;

    } else {
        /* The only acceptable reason for a signal like this from the
         * heap is that the generational GC write-protected the page. */
        if (page_table[page_index].write_protected != 1) {
            lose("access failure in heap page not marked as write-protected");
        }

        /* Unprotect the page. */
        os_protect(page_address(page_index), 4096, OS_VM_PROT_ALL);
        page_table[page_index].write_protected = 0;
        page_table[page_index].write_protected_cleared = 1;

        /* Don't worry, we can handle it. */
        return 1;
    }
}
/* This is to be called when we catch a SIGSEGV/SIGBUS, determine that
 * it's not just a case of the program hitting the write barrier, and
 * are about to let Lisp deal with it. It's basically just a
 * convenient place to set a gdb breakpoint. */
void
unhandled_sigmemoryfault()
{}
void
gc_alloc_update_all_page_tables(void)
{
    /* Flush the alloc regions updating the tables. */
    gc_alloc_update_page_tables(1, &unboxed_region);
    gc_alloc_update_page_tables(0, &boxed_region);
}
void
gc_set_region_empty(struct alloc_region *region)
{
    region->first_page = 0;
    region->last_page = -1;
    region->start_addr = page_address(0);
    region->free_pointer = page_address(0);
    region->end_addr = page_address(0);
}