/*
 * GENerational Conservative Garbage Collector for SBCL x86
 */

/*
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 */

/*
 * For a review of garbage collection techniques (e.g. generational
 * GC) and terminology (e.g. "scavenging") see Paul R. Wilson,
 * "Uniprocessor Garbage Collection Techniques". As of 20000618, this
 * had been accepted for _ACM Computing Surveys_ and was available
 * as a PostScript preprint through
 *   <http://www.cs.utexas.edu/users/oops/papers.html>
 * as
 *   <ftp://ftp.cs.utexas.edu/pub/garbage/bigsurv.ps>.
 */
#include "interrupt.h"
#include "gc-internal.h"
#include "genesis/vector.h"
#include "genesis/weak-pointer.h"
#include "genesis/simple-fun.h"
/* assembly language stub that executes trap_PendingInterrupt */
void do_pending_interrupt(void);

/* forward declarations */
long gc_find_freeish_pages(long *restart_page_ptr, long nbytes, int unboxed);
static void gencgc_pickup_dynamic(void);
boolean interrupt_maybe_gc_int(int, siginfo_t *, void *);
/* the number of actual generations. (The number of 'struct
 * generation' objects is one more than this, because one object
 * serves as scratch when GC'ing.) */
#define NUM_GENERATIONS 6

/* Should we use page protection to help avoid the scavenging of pages
 * that don't have pointers to younger generations? */
boolean enable_page_protection = 1;
/* Should we unmap a page and re-mmap it to have it zero filled? */
#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
/* comment from cmucl-2.4.8: This can waste a lot of swap on FreeBSD
 * so don't unmap there.
 *
 * The CMU CL comment didn't specify a version, but was probably an
 * old version of FreeBSD (pre-4.0), so this might no longer be true.
 * OTOH, if it is true, this behavior might exist on OpenBSD too, so
 * for now we don't unmap there either. -- WHN 2001-04-07 */
boolean gencgc_unmap_zero = 0;
#else
boolean gencgc_unmap_zero = 1;
#endif
/* the minimum size (in bytes) for a large object */
unsigned large_object_size = 4 * PAGE_BYTES;
/* the verbosity level. All non-error messages are disabled at level 0;
 * and only a few rare messages are printed at level 1. */
#if QSHOW
unsigned gencgc_verbose = 1;
#else
unsigned gencgc_verbose = 0;
#endif
/* FIXME: At some point enable the various error-checking things below
 * and see what they say. */

/* We hunt for pointers to old-space, when GCing generations >= verify_gens.
 * Set verify_gens to NUM_GENERATIONS to disable this kind of check. */
int verify_gens = NUM_GENERATIONS;

/* Should we do a pre-scan verify of generation 0 before it's GCed? */
boolean pre_verify_gen_0 = 0;

/* Should we check for bad pointers after gc_free_heap is called
 * from Lisp PURIFY? */
boolean verify_after_free_heap = 0;

/* Should we print a note when code objects are found in the dynamic space
 * during a heap verify? */
boolean verify_dynamic_code_check = 0;

/* Should we check code objects for fixup errors after they are transported? */
boolean check_code_fixups = 0;

/* Should we check that newly allocated regions are zero filled? */
boolean gencgc_zero_check = 0;

/* Should we check that the free space is zero filled? */
boolean gencgc_enable_verify_zero_fill = 0;

/* Should we check that free pages are zero filled during gc_free_heap
 * called after Lisp PURIFY? */
boolean gencgc_zero_check_during_free_heap = 0;
/*
 * GC structures and variables
 */

/* the total bytes allocated. These are seen by Lisp DYNAMIC-USAGE. */
unsigned long bytes_allocated = 0;
extern unsigned long bytes_consed_between_gcs; /* gc-common.c */
unsigned long auto_gc_trigger = 0;
/* the source and destination generations. These are set before a GC starts
 * scavenging. */
long from_space;
long new_space;

/* An array of page structures is statically allocated.
 * This helps quickly map between an address and its page structure.
 * NUM_PAGES is set from the size of the dynamic space. */
struct page page_table[NUM_PAGES];

/* To map addresses to page structures the address of the first page
 * is needed. */
static void *heap_base = NULL;
#if N_WORD_BITS == 32
#define SIMPLE_ARRAY_WORD_WIDETAG SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG
#elif N_WORD_BITS == 64
#define SIMPLE_ARRAY_WORD_WIDETAG SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
#endif
/* Calculate the start address for the given page number. */
inline void *
page_address(long page_num)
{
    return (heap_base + (page_num * PAGE_BYTES));
}
/* Find the page index within the page_table for the given
 * address. Return -1 on failure. */
inline long
find_page_index(void *addr)
{
    long index = addr-heap_base;

    if (index >= 0) {
        index = ((unsigned long)index)/PAGE_BYTES;
        if (index < NUM_PAGES)
            return (index);
    }

    return (-1);
}
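/* Worked example (the numbers are illustrative, not from the source):
 * with PAGE_BYTES = 4096 and heap_base = 0x09000000, the address
 * 0x09001234 lies 0x1234 bytes into the heap, so find_page_index()
 * returns 0x1234/4096 = 1, and page_address(1) maps back to
 * heap_base + 4096 = 0x09001000. */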
/* a structure to hold the state of a generation */
struct generation {

    /* the first page that gc_alloc() checks on its next call */
    long alloc_start_page;

    /* the first page that gc_alloc_unboxed() checks on its next call */
    long alloc_unboxed_start_page;

    /* the first page that gc_alloc_large (boxed) considers on its next
     * call. (Although it always allocates after the boxed_region.) */
    long alloc_large_start_page;

    /* the first page that gc_alloc_large (unboxed) considers on its
     * next call. (Although it always allocates after the
     * current_unboxed_region.) */
    long alloc_large_unboxed_start_page;

    /* the bytes allocated to this generation */
    long bytes_allocated;

    /* the number of bytes at which to trigger a GC */
    long gc_trigger;

    /* to calculate a new level for gc_trigger */
    long bytes_consed_between_gc;

    /* the number of GCs since the last raise */
    int num_gc;

    /* the average age after which a GC will raise objects to the
     * next generation */
    int trigger_age;

    /* the cumulative sum of the bytes allocated to this generation. It is
     * cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation. Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC. */
    long cum_sum_bytes_allocated;

    /* a minimum average memory age before a GC will occur helps
     * prevent a GC when a large number of new live objects have been
     * added, in which case a GC could be a waste of time */
    double min_av_mem_age;
};
/* the number of actual generations. (The number of 'struct
 * generation' objects is one more than this, because one object
 * serves as scratch when GC'ing.) */
#define NUM_GENERATIONS 6

/* an array of generation structures. There needs to be one more
 * generation structure than actual generations as the oldest
 * generation is temporarily raised then lowered. */
struct generation generations[NUM_GENERATIONS+1];
/* the oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... (NUM_GENERATIONS-1)
 *
 * The default of (NUM_GENERATIONS-1) enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided. */
unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS-1;

/* The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the Lisp code. */
static long last_free_page;
/* This lock is to prevent multiple threads from simultaneously
 * allocating new regions which overlap each other. Note that the
 * majority of GC is single-threaded, but alloc() may be called from
 * >1 thread at a time and must be thread-safe. This lock must be
 * seized before all accesses to generations[] or to parts of
 * page_table[] that other threads may want to see */
static lispobj free_pages_lock = 0;
/*
 * miscellaneous heap functions
 */

/* Count the number of pages which are write-protected within the
 * given generation. */
static long
count_write_protect_generation_pages(int generation)
{
    long i;
    long count = 0;

    for (i = 0; i < last_free_page; i++)
        if ((page_table[i].allocated != FREE_PAGE_FLAG)
            && (page_table[i].gen == generation)
            && (page_table[i].write_protected == 1))
            count++;
    return count;
}
/* Count the number of pages within the given generation. */
static long
count_generation_pages(int generation)
{
    long i;
    long count = 0;

    for (i = 0; i < last_free_page; i++)
        if ((page_table[i].allocated != 0)
            && (page_table[i].gen == generation))
            count++;
    return count;
}
static long
count_dont_move_pages(void)
{
    long i;
    long count = 0;
    for (i = 0; i < last_free_page; i++) {
        if ((page_table[i].allocated != 0) && (page_table[i].dont_move != 0)) {
            ++count;
        }
    }
    return count;
}
/* Work through the pages and add up the number of bytes used for the
 * given generation. */
static long
count_generation_bytes_allocated (int gen)
{
    long i;
    long result = 0;
    for (i = 0; i < last_free_page; i++) {
        if ((page_table[i].allocated != 0) && (page_table[i].gen == gen))
            result += page_table[i].bytes_used;
    }
    return result;
}
/* Return the average age of the memory in a generation. */
static double
gen_av_mem_age(int gen)
{
    if (generations[gen].bytes_allocated == 0)
        return 0.0;

    return
        ((double)generations[gen].cum_sum_bytes_allocated)
        / ((double)generations[gen].bytes_allocated);
}
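/* Illustrative numbers (not from the source): if a generation holds
 * bytes_allocated = 4000000 and has accumulated cum_sum_bytes_allocated
 * = 6000000, gen_av_mem_age() returns 6000000/4000000 = 1.5, i.e. on
 * average the memory has survived about 1.5 GCs of younger generations.
 * The GC logic elsewhere compares this ratio against min_av_mem_age
 * before deciding a generation is worth collecting. */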
void fpu_save(int *);    /* defined in x86-assem.S */
void fpu_restore(int *); /* defined in x86-assem.S */
/* The verbose argument controls how much to print: 0 for normal
 * level of detail; 1 for debugging. */
void
print_generation_stats(int verbose) /* FIXME: should take FILE argument */
{
    int i, gens;
    int fpu_state[27];

    /* This code uses the FP instructions which may be set up for Lisp
     * so they need to be saved and reset for C. */
    fpu_save(fpu_state);

    /* number of generations to print */
    if (verbose)
        gens = NUM_GENERATIONS+1;
    else
        gens = NUM_GENERATIONS;

    /* Print the heap stats. */
    fprintf(stderr,
            " Gen Boxed Unboxed LB LUB !move Alloc Waste Trig WP GCs Mem-age\n");

    for (i = 0; i < gens; i++) {
        int j;
        int boxed_cnt = 0;
        int unboxed_cnt = 0;
        int large_boxed_cnt = 0;
        int large_unboxed_cnt = 0;
        int pinned_cnt = 0;

        for (j = 0; j < last_free_page; j++)
            if (page_table[j].gen == i) {

                /* Count the number of boxed pages within the given
                 * generation. */
                if (page_table[j].allocated & BOXED_PAGE_FLAG) {
                    if (page_table[j].large_object)
                        large_boxed_cnt++;
                    else
                        boxed_cnt++;
                }
                if(page_table[j].dont_move) pinned_cnt++;
                /* Count the number of unboxed pages within the given
                 * generation. */
                if (page_table[j].allocated & UNBOXED_PAGE_FLAG) {
                    if (page_table[j].large_object)
                        large_unboxed_cnt++;
                    else
                        unboxed_cnt++;
                }
            }

        gc_assert(generations[i].bytes_allocated
                  == count_generation_bytes_allocated(i));
        fprintf(stderr,
                " %1d: %5d %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4f\n",
                i,
                boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
                pinned_cnt,
                generations[i].bytes_allocated,
                (count_generation_pages(i)*PAGE_BYTES
                 - generations[i].bytes_allocated),
                generations[i].gc_trigger,
                count_write_protect_generation_pages(i),
                generations[i].num_gc,
                gen_av_mem_age(i));
    }
    fprintf(stderr," Total bytes allocated=%ld\n", bytes_allocated);

    fpu_restore(fpu_state);
}
/*
 * allocation routines
 */

/*
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties
 * e.g. boxed/unboxed, generation, ages; there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page. Many
 * features of memory use are noted on a page wise basis, e.g. the
 * generation; so if a region starts within an existing allocated page
 * it must be consistent with this page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region. It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list which jumps all over the place to clean up the list. It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g. to detect pointers to the oldspace.
 * Thus it's important that the allocation regions have the correct
 * properties set when allocated, and not just set when closed. The
 * region allocation routines return regions with the specified
 * properties, and grab all the pages, setting their properties
 * appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer. The actual space used by the region is not reflected
 * in the page tables until it is closed. It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used returning unused
 * space. Further it may be noted in the new areas, which is
 * necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region, the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects and so
 * don't need scavenging. Further they can't contain pointers to
 * younger generations so WP is not needed. By allocating pages to
 * unboxed objects the whole page never needs scavenging or
 * write-protecting. */
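/* A minimal sketch of the region lifecycle described above -- open,
 * bump-allocate, close. The helper name is hypothetical and the block
 * is wrapped in #if 0 so it is never compiled; it only illustrates how
 * the real routines below fit together. */
#if 0
static void
alloc_region_lifecycle_sketch(void)
{
    struct alloc_region region;
    void *obj;

    gc_set_region_empty(&region);            /* reset to the closed state */
    gc_alloc_new_region(128, 0, &region);    /* open: grab boxed pages */
    obj = region.free_pointer;               /* allocate = bump the pointer */
    region.free_pointer += 16;               /* ... after an end_addr check */
    gc_alloc_update_page_tables(0, &region); /* close: commit bytes_used */
}
#endif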
/* We are only using two regions at present. Both are for the current
 * newspace generation. */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

/* The generation currently being allocated to. */
static int gc_alloc_generation;
/* Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page. So
 * may pick up from the previous region if there is enough space. This
 * keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables(), and will thus be in an empty state.
 *
 * To assist the scavenging functions write-protected pages are not
 * used. Free pages should not be write-protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found to within the current
 * region and the page generation must be set so that pointers to the
 * from space can be recognized. Therefore the generation of pages in
 * the region are set to gc_alloc_generation. To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void
gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region)
{
    long first_page;
    long last_page;
    long bytes_found;
    long i;

    FSHOW((stderr,
           "/alloc_new_region for %d bytes from gen %d\n",
           nbytes, gc_alloc_generation));

    /* Check that the region is in a reset state. */
    gc_assert((alloc_region->first_page == 0)
              && (alloc_region->last_page == -1)
              && (alloc_region->free_pointer == alloc_region->end_addr));
    get_spinlock(&free_pages_lock,(long) alloc_region);
    if (unboxed) {
        first_page =
            generations[gc_alloc_generation].alloc_unboxed_start_page;
    } else {
        first_page =
            generations[gc_alloc_generation].alloc_start_page;
    }
    last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed);
    bytes_found=(PAGE_BYTES - page_table[first_page].bytes_used)
        + PAGE_BYTES*(last_page-first_page);

    /* Set up the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
        + page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;

    /* Set up the pages. */

    /* The first page may have already been in use. */
    if (page_table[first_page].bytes_used == 0) {
        if (unboxed)
            page_table[first_page].allocated = UNBOXED_PAGE_FLAG;
        else
            page_table[first_page].allocated = BOXED_PAGE_FLAG;
        page_table[first_page].gen = gc_alloc_generation;
        page_table[first_page].large_object = 0;
        page_table[first_page].first_object_offset = 0;
    }

    if (unboxed)
        gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG);
    else
        gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG);
    page_table[first_page].allocated |= OPEN_REGION_PAGE_FLAG;

    gc_assert(page_table[first_page].gen == gc_alloc_generation);
    gc_assert(page_table[first_page].large_object == 0);

    for (i = first_page+1; i <= last_page; i++) {
        if (unboxed)
            page_table[i].allocated = UNBOXED_PAGE_FLAG;
        else
            page_table[i].allocated = BOXED_PAGE_FLAG;
        page_table[i].gen = gc_alloc_generation;
        page_table[i].large_object = 0;
        /* This may not be necessary for unboxed regions (think it was
         * broken before!) */
        page_table[i].first_object_offset =
            alloc_region->start_addr - page_address(i);
        page_table[i].allocated |= OPEN_REGION_PAGE_FLAG;
    }
    /* Bump up last_free_page. */
    if (last_page+1 > last_free_page) {
        last_free_page = last_page+1;
        SetSymbolValue(ALLOCATION_POINTER,
                       (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),
                       0);
    }
    release_spinlock(&free_pages_lock);

    /* we can do this after releasing free_pages_lock */
    if (gencgc_zero_check) {
        long *p;
        for (p = (long *)alloc_region->start_addr;
             p < (long *)alloc_region->end_addr; p++) {
            if (*p != 0) {
                /* KLUDGE: It would be nice to use %lx and explicit casts
                 * (long) in code like this, so that it is less likely to
                 * break randomly when running on a machine with different
                 * word sizes. -- WHN 19991129 */
                lose("The new region at %x is not zero.", p);
            }
        }
    }
}
/* If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then it is only recorded if the first page of the
 * current region is <= new_areas_ignore_page. This helps avoid
 * unnecessary recording when doing full scavenge pass.
 *
 * The new_object structure holds the page, byte offset, and size of
 * new regions of objects. Each new area is placed in the array of
 * these structures pointed to by new_areas. new_areas_index holds the
 * offset into new_areas.
 *
 * If new_areas overflows NUM_NEW_AREAS then it stops adding them. The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation. */
#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static long new_areas_ignore_page;
struct new_area {
    long page;
    long offset;
    long size;
};
static struct new_area (*new_areas)[];
static long new_areas_index;
long max_new_areas;
/* Add a new area to new_areas. */
static void
add_new_area(long first_page, long offset, long size)
{
    unsigned new_area_start,c;
    long i;

    /* Ignore if full. */
    if (new_areas_index >= NUM_NEW_AREAS)
        return;

    switch (record_new_objects) {
    case 0:
        return;
    case 1:
        if (first_page > new_areas_ignore_page)
            return;
        break;
    case 2:
        break;
    default:
        gc_abort();
    }

    new_area_start = PAGE_BYTES*first_page + offset;

    /* Search backwards for a prior area that this follows from. If
       found this will save adding a new area. */
    for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) {
        unsigned area_end =
            PAGE_BYTES*((*new_areas)[i].page)
            + (*new_areas)[i].offset
            + (*new_areas)[i].size;
        /*FSHOW((stderr,
               "/add_new_area S1 %d %d %d %d\n",
               i, c, new_area_start, area_end));*/
        if (new_area_start == area_end) {
            /*FSHOW((stderr,
                   "/adding to [%d] %d %d %d with %d %d %d:\n",
                   i,
                   (*new_areas)[i].page,
                   (*new_areas)[i].offset,
                   (*new_areas)[i].size,
                   first_page,
                   offset,
                   size));*/
            (*new_areas)[i].size += size;
            return;
        }
    }

    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
    /*FSHOW((stderr,
           "/new_area %d page %d offset %d size %d\n",
           new_areas_index, first_page, offset, size));*/
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
        max_new_areas = new_areas_index;
}
/* Update the tables for the alloc_region. The region may be added to
 * the new_areas.
 *
 * When done the alloc_region is set up so that the next quick alloc
 * will fail safely and thus a new region will be allocated. Further
 * it is safe to try to re-update the page table of this reset
 * alloc_region. */
void
gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
{
    long more;
    long first_page;
    long next_page;
    long bytes_used;
    long orig_first_page_bytes_used;
    long region_size;
    long byte_cnt;

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if ((first_page == 0) && (alloc_region->last_page == -1))
        return;

    next_page = first_page+1;

    get_spinlock(&free_pages_lock,(long) alloc_region);
    if (alloc_region->free_pointer != alloc_region->start_addr) {
        /* some bytes were allocated in the region */
        orig_first_page_bytes_used = page_table[first_page].bytes_used;

        gc_assert(alloc_region->start_addr == (page_address(first_page) + page_table[first_page].bytes_used));

        /* All the pages used need to be updated */

        /* Update the first page. */

        /* If the page was free then set up the gen, and
         * first_object_offset. */
        if (page_table[first_page].bytes_used == 0)
            gc_assert(page_table[first_page].first_object_offset == 0);
        page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);

        if (unboxed)
            gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG);
        else
            gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG);
        gc_assert(page_table[first_page].gen == gc_alloc_generation);
        gc_assert(page_table[first_page].large_object == 0);

        byte_cnt = 0;

        /* Calculate the number of bytes used in this page. This is not
         * always the number of new bytes, unless it was free. */
        more = 0;
        if ((bytes_used = (alloc_region->free_pointer - page_address(first_page)))>PAGE_BYTES) {
            bytes_used = PAGE_BYTES;
            more = 1;
        }
        page_table[first_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        /* All the rest of the pages should be free. We need to set their
         * first_object_offset pointer to the start of the region, and set
         * the bytes_used. */
        while (more) {
            page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
            if (unboxed)
                gc_assert(page_table[next_page].allocated==UNBOXED_PAGE_FLAG);
            else
                gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
            gc_assert(page_table[next_page].bytes_used == 0);
            gc_assert(page_table[next_page].gen == gc_alloc_generation);
            gc_assert(page_table[next_page].large_object == 0);

            gc_assert(page_table[next_page].first_object_offset ==
                      alloc_region->start_addr - page_address(next_page));

            /* Calculate the number of bytes used in this page. */
            more = 0;
            if ((bytes_used = (alloc_region->free_pointer
                               - page_address(next_page)))>PAGE_BYTES) {
                bytes_used = PAGE_BYTES;
                more = 1;
            }
            page_table[next_page].bytes_used = bytes_used;
            byte_cnt += bytes_used;

            next_page++;
        }

        region_size = alloc_region->free_pointer - alloc_region->start_addr;
        bytes_allocated += region_size;
        generations[gc_alloc_generation].bytes_allocated += region_size;

        gc_assert((byte_cnt - orig_first_page_bytes_used) == region_size);

        /* Set the generations alloc restart page to the last page of
         * the region. */
        if (unboxed)
            generations[gc_alloc_generation].alloc_unboxed_start_page =
                next_page-1;
        else
            generations[gc_alloc_generation].alloc_start_page = next_page-1;

        /* Add the region to the new_areas if requested. */
        if (!unboxed)
            add_new_area(first_page,orig_first_page_bytes_used, region_size);

        FSHOW((stderr,
               "/gc_alloc_update_page_tables update %d bytes to gen %d\n",
               region_size,
               gc_alloc_generation));
    } else {
        /* There are no bytes allocated. Unallocate the first_page if
         * there are 0 bytes_used. */
        page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
        if (page_table[first_page].bytes_used == 0)
            page_table[first_page].allocated = FREE_PAGE_FLAG;
    }

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
        gc_assert(page_table[next_page].bytes_used == 0);
        page_table[next_page].allocated = FREE_PAGE_FLAG;
        next_page++;
    }
    release_spinlock(&free_pages_lock);
    /* alloc_region is per-thread, we're ok to do this unlocked */
    gc_set_region_empty(alloc_region);
}
static inline void *gc_quick_alloc(long nbytes);
/* Allocate a possibly large object. */
void *
gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region)
{
    long first_page;
    long last_page;
    long orig_first_page_bytes_used;
    long byte_cnt;
    long more;
    long bytes_used;
    long next_page;

    get_spinlock(&free_pages_lock,(long) alloc_region);

    if (unboxed) {
        first_page =
            generations[gc_alloc_generation].alloc_large_unboxed_start_page;
    } else {
        first_page = generations[gc_alloc_generation].alloc_large_start_page;
    }
    if (first_page <= alloc_region->last_page) {
        first_page = alloc_region->last_page+1;
    }

    last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed);

    gc_assert(first_page > alloc_region->last_page);
    if (unboxed)
        generations[gc_alloc_generation].alloc_large_unboxed_start_page =
            last_page;
    else
        generations[gc_alloc_generation].alloc_large_start_page = last_page;

    /* Set up the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /* If the first page was free then set up the gen, and
     * first_object_offset. */
    if (page_table[first_page].bytes_used == 0) {
        if (unboxed)
            page_table[first_page].allocated = UNBOXED_PAGE_FLAG;
        else
            page_table[first_page].allocated = BOXED_PAGE_FLAG;
        page_table[first_page].gen = gc_alloc_generation;
        page_table[first_page].first_object_offset = 0;
        page_table[first_page].large_object = 1;
    }

    if (unboxed)
        gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG);
    else
        gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG);
    gc_assert(page_table[first_page].gen == gc_alloc_generation);
    gc_assert(page_table[first_page].large_object == 1);

    byte_cnt = 0;

    /* Calc. the number of bytes used in this page. This is not
     * always the number of new bytes, unless it was free. */
    more = 0;
    if ((bytes_used = nbytes+orig_first_page_bytes_used) > PAGE_BYTES) {
        bytes_used = PAGE_BYTES;
        more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page+1;

    /* All the rest of the pages should be free. We need to set their
     * first_object_offset pointer to the start of the region, and
     * set the bytes_used. */
    while (more) {
        gc_assert(page_table[next_page].allocated == FREE_PAGE_FLAG);
        gc_assert(page_table[next_page].bytes_used == 0);
        if (unboxed)
            page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
        else
            page_table[next_page].allocated = BOXED_PAGE_FLAG;
        page_table[next_page].gen = gc_alloc_generation;
        page_table[next_page].large_object = 1;

        page_table[next_page].first_object_offset =
            orig_first_page_bytes_used - PAGE_BYTES*(next_page-first_page);

        /* Calculate the number of bytes used in this page. */
        more = 0;
        if ((bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt) > PAGE_BYTES) {
            bytes_used = PAGE_BYTES;
            more = 1;
        }
        page_table[next_page].bytes_used = bytes_used;
        page_table[next_page].write_protected=0;
        page_table[next_page].dont_move=0;
        byte_cnt += bytes_used;
        next_page++;
    }

    gc_assert((byte_cnt-orig_first_page_bytes_used) == nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (!unboxed)
        add_new_area(first_page,orig_first_page_bytes_used,nbytes);

    /* Bump up last_free_page */
    if (last_page+1 > last_free_page) {
        last_free_page = last_page+1;
        SetSymbolValue(ALLOCATION_POINTER,
                       (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),0);
    }
    release_spinlock(&free_pages_lock);

    return((void *)(page_address(first_page)+orig_first_page_bytes_used));
}
long
gc_find_freeish_pages(long *restart_page_ptr, long nbytes, int unboxed)
{
    long first_page;
    long last_page;
    long region_size;
    long restart_page=*restart_page_ptr;
    long bytes_found;
    long num_pages;
    long large_p=(nbytes>=large_object_size);
    gc_assert(free_pages_lock);

    /* Search for a contiguous free space of at least nbytes. If it's
     * a large object then align it on a page boundary by searching
     * for a free page. */

    do {
        first_page = restart_page;
        if (large_p)
            while ((first_page < NUM_PAGES)
                   && (page_table[first_page].allocated != FREE_PAGE_FLAG))
                first_page++;
        else
            while (first_page < NUM_PAGES) {
                if(page_table[first_page].allocated == FREE_PAGE_FLAG)
                    break;
                if((page_table[first_page].allocated ==
                    (unboxed ? UNBOXED_PAGE_FLAG : BOXED_PAGE_FLAG)) &&
                   (page_table[first_page].large_object == 0) &&
                   (page_table[first_page].gen == gc_alloc_generation) &&
                   (page_table[first_page].bytes_used < (PAGE_BYTES-32)) &&
                   (page_table[first_page].write_protected == 0) &&
                   (page_table[first_page].dont_move == 0)) {
                    break;
                }
                first_page++;
            }

        if (first_page >= NUM_PAGES) {
            fprintf(stderr,
                    "Argh! gc_find_free_space failed (first_page), nbytes=%d.\n",
                    nbytes);
            print_generation_stats(1);
            lose(NULL);
        }

        gc_assert(page_table[first_page].write_protected == 0);

        last_page = first_page;
        bytes_found = PAGE_BYTES - page_table[first_page].bytes_used;
        num_pages = 1;
        while (((bytes_found < nbytes)
                || (!large_p && (num_pages < 2)))
               && (last_page < (NUM_PAGES-1))
               && (page_table[last_page+1].allocated == FREE_PAGE_FLAG)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_BYTES;
            gc_assert(page_table[last_page].write_protected == 0);
        }

        region_size = (PAGE_BYTES - page_table[first_page].bytes_used)
            + PAGE_BYTES*(last_page-first_page);

        gc_assert(bytes_found == region_size);
        restart_page = last_page + 1;
    } while ((restart_page < NUM_PAGES) && (bytes_found < nbytes));

    /* Check for a failure */
    if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes)) {
        fprintf(stderr,
                "Argh! gc_find_freeish_pages failed (restart_page), nbytes=%d.\n",
                nbytes);
        print_generation_stats(1);
        lose(NULL);
    }
    *restart_page_ptr=first_page;
    return last_page;
}
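/* Worked example (illustrative): for a small request of nbytes = 6000
 * starting on a page with bytes_used = 1000 and PAGE_BYTES = 4096, the
 * first page contributes 4096-1000 = 3096 bytes, so one more free page
 * is consumed: bytes_found = 3096 + 4096 = 7192 >= 6000, and
 * last_page = first_page+1 is returned. */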
/* Allocate bytes. All the rest of the special-purpose allocation
 * functions will eventually call this */
void *
gc_alloc_with_region(long nbytes,int unboxed_p, struct alloc_region *my_region,
                     int quick_p)
{
    void *new_free_pointer;

    if(nbytes>=large_object_size)
        return gc_alloc_large(nbytes,unboxed_p,my_region);

    /* Check whether there is room in the current alloc region. */
    new_free_pointer = my_region->free_pointer + nbytes;

    /* fprintf(stderr, "alloc %d bytes from %p to %p\n", nbytes,
       my_region->free_pointer, new_free_pointer); */

    if (new_free_pointer <= my_region->end_addr) {
        /* If so then allocate from the current alloc region. */
        void *new_obj = my_region->free_pointer;
        my_region->free_pointer = new_free_pointer;

        /* Unless a `quick' alloc was requested, check whether the
           alloc region is almost empty. */
        if (!quick_p &&
            (my_region->end_addr - my_region->free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(unboxed_p, my_region);
            /* Set up a new region. */
            gc_alloc_new_region(32 /*bytes*/, unboxed_p, my_region);
        }
        return((void *)new_obj);
    }

    /* Else not enough free space in the current region: retry with a
     * new region. */

    gc_alloc_update_page_tables(unboxed_p, my_region);
    gc_alloc_new_region(nbytes, unboxed_p, my_region);
    return gc_alloc_with_region(nbytes,unboxed_p,my_region,0);
}
/* these are only used during GC: all allocation from the mutator calls
 * alloc() -> gc_alloc_with_region() with the appropriate per-thread
 * region */

void *
gc_general_alloc(long nbytes,int unboxed_p,int quick_p)
{
    struct alloc_region *my_region =
        unboxed_p ? &unboxed_region : &boxed_region;
    return gc_alloc_with_region(nbytes,unboxed_p, my_region,quick_p);
}

static inline void *
gc_quick_alloc(long nbytes)
{
    return gc_general_alloc(nbytes,ALLOC_BOXED,ALLOC_QUICK);
}

static inline void *
gc_quick_alloc_large(long nbytes)
{
    return gc_general_alloc(nbytes,ALLOC_BOXED,ALLOC_QUICK);
}

static inline void *
gc_alloc_unboxed(long nbytes)
{
    return gc_general_alloc(nbytes,ALLOC_UNBOXED,0);
}

static inline void *
gc_quick_alloc_unboxed(long nbytes)
{
    return gc_general_alloc(nbytes,ALLOC_UNBOXED,ALLOC_QUICK);
}

static inline void *
gc_quick_alloc_large_unboxed(long nbytes)
{
    return gc_general_alloc(nbytes,ALLOC_UNBOXED,ALLOC_QUICK);
}
/*
 * scavenging/transporting routines derived from gc.c in CMU CL ca. 18b
 */

extern long (*scavtab[256])(lispobj *where, lispobj object);
extern lispobj (*transother[256])(lispobj object);
extern long (*sizetab[256])(lispobj *where);
/* Copy a large boxed object. If the object is in a large object
 * region then it is simply promoted, else it is copied. If it's large
 * enough then it's copied to a large object region.
 *
 * Vectors may have shrunk. If the object is not copied the space
 * needs to be reclaimed, and the page_tables corrected. */
lispobj
copy_large_object(lispobj object, long nwords)
{
    int tag;
    lispobj *new;
    long first_page;

    gc_assert(is_lisp_pointer(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    /* Check whether it's in a large object region. */
    first_page = find_page_index((void *)object);
    gc_assert(first_page >= 0);

    if (page_table[first_page].large_object) {

        /* Promote the object. */

        long remaining_bytes;
        long next_page;
        long bytes_freed;
        long old_bytes_used;

        /* Note: Any page write-protection must be removed, else a
         * later scavenge_newspace may incorrectly not scavenge these
         * pages. This would not be necessary if they are added to the
         * new areas, but let's do it for them all (they'll probably
         * be written anyway?). */

        gc_assert(page_table[first_page].first_object_offset == 0);

        next_page = first_page;
        remaining_bytes = nwords*N_WORD_BYTES;
        while (remaining_bytes > PAGE_BYTES) {
            gc_assert(page_table[next_page].gen == from_space);
            gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
            gc_assert(page_table[next_page].large_object);
            gc_assert(page_table[next_page].first_object_offset ==
                      -PAGE_BYTES*(next_page-first_page));
            gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);

            page_table[next_page].gen = new_space;

            /* Remove any write-protection. We should be able to rely
             * on the write-protect flag to avoid redundant calls. */
            if (page_table[next_page].write_protected) {
                os_protect(page_address(next_page), PAGE_BYTES, OS_VM_PROT_ALL);
                page_table[next_page].write_protected = 0;
            }
            remaining_bytes -= PAGE_BYTES;
            next_page++;
        }

        /* Now only one page remains, but the object may have shrunk
         * so there may be more unused pages which will be freed. */

        /* The object may have shrunk but shouldn't have grown. */
        gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

        page_table[next_page].gen = new_space;
        gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);

        /* Adjust the bytes_used. */
        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].bytes_used = remaining_bytes;

        bytes_freed = old_bytes_used - remaining_bytes;

        /* Free any remaining pages; needs care. */
        next_page++;
        while ((old_bytes_used == PAGE_BYTES) &&
               (page_table[next_page].gen == from_space) &&
               (page_table[next_page].allocated == BOXED_PAGE_FLAG) &&
               page_table[next_page].large_object &&
               (page_table[next_page].first_object_offset ==
                -(next_page - first_page)*PAGE_BYTES)) {
            /* Checks out OK, free the page. Don't need to bother zeroing
             * pages as this should have been done before shrinking the
             * object. These pages shouldn't be write-protected as they
             * should be zero filled. */
            gc_assert(page_table[next_page].write_protected == 0);

            old_bytes_used = page_table[next_page].bytes_used;
            page_table[next_page].allocated = FREE_PAGE_FLAG;
            page_table[next_page].bytes_used = 0;
            bytes_freed += old_bytes_used;
            next_page++;
        }

        generations[from_space].bytes_allocated -= N_WORD_BYTES*nwords +
            bytes_freed;
        generations[new_space].bytes_allocated += N_WORD_BYTES*nwords;
        bytes_allocated -= bytes_freed;

        /* Add the region to the new_areas if requested. */
        add_new_area(first_page,0,nwords*N_WORD_BYTES);

        return(object);
    } else {
        /* Get tag of object. */
        tag = lowtag_of(object);

        /* Allocate space. */
        new = gc_quick_alloc_large(nwords*N_WORD_BYTES);

        memcpy(new,native_pointer(object),nwords*N_WORD_BYTES);

        /* Return Lisp pointer of new object. */
        return ((lispobj) new) | tag;
    }
}
/* to copy unboxed objects */
lispobj
copy_unboxed_object(lispobj object, long nwords)
{
    long tag;
    lispobj *new;

    gc_assert(is_lisp_pointer(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    /* Get tag of object. */
    tag = lowtag_of(object);

    /* Allocate space. */
    new = gc_quick_alloc_unboxed(nwords*N_WORD_BYTES);

    memcpy(new,native_pointer(object),nwords*N_WORD_BYTES);

    /* Return Lisp pointer of new object. */
    return ((lispobj) new) | tag;
}
/* to copy large unboxed objects
 *
 * If the object is in a large object region then it is simply
 * promoted, else it is copied. If it's large enough then it's copied
 * to a large object region.
 *
 * Bignums and vectors may have shrunk. If the object is not copied
 * the space needs to be reclaimed, and the page_tables corrected.
 *
 * KLUDGE: There's a lot of cut-and-paste duplication between this
 * function and copy_large_object(..). -- WHN 20000619 */
lispobj
copy_large_unboxed_object(lispobj object, long nwords)
{
    long tag;
    lispobj *new;
    long first_page;

    gc_assert(is_lisp_pointer(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    if ((nwords > 1024*1024) && gencgc_verbose)
        FSHOW((stderr, "/copy_large_unboxed_object: %d bytes\n", nwords*N_WORD_BYTES));

    /* Check whether it's a large object. */
    first_page = find_page_index((void *)object);
    gc_assert(first_page >= 0);

    if (page_table[first_page].large_object) {
        /* Promote the object. Note: Unboxed objects may have been
         * allocated to a BOXED region so it may be necessary to
         * change the region to UNBOXED. */
        long remaining_bytes;
        long next_page;
        long bytes_freed;
        long old_bytes_used;

        gc_assert(page_table[first_page].first_object_offset == 0);

        next_page = first_page;
        remaining_bytes = nwords*N_WORD_BYTES;
        while (remaining_bytes > PAGE_BYTES) {
            gc_assert(page_table[next_page].gen == from_space);
            gc_assert((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
                      || (page_table[next_page].allocated == BOXED_PAGE_FLAG));
            gc_assert(page_table[next_page].large_object);
            gc_assert(page_table[next_page].first_object_offset ==
                      -PAGE_BYTES*(next_page-first_page));
            gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);

            page_table[next_page].gen = new_space;
            page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
            remaining_bytes -= PAGE_BYTES;
            next_page++;
        }

        /* Now only one page remains, but the object may have shrunk so
         * there may be more unused pages which will be freed. */

        /* Object may have shrunk but shouldn't have grown - check. */
        gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

        page_table[next_page].gen = new_space;
        page_table[next_page].allocated = UNBOXED_PAGE_FLAG;

        /* Adjust the bytes_used. */
        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].bytes_used = remaining_bytes;

        bytes_freed = old_bytes_used - remaining_bytes;

        /* Free any remaining pages; needs care. */
        next_page++;
        while ((old_bytes_used == PAGE_BYTES) &&
               (page_table[next_page].gen == from_space) &&
               ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
                || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) &&
               page_table[next_page].large_object &&
               (page_table[next_page].first_object_offset ==
                -(next_page - first_page)*PAGE_BYTES)) {
            /* Checks out OK, free the page. Don't need to bother zeroing
             * pages as this should have been done before shrinking the
             * object. These pages shouldn't be write-protected, even if
             * boxed they should be zero filled. */
            gc_assert(page_table[next_page].write_protected == 0);

            old_bytes_used = page_table[next_page].bytes_used;
            page_table[next_page].allocated = FREE_PAGE_FLAG;
            page_table[next_page].bytes_used = 0;
            bytes_freed += old_bytes_used;
            next_page++;
        }

        if ((bytes_freed > 0) && gencgc_verbose)
            FSHOW((stderr,
                   "/copy_large_unboxed bytes_freed=%d\n",
                   bytes_freed));

        generations[from_space].bytes_allocated -= nwords*N_WORD_BYTES + bytes_freed;
        generations[new_space].bytes_allocated += nwords*N_WORD_BYTES;
        bytes_allocated -= bytes_freed;

        return(object);
    }
    else {
        /* Get tag of object. */
        tag = lowtag_of(object);

        /* Allocate space. */
        new = gc_quick_alloc_large_unboxed(nwords*N_WORD_BYTES);

        /* Copy the object. */
        memcpy(new,native_pointer(object),nwords*N_WORD_BYTES);

        /* Return Lisp pointer of new object. */
        return ((lispobj) new) | tag;
    }
}
/*
 * code and code-related objects
 */

static lispobj trans_fun_header(lispobj object);
static lispobj trans_boxed(lispobj object);

/* Scan a x86 compiled code object, looking for possible fixups that
 * have been missed after a move.
 *
 * Two types of fixups are needed:
 * 1. Absolute fixups to within the code object.
 * 2. Relative fixups to outside the code object.
 *
 * Currently only absolute fixups to the constant vector, or to the
 * code area are checked. */
void
sniff_code_object(struct code *code, unsigned displacement)
{
    long nheader_words, ncode_words, nwords;
    void *p;
    void *constants_start_addr, *constants_end_addr;
    void *code_start_addr, *code_end_addr;
    int fixup_found = 0;

    if (!check_code_fixups)
        return;

    ncode_words = fixnum_value(code->code_size);
    nheader_words = HeaderValue(*(lispobj *)code);
    nwords = ncode_words + nheader_words;

    constants_start_addr = (void *)code + 5*N_WORD_BYTES;
    constants_end_addr = (void *)code + nheader_words*N_WORD_BYTES;
    code_start_addr = (void *)code + nheader_words*N_WORD_BYTES;
    code_end_addr = (void *)code + nwords*N_WORD_BYTES;

    /* Work through the unboxed code. */
    for (p = code_start_addr; p < code_end_addr; p++) {
        void *data = *(void **)p;
        unsigned d1 = *((unsigned char *)p - 1);
        unsigned d2 = *((unsigned char *)p - 2);
        unsigned d3 = *((unsigned char *)p - 3);
        unsigned d4 = *((unsigned char *)p - 4);
#ifdef QSHOW
        unsigned d5 = *((unsigned char *)p - 5);
        unsigned d6 = *((unsigned char *)p - 6);
#endif

        /* Check for code references. */
        /* Check for a 32 bit word that looks like an absolute
           reference to within the code area of the code object. */
        if ((data >= (code_start_addr-displacement))
            && (data < (code_end_addr-displacement))) {
            /* function header */
            if ((d4 == 0x5e)
                && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) == (unsigned)code)) {
                /* Skip the function header */
                p += 6*4 - 4 - 1;
                continue;
            }
            /* the case of PUSH imm32 */
            if (d1 == 0x68) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/PUSH $0x%.8x\n", data));
            }
            /* the case of MOV [reg-8],imm32 */
            if ((d3 == 0xc7)
                && (d2==0x40 || d2==0x41 || d2==0x42 || d2==0x43
                    || d2==0x45 || d2==0x46 || d2==0x47)
                && (d1 == 0xf8)) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/MOV [reg-8],$0x%.8x\n", data));
            }
            /* the case of LEA reg,[disp32] */
            if ((d2 == 0x8d) && ((d1 & 0xc7) == 5)) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr,"/LEA reg,[$0x%.8x]\n", data));
            }
        }

        /* Check for constant references. */
        /* Check for a 32 bit word that looks like an absolute
           reference to within the constant vector. Constant references
           will be aligned. */
        if ((data >= (constants_start_addr-displacement))
            && (data < (constants_end_addr-displacement))
            && (((unsigned)data & 0x3) == 0)) {
            /* Mov eax,m32 */
            if (d1 == 0xa1) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr,"/MOV eax,0x%.8x\n", data));
            }

            /* the case of MOV m32,EAX */
            if (d1 == 0xa3) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/MOV 0x%.8x,eax\n", data));
            }

            /* the case of CMP m32,imm32 */
            if ((d1 == 0x3d) && (d2 == 0x81)) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                /* XX Check this */
                FSHOW((stderr, "/CMP 0x%.8x,immed32\n", data));
            }

            /* Check for a mod=00, r/m=101 byte. */
            if ((d1 & 0xc7) == 5) {
                /* Cmp m32,reg */
                if (d2 == 0x39) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr,"/CMP 0x%.8x,reg\n", data));
                }
                /* the case of CMP reg32,m32 */
                if (d2 == 0x3b) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/CMP reg32,0x%.8x\n", data));
                }
                /* the case of MOV m32,reg32 */
                if (d2 == 0x89) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/MOV 0x%.8x,reg32\n", data));
                }
                /* the case of MOV reg32,m32 */
                if (d2 == 0x8b) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/MOV reg32,0x%.8x\n", data));
                }
                /* the case of LEA reg32,m32 */
                if (d2 == 0x8d) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/LEA reg32,0x%.8x\n", data));
                }
            }
        }
    }

    /* If anything was found, print some information on the code
     * object. */
    if (fixup_found) {
        FSHOW((stderr,
               "/compiled code object at %x: header words = %d, code words = %d\n",
               code, nheader_words, ncode_words));
        FSHOW((stderr,
               "/const start = %x, end = %x\n",
               constants_start_addr, constants_end_addr));
        FSHOW((stderr,
               "/code start = %x, end = %x\n",
               code_start_addr, code_end_addr));
    }
}
void
gencgc_apply_code_fixups(struct code *old_code, struct code *new_code)
{
    long nheader_words, ncode_words, nwords;
    void *constants_start_addr, *constants_end_addr;
    void *code_start_addr, *code_end_addr;
    lispobj fixups = NIL;
    unsigned displacement = (unsigned)new_code - (unsigned)old_code;
    struct vector *fixups_vector;

    ncode_words = fixnum_value(new_code->code_size);
    nheader_words = HeaderValue(*(lispobj *)new_code);
    nwords = ncode_words + nheader_words;
    /* FSHOW((stderr,
             "/compiled code object at %x: header words = %d, code words = %d\n",
             new_code, nheader_words, ncode_words)); */
    constants_start_addr = (void *)new_code + 5*N_WORD_BYTES;
    constants_end_addr = (void *)new_code + nheader_words*N_WORD_BYTES;
    code_start_addr = (void *)new_code + nheader_words*N_WORD_BYTES;
    code_end_addr = (void *)new_code + nwords*N_WORD_BYTES;
    /*
    FSHOW((stderr,
           "/const start = %x, end = %x\n",
           constants_start_addr,constants_end_addr));
    FSHOW((stderr,
           "/code start = %x; end = %x\n",
           code_start_addr,code_end_addr));
    */

    /* The first constant should be a pointer to the fixups for this
       code objects. Check. */
    fixups = new_code->constants[0];

    /* It will be 0 or the unbound-marker if there are no fixups (as
     * will be the case if the code object has been purified, for
     * example) and will be an other pointer if it is valid. */
    if ((fixups == 0) || (fixups == UNBOUND_MARKER_WIDETAG) ||
        !is_lisp_pointer(fixups)) {
        /* Check for possible errors. */
        if (check_code_fixups)
            sniff_code_object(new_code, displacement);

        return;
    }

    fixups_vector = (struct vector *)native_pointer(fixups);

    /* Could be pointing to a forwarding pointer. */
    /* FIXME is this always in from_space? if so, could replace this code with
     * forwarding_pointer_p/forwarding_pointer_value */
    if (is_lisp_pointer(fixups) &&
        (find_page_index((void*)fixups_vector) != -1) &&
        (fixups_vector->header == 0x01)) {
        /* If so, then follow it. */
        /*SHOW("following pointer to a forwarding pointer");*/
        fixups_vector = (struct vector *)native_pointer((lispobj)fixups_vector->length);
    }

    /*SHOW("got fixups");*/

    if (widetag_of(fixups_vector->header) == SIMPLE_ARRAY_WORD_WIDETAG) {
        /* Got the fixups for the code block. Now work through the vector,
           and apply a fixup at each address. */
        long length = fixnum_value(fixups_vector->length);
        long i;
        for (i = 0; i < length; i++) {
            unsigned offset = fixups_vector->data[i];
            /* Now check the current value of offset. */
            unsigned old_value =
                *(unsigned *)((unsigned)code_start_addr + offset);

            /* If it's within the old_code object then it must be an
             * absolute fixup (relative ones are not saved) */
            if ((old_value >= (unsigned)old_code)
                && (old_value < ((unsigned)old_code + nwords*N_WORD_BYTES)))
                /* So add the displacement. */
                *(unsigned *)((unsigned)code_start_addr + offset) =
                    old_value + displacement;
            else
                /* It is outside the old code object so it must be a
                 * relative fixup (absolute fixups are not saved). So
                 * subtract the displacement. */
                *(unsigned *)((unsigned)code_start_addr + offset) =
                    old_value - displacement;
        }
    } else {
        fprintf(stderr, "widetag of fixup vector is %d\n", widetag_of(fixups_vector->header));
    }

    /* Check for possible errors. */
    if (check_code_fixups) {
        sniff_code_object(new_code,displacement);
    }
}
static lispobj
trans_boxed_large(lispobj object)
{
    lispobj header;
    unsigned long length;

    gc_assert(is_lisp_pointer(object));

    header = *((lispobj *) native_pointer(object));
    length = HeaderValue(header) + 1;
    length = CEILING(length, 2);

    return copy_large_object(object, length);
}

static lispobj
trans_unboxed_large(lispobj object)
{
    lispobj header;
    unsigned long length;

    gc_assert(is_lisp_pointer(object));

    header = *((lispobj *) native_pointer(object));
    length = HeaderValue(header) + 1;
    length = CEILING(length, 2);

    return copy_large_unboxed_object(object, length);
}
1706 * vector-like objects
1710 /* FIXME: What does this mean? */
1711 int gencgc_hash
= 1;
1714 scav_vector(lispobj
*where
, lispobj object
)
1716 unsigned long kv_length
;
1718 unsigned long length
= 0; /* (0 = dummy to stop GCC warning) */
1719 lispobj
*hash_table
;
1720 lispobj empty_symbol
;
1721 unsigned long *index_vector
= NULL
; /* (NULL = dummy to stop GCC warning) */
1722 unsigned long *next_vector
= NULL
; /* (NULL = dummy to stop GCC warning) */
1723 unsigned long *hash_vector
= NULL
; /* (NULL = dummy to stop GCC warning) */
1725 unsigned next_vector_length
= 0;
1727 /* FIXME: A comment explaining this would be nice. It looks as
1728 * though SB-VM:VECTOR-VALID-HASHING-SUBTYPE is set for EQ-based
1729 * hash tables in the Lisp HASH-TABLE code, and nowhere else. */
1730 if (HeaderValue(object
) != subtype_VectorValidHashing
)
1734 /* This is set for backward compatibility. FIXME: Do we need
1737 (subtype_VectorMustRehash
<<N_WIDETAG_BITS
) | SIMPLE_VECTOR_WIDETAG
;
1741 kv_length
= fixnum_value(where
[1]);
1742 kv_vector
= where
+ 2; /* Skip the header and length. */
1743 /*FSHOW((stderr,"/kv_length = %d\n", kv_length));*/
1745 /* Scavenge element 0, which may be a hash-table structure. */
1746 scavenge(where
+2, 1);
1747 if (!is_lisp_pointer(where
[2])) {
1748 lose("no pointer at %x in hash table", where
[2]);
1750 hash_table
= (lispobj
*)native_pointer(where
[2]);
1751 /*FSHOW((stderr,"/hash_table = %x\n", hash_table));*/
1752 if (widetag_of(hash_table
[0]) != INSTANCE_HEADER_WIDETAG
) {
1753 lose("hash table not instance (%x at %x)", hash_table
[0], hash_table
);
1756 /* Scavenge element 1, which should be some internal symbol that
1757 * the hash table code reserves for marking empty slots. */
1758 scavenge(where
+3, 1);
1759 if (!is_lisp_pointer(where
[3])) {
1760 lose("not empty-hash-table-slot symbol pointer: %x", where
[3]);
1762 empty_symbol
= where
[3];
1763 /* fprintf(stderr,"* empty_symbol = %x\n", empty_symbol);*/
1764 if (widetag_of(*(lispobj
*)native_pointer(empty_symbol
)) !=
1765 SYMBOL_HEADER_WIDETAG
) {
1766 lose("not a symbol where empty-hash-table-slot symbol expected: %x",
1767 *(lispobj
*)native_pointer(empty_symbol
));
1770 /* Scavenge hash table, which will fix the positions of the other
1771 * needed objects. */
1772 scavenge(hash_table
, 16);
1774 /* Cross-check the kv_vector. */
1775 if (where
!= (lispobj
*)native_pointer(hash_table
[9])) {
1776 lose("hash_table table!=this table %x", hash_table
[9]);
1780 weak_p_obj
= hash_table
[10];
1784 lispobj index_vector_obj
= hash_table
[13];
1786 if (is_lisp_pointer(index_vector_obj
) &&
        (widetag_of(*(lispobj *)native_pointer(index_vector_obj)) ==
         SIMPLE_ARRAY_WORD_WIDETAG)) {
        index_vector = ((lispobj *)native_pointer(index_vector_obj)) + 2;
        /*FSHOW((stderr, "/index_vector = %x\n",index_vector));*/
        length = fixnum_value(((lispobj *)native_pointer(index_vector_obj))[1]);
        /*FSHOW((stderr, "/length = %d\n", length));*/
    } else {
        lose("invalid index_vector %x", index_vector_obj);
    }

    /* next vector */
    {
        lispobj next_vector_obj = hash_table[14];

        if (is_lisp_pointer(next_vector_obj) &&
            (widetag_of(*(lispobj *)native_pointer(next_vector_obj)) ==
             SIMPLE_ARRAY_WORD_WIDETAG)) {
            next_vector = ((lispobj *)native_pointer(next_vector_obj)) + 2;
            /*FSHOW((stderr, "/next_vector = %x\n", next_vector));*/
            next_vector_length =
                fixnum_value(((lispobj *)native_pointer(next_vector_obj))[1]);
            /*FSHOW((stderr, "/next_vector_length = %d\n", next_vector_length));*/
        } else {
            lose("invalid next_vector %x", next_vector_obj);
        }
    }

    /* maybe hash vector */
    {
        /* FIXME: This bare "15" offset should become a symbolic
         * expression of some sort. And all the other bare offsets
         * too. And the bare "16" in scavenge(hash_table, 16). And
         * probably other stuff too. Ugh.. */
        lispobj hash_vector_obj = hash_table[15];

        if (is_lisp_pointer(hash_vector_obj) &&
            (widetag_of(*(lispobj *)native_pointer(hash_vector_obj)) ==
             SIMPLE_ARRAY_WORD_WIDETAG)) {
            hash_vector = ((lispobj *)native_pointer(hash_vector_obj)) + 2;
            /*FSHOW((stderr, "/hash_vector = %x\n", hash_vector));*/
            gc_assert(fixnum_value(((lispobj *)native_pointer(hash_vector_obj))[1])
                      == next_vector_length);
        } else {
            hash_vector = NULL;
            /*FSHOW((stderr, "/no hash_vector: %x\n", hash_vector_obj));*/
        }
    }

    /* These lengths could be different as the index_vector can be a
     * different length from the others, a larger index_vector could help
     * reduce collisions. */
    gc_assert(next_vector_length*2 == kv_length);

    /* now all set up.. */

    /* Work through the KV vector. */
    {
        long i;
        for (i = 1; i < next_vector_length; i++) {
            lispobj old_key = kv_vector[2*i];

#if N_WORD_BITS == 32
            unsigned long old_index = (old_key & 0x1fffffff)%length;
#elif N_WORD_BITS == 64
            unsigned long old_index = (old_key & 0x1fffffffffffffff)%length;
#endif

            /* Scavenge the key and value. */
            scavenge(&kv_vector[2*i],2);

            /* Check whether the key has moved and is EQ based. */
            {
                lispobj new_key = kv_vector[2*i];
#if N_WORD_BITS == 32
                unsigned long new_index = (new_key & 0x1fffffff)%length;
#elif N_WORD_BITS == 64
                unsigned long new_index = (new_key & 0x1fffffffffffffff)%length;
#endif

                if ((old_index != new_index) &&
                    ((!hash_vector) || (hash_vector[i] == 0x80000000)) &&
                    ((new_key != empty_symbol) ||
                     (kv_vector[2*i] != empty_symbol))) {

                    /*FSHOW((stderr,
                           "* EQ key %d moved from %x to %x; index %d to %d\n",
                           i, old_key, new_key, old_index, new_index));*/

                    if (index_vector[old_index] != 0) {
                        /*FSHOW((stderr, "/P1 %d\n", index_vector[old_index]));*/

                        /* Unlink the key from the old_index chain. */
                        if (index_vector[old_index] == i) {
                            /*FSHOW((stderr, "/P2a %d\n", next_vector[i]));*/
                            index_vector[old_index] = next_vector[i];
                            /* Link it into the needing rehash chain. */
                            next_vector[i] = fixnum_value(hash_table[11]);
                            hash_table[11] = make_fixnum(i);
                        } else {
                            unsigned prior = index_vector[old_index];
                            unsigned next = next_vector[prior];

                            /*FSHOW((stderr, "/P3a %d %d\n", prior, next));*/
                            while (next != 0) {
                                /*FSHOW((stderr, "/P3b %d %d\n", prior, next));*/
                                if (next == i) {
                                    /* Unlink it. */
                                    next_vector[prior] = next_vector[next];
                                    /* Link it into the needing rehash
                                     * chain. */
                                    next_vector[next] =
                                        fixnum_value(hash_table[11]);
                                    hash_table[11] = make_fixnum(next);
                                    break;
                                }
                                prior = next;
                                next = next_vector[next];
                            }
                        }
                    }
                }
            }
        }
    }
    return (CEILING(kv_length + 2, 2));
}
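
/* The rehash bookkeeping above is easier to see in isolation. The
 * sketch below is illustrative only and compiled out: the names
 * `index`, `next` and `rehash_head` are hypothetical stand-ins for
 * the index_vector, next_vector and hash_table[11] slots used above.
 * It shows the same unlink-and-relink operation on a chained hash
 * table whose buckets live in an index vector and whose collision
 * chains live in a parallel next vector. */
#if 0
static void
example_move_to_rehash_chain(unsigned long *index, unsigned long *next,
                             unsigned long *rehash_head,
                             unsigned long bucket, unsigned long i)
{
    if (index[bucket] == i) {
        /* head of chain: the bucket points directly at entry i */
        index[bucket] = next[i];
    } else {
        /* interior: walk the chain to find i's predecessor */
        unsigned long prior = index[bucket];
        while (next[prior] != i)
            prior = next[prior];
        next[prior] = next[i];
    }
    /* push entry i onto the needing-rehash chain */
    next[i] = *rehash_head;
    *rehash_head = i;
}
#endif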

/* XX This is a hack adapted from cgc.c. These don't work too
 * efficiently with the gencgc as a list of the weak pointers is
 * maintained within the objects which causes writes to the pages. A
 * limited attempt is made to avoid unnecessary writes, but this needs
 * a re-think. */
#define WEAK_POINTER_NWORDS \
    CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)

static long
scav_weak_pointer(lispobj *where, lispobj object)
{
    struct weak_pointer *wp = weak_pointers;
    /* Push the weak pointer onto the list of weak pointers.
     * Do I have to watch for duplicates? Originally this was
     * part of trans_weak_pointer but that didn't work in the
     * case where the WP was in a promoted region.
     */

    /* Check whether it's already in the list. */
    while (wp != NULL) {
        if (wp == (struct weak_pointer*)where) {
            break;
        }
        wp = wp->next;
    }
    if (wp == NULL) {
        /* Add it to the start of the list. */
        wp = (struct weak_pointer*)where;
        if (wp->next != weak_pointers) {
            wp->next = weak_pointers;
        } else {
            /*SHOW("avoided write to weak pointer");*/
        }
        weak_pointers = wp;
    }

    /* Do not let GC scavenge the value slot of the weak pointer.
     * (That is why it is a weak pointer.) */

    return WEAK_POINTER_NWORDS;
}
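
/* The value slot deliberately left unscavenged above is dealt with
 * later, by scan_weak_pointers() (defined elsewhere in the GC), once
 * all strong references have been traced. A minimal sketch of that
 * later pass, compiled out and simplified: it ignores the
 * only-consider-oldspace refinement the real pass must make, and uses
 * the same 0x01 forwarding-pointer convention that verify_space()
 * below checks for. */
#if 0
static void
example_scan_weak_pointers(void)
{
    struct weak_pointer *wp;
    for (wp = weak_pointers; wp != NULL; wp = wp->next) {
        lispobj value = wp->value;
        if (is_lisp_pointer(value)
            && (*(lispobj *)native_pointer(value) != 0x01)) {
            /* The referent was never forwarded, i.e. it did not
             * survive the collection: break the weak pointer. */
            wp->value = NIL;
        }
    }
}
#endif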
lispobj *
search_read_only_space(void *pointer)
{
    lispobj *start = (lispobj *) READ_ONLY_SPACE_START;
    lispobj *end = (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0);
    if ((pointer < (void *)start) || (pointer >= (void *)end))
        return NULL;
    return (search_space(start,
                         (((lispobj *)pointer)+2)-start,
                         (lispobj *) pointer));
}

lispobj *
search_static_space(void *pointer)
{
    lispobj *start = (lispobj *)STATIC_SPACE_START;
    lispobj *end = (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0);
    if ((pointer < (void *)start) || (pointer >= (void *)end))
        return NULL;
    return (search_space(start,
                         (((lispobj *)pointer)+2)-start,
                         (lispobj *) pointer));
}

/* a faster version for searching the dynamic space. This will work even
 * if the object is in a current allocation region. */
lispobj *
search_dynamic_space(void *pointer)
{
    long page_index = find_page_index(pointer);
    lispobj *start;

    /* The address may be invalid, so do some checks. */
    if ((page_index == -1) ||
        (page_table[page_index].allocated == FREE_PAGE_FLAG))
        return NULL;
    start = (lispobj *)((void *)page_address(page_index)
                        + page_table[page_index].first_object_offset);
    return (search_space(start,
                         (((lispobj *)pointer)+2)-start,
                         (lispobj *)pointer));
}
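
/* Usage sketch (illustrative only, compiled out): given an arbitrary
 * address, the three search functions above let a caller recover the
 * start of the enclosing object by trying each space in turn. The
 * helper name is hypothetical, not part of this file. */
#if 0
static lispobj *
example_find_object_start(void *pointer)
{
    lispobj *start;
    if ((start = search_dynamic_space(pointer)) != NULL)
        return start;
    if ((start = search_read_only_space(pointer)) != NULL)
        return start;
    return search_static_space(pointer);
}
#endif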

/* Is there any possibility that pointer is a valid Lisp object
 * reference, and/or something else (e.g. subroutine call return
 * address) which should prevent us from moving the referred-to thing?
 * This is called from preserve_pointer() */
static int
possibly_valid_dynamic_space_pointer(lispobj *pointer)
{
    lispobj *start_addr;

    /* Find the object start address. */
    if ((start_addr = search_dynamic_space(pointer)) == NULL) {
        return 0;
    }

    /* We need to allow raw pointers into Code objects for return
     * addresses. This will also pick up pointers to functions in code
     * objects. */
    if (widetag_of(*start_addr) == CODE_HEADER_WIDETAG) {
        /* XXX could do some further checks here */
        return 1;
    }

    /* If it's not a return address then it needs to be a valid Lisp
     * pointer. */
    if (!is_lisp_pointer((lispobj)pointer)) {
        return 0;
    }

    /* Check that the object pointed to is consistent with the pointer
     * low tag. */
    switch (lowtag_of((lispobj)pointer)) {
    case FUN_POINTER_LOWTAG:
        /* Start_addr should be the enclosing code object, or a closure
         * header. */
        switch (widetag_of(*start_addr)) {
        case CODE_HEADER_WIDETAG:
            /* This case is probably caught above. */
            break;
        case CLOSURE_HEADER_WIDETAG:
        case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
            if ((unsigned)pointer !=
                ((unsigned)start_addr+FUN_POINTER_LOWTAG)) {
                if (gencgc_verbose)
                    FSHOW((stderr,
                           "/Wf2: %x %x %x\n",
                           pointer, start_addr, *start_addr));
                return 0;
            }
            break;
        default:
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wf3: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
        break;
    case LIST_POINTER_LOWTAG:
        if ((unsigned)pointer !=
            ((unsigned)start_addr+LIST_POINTER_LOWTAG)) {
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wl1: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
        /* Is it a plausible cons? */
        if ((is_lisp_pointer(start_addr[0])
             || (fixnump(start_addr[0]))
             || (widetag_of(start_addr[0]) == CHARACTER_WIDETAG)
#if N_WORD_BITS == 64
             || (widetag_of(start_addr[0]) == SINGLE_FLOAT_WIDETAG)
#endif
             || (widetag_of(start_addr[0]) == UNBOUND_MARKER_WIDETAG))
            && (is_lisp_pointer(start_addr[1])
                || (fixnump(start_addr[1]))
                || (widetag_of(start_addr[1]) == CHARACTER_WIDETAG)
#if N_WORD_BITS == 64
                || (widetag_of(start_addr[1]) == SINGLE_FLOAT_WIDETAG)
#endif
                || (widetag_of(start_addr[1]) == UNBOUND_MARKER_WIDETAG)))
            break;
        else {
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wl2: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
    case INSTANCE_POINTER_LOWTAG:
        if ((unsigned)pointer !=
            ((unsigned)start_addr+INSTANCE_POINTER_LOWTAG)) {
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wi1: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
        if (widetag_of(start_addr[0]) != INSTANCE_HEADER_WIDETAG) {
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wi2: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
        break;
    case OTHER_POINTER_LOWTAG:
        if ((unsigned)pointer !=
            ((int)start_addr+OTHER_POINTER_LOWTAG)) {
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wo1: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
        /* Is it plausible? Not a cons. XXX should check the headers. */
        if (is_lisp_pointer(start_addr[0]) || ((start_addr[0] & 3) == 0)) {
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wo2: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
        switch (widetag_of(start_addr[0])) {
        case UNBOUND_MARKER_WIDETAG:
        case CHARACTER_WIDETAG:
#if N_WORD_BITS == 64
        case SINGLE_FLOAT_WIDETAG:
#endif
            if (gencgc_verbose)
                FSHOW((stderr,
                       "*Wo3: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;

            /* only pointed to by function pointers? */
        case CLOSURE_HEADER_WIDETAG:
        case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
            if (gencgc_verbose)
                FSHOW((stderr,
                       "*Wo4: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;

        case INSTANCE_HEADER_WIDETAG:
            if (gencgc_verbose)
                FSHOW((stderr,
                       "*Wo5: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;

            /* the valid other immediate pointer objects */
        case SIMPLE_VECTOR_WIDETAG:
        case RATIO_WIDETAG:
        case COMPLEX_WIDETAG:
#ifdef COMPLEX_SINGLE_FLOAT_WIDETAG
        case COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_DOUBLE_FLOAT_WIDETAG
        case COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_LONG_FLOAT_WIDETAG
        case COMPLEX_LONG_FLOAT_WIDETAG:
#endif
        case SIMPLE_ARRAY_WIDETAG:
        case COMPLEX_BASE_STRING_WIDETAG:
#ifdef COMPLEX_CHARACTER_STRING_WIDETAG
        case COMPLEX_CHARACTER_STRING_WIDETAG:
#endif
        case COMPLEX_VECTOR_NIL_WIDETAG:
        case COMPLEX_BIT_VECTOR_WIDETAG:
        case COMPLEX_VECTOR_WIDETAG:
        case COMPLEX_ARRAY_WIDETAG:
        case VALUE_CELL_HEADER_WIDETAG:
        case SYMBOL_HEADER_WIDETAG:
        case FDEFN_WIDETAG:
        case CODE_HEADER_WIDETAG:
        case BIGNUM_WIDETAG:
#if N_WORD_BITS != 64
        case SINGLE_FLOAT_WIDETAG:
#endif
        case DOUBLE_FLOAT_WIDETAG:
#ifdef LONG_FLOAT_WIDETAG
        case LONG_FLOAT_WIDETAG:
#endif
        case SIMPLE_BASE_STRING_WIDETAG:
#ifdef SIMPLE_CHARACTER_STRING_WIDETAG
        case SIMPLE_CHARACTER_STRING_WIDETAG:
#endif
        case SIMPLE_BIT_VECTOR_WIDETAG:
        case SIMPLE_ARRAY_NIL_WIDETAG:
        case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
        case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
        case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG:
        case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
        case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
        case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG
        case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
#endif
        case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
        case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG
        case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
        case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
        case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
        case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
        case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG
        case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
        case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG
        case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
        case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
#endif
        case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
        case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
#ifdef SIMPLE_ARRAY_LONG_FLOAT_WIDETAG
        case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
        case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
        case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
        case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
#endif
        case SAP_WIDETAG:
        case WEAK_POINTER_WIDETAG:
            break;

        default:
            if (gencgc_verbose)
                FSHOW((stderr,
                       "/Wo6: %x %x %x\n",
                       pointer, start_addr, *start_addr));
            return 0;
        }
        break;
    default:
        if (gencgc_verbose)
            FSHOW((stderr,
                   "*W?: %x %x %x\n",
                   pointer, start_addr, *start_addr));
        return 0;
    }

    /* looks good */
    return 1;
}
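
/* The lowtag consistency test applied in each case above reduces to
 * simple pointer arithmetic: a tagged pointer is plausible only if it
 * equals the object's start address plus the lowtag implied by its
 * own tag bits. Illustrative check, compiled out (the helper name is
 * hypothetical): */
#if 0
static int
example_lowtag_consistent(lispobj tagged, lispobj *start_addr)
{
    return (unsigned)tagged == ((unsigned)start_addr + lowtag_of(tagged));
}
#endif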

/* Adjust large bignum and vector objects. This will adjust the
 * allocated region if the size has shrunk, and move unboxed objects
 * into unboxed pages. The pages are not promoted here, and the
 * promoted region is not added to the new_regions; this is really
 * only designed to be called from preserve_pointer(). Shouldn't fail
 * if this is missed, just may delay the moving of objects to unboxed
 * pages, and the freeing of pages. */
static void
maybe_adjust_large_object(lispobj *where)
{
    long first_page;
    long nwords;

    long remaining_bytes;
    long next_page;
    long bytes_freed;
    long old_bytes_used;

    int boxed;

    /* Check whether it's a vector or bignum object. */
    switch (widetag_of(where[0])) {
    case SIMPLE_VECTOR_WIDETAG:
        boxed = BOXED_PAGE_FLAG;
        break;
    case BIGNUM_WIDETAG:
    case SIMPLE_BASE_STRING_WIDETAG:
#ifdef SIMPLE_CHARACTER_STRING_WIDETAG
    case SIMPLE_CHARACTER_STRING_WIDETAG:
#endif
    case SIMPLE_BIT_VECTOR_WIDETAG:
    case SIMPLE_ARRAY_NIL_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG
    case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
#endif
    case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
    case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG
    case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
    case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
    case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
    case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
#endif
    case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
    case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
#ifdef SIMPLE_ARRAY_LONG_FLOAT_WIDETAG
    case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
    case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
    case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
    case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
#endif
        boxed = UNBOXED_PAGE_FLAG;
        break;
    default:
        return;
    }

    /* Find its current size. */
    nwords = (sizetab[widetag_of(where[0])])(where);

    first_page = find_page_index((void *)where);
    gc_assert(first_page >= 0);

    /* Note: Any page write-protection must be removed, else a later
     * scavenge_newspace may incorrectly not scavenge these pages.
     * This would not be necessary if they are added to the new areas,
     * but let's do it for them all (they'll probably be written
     * anyway?). */

    gc_assert(page_table[first_page].first_object_offset == 0);

    next_page = first_page;
    remaining_bytes = nwords*N_WORD_BYTES;
    while (remaining_bytes > PAGE_BYTES) {
        gc_assert(page_table[next_page].gen == from_space);
        gc_assert((page_table[next_page].allocated == BOXED_PAGE_FLAG)
                  || (page_table[next_page].allocated == UNBOXED_PAGE_FLAG));
        gc_assert(page_table[next_page].large_object);
        gc_assert(page_table[next_page].first_object_offset ==
                  -PAGE_BYTES*(next_page-first_page));
        gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);

        page_table[next_page].allocated = boxed;

        /* Shouldn't be write-protected at this stage. Essential that the
         * pages aren't. */
        gc_assert(!page_table[next_page].write_protected);
        remaining_bytes -= PAGE_BYTES;
        next_page++;
    }

    /* Now only one page remains, but the object may have shrunk so
     * there may be more unused pages which will be freed. */

    /* Object may have shrunk but shouldn't have grown - check. */
    gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

    page_table[next_page].allocated = boxed;
    gc_assert(page_table[next_page].allocated ==
              page_table[first_page].allocated);

    /* Adjust the bytes_used. */
    old_bytes_used = page_table[next_page].bytes_used;
    page_table[next_page].bytes_used = remaining_bytes;

    bytes_freed = old_bytes_used - remaining_bytes;

    /* Free any remaining pages; needs care. */
    next_page++;
    while ((old_bytes_used == PAGE_BYTES) &&
           (page_table[next_page].gen == from_space) &&
           ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
            || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) &&
           page_table[next_page].large_object &&
           (page_table[next_page].first_object_offset ==
            -(next_page - first_page)*PAGE_BYTES)) {
        /* It checks out OK, free the page. We don't need to bother
         * zeroing pages as this should have been done before shrinking
         * the object. These pages shouldn't be write protected as they
         * should be zero filled. */
        gc_assert(page_table[next_page].write_protected == 0);

        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].allocated = FREE_PAGE_FLAG;
        page_table[next_page].bytes_used = 0;
        bytes_freed += old_bytes_used;
        next_page++;
    }

    if ((bytes_freed > 0) && gencgc_verbose) {
        FSHOW((stderr,
               "/maybe_adjust_large_object() freed %d\n",
               bytes_freed));
    }

    generations[from_space].bytes_allocated -= bytes_freed;
    bytes_allocated -= bytes_freed;
}

/* Take a possible pointer to a Lisp object and mark its page in the
 * page_table so that it will not be relocated during a GC.
 *
 * This involves locating the page it points to, then backing up to
 * the start of its region, then marking all pages dont_move from there
 * up to the first page that's not full or has a different generation.
 *
 * It is assumed that all the page static flags have been cleared at
 * the start of a GC.
 *
 * It is also assumed that the current gc_alloc() region has been
 * flushed and the tables updated. */
static void
preserve_pointer(void *addr)
{
    long addr_page_index = find_page_index(addr);
    long first_page;
    long i;
    unsigned region_allocation;

    /* quick check 1: Address is quite likely to have been invalid. */
    if ((addr_page_index == -1)
        || (page_table[addr_page_index].allocated == FREE_PAGE_FLAG)
        || (page_table[addr_page_index].bytes_used == 0)
        || (page_table[addr_page_index].gen != from_space)
        /* Skip if already marked dont_move. */
        || (page_table[addr_page_index].dont_move != 0))
        return;
    gc_assert(!(page_table[addr_page_index].allocated&OPEN_REGION_PAGE_FLAG));
    /* (Now that we know that addr_page_index is in range, it's
     * safe to index into page_table[] with it.) */
    region_allocation = page_table[addr_page_index].allocated;

    /* quick check 2: Check the offset within the page. */
    if (((unsigned)addr & (PAGE_BYTES - 1)) > page_table[addr_page_index].bytes_used)
        return;

    /* Filter out anything which can't be a pointer to a Lisp object
     * (or, as a special case which also requires dont_move, a return
     * address referring to something in a CodeObject). This is
     * expensive but important, since it vastly reduces the
     * probability that random garbage will be bogusly interpreted as
     * a pointer which prevents a page from moving. */
    if (!(possibly_valid_dynamic_space_pointer(addr)))
        return;

    /* Find the beginning of the region. Note that there may be
     * objects in the region preceding the one that we were passed a
     * pointer to: if this is the case, we will write-protect all the
     * previous objects' pages too. */

#if 0
    /* I think this'd work just as well, but without the assertions.
     * -dan 2004.01.01 */
    first_page=
        find_page_index(page_address(addr_page_index)+
                        page_table[addr_page_index].first_object_offset);
#else
    first_page = addr_page_index;
    while (page_table[first_page].first_object_offset != 0) {
        --first_page;
        /* Do some checks. */
        gc_assert(page_table[first_page].bytes_used == PAGE_BYTES);
        gc_assert(page_table[first_page].gen == from_space);
        gc_assert(page_table[first_page].allocated == region_allocation);
    }
#endif

    /* Adjust any large objects before promotion as they won't be
     * copied after promotion. */
    if (page_table[first_page].large_object) {
        maybe_adjust_large_object(page_address(first_page));
        /* If a large object has shrunk then addr may now point to a
         * free area in which case it's ignored here. Note it gets
         * through the valid pointer test above because the tail looks
         * like conses. */
        if ((page_table[addr_page_index].allocated == FREE_PAGE_FLAG)
            || (page_table[addr_page_index].bytes_used == 0)
            /* Check the offset within the page. */
            || (((unsigned)addr & (PAGE_BYTES - 1))
                > page_table[addr_page_index].bytes_used)) {
            FSHOW((stderr,
                   "weird? ignore ptr 0x%x to freed area of large object\n",
                   addr));
            return;
        }
        /* It may have moved to unboxed pages. */
        region_allocation = page_table[first_page].allocated;
    }

    /* Now work forward until the end of this contiguous area is found,
     * marking all pages as dont_move. */
    for (i = first_page; ;i++) {
        gc_assert(page_table[i].allocated == region_allocation);

        /* Mark the page static. */
        page_table[i].dont_move = 1;

        /* Move the page to the new_space. XX I'd rather not do this
         * but the GC logic is not quite able to cope with the static
         * pages remaining in the from space. This also requires the
         * generation bytes_allocated counters be updated. */
        page_table[i].gen = new_space;
        generations[new_space].bytes_allocated += page_table[i].bytes_used;
        generations[from_space].bytes_allocated -= page_table[i].bytes_used;

        /* It is essential that the pages are not write protected as
         * they may have pointers into the old-space which need
         * scavenging. They shouldn't be write protected at this
         * stage. */
        gc_assert(!page_table[i].write_protected);

        /* Check whether this is the last page in this contiguous block.. */
        if ((page_table[i].bytes_used < PAGE_BYTES)
            /* ..or it is PAGE_BYTES and is the last in the block */
            || (page_table[i+1].allocated == FREE_PAGE_FLAG)
            || (page_table[i+1].bytes_used == 0) /* next page free */
            || (page_table[i+1].gen != from_space) /* diff. gen */
            || (page_table[i+1].first_object_offset == 0))
            break;
    }

    /* Check that the page is now static. */
    gc_assert(page_table[addr_page_index].dont_move != 0);
}
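
/* Conservative root processing sketch (illustrative only, compiled
 * out; the helper name is hypothetical): every word in a candidate
 * range is treated as a potential pointer and fed through
 * preserve_pointer(), exactly as the control-stack walk in
 * garbage_collect_generation() below does. */
#if 0
static void
example_preserve_range(void **low, void **high)
{
    void **p;
    for (p = high; p > low; p--)
        preserve_pointer(*p);   /* harmless when *p is not a pointer */
}
#endif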

/* If the given page is not write-protected, then scan it for pointers
 * to younger generations or the top temp. generation, if no
 * suspicious pointers are found then the page is write-protected.
 *
 * Care is taken to check for pointers to the current gc_alloc()
 * region if it is a younger generation or the temp. generation. This
 * frees the caller from doing a gc_alloc_update_page_tables(). Actually
 * the gc_alloc_generation does not need to be checked as this is only
 * called from scavenge_generation() when the gc_alloc generation is
 * younger, so it just checks if there is a pointer to the current
 * region.
 *
 * We return 1 if the page was write-protected, else 0. */
static int
update_page_write_prot(long page)
{
    int gen = page_table[page].gen;
    long j;
    int wp_it = 1;
    void **page_addr = (void **)page_address(page);
    long num_words = page_table[page].bytes_used / N_WORD_BYTES;

    /* Shouldn't be a free page. */
    gc_assert(page_table[page].allocated != FREE_PAGE_FLAG);
    gc_assert(page_table[page].bytes_used != 0);

    /* Skip if it's already write-protected, pinned, or unboxed */
    if (page_table[page].write_protected
        || page_table[page].dont_move
        || (page_table[page].allocated & UNBOXED_PAGE_FLAG))
        return (0);

    /* Scan the page for pointers to younger generations or the
     * top temp. generation. */

    for (j = 0; j < num_words; j++) {
        void *ptr = *(page_addr+j);
        long index = find_page_index(ptr);

        /* Check that it's in the dynamic space */
        if (index != -1)
            if (/* Does it point to a younger or the temp. generation? */
                ((page_table[index].allocated != FREE_PAGE_FLAG)
                 && (page_table[index].bytes_used != 0)
                 && ((page_table[index].gen < gen)
                     || (page_table[index].gen == NUM_GENERATIONS)))

                /* Or does it point within a current gc_alloc() region? */
                || ((boxed_region.start_addr <= ptr)
                    && (ptr <= boxed_region.free_pointer))
                || ((unboxed_region.start_addr <= ptr)
                    && (ptr <= unboxed_region.free_pointer))) {
                wp_it = 0;
                break;
            }
    }

    if (wp_it == 1) {
        /* Write-protect the page. */
        /*FSHOW((stderr, "/write-protecting page %d gen %d\n", page, gen));*/

        os_protect((void *)page_addr,
                   PAGE_BYTES,
                   OS_VM_PROT_READ|OS_VM_PROT_EXECUTE);

        /* Note the page as protected in the page tables. */
        page_table[page].write_protected = 1;
    }

    return (wp_it);
}
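
/* This is the write barrier: a page protected above faults on its
 * first subsequent write, and the runtime's fault handler (not part
 * of this excerpt) is expected to clear the protection and set
 * write_protected_cleared, which the SC_GEN_CK consistency checks
 * below rely on. */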

/* Scavenge a generation.
 *
 * This will not resolve all pointers when generation is the new
 * space, as new objects may be added which are not checked here - use
 * scavenge_newspace generation.
 *
 * Write-protected pages should not have any pointers to the
 * from_space so do not need scavenging; thus write-protected pages are
 * not always scavenged. There is some code to check that these pages
 * are not written; but to check fully the write-protected pages need
 * to be scavenged by disabling the code to skip them.
 *
 * Under the current scheme when a generation is GCed the younger
 * generations will be empty. So, when a generation is being GCed it
 * is only necessary to scavenge the older generations for pointers
 * not the younger. So a page that does not have pointers to younger
 * generations does not need to be scavenged.
 *
 * The write-protection can be used to note pages that don't have
 * pointers to younger pages. But pages can be written without having
 * pointers to younger generations. After the pages are scavenged here
 * they can be scanned for pointers to younger generations and if
 * there are none the page can be write-protected.
 *
 * One complication is when the newspace is the top temp. generation.
 *
 * Enabling SC_GEN_CK scavenges the write-protected pages and checks
 * that none were written, which they shouldn't be as they should have
 * no pointers to younger generations. This breaks down for weak
 * pointers as the objects contain a link to the next and are written
 * if a weak pointer is scavenged. Still it's a useful check. */
static void
scavenge_generation(int generation)
{
    long i;
    int num_wp = 0;

#define SC_GEN_CK 0
#if SC_GEN_CK
    /* Clear the write_protected_cleared flags on all pages. */
    for (i = 0; i < NUM_PAGES; i++)
        page_table[i].write_protected_cleared = 0;
#endif

    for (i = 0; i < last_free_page; i++) {
        if ((page_table[i].allocated & BOXED_PAGE_FLAG)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)) {
            long last_page,j;
            int write_protected=1;

            /* This should be the start of a region */
            gc_assert(page_table[i].first_object_offset == 0);

            /* Now work forward until the end of the region */
            for (last_page = i; ; last_page++) {
                write_protected =
                    write_protected && page_table[last_page].write_protected;
                if ((page_table[last_page].bytes_used < PAGE_BYTES)
                    /* Or it is PAGE_BYTES and is the last in the block */
                    || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG))
                    || (page_table[last_page+1].bytes_used == 0)
                    || (page_table[last_page+1].gen != generation)
                    || (page_table[last_page+1].first_object_offset == 0))
                    break;
            }
            if (!write_protected) {
                scavenge(page_address(i),
                         (page_table[last_page].bytes_used +
                          (last_page-i)*PAGE_BYTES)/N_WORD_BYTES);

                /* Now scan the pages and write protect those that
                 * don't have pointers to younger generations. */
                if (enable_page_protection) {
                    for (j = i; j <= last_page; j++) {
                        num_wp += update_page_write_prot(j);
                    }
                }
            }
            i = last_page;
        }
    }
    if ((gencgc_verbose > 1) && (num_wp != 0)) {
        FSHOW((stderr,
               "/write protected %d pages within generation %d\n",
               num_wp, generation));
    }

#if SC_GEN_CK
    /* Check that none of the write_protected pages in this generation
     * have been written to. */
    for (i = 0; i < NUM_PAGES; i++) {
        if ((page_table[i].allocated != FREE_PAGE_FLAG)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)
            && (page_table[i].write_protected_cleared != 0)) {
            FSHOW((stderr, "/scavenge_generation() %d\n", generation));
            FSHOW((stderr,
                   "/page bytes_used=%d first_object_offset=%d dont_move=%d\n",
                   page_table[i].bytes_used,
                   page_table[i].first_object_offset,
                   page_table[i].dont_move));
            lose("write to protected page %d in scavenge_generation()", i);
        }
    }
#endif
}
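
/* Worked example of the scavenge length computed above: it covers the
 * whole contiguous region, i.e. bytes_used of the final page plus
 * PAGE_BYTES for every full page before it. A region of pages 5..7
 * with 100 bytes used on page 7 scavenges
 * (100 + 2*PAGE_BYTES)/N_WORD_BYTES words starting at page_address(5). */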

/* Scavenge a newspace generation. As it is scavenged new objects may
 * be allocated to it; these will also need to be scavenged. This
 * repeats until there are no more objects unscavenged in the
 * newspace generation.
 *
 * To help improve the efficiency, areas written are recorded by
 * gc_alloc() and only these scavenged. Sometimes a little more will be
 * scavenged, but this causes no harm. An easy check is done that the
 * scavenged bytes equals the number allocated in the previous
 * scavenge.
 *
 * Write-protected pages are not scanned except if they are marked
 * dont_move in which case they may have been promoted and still have
 * pointers to the from space.
 *
 * Write-protected pages could potentially be written by alloc however
 * to avoid having to handle re-scavenging of write-protected pages
 * gc_alloc() does not write to write-protected pages.
 *
 * New areas of objects allocated are recorded alternatively in the two
 * new_areas arrays below. */
static struct new_area new_areas_1[NUM_NEW_AREAS];
static struct new_area new_areas_2[NUM_NEW_AREAS];

/* Do one full scan of the new space generation. This is not enough to
 * complete the job as new objects may be added to the generation in
 * the process which are not scavenged. */
static void
scavenge_newspace_generation_one_scan(int generation)
{
    long i;

    FSHOW((stderr,
           "/starting one full scan of newspace generation %d\n",
           generation));
    for (i = 0; i < last_free_page; i++) {
        /* Note that this skips over open regions when it encounters them. */
        if ((page_table[i].allocated & BOXED_PAGE_FLAG)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)
            && ((page_table[i].write_protected == 0)
                /* (This may be redundant as write_protected is now
                 * cleared before promotion.) */
                || (page_table[i].dont_move == 1))) {
            long last_page;
            int all_wp=1;

            /* The scavenge will start at the first_object_offset of page i.
             *
             * We need to find the full extent of this contiguous
             * block in case objects span pages.
             *
             * Now work forward until the end of this contiguous area
             * is found. A small area is preferred as there is a
             * better chance of its pages being write-protected. */
            for (last_page = i; ;last_page++) {
                /* If all pages are write-protected and movable,
                 * then no need to scavenge */
                all_wp=all_wp && page_table[last_page].write_protected &&
                    !page_table[last_page].dont_move;

                /* Check whether this is the last page in this
                 * contiguous block */
                if ((page_table[last_page].bytes_used < PAGE_BYTES)
                    /* Or it is PAGE_BYTES and is the last in the block */
                    || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG))
                    || (page_table[last_page+1].bytes_used == 0)
                    || (page_table[last_page+1].gen != generation)
                    || (page_table[last_page+1].first_object_offset == 0))
                    break;
            }

            /* Do a limited check for write-protected pages. */
            if (!all_wp) {
                long size;

                size = (page_table[last_page].bytes_used
                        + (last_page-i)*PAGE_BYTES
                        - page_table[i].first_object_offset)/N_WORD_BYTES;
                new_areas_ignore_page = last_page;

                scavenge(page_address(i) +
                         page_table[i].first_object_offset,
                         size);
            }
            i = last_page;
        }
    }
    FSHOW((stderr,
           "/done with one full scan of newspace generation %d\n",
           generation));
}

/* Do a complete scavenge of the newspace generation. */
#define SC_NS_GEN_CK 0
static void
scavenge_newspace_generation(int generation)
{
    long i;

    /* the new_areas array currently being written to by gc_alloc() */
    struct new_area (*current_new_areas)[] = &new_areas_1;
    long current_new_areas_index;

    /* the new_areas created by the previous scavenge cycle */
    struct new_area (*previous_new_areas)[] = NULL;
    long previous_new_areas_index;

    /* Flush the current regions updating the tables. */
    gc_alloc_update_all_page_tables();

    /* Turn on the recording of new areas by gc_alloc(). */
    new_areas = current_new_areas;
    new_areas_index = 0;

    /* Don't need to record new areas that get scavenged anyway during
     * scavenge_newspace_generation_one_scan. */
    record_new_objects = 1;

    /* Start with a full scavenge. */
    scavenge_newspace_generation_one_scan(generation);

    /* Record all new areas now. */
    record_new_objects = 2;

    /* Flush the current regions updating the tables. */
    gc_alloc_update_all_page_tables();

    /* Grab new_areas_index. */
    current_new_areas_index = new_areas_index;

    /*FSHOW((stderr,
             "The first scan is finished; current_new_areas_index=%d.\n",
             current_new_areas_index));*/

    while (current_new_areas_index > 0) {
        /* Move the current to the previous new areas */
        previous_new_areas = current_new_areas;
        previous_new_areas_index = current_new_areas_index;

        /* Scavenge all the areas in previous new areas. Any new areas
         * allocated are saved in current_new_areas. */

        /* Allocate an array for current_new_areas; alternating between
         * new_areas_1 and 2 */
        if (previous_new_areas == &new_areas_1)
            current_new_areas = &new_areas_2;
        else
            current_new_areas = &new_areas_1;

        /* Set up for gc_alloc(). */
        new_areas = current_new_areas;
        new_areas_index = 0;

        /* Check whether previous_new_areas had overflowed. */
        if (previous_new_areas_index >= NUM_NEW_AREAS) {

            /* New areas of objects allocated have been lost so need to do a
             * full scan to be sure! If this becomes a problem try
             * increasing NUM_NEW_AREAS. */
            if (gencgc_verbose)
                SHOW("new_areas overflow, doing full scavenge");

            /* Don't need to record new areas that get scavenged anyway
             * during scavenge_newspace_generation_one_scan. */
            record_new_objects = 1;

            scavenge_newspace_generation_one_scan(generation);

            /* Record all new areas now. */
            record_new_objects = 2;

            /* Flush the current regions updating the tables. */
            gc_alloc_update_all_page_tables();

        } else {

            /* Work through previous_new_areas. */
            for (i = 0; i < previous_new_areas_index; i++) {
                long page = (*previous_new_areas)[i].page;
                long offset = (*previous_new_areas)[i].offset;
                long size = (*previous_new_areas)[i].size / N_WORD_BYTES;
                gc_assert((*previous_new_areas)[i].size % N_WORD_BYTES == 0);
                scavenge(page_address(page)+offset, size);
            }

            /* Flush the current regions updating the tables. */
            gc_alloc_update_all_page_tables();
        }

        current_new_areas_index = new_areas_index;

        /*FSHOW((stderr,
                 "The re-scan has finished; current_new_areas_index=%d.\n",
                 current_new_areas_index));*/
    }

    /* Turn off recording of areas allocated by gc_alloc(). */
    record_new_objects = 0;

#if SC_NS_GEN_CK
    /* Check that none of the write_protected pages in this generation
     * have been written to. */
    for (i = 0; i < NUM_PAGES; i++) {
        if ((page_table[i].allocated != FREE_PAGE_FLAG)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)
            && (page_table[i].write_protected_cleared != 0)
            && (page_table[i].dont_move == 0)) {
            lose("write protected page %d written to in scavenge_newspace_generation\ngeneration=%d dont_move=%d",
                 i, generation, page_table[i].dont_move);
        }
    }
#endif
}
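
/* The loop above is a fixpoint iteration: scavenging
 * previous_new_areas may allocate fresh objects, which gc_alloc()
 * records into current_new_areas, and the cycle repeats until a pass
 * allocates nothing. Overflow of the fixed-size area array falls back
 * to a full rescan, trading time for correctness. */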

/* Un-write-protect all the pages in from_space. This is done at the
 * start of a GC else there may be many page faults while scavenging
 * the newspace (I've seen this drive system time to 99%). These pages
 * would need to be unprotected anyway before unmapping in
 * free_oldspace; not sure what effect this has on paging.. */
static void
unprotect_oldspace(void)
{
    long i;

    for (i = 0; i < last_free_page; i++) {
        if ((page_table[i].allocated != FREE_PAGE_FLAG)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == from_space)) {
            void *page_start;

            page_start = (void *)page_address(i);

            /* Remove any write-protection. We should be able to rely
             * on the write-protect flag to avoid redundant calls. */
            if (page_table[i].write_protected) {
                os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
                page_table[i].write_protected = 0;
            }
        }
    }
}

/* Work through all the pages and free any in from_space. This
 * assumes that all objects have been copied or promoted to an older
 * generation. Bytes_allocated and the generation bytes_allocated
 * counter are updated. The number of bytes freed is returned. */
static long
free_oldspace(void)
{
    long bytes_freed = 0;
    long first_page, last_page;

    first_page = 0;

    do {
        /* Find a first page for the next region of pages. */
        while ((first_page < last_free_page)
               && ((page_table[first_page].allocated == FREE_PAGE_FLAG)
                   || (page_table[first_page].bytes_used == 0)
                   || (page_table[first_page].gen != from_space)))
            first_page++;

        if (first_page >= last_free_page)
            break;

        /* Find the last page of this region. */
        last_page = first_page;

        do {
            /* Free the page. */
            bytes_freed += page_table[last_page].bytes_used;
            generations[page_table[last_page].gen].bytes_allocated -=
                page_table[last_page].bytes_used;
            page_table[last_page].allocated = FREE_PAGE_FLAG;
            page_table[last_page].bytes_used = 0;

            /* Remove any write-protection. We should be able to rely
             * on the write-protect flag to avoid redundant calls. */
            {
                void *page_start = (void *)page_address(last_page);

                if (page_table[last_page].write_protected) {
                    os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
                    page_table[last_page].write_protected = 0;
                }
            }
            last_page++;
        }
        while ((last_page < last_free_page)
               && (page_table[last_page].allocated != FREE_PAGE_FLAG)
               && (page_table[last_page].bytes_used != 0)
               && (page_table[last_page].gen == from_space));

        /* Zero pages from first_page to (last_page-1).
         *
         * FIXME: Why not use os_zero(..) function instead of
         * hand-coding this again? (Check other gencgc_unmap_zero
         * stuff too.) */
        if (gencgc_unmap_zero) {
            void *page_start, *addr;

            page_start = (void *)page_address(first_page);

            os_invalidate(page_start, PAGE_BYTES*(last_page-first_page));
            addr = os_validate(page_start, PAGE_BYTES*(last_page-first_page));
            if (addr == NULL || addr != page_start) {
                lose("free_oldspace: page moved, 0x%08x ==> 0x%08x",page_start,
                     addr);
            }
        } else {
            long *page_start;

            page_start = (long *)page_address(first_page);
            memset(page_start, 0,PAGE_BYTES*(last_page-first_page));
        }

        first_page = last_page;

    } while (first_page < last_free_page);

    bytes_allocated -= bytes_freed;
    return bytes_freed;
}
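
/* Per the FIXME above, the two zeroing strategies could be folded into
 * one helper. A minimal sketch, compiled out (`example_zero_pages` is
 * hypothetical; it is not the os_zero() the FIXME mentions, just the
 * same logic as the branch above expressed once): */
#if 0
static void
example_zero_pages(long first_page, long last_page)
{
    void *page_start = (void *)page_address(first_page);
    long len = PAGE_BYTES*(last_page-first_page);

    if (gencgc_unmap_zero) {
        /* let the OS hand back demand-zero pages */
        os_invalidate(page_start, len);
        if (os_validate(page_start, len) != page_start)
            lose("example_zero_pages: page moved");
    } else {
        /* zero in place, avoiding swap churn on some platforms */
        memset(page_start, 0, len);
    }
}
#endif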

/* Print some information about a pointer at the given address. */
static void
print_ptr(lispobj *addr)
{
    /* If addr is in the dynamic space then print out the page information. */
    long pi1 = find_page_index((void*)addr);

    if (pi1 != -1)
        fprintf(stderr,"  %x: page %d  alloc %d  gen %d  bytes_used %d  offset %d  dont_move %d\n",
                (unsigned long) addr,
                pi1,
                page_table[pi1].allocated,
                page_table[pi1].gen,
                page_table[pi1].bytes_used,
                page_table[pi1].first_object_offset,
                page_table[pi1].dont_move);
    fprintf(stderr,"  %x %x %x %x (%x) %x %x %x %x\n",
            *(addr-4),
            *(addr-3),
            *(addr-2),
            *(addr-1),
            *(addr-0),
            *(addr+1),
            *(addr+2),
            *(addr+3),
            *(addr+4));
}

extern long undefined_tramp;

static void
verify_space(lispobj *start, size_t words)
{
    int is_in_dynamic_space = (find_page_index((void*)start) != -1);
    int is_in_readonly_space =
        (READ_ONLY_SPACE_START <= (unsigned)start &&
         (unsigned)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));

    while (words > 0) {
        size_t count = 1;
        lispobj thing = *(lispobj*)start;

        if (is_lisp_pointer(thing)) {
            long page_index = find_page_index((void*)thing);
            long to_readonly_space =
                (READ_ONLY_SPACE_START <= thing &&
                 thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
            long to_static_space =
                (STATIC_SPACE_START <= thing &&
                 thing < SymbolValue(STATIC_SPACE_FREE_POINTER,0));

            /* Does it point to the dynamic space? */
            if (page_index != -1) {
                /* If it's within the dynamic space it should point to a used
                 * page. XX Could check the offset too. */
                if ((page_table[page_index].allocated != FREE_PAGE_FLAG)
                    && (page_table[page_index].bytes_used == 0))
                    lose ("Ptr %x @ %x sees free page.", thing, start);
                /* Check that it doesn't point to a forwarding pointer! */
                if (*((lispobj *)native_pointer(thing)) == 0x01) {
                    lose("Ptr %x @ %x sees forwarding ptr.", thing, start);
                }
                /* Check that it's not in the RO space as it would then be a
                 * pointer from the RO to the dynamic space. */
                if (is_in_readonly_space) {
                    lose("ptr to dynamic space %x from RO space %x",
                         thing, start);
                }
                /* Does it point to a plausible object? This check slows
                 * it down a lot (so it's commented out).
                 *
                 * "a lot" is serious: it ate 50 minutes cpu time on
                 * my duron 950 before I came back from lunch and
                 * killed it.
                 *
                 * FIXME: Add a variable to enable this
                 * dynamically. */
                /*
                if (!possibly_valid_dynamic_space_pointer((lispobj *)thing)) {
                    lose("ptr %x to invalid object %x", thing, start);
                }
                */
            } else {
                /* Verify that it points to another valid space. */
                if (!to_readonly_space && !to_static_space
                    && (thing != (unsigned)&undefined_tramp)) {
                    lose("Ptr %x @ %x sees junk.", thing, start);
                }
            }
        } else {
            if (!(fixnump(thing))) {
                /* skip fixnums */
                switch(widetag_of(*start)) {

                    /* boxed objects */
                case SIMPLE_VECTOR_WIDETAG:
                case RATIO_WIDETAG:
                case COMPLEX_WIDETAG:
                case SIMPLE_ARRAY_WIDETAG:
                case COMPLEX_BASE_STRING_WIDETAG:
#ifdef COMPLEX_CHARACTER_STRING_WIDETAG
                case COMPLEX_CHARACTER_STRING_WIDETAG:
#endif
                case COMPLEX_VECTOR_NIL_WIDETAG:
                case COMPLEX_BIT_VECTOR_WIDETAG:
                case COMPLEX_VECTOR_WIDETAG:
                case COMPLEX_ARRAY_WIDETAG:
                case CLOSURE_HEADER_WIDETAG:
                case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
                case VALUE_CELL_HEADER_WIDETAG:
                case SYMBOL_HEADER_WIDETAG:
                case CHARACTER_WIDETAG:
#if N_WORD_BITS == 64
                case SINGLE_FLOAT_WIDETAG:
#endif
                case UNBOUND_MARKER_WIDETAG:
                case INSTANCE_HEADER_WIDETAG:
                case FDEFN_WIDETAG:
                    count = 1;
                    break;

                case CODE_HEADER_WIDETAG:
                    {
                        lispobj object = *start;
                        struct code *code;
                        long nheader_words, ncode_words, nwords;
                        lispobj fheaderl;
                        struct simple_fun *fheaderp;

                        code = (struct code *) start;

                        /* Check that it's not in the dynamic space.
                         * FIXME: Isn't it supposed to be OK for code
                         * objects to be in the dynamic space these days? */
                        if (is_in_dynamic_space
                            /* It's ok if it's byte compiled code. The trace
                             * table offset will be a fixnum if it's x86
                             * compiled code - check.
                             *
                             * FIXME: #^#@@! lack of abstraction here..
                             * This line can probably go away now that
                             * there's no byte compiler, but I've got
                             * too much to worry about right now to try
                             * to make sure. -- WHN 2001-10-06 */
                            && fixnump(code->trace_table_offset)
                            /* Only when enabled */
                            && verify_dynamic_code_check) {
                            FSHOW((stderr,
                                   "/code object at %x in the dynamic space\n",
                                   start));
                        }

                        ncode_words = fixnum_value(code->code_size);
                        nheader_words = HeaderValue(object);
                        nwords = ncode_words + nheader_words;
                        nwords = CEILING(nwords, 2);
                        /* Scavenge the boxed section of the code data block */
                        verify_space(start + 1, nheader_words - 1);

                        /* Scavenge the boxed section of each function
                         * object in the code data block. */
                        fheaderl = code->entry_points;
                        while (fheaderl != NIL) {
                            fheaderp =
                                (struct simple_fun *) native_pointer(fheaderl);
                            gc_assert(widetag_of(fheaderp->header) == SIMPLE_FUN_HEADER_WIDETAG);
                            verify_space(&fheaderp->name, 1);
                            verify_space(&fheaderp->arglist, 1);
                            verify_space(&fheaderp->type, 1);
                            fheaderl = fheaderp->next;
                        }
                        count = nwords;
                        break;
                    }

                    /* unboxed objects */
                case BIGNUM_WIDETAG:
#if N_WORD_BITS != 64
                case SINGLE_FLOAT_WIDETAG:
#endif
                case DOUBLE_FLOAT_WIDETAG:
#ifdef LONG_FLOAT_WIDETAG
                case LONG_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_SINGLE_FLOAT_WIDETAG
                case COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_DOUBLE_FLOAT_WIDETAG
                case COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_LONG_FLOAT_WIDETAG
                case COMPLEX_LONG_FLOAT_WIDETAG:
#endif
                case SIMPLE_BASE_STRING_WIDETAG:
#ifdef SIMPLE_CHARACTER_STRING_WIDETAG
                case SIMPLE_CHARACTER_STRING_WIDETAG:
#endif
                case SIMPLE_BIT_VECTOR_WIDETAG:
                case SIMPLE_ARRAY_NIL_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG
                case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
#endif
                case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
                case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG
                case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
                case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
                case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
                case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
#endif
                case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
                case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
#ifdef SIMPLE_ARRAY_LONG_FLOAT_WIDETAG
                case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
                case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
                case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
                case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
#endif
                case SAP_WIDETAG:
                case WEAK_POINTER_WIDETAG:
                    count = (sizetab[widetag_of(*start)])(start);
                    break;

                default:
                    gc_abort();
                }
            }
        }
        start += count;
        words -= count;
    }
}

static void
verify_gc(void)
{
    /* FIXME: It would be nice to make names consistent so that
     * foo_size meant size *in* *bytes* instead of size in some
     * arbitrary units. (Yes, this caused a bug, how did you guess?:-)
     * Some counts of lispobjs are called foo_count; it might be good
     * to grep for all foo_size and rename the appropriate ones to
     * foo_count. */
    long read_only_space_size =
        (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0)
        - (lispobj*)READ_ONLY_SPACE_START;
    long static_space_size =
        (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER,0)
        - (lispobj*)STATIC_SPACE_START;
    struct thread *th;
    for_each_thread(th) {
        long binding_stack_size =
            (lispobj*)SymbolValue(BINDING_STACK_POINTER,th)
            - (lispobj*)th->binding_stack_start;
        verify_space(th->binding_stack_start, binding_stack_size);
    }
    verify_space((lispobj*)READ_ONLY_SPACE_START, read_only_space_size);
    verify_space((lispobj*)STATIC_SPACE_START, static_space_size);
}
static void
verify_generation(int generation)
{
    long i;

    for (i = 0; i < last_free_page; i++) {
        if ((page_table[i].allocated != FREE_PAGE_FLAG)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)) {
            long last_page;
            int region_allocation = page_table[i].allocated;

            /* This should be the start of a contiguous block */
            gc_assert(page_table[i].first_object_offset == 0);

            /* Need to find the full extent of this contiguous block in case
               objects span pages. */

            /* Now work forward until the end of this contiguous area is
               found. */
            for (last_page = i; ;last_page++)
                /* Check whether this is the last page in this contiguous
                 * block. */
                if ((page_table[last_page].bytes_used < PAGE_BYTES)
                    /* Or it is PAGE_BYTES and is the last in the block */
                    || (page_table[last_page+1].allocated != region_allocation)
                    || (page_table[last_page+1].bytes_used == 0)
                    || (page_table[last_page+1].gen != generation)
                    || (page_table[last_page+1].first_object_offset == 0))
                    break;

            verify_space(page_address(i), (page_table[last_page].bytes_used
                                           + (last_page-i)*PAGE_BYTES)/N_WORD_BYTES);
            i = last_page;
        }
    }
}

/* Check that all the free space is zero filled. */
static void
verify_zero_fill(void)
{
    long page;

    for (page = 0; page < last_free_page; page++) {
        if (page_table[page].allocated == FREE_PAGE_FLAG) {
            /* The whole page should be zero filled. */
            long *start_addr = (long *)page_address(page);
            long size = PAGE_BYTES / N_WORD_BYTES;
            long i;
            for (i = 0; i < size; i++) {
                if (start_addr[i] != 0) {
                    lose("free page not zero at %x", start_addr + i);
                }
            }
        } else {
            long free_bytes = PAGE_BYTES - page_table[page].bytes_used;
            if (free_bytes > 0) {
                long *start_addr = (long *)((unsigned)page_address(page)
                                            + page_table[page].bytes_used);
                long size = free_bytes / N_WORD_BYTES;
                long i;
                for (i = 0; i < size; i++) {
                    if (start_addr[i] != 0) {
                        lose("free region not zero at %x", start_addr + i);
                    }
                }
            }
        }
    }
}

/* External entry point for verify_zero_fill */
void
gencgc_verify_zero_fill(void)
{
    /* Flush the alloc regions updating the tables. */
    gc_alloc_update_all_page_tables();
    SHOW("verifying zero fill");
    verify_zero_fill();
}

static void
verify_dynamic_space(void)
{
    long i;

    for (i = 0; i < NUM_GENERATIONS; i++)
        verify_generation(i);

    if (gencgc_enable_verify_zero_fill)
        verify_zero_fill();
}

/* Write-protect all the dynamic boxed pages in the given generation. */
static void
write_protect_generation_pages(int generation)
{
    long i;

    gc_assert(generation < NUM_GENERATIONS);

    for (i = 0; i < last_free_page; i++)
        if ((page_table[i].allocated == BOXED_PAGE_FLAG)
            && (page_table[i].bytes_used != 0)
            && !page_table[i].dont_move
            && (page_table[i].gen == generation)) {
            void *page_start;

            page_start = (void *)page_address(i);

            os_protect(page_start,
                       PAGE_BYTES,
                       OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);

            /* Note the page as protected in the page tables. */
            page_table[i].write_protected = 1;
        }

    if (gencgc_verbose > 1) {
        FSHOW((stderr,
               "/write protected %d of %d pages in generation %d\n",
               count_write_protect_generation_pages(generation),
               count_generation_pages(generation),
               generation));
    }
}
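
/* Unlike update_page_write_prot(), which protects a single page only
 * after scanning it for pointers to younger objects, this function
 * protects a whole generation wholesale. That is only safe right
 * after the generation has been GCed, when (as collect_garbage()
 * notes below) it cannot yet hold pointers to younger generations. */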

/* Garbage collect a generation. If raise is 0 then the remains of the
 * generation are not raised to the next generation. */
static void
garbage_collect_generation(int generation, int raise)
{
    unsigned long bytes_freed;
    long i;
    unsigned long static_space_size;
    struct thread *th;
    gc_assert(generation <= (NUM_GENERATIONS-1));

    /* The oldest generation can't be raised. */
    gc_assert((generation != (NUM_GENERATIONS-1)) || (raise == 0));

    /* Initialize the weak pointer list. */
    weak_pointers = NULL;

    /* When a generation is not being raised it is transported to a
     * temporary generation (NUM_GENERATIONS), and lowered when
     * done. Set up this new generation. There should be no pages
     * allocated to it yet. */
    if (!raise) {
        gc_assert(generations[NUM_GENERATIONS].bytes_allocated == 0);
    }

    /* Set the global src and dest. generations */
    from_space = generation;
    if (raise)
        new_space = generation+1;
    else
        new_space = NUM_GENERATIONS;

    /* Change to a new space for allocation, resetting the alloc_start_page */
    gc_alloc_generation = new_space;
    generations[new_space].alloc_start_page = 0;
    generations[new_space].alloc_unboxed_start_page = 0;
    generations[new_space].alloc_large_start_page = 0;
    generations[new_space].alloc_large_unboxed_start_page = 0;

    /* Before any pointers are preserved, the dont_move flags on the
     * pages need to be cleared. */
    for (i = 0; i < last_free_page; i++)
        if(page_table[i].gen==from_space)
            page_table[i].dont_move = 0;

    /* Un-write-protect the old-space pages. This is essential for the
     * promoted pages as they may contain pointers into the old-space
     * which need to be scavenged. It also helps avoid unnecessary page
     * faults as forwarding pointers are written into them. They need to
     * be un-protected anyway before unmapping later. */
    unprotect_oldspace();

    /* Scavenge the stacks' conservative roots. */

    /* there are potentially two stacks for each thread: the main
     * stack, which may contain Lisp pointers, and the alternate stack.
     * We don't ever run Lisp code on the altstack, but it may
     * host a sigcontext with lisp objects in it */

    /* what we need to do: (1) find the stack pointer for the main
     * stack; scavenge it (2) find the interrupt context on the
     * alternate stack that might contain lisp values, and scavenge
     * that */

    /* we assume that none of the preceding applies to the thread that
     * initiates GC. If you ever call GC from inside an altstack
     * handler, you will lose. */
    for_each_thread(th) {
        void **ptr;
        void **esp=(void **)-1;
#ifdef LISP_FEATURE_SB_THREAD
        long i,free;
        if(th==arch_os_get_current_thread()) {
            esp = (void **) &raise;
        } else {
            void **esp1;
            free=fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
            for(i=free-1;i>=0;i--) {
                os_context_t *c=th->interrupt_contexts[i];
                esp1 = (void **) *os_context_register_addr(c,reg_ESP);
                if(esp1>=th->control_stack_start && esp1<th->control_stack_end){
                    if(esp1<esp) esp=esp1;
                    for(ptr = (void **)(c+1); ptr>=(void **)c; ptr--) {
                        preserve_pointer(*ptr);
                    }
                }
            }
        }
#else
        esp = (void **) &raise;
#endif
        for (ptr = (void **)th->control_stack_end; ptr > esp;  ptr--) {
            preserve_pointer(*ptr);
        }
    }

    if (gencgc_verbose > 1) {
        long num_dont_move_pages = count_dont_move_pages();
        FSHOW((stderr,
               "/non-movable pages due to conservative pointers = %d (%d bytes)\n",
               num_dont_move_pages,
               num_dont_move_pages * PAGE_BYTES));
    }

    /* Scavenge all the rest of the roots. */

    /* Scavenge the Lisp functions of the interrupt handlers, taking
     * care to avoid SIG_DFL and SIG_IGN. */
    for_each_thread(th) {
        struct interrupt_data *data=th->interrupt_data;
        for (i = 0; i < NSIG; i++) {
            union interrupt_handler handler = data->interrupt_handlers[i];
            if (!ARE_SAME_HANDLER(handler.c, SIG_IGN) &&
                !ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
                scavenge((lispobj *)(data->interrupt_handlers + i), 1);
            }
        }
    }
    /* Scavenge the binding stacks. */
    for_each_thread(th) {
        long len = (lispobj *)SymbolValue(BINDING_STACK_POINTER,th) -
            th->binding_stack_start;
        scavenge((lispobj *) th->binding_stack_start,len);
#ifdef LISP_FEATURE_SB_THREAD
        /* do the tls as well */
        len=fixnum_value(SymbolValue(FREE_TLS_INDEX,0)) -
            (sizeof (struct thread))/(sizeof (lispobj));
        scavenge((lispobj *) (th+1),len);
#endif
    }

    /* The original CMU CL code had scavenge-read-only-space code
     * controlled by the Lisp-level variable
     * *SCAVENGE-READ-ONLY-SPACE*. It was disabled by default, and it
     * wasn't documented under what circumstances it was useful or
     * safe to turn it on, so it's been turned off in SBCL. If you
     * want/need this functionality, and can test and document it,
     * please submit a patch. */
#if 0
    if (SymbolValue(SCAVENGE_READ_ONLY_SPACE) != NIL) {
        unsigned long read_only_space_size =
            (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER) -
            (lispobj*)READ_ONLY_SPACE_START;
        FSHOW((stderr,
               "/scavenge read only space: %d bytes\n",
               read_only_space_size * sizeof(lispobj)));
        scavenge( (lispobj *) READ_ONLY_SPACE_START, read_only_space_size);
    }
#endif

    /* Scavenge static space. */
    static_space_size =
        (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0) -
        (lispobj *)STATIC_SPACE_START;
    if (gencgc_verbose > 1) {
        FSHOW((stderr,
               "/scavenge static space: %d bytes\n",
               static_space_size * sizeof(lispobj)));
    }
    scavenge( (lispobj *) STATIC_SPACE_START, static_space_size);

    /* All generations but the generation being GCed need to be
     * scavenged. The new_space generation needs special handling as
     * objects may be moved in - it is handled separately below. */
    for (i = 0; i < NUM_GENERATIONS; i++) {
        if ((i != generation) && (i != new_space)) {
            scavenge_generation(i);
        }
    }

    /* Finally scavenge the new_space generation. Keep going until no
     * more objects are moved into the new generation */
    scavenge_newspace_generation(new_space);

    /* FIXME: I tried reenabling this check when debugging unrelated
     * GC weirdness ca. sbcl-0.6.12.45, and it failed immediately.
     * Since the current GC code seems to work well, I'm guessing that
     * this debugging code is just stale, but I haven't tried to
     * figure it out. It should be figured out and then either made to
     * work or just deleted. */
#define RESCAN_CHECK 0
#if RESCAN_CHECK
    /* As a check re-scavenge the newspace once; no new objects should
     * be found. */
    {
        long old_bytes_allocated = bytes_allocated;
        long bytes_allocated;

        /* Start with a full scavenge. */
        scavenge_newspace_generation_one_scan(new_space);

        /* Flush the current regions, updating the tables. */
        gc_alloc_update_all_page_tables();

        bytes_allocated = bytes_allocated - old_bytes_allocated;

        if (bytes_allocated != 0) {
            lose("Rescan of new_space allocated %d more bytes.",
                 bytes_allocated);
        }
    }
#endif

    scan_weak_pointers();

    /* Flush the current regions, updating the tables. */
    gc_alloc_update_all_page_tables();

    /* Free the pages in oldspace, but not those marked dont_move. */
    bytes_freed = free_oldspace();

    /* If the GC is not raising the age then lower the generation back
     * to its normal generation number */
    if (!raise) {
        for (i = 0; i < last_free_page; i++)
            if ((page_table[i].bytes_used != 0)
                && (page_table[i].gen == NUM_GENERATIONS))
                page_table[i].gen = generation;
        gc_assert(generations[generation].bytes_allocated == 0);
        generations[generation].bytes_allocated =
            generations[NUM_GENERATIONS].bytes_allocated;
        generations[NUM_GENERATIONS].bytes_allocated = 0;
    }

    /* Reset the alloc_start_page for generation. */
    generations[generation].alloc_start_page = 0;
    generations[generation].alloc_unboxed_start_page = 0;
    generations[generation].alloc_large_start_page = 0;
    generations[generation].alloc_large_unboxed_start_page = 0;

    if (generation >= verify_gens) {
        if (gencgc_verbose)
            SHOW("verifying");
        verify_gc();
        verify_dynamic_space();
    }

    /* Set the new gc trigger for the GCed generation. */
    generations[generation].gc_trigger =
        generations[generation].bytes_allocated
        + generations[generation].bytes_consed_between_gc;

    if (raise)
        generations[generation].num_gc = 0;
    else
        ++generations[generation].num_gc;
}

/* Update last_free_page, then SymbolValue(ALLOCATION_POINTER). */
long
update_x86_dynamic_space_free_pointer(void)
{
    long last_page = -1;
    long i;

    for (i = 0; i < last_free_page; i++)
        if ((page_table[i].allocated != FREE_PAGE_FLAG)
            && (page_table[i].bytes_used != 0))
            last_page = i;

    last_free_page = last_page + 1;

    SetSymbolValue(ALLOCATION_POINTER,
                   (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),0);
    return 0; /* dummy value: return something ... */
}

/* GC all generations newer than last_gen, raising the objects in each
 * to the next older generation - we finish when all generations below
 * last_gen are empty. Then if last_gen is due for a GC, or if
 * last_gen==NUM_GENERATIONS (the scratch generation? eh?) we GC that
 * too. The valid range for last_gen is: 0,1,...,NUM_GENERATIONS.
 *
 * We stop collecting at gencgc_oldest_gen_to_gc, even if this is less than
 * last_gen (oh, and note that by default it is NUM_GENERATIONS-1). */
void
collect_garbage(unsigned last_gen)
{
    int gen = 0;
    int raise;
    int gen_to_wp;
    long i;

    FSHOW((stderr, "/entering collect_garbage(%d)\n", last_gen));

    if (last_gen > NUM_GENERATIONS) {
        FSHOW((stderr,
               "/collect_garbage: last_gen = %d, doing a level 0 GC\n",
               last_gen));
        last_gen = 0;
    }

    /* Flush the alloc regions updating the tables. */
    gc_alloc_update_all_page_tables();

    /* Verify the new objects created by Lisp code. */
    if (pre_verify_gen_0) {
        FSHOW((stderr, "pre-checking generation 0\n"));
        verify_generation(0);
    }

    if (gencgc_verbose > 1)
        print_generation_stats(0);

    do {
        /* Collect the generation. */

        if (gen >= gencgc_oldest_gen_to_gc) {
            /* Never raise the oldest generation. */
            raise = 0;
        } else {
            raise =
                (gen < last_gen)
                || (generations[gen].num_gc >= generations[gen].trigger_age);
        }

        if (gencgc_verbose > 1) {
            FSHOW((stderr,
                   "starting GC of generation %d with raise=%d alloc=%d trig=%d GCs=%d\n",
                   gen,
                   raise,
                   generations[gen].bytes_allocated,
                   generations[gen].gc_trigger,
                   generations[gen].num_gc));
        }

        /* If an older generation is being filled, then update its
         * memory age. */
        if (raise == 1) {
            generations[gen+1].cum_sum_bytes_allocated +=
                generations[gen+1].bytes_allocated;
        }

        garbage_collect_generation(gen, raise);

        /* Reset the memory age cum_sum. */
        generations[gen].cum_sum_bytes_allocated = 0;

        if (gencgc_verbose > 1) {
            FSHOW((stderr, "GC of generation %d finished:\n", gen));
            print_generation_stats(0);
        }

        gen++;
    } while ((gen <= gencgc_oldest_gen_to_gc)
             && ((gen < last_gen)
                 || ((gen <= gencgc_oldest_gen_to_gc)
                     && raise
                     && (generations[gen].bytes_allocated
                         > generations[gen].gc_trigger)
                     && (gen_av_mem_age(gen)
                         > generations[gen].min_av_mem_age))));

    /* Now if gen-1 was raised all generations before gen are empty.
     * If it wasn't raised then all generations before gen-1 are empty.
     *
     * Now objects within this gen's pages cannot point to younger
     * generations unless they are written to. This can be exploited
     * by write-protecting the pages of gen; then when younger
     * generations are GCed only the pages which have been written
     * need scanning. */
    if (raise)
        gen_to_wp = gen;
    else
        gen_to_wp = gen - 1;

    /* There's not much point in WPing pages in generation 0 as it is
     * never scavenged (except promoted pages). */
    if ((gen_to_wp > 0) && enable_page_protection) {
        /* Check that they are all empty. */
        for (i = 0; i < gen_to_wp; i++) {
            if (generations[i].bytes_allocated)
                lose("trying to write-protect gen. %d when gen. %d nonempty",
                     gen_to_wp, i);
        }
        write_protect_generation_pages(gen_to_wp);
    }

    /* Set gc_alloc() back to generation 0. The current regions should
     * be flushed after the above GCs. */
    gc_assert((boxed_region.free_pointer - boxed_region.start_addr) == 0);
    gc_alloc_generation = 0;

    update_x86_dynamic_space_free_pointer();
    auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
    if (gencgc_verbose)
        fprintf(stderr, "Next gc when %ld bytes have been consed\n",
                auto_gc_trigger);
    SHOW("returning from collect_garbage");
}
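
/* A minimal usage sketch (illustrative only, not part of the
 * collector): callers pass the oldest generation they want collected.
 * A nursery-only collection is collect_garbage(0); a full collection
 * asks for the oldest generation the collector is willing to touch.
 * The function name example_full_gc below is hypothetical. */
#if 0
static void
example_full_gc(void)
{
    collect_garbage(0);                       /* generation 0 only */
    collect_garbage(gencgc_oldest_gen_to_gc); /* everything we may GC */
}
#endif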

/* This is called by Lisp PURIFY when it is finished. All live objects
 * will have been moved to the RO and Static heaps. The dynamic space
 * will need a full re-initialization. We don't bother having Lisp
 * PURIFY flush the current gc_alloc() region, as the page_tables are
 * re-initialized, and every page is zeroed to be sure. */
void
gc_free_heap(void)
{
    long page;

    if (gencgc_verbose > 1)
        SHOW("entering gc_free_heap");

    for (page = 0; page < NUM_PAGES; page++) {
        /* Skip free pages which should already be zero filled. */
        if (page_table[page].allocated != FREE_PAGE_FLAG) {
            void *page_start, *addr;

            /* Mark the page free. The other slots are assumed invalid
             * when it is a FREE_PAGE_FLAG and bytes_used is 0 and it
             * should not be write-protected -- except that the
             * generation is used for the current region but it sets
             * that to 0. */
            page_table[page].allocated = FREE_PAGE_FLAG;
            page_table[page].bytes_used = 0;

            /* Zero the page. */
            page_start = (void *)page_address(page);

            /* First, remove any write-protection. */
            os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
            page_table[page].write_protected = 0;

            /* Unmap and re-map the page so it comes back zero filled. */
            os_invalidate(page_start, PAGE_BYTES);
            addr = os_validate(page_start, PAGE_BYTES);
            if (addr == NULL || addr != page_start) {
                lose("gc_free_heap: page moved, 0x%08x ==> 0x%08x",
                     page_start, addr);
            }
        } else if (gencgc_zero_check_during_free_heap) {
            /* Double-check that the page is zero filled. */
            long *page_start, i;
            gc_assert(page_table[page].allocated == FREE_PAGE_FLAG);
            gc_assert(page_table[page].bytes_used == 0);
            page_start = (long *)page_address(page);
            for (i = 0; i < 1024; i++) { /* 1024 longs = PAGE_BYTES on x86 */
                if (page_start[i] != 0) {
                    lose("free region not zero at %x", page_start + i);
                }
            }
        }
    }

    bytes_allocated = 0;

    /* Initialize the generations. */
    for (page = 0; page < NUM_GENERATIONS; page++) {
        generations[page].alloc_start_page = 0;
        generations[page].alloc_unboxed_start_page = 0;
        generations[page].alloc_large_start_page = 0;
        generations[page].alloc_large_unboxed_start_page = 0;
        generations[page].bytes_allocated = 0;
        generations[page].gc_trigger = 2000000;
        generations[page].num_gc = 0;
        generations[page].cum_sum_bytes_allocated = 0;
    }

    if (gencgc_verbose > 1)
        print_generation_stats(0);

    /* Initialize gc_alloc(). */
    gc_alloc_generation = 0;

    gc_set_region_empty(&boxed_region);
    gc_set_region_empty(&unboxed_region);

    last_free_page = 0;
    SetSymbolValue(ALLOCATION_POINTER, (lispobj)((char *)heap_base), 0);

    if (verify_after_free_heap) {
        /* Check whether purify has left any bad pointers. */
        if (gencgc_verbose)
            SHOW("checking after free_heap\n");
        verify_gc();
    }
}

void
gc_init(void)
{
    long i;

    gc_init_tables();
    scavtab[SIMPLE_VECTOR_WIDETAG] = scav_vector;
    scavtab[WEAK_POINTER_WIDETAG] = scav_weak_pointer;
    transother[SIMPLE_ARRAY_WIDETAG] = trans_boxed_large;

    heap_base = (void*)DYNAMIC_SPACE_START;

    /* Initialize each page structure. */
    for (i = 0; i < NUM_PAGES; i++) {
        /* Initialize all pages as free. */
        page_table[i].allocated = FREE_PAGE_FLAG;
        page_table[i].bytes_used = 0;

        /* Pages are not write-protected at startup. */
        page_table[i].write_protected = 0;
    }

    bytes_allocated = 0;

    /* Initialize the generations.
     *
     * FIXME: very similar to code in gc_free_heap(), should be shared */
    for (i = 0; i < NUM_GENERATIONS; i++) {
        generations[i].alloc_start_page = 0;
        generations[i].alloc_unboxed_start_page = 0;
        generations[i].alloc_large_start_page = 0;
        generations[i].alloc_large_unboxed_start_page = 0;
        generations[i].bytes_allocated = 0;
        generations[i].gc_trigger = 2000000;
        generations[i].num_gc = 0;
        generations[i].cum_sum_bytes_allocated = 0;
        /* the tune-able parameters */
        generations[i].bytes_consed_between_gc = 2000000;
        generations[i].trigger_age = 1;
        generations[i].min_av_mem_age = 0.75;
    }

    /* Initialize gc_alloc. */
    gc_alloc_generation = 0;
    gc_set_region_empty(&boxed_region);
    gc_set_region_empty(&unboxed_region);
}

/* Pick up the dynamic space from after a core load.
 *
 * The ALLOCATION_POINTER points to the end of the dynamic space. */
static void
gencgc_pickup_dynamic(void)
{
    long page = 0;
    long alloc_ptr = SymbolValue(ALLOCATION_POINTER, 0);
    lispobj *prev = (lispobj *)page_address(page);

    do {
        lispobj *first, *ptr = (lispobj *)page_address(page);
        page_table[page].allocated = BOXED_PAGE_FLAG;
        page_table[page].gen = 0;
        page_table[page].bytes_used = PAGE_BYTES;
        page_table[page].large_object = 0;

        /* If a new object starts exactly at this page boundary, note
         * it; first_object_offset records how far back the object
         * enclosing the page start begins. */
        first = search_space(prev, (ptr+2)-prev, ptr);
        if (ptr == first)
            prev = ptr;
        page_table[page].first_object_offset =
            (void *)prev - page_address(page);
        page++;
    } while (page_address(page) < alloc_ptr);

    generations[0].bytes_allocated = PAGE_BYTES*page;
    bytes_allocated = PAGE_BYTES*page;
}

void
gc_initialize_pointers(void)
{
    gencgc_pickup_dynamic();
}

/* alloc(..) is the external interface for memory allocation. It
 * allocates to generation 0. It is not called from within the garbage
 * collector as it is only external uses that need the check for heap
 * size (GC trigger) and to disable the interrupts (interrupts are
 * always disabled during a GC).
 *
 * The vops that call alloc(..) assume that the returned space is zero-filled.
 * (E.g. the most significant word of a 2-word bignum in MOVE-FROM-UNSIGNED.)
 *
 * The check for a GC trigger is only performed when the current
 * region is full, so in most cases it's not needed. */
char *
alloc(long nbytes)
{
    struct thread *th = arch_os_get_current_thread();
    struct alloc_region *region =
#ifdef LISP_FEATURE_SB_THREAD
        th ? &(th->alloc_region) : &boxed_region;
#else
        &boxed_region;
#endif
    void *new_obj;
    void *new_free_pointer;

    gc_assert(nbytes > 0);
    /* Check for alignment allocation problems. */
    gc_assert((((unsigned)region->free_pointer & LOWTAG_MASK) == 0)
              && ((nbytes & LOWTAG_MASK) == 0));

#if 0
    if (all_threads)
        /* there are a few places in the C code that allocate data in the
         * heap before Lisp starts. This is before interrupts are enabled,
         * so we don't need to check for pseudo-atomic. */
#ifdef LISP_FEATURE_SB_THREAD
        if (!SymbolValue(PSEUDO_ATOMIC_ATOMIC, th)) {
            register u32 fs;
            fprintf(stderr, "fatal error in thread 0x%x, pid=%d\n",
                    th, getpid());
            __asm__("movl %fs,%0" : "=r" (fs) : );
            fprintf(stderr, "fs is %x, th->tls_cookie=%x \n",
                    debug_get_fs(), th->tls_cookie);
            lose("If you see this message before 2004.01.31, mail details to sbcl-devel\n");
        }
#else
    gc_assert(SymbolValue(PSEUDO_ATOMIC_ATOMIC, th));
#endif
#endif

    /* maybe we can do this quickly ... */
    new_free_pointer = region->free_pointer + nbytes;
    if (new_free_pointer <= region->end_addr) {
        new_obj = (void*)(region->free_pointer);
        region->free_pointer = new_free_pointer;
        return (new_obj); /* yup */
    }

    /* we have to go the long way around, it seems. Check whether we
     * should GC in the near future. */
    if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) {
        /* set things up so that GC happens when we finish the PA
         * section. We only do this if there wasn't a pending handler
         * already, in case it was a GC. If it wasn't a GC, the next
         * allocation will get us back to this point anyway, so no
         * harm done. */
        struct interrupt_data *data = th->interrupt_data;
        if (!data->pending_handler)
            maybe_defer_handler(interrupt_maybe_gc_int, data, 0, 0, 0);
    }
    new_obj = gc_alloc_with_region(nbytes, 0, region, 0);
    return (new_obj);
}
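
/* A minimal usage sketch (illustrative only, not part of this file):
 * a C-side caller requests a LOWTAG_MASK-aligned number of bytes and,
 * per the comment above alloc(), may rely on the returned memory being
 * zero filled. The function name example_alloc_two_words is
 * hypothetical. */
#if 0
static lispobj *
example_alloc_two_words(void)
{
    /* 2 * sizeof(lispobj) == 8 bytes on x86, which satisfies the
     * (nbytes & LOWTAG_MASK) == 0 assertion in alloc(). */
    return (lispobj *)alloc(2 * sizeof(lispobj));
}
#endif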

/*
 * shared support for the OS-dependent signal handlers which
 * catch GENCGC-related write-protect violations
 */

void unhandled_sigmemoryfault(void);

/* Depending on which OS we're running under, different signals might
 * be raised for a violation of write protection in the heap. This
 * function factors out the common generational GC magic which needs
 * to be invoked in this case, and should be called from whatever
 * signal handler is appropriate for the OS we're running under.
 *
 * Return true if this signal is a normal generational GC thing that
 * we were able to handle, or false if it was abnormal and control
 * should fall through to the general SIGSEGV/SIGBUS/whatever logic. */
int
gencgc_handle_wp_violation(void* fault_addr)
{
    long page_index = find_page_index(fault_addr);

#ifdef QSHOW_SIGNALS
    FSHOW((stderr, "heap WP violation? fault_addr=%x, page_index=%d\n",
           fault_addr, page_index));
#endif

    /* Check whether the fault is within the dynamic space. */
    if (page_index == (-1)) {

        /* It can be helpful to be able to put a breakpoint on this
         * case to help diagnose low-level problems. */
        unhandled_sigmemoryfault();

        /* not within the dynamic space -- not our responsibility */
        return 0;

    } else {
        if (page_table[page_index].write_protected) {
            /* Unprotect the page. */
            os_protect(page_address(page_index), PAGE_BYTES, OS_VM_PROT_ALL);
            page_table[page_index].write_protected_cleared = 1;
            page_table[page_index].write_protected = 0;
        } else {
            /* The only acceptable reason for this signal on a heap
             * access is that GENCGC write-protected the page.
             * However, if two CPUs hit a wp page near-simultaneously,
             * we had better not have the second one lose here if it
             * does this test after the first one has already set wp=0. */
            if (page_table[page_index].write_protected_cleared != 1)
                lose("fault in heap page not marked as write-protected");
        }
        /* Don't worry, we can handle it. */
        return 1;
    }
}

/* This is to be called when we catch a SIGSEGV/SIGBUS, determine that
 * it's not just a case of the program hitting the write barrier, and
 * are about to let Lisp deal with it. It's basically just a
 * convenient place to set a gdb breakpoint. */
void
unhandled_sigmemoryfault()
{}

void gc_alloc_update_all_page_tables(void)
{
    /* Flush the alloc regions updating the tables. */
    struct thread *th;
    for_each_thread(th)
        gc_alloc_update_page_tables(0, &th->alloc_region);
    gc_alloc_update_page_tables(1, &unboxed_region);
    gc_alloc_update_page_tables(0, &boxed_region);
}

void
gc_set_region_empty(struct alloc_region *region)
{
    region->first_page = 0;
    region->last_page = -1;
    region->start_addr = page_address(0);
    region->free_pointer = page_address(0);
    region->end_addr = page_address(0);
}