1 //////////////////////////////////////////////////////////////////////////////
4 // ADLib, Prop and their related set of tools and documentation are in the
5 // public domain. The author(s) of this software reserve no copyrights on
6 // the source code and any code generated using the tools. You are encouraged
7 // to use ADLib and Prop to develop software, in both academic and commercial
8 // settings, and are welcomed to incorporate any part of ADLib and Prop into
11 // Although you are under no obligation to do so, we strongly recommend that
12 // you give away all software developed using our tools.
14 // We also ask that credit be given to us when ADLib and/or Prop are used in
15 // your programs, and that this notice be preserved intact in all the source
18 // This software is still under development and we welcome (read: crave for)
19 // any suggestions and help from the users.
21 // Allen Leung (leunga@cs.nyu.edu)
23 //////////////////////////////////////////////////////////////////////////////
31 #include <AD/gc/gcconfig.h> // system configuration
32 #include <AD/gc/bgc.h> // Bartlett's garbage collector
33 #include <AD/gc/gcobject.h> // garbage collectable objects
34 #include <AD/gc/gcheaps.h> // the heap manager
35 #include <AD/gc/gcmacros.h> // useful macros
36 #include <AD/gc/weakptr.h> // weak pointers
37 #include <AD/gc/gctimer.h>
//////////////////////////////////////////////////////////////////////////////
// Some identifying information.
//////////////////////////////////////////////////////////////////////////////
// #define DEBUG_GC          // no debugging for production use
#define SANITY_CHECK         // inexpensive checking (left in place for your assurance)
#define ALGORITHM "Mostly copying with blacklisting"
#define VERSION   "0.5 (Feb 11th, 1995)"
51 //////////////////////////////////////////////////////////////////////////////
53 //////////////////////////////////////////////////////////////////////////////
54 typedef HM::GCPageId GCPageId
;
55 typedef HM::PageStatus PageStatus
;
57 //////////////////////////////////////////////////////////////////////////////
58 // Method that returns a name for this class
59 //////////////////////////////////////////////////////////////////////////////
60 const char * BGC::my_name() const { return "BGC"; }
62 //////////////////////////////////////////////////////////////////////////////
64 //////////////////////////////////////////////////////////////////////////////
// NOTE(review): what follows is the interior of the BGC constructor; the
// surrounding signature and brace lines were dropped by the numbered-listing
// extraction (note the gaps in the residue line numbers) — TODO recover from
// the original source.
67 // Perform initialization steps:
// The collector starts out in the mutation (normal allocation) phase.
71 phase
= mutation_phase
;
// Default sizing policy: first heap is 128K, later expansions at least 512K
// (these are the two values min_growth() returns).
74 initial_heap_size
= 128 * 1024;
75 min_heap_growth
= 512 * 1024;
76 should_collect
= false;
// Record the identifying strings in the statistics block, then zero the
// remaining counters.
78 stat
.algorithm
= ALGORITHM
;
79 stat
.version
= VERSION
;
80 reset_statistics(stat
);
83 //////////////////////////////////////////////////////////////////////////////
85 //////////////////////////////////////////////////////////////////////////////
91 //////////////////////////////////////////////////////////////////////////////
92 // Method that releases all the memory within the collector.
93 // The collector is now in a clean slate (though the statistics are
94 // not reset). Use with caution!
95 //////////////////////////////////////////////////////////////////////////////
// NOTE(review): the signature and most of the body of this method (listing
// lines 96-102) are missing from this extract; only the call that hands all
// of this collector's pages back to the heap manager survives.
103 HM::release_all_pages(this);
106 //////////////////////////////////////////////////////////////////////////////
107 // Method that compute the minimal amount of heap expansion
108 // that should be done.
109 //////////////////////////////////////////////////////////////////////////////
110 size_t BGC::min_growth()
112 return heap_size
== 0 ? initial_heap_size
: min_heap_growth
;
115 //////////////////////////////////////////////////////////////////////////////
116 // Method to allocate a new object of a given size
117 //////////////////////////////////////////////////////////////////////////////
118 void * BGC::m_alloc(size_t n
)
119 { // round up the size
120 size_t bytes
= GC_ROUNDUP_SIZE(n
+ sizeof(GCHeader
));
122 // if we have enough space in current fragment, then use it.
123 if (heap_pointer
+ bytes
> gc_limit
) {
124 // No more space left, we'll try to get some more from the
125 // the heap manager and/or garbage collect.
128 // Try garbage collection first if the gc_ratio is reached.
130 if (should_collect
) collect();
133 // Now try to expand the heap if there is still a need to.
135 if (heap_pointer
+ bytes
> gc_limit
) grow_heap (bytes
);
138 // Get new storage for object.
139 // Stamp the starting address into the bitmap.
141 GCObject
* new_obj
= GC_OBJ_OBJECT_ADDR(heap_pointer
);
142 GC_OBJ_SET_HEADER(new_obj
, bytes
);
143 heap_pointer
+= bytes
;
144 HM::object_map
.mark(new_obj
);
148 //////////////////////////////////////////////////////////////////////////////
149 // Method to deallocate an object manually.
150 // Note: this method tries to catch errors involving deallocation.
151 //////////////////////////////////////////////////////////////////////////////
152 void BGC::free(void * obj
)
154 // Ignores all requests that do not lie on our heap space
155 if (! GC_IS_A_POINTER(obj
)) return;
156 if (HM::is_mapped(obj
) &&
157 HM::page_gc(obj
) == id
&&
158 HM::get_object_map().is_marked(obj
)) {
159 // Just unmark the object. The space is not really
160 // reclaimed at this point; instead it will be reclaimed
161 // at the next collection.
162 HM::object_map
.unmark(obj
);
165 // Hold on, Pedro! The object doesn't seem to exist!
167 << ": application tries to free non-existent object at "
168 << (void*)obj
<< " ]\n" << flush
;
172 //////////////////////////////////////////////////////////////////////////////
173 // Method for expanding the heap.
174 // We try to expand the heap only if the number of bytes requested
175 // is not immediately available.
176 //////////////////////////////////////////////////////////////////////////////
177 void BGC::grow_heap (size_t bytes
)
// Remember the total space (heap owned + free in the manager) before the
// request, so we can tell at the bottom whether new pages were obtained.
179 size_t old_total
= heap_size
+ HM::bytes_free();
// The caller's request plus room for one object header.
180 size_t wanted
= bytes
+ sizeof(GCHeader
);
// NOTE(review): the declarations of `storage` and `growth` and the first
// arguments of HM::allocate_pages (listing lines 181-182, 184, 186, 188)
// were lost in extraction; both names are used below — verify against the
// original source.
183 (Byte
*)HM::allocate_pages
185 (heap_size
== 0 ? initial_heap_size
: min_heap_growth
),
// During the copying phase freshly obtained pages belong to the to-space
// ("newly allocated"); otherwise they are ordinary pages.
187 phase
== copying_phase
? HM::newly_allocated
:
// NOTE(review): the failure test guarding this out-of-memory message
// (listing lines 189-192) is missing here.
193 cerr
<< "[ GC" << id
<< ": Out of memory! Can't allocate "
194 << bytes
<< " bytes from system. ]\n" << flush
;
// Set up the allocation window inside the new storage: allocation starts
// GC_ALIGNMENT - sizeof(GCHeader) bytes in, so each object header lands
// just before an aligned object address.
198 heap_pointer
= storage
+ GC_ALIGNMENT
- sizeof(GCHeader
);
199 heap_limit
= storage
+ growth
;
200 heap_start
= heap_pointer
;
201 heap_size
+= heap_limit
- heap_pointer
;
202 heap_used
= heap_size
- (heap_limit
- heap_pointer
);
203 size_t new_total
= heap_size
+ HM::bytes_free();
// gc_limit implements the gc_ratio policy: once allocation crosses it we
// request a collection before expanding further.
204 gc_limit
= storage
+ new_total
* gc_ratio
/ 100 - heap_used
;
205 should_collect
= gc_limit
<= heap_limit
;
206 if (gc_limit
> heap_limit
) gc_limit
= heap_limit
;
// If the pending request itself would cross gc_limit, open the window up
// to the full heap limit so the allocation can proceed.
207 if (heap_pointer
+ wanted
> gc_limit
) gc_limit
= heap_limit
;
208 if (new_total
> old_total
) {
209 stat
.number_of_heap_expansions
++;
210 heap_growth_message(heap_size
, growth
);
214 //////////////////////////////////////////////////////////////////////////////
215 // Method for tracing an object pointer.
216 // Tracing moves object currently in the heap into the to-space.
217 //////////////////////////////////////////////////////////////////////////////
// NOTE(review): several listing lines are missing inside this method
// (219, 226-227, 232, 234, 237-242, 245, 249-251, 254-255, 258, 260,
// 264-265, 268, 273-277, 280-281, 284, 287, 290-291) — the gaps in the
// residue numbering mark where braces, #else/#endif alternates and sanity
// guards were lost in extraction.
218 GCObject
* BGC::trace (GCObject
* ptr
)
220 #ifdef GC_HAS_CROSS_HEAP_POINTERS
221 // make sure it is a pointer first.
222 // Since Prop uses small unmapped addresses to represent
223 // unit functors it is a good idea to check first if we allow
224 // cross heap pointer lest these things get mistaken for the real thing.
225 if (! GC_IS_A_POINTER(ptr
)) return ptr
;
228 // Objects not within the current collector heap are skipped.
229 if (! IS_WITHIN_BOUNDS(ptr
) || ! IS_USED_ADDR(ptr
,id
)) {
230 #ifdef GC_HAS_CROSS_HEAP_POINTERS
231 // We'll grow the traversed map incrementally
233 assert(HM::traversed_map
.is_within_bounds(ptr
));
// Each foreign pointer is traversed at most once per collection: the
// traversed_map bit prevents revisiting it.
235 if (! HM::traversed_map
.is_marked(ptr
)) {
236 HM::traversed_map
.mark(ptr
);
243 // Get starting address of object.
244 void * obj_ptr
= HM::object_map
.starting_addr(ptr
);
246 // If it is a new object then just return, since it must already
247 // have been properly traced.
248 if (IS_NEW_ADDR(obj_ptr
)) return ptr
;
252 cerr
<< "[ Warning: pointer is at " << (void*)ptr
<< " but object is "
253 << (void*)obj_ptr
<< " ]\n" << flush
;
256 // Get header of object
257 GCHeader header
= GC_OBJ_HEADER(obj_ptr
);
259 // If it is a forwarded object then set the forwarding address;
// The interior-pointer offset (ptr - obj_ptr) is preserved when
// redirecting through the forwarding address.
261 if (GC_OBJ_IS_FORWARDED(header
)) {
262 return (GCObject
*)((Byte
*)GC_OBJ_FORWARD_ADDR(header
) +
263 ((Byte
*)ptr
- (Byte
*)obj_ptr
));
266 // If the object is marked live then it cannot be moved, so just return
267 if (HM::live_map
.is_marked(obj_ptr
)) return ptr
;
269 // Okay, seems like we'll have to copy this object.
270 size_t size
= GC_OBJ_HEADER_LEN(header
);
271 if (heap_pointer
+ size
> heap_limit
) {
272 bytes_copied
+= heap_pointer
- heap_start
;
274 enqueue_pages_to_scan(heap_pointer
, heap_limit
);
278 GCObject
* new_obj
= GC_OBJ_OBJECT_ADDR(heap_pointer
);
279 HM::object_map
.mark(new_obj
);
282 int * src
= (int*)GC_OBJ_HEADER_ADDR(obj_ptr
);
283 int * dest
= (int*)heap_pointer
;
// NOTE(review): listing line 284 is missing — presumably it converts
// `size` from bytes to int-sized words before this word-copy loop;
// verify against the original source.
285 while (size
-- > 0) { *dest
++ = *src
++; }
286 heap_pointer
= (Byte
*)dest
;
288 // Leave a forwarding address in the header of the old object.
289 GC_OBJ_SET_FORWARD(obj_ptr
, new_obj
);
// Return the new location, preserving the interior-pointer offset.
292 return (GCObject
*)((Byte
*)new_obj
+ ((Byte
*)ptr
- (Byte
*)obj_ptr
));
295 //////////////////////////////////////////////////////////////////////////////
296 // Method for starting the garbage collection
297 //////////////////////////////////////////////////////////////////////////////
298 void BGC::collect(int level
)
299 { // if we are already performing collection don't start
300 // it up again until it is finished.
301 if (phase
== mutation_phase
&& heap_size
> 0) do_collect (level
);
304 //////////////////////////////////////////////////////////////////////////////
305 // Method for doing the actual garbage collection
306 //////////////////////////////////////////////////////////////////////////////
// NOTE(review): listing lines are missing throughout this method (308,
// 312-313, 316, 318, 321, 327, 329-330, 343, 347, 349, 351, 353, 358-359,
// 364, 370, 372, 382, 389, 394, 403, 407, 416-417, 424, 426, 431) — among
// them the declaration of `reg_roots` (presumably a jmp_buf — TODO confirm)
// and the `} else {` of the stack-direction test below.
307 void BGC::do_collect(int /* level */)
309 ///////////////////////////////////////////////////////////////////////////
310 // This will hold all the registers
311 ///////////////////////////////////////////////////////////////////////////
314 ///////////////////////////////////////////////////////////////////////////
315 // Flush registers into the jmp_buf, which'll become part of the
317 ///////////////////////////////////////////////////////////////////////////
319 // if (_setjmp(reg_roots) == 0) _longjmp(reg_roots,1);
// setjmp spills the callee-saved registers into reg_roots; longjmp
// immediately returns here so the buffer can be scanned as roots.
320 if (setjmp(reg_roots
) == 0) longjmp(reg_roots
,1);
322 ///////////////////////////////////////////////////////////////////////////
323 // Setup various limits for scanning
324 ///////////////////////////////////////////////////////////////////////////
325 if (is_downward_stack
) {
326 stack_top
= (void**)((Byte
*)reg_roots
);
328 stack_top
= (void**)((Byte
*)reg_roots
+ sizeof(reg_roots
));
331 ///////////////////////////////////////////////////////////////////////////
332 // Move the heap pointer to the next page for the to-space.
333 // Compute the amount of heap used.
334 // Mark the new area as newly allocated.
335 ///////////////////////////////////////////////////////////////////////////
336 heap_pointer
= (Byte
*)GC_ROUNDUP_PAGE_ADDR(heap_pointer
)
337 + GC_ALIGNMENT
- sizeof(GCHeader
);
338 heap_used
= heap_size
- (heap_limit
- heap_pointer
)
339 + (GC_ALIGNMENT
- sizeof(GCHeader
));
340 heap_start
= heap_pointer
;
341 HM::mark_space(heap_pointer
, heap_limit
, HM::newly_allocated
);
// Collection is in progress: allow allocation up to the full heap limit.
342 gc_limit
= heap_limit
;
344 ///////////////////////////////////////////////////////////////////////////
345 // Setup various statistics.
346 ///////////////////////////////////////////////////////////////////////////
348 stat
.number_of_collections
++;
350 ///////////////////////////////////////////////////////////////////////////
352 ///////////////////////////////////////////////////////////////////////////
354 GCTimer user_time_at_start
;
355 GCTimer system_time_at_start
;
356 user_time_at_start
.get_user_time();
357 system_time_at_start
.get_system_time();
360 ///////////////////////////////////////////////////////////////////////////
361 // Print the annoying garbage collection message
362 ///////////////////////////////////////////////////////////////////////////
363 start_collection_message();
365 ///////////////////////////////////////////////////////////////////////////
366 // Setup the new scanning queue if necessary.
367 // The 4 below is just some slack;
368 ///////////////////////////////////////////////////////////////////////////
369 size_t max_pages
= 2 * heap_used
/ GC_PAGE_SIZE
+ 4;
371 grow_scan_queue(max_pages
);
373 ///////////////////////////////////////////////////////////////////////////
374 // Locate and mark all the roots in different areas of the memory
375 ///////////////////////////////////////////////////////////////////////////
376 phase
= marking_phase
;
377 scan_stack_area (); // scan the stack area
378 scan_static_area (); // scan the static area
379 get_heap_top(); // locate the start of the heap.
380 scan_heap_area (); // scan the heap area
381 scan_user_roots (); // scan the user supplied roots
383 ///////////////////////////////////////////////////////////////////////////
384 // Now scan all promoted pages and copy all promoted objects.
385 ///////////////////////////////////////////////////////////////////////////
386 phase
= copying_phase
;
387 copy_promoted_pages (); // now move objects around.
388 bytes_copied
+= heap_pointer
- heap_start
;
390 ///////////////////////////////////////////////////////////////////////////
391 // Free all unpromoted pages
392 ///////////////////////////////////////////////////////////////////////////
393 HM::discard_from_space(this, heap_size
, pages_removed
);
395 ///////////////////////////////////////////////////////////////////////////
396 // Recompute statistics and adjust gc limit.
397 ///////////////////////////////////////////////////////////////////////////
398 heap_used
= heap_size
- (heap_limit
- heap_pointer
);
399 size_t new_total
= heap_size
+ HM::bytes_free();
400 gc_limit
= heap_pointer
+ new_total
* gc_ratio
/ 100L - heap_used
;
401 should_collect
= gc_limit
<= heap_limit
;
402 if (gc_limit
> heap_limit
) gc_limit
= heap_limit
;
404 ///////////////////////////////////////////////////////////////////////////
405 // Compute the time used for collection.
406 ///////////////////////////////////////////////////////////////////////////
408 GCTimer user_time_at_end
;
409 GCTimer system_time_at_end
;
410 user_time_at_end
.get_user_time();
411 system_time_at_end
.get_system_time();
412 stat
.gc_user_time
= user_time_at_end
- user_time_at_start
;
413 stat
.gc_system_time
= system_time_at_end
- system_time_at_start
;
414 stat
.total_gc_user_time
+= stat
.gc_user_time
;
415 stat
.total_gc_system_time
+= stat
.gc_system_time
;
418 ///////////////////////////////////////////////////////////////////////////
419 // Now clean up properly
420 ///////////////////////////////////////////////////////////////////////////
421 phase
= mutation_phase
;
422 #ifdef GC_HAS_CROSS_HEAP_POINTERS
423 HM::traversed_map
.clear();
// Scrub the register snapshot so stale pointers in it cannot be mistaken
// for roots by a later scan.
425 memset(reg_roots
, 0, sizeof(reg_roots
));
427 ///////////////////////////////////////////////////////////////////////////
428 // Print the equally annoying end of collection message.
429 ///////////////////////////////////////////////////////////////////////////
430 end_collection_message();
433 //////////////////////////////////////////////////////////////////////////////
434 // Method for scanning a consecutive block of memory for roots.
435 //////////////////////////////////////////////////////////////////////////////
// NOTE(review): listing lines are missing inside this method (437,
// 442-443, 448, 454, 457, 461, 463, 466, 468, 472, 476-479, 481, 483,
// 486-487, 493, 495-497, 500, 504-509) — including what is presumably the
// declaration `void * ptr = *P;` used throughout the loop body, the debug
// guards around the cerr messages, and various closing braces; verify
// against the original source.
436 inline void BGC::scan_for_roots (void ** start
, void ** stop
)
438 // Scan consecutively.
439 // Caution: addresses between start and stop (not including stop)
440 // must be mapped by the OS.
441 for (void ** P
= (void**)GC_TRUNC_ADDR(start
); P
< stop
; P
++) {
444 // Try to interpret each cell as a pointer and see if it
445 // falls within any pages of this collectable heap.
446 if (! IS_WITHIN_BOUNDS(ptr
)) continue;
447 if (! IS_USED_ADDR(ptr
, id
)) continue;
449 // If so, locate the starting address (the pointer may be
450 // an interior pointer) and trace the object.
451 Byte
* obj
= (Byte
*)HM::object_map
.starting_addr(ptr
);
452 if (obj
== 0) continue;
453 if (! IS_USED_ADDR(obj
, id
)) continue;
455 // Get the header of the object.
456 GCHeader header
= GC_OBJ_HEADER(obj
);
458 // Now check whether the pointer actually lies within the
459 // boundary of the object.
460 Byte
* limit
= obj
+ GC_OBJ_HEADER_LEN(header
) - sizeof(GCHeader
);
462 if ((Byte
*)ptr
>= limit
) continue; // Nope.
464 // Finally, we've found a object. Promote the page(s) in which
465 // it resides and mark the object as live.
467 cerr
<< "[ Promoting object *"
469 << (void*)ptr
<< ", "
470 << (void*)obj
<< " ... "
471 << (void*)limit
<< " ]\n" << flush
;
473 if (! IS_PROMOTED_ADDR(obj
) && ! IS_FROM_SPACE_ADDR(obj
)) {
474 cerr
<< "[ Shit! Object is not at from space, header = "
475 << (void*)header
<< " status = " << (int)SPACE_TYPE(obj
)
// Objects found via an ambiguous root cannot be moved: pin them by
// marking them live and promoting every page they straddle.
480 if (HM::live_map
.is_marked(obj
)) continue;
482 HM::live_map
.mark(obj
); // mark it as live
// First and last page straddled by the object (header included).
484 GCPageId p
= GC_PAGE_ID(GC_OBJ_HEADER_ADDR(obj
));
485 GCPageId q
= GC_PAGE_ID(limit
-1);
488 // Now make all pages straddled by object as promote. Be extremely
489 // careful in promote all pages. Make sure boundary conditions
490 // are taken care of.
491 for ( ;p
<= q
; p
++) {
492 if (! IS_PROMOTED_PAGE(p
)) {
494 enqueue_pages_to_scan(GC_PAGE_ADDR(p
), GC_PAGE_ADDR(p
+1));
498 cerr
<< "[ New promoted page "
499 << (void*)GC_PAGE_ADDR(p
)
501 << (void*)GC_PAGE_ADDR(p
+1)
502 << " gc = " << (int)PAGE_GC(p
)
503 << " space = " << (int)PAGE_SPACE_TYPE(p
)
511 //////////////////////////////////////////////////////////////////////////////
512 // Method for scanning the stack area
513 //////////////////////////////////////////////////////////////////////////////
514 void BGC::scan_stack_area()
516 if (is_downward_stack
) {
517 // stack grows to lower addresses
518 scanning_message("registers and stack", stack_top
, stack_bottom
);
519 scan_for_roots (stack_top
, stack_bottom
);
521 scanning_message("registers and stack", stack_bottom
, stack_top
);
522 scan_for_roots (stack_bottom
, stack_top
);
526 //////////////////////////////////////////////////////////////////////////////
527 // Method for scanning the static area.
528 // We'll have to take into account of all the static data used
529 // by the heap manager.
530 //////////////////////////////////////////////////////////////////////////////
// NOTE(review): listing lines are missing inside this method (532, 534,
// 536, 540, 542, 545-546, 549, 551-552) — among them the declarations of
// `i` and `n` and the inner loop header that walks the blacklist entries;
// verify the exact control structure against the original source.
531 void BGC::scan_static_area()
533 scanning_message("static data area", data_bottom
, data_top
);
// The heap manager supplies a list of address ranges within the static
// data segment that must NOT be scanned (its own bookkeeping tables).
535 const HM::AddrRange
* blacklisted
= HM::blacklisted_data(n
);
537 void ** start
= data_bottom
;
538 void ** stop
= data_top
;
// Walk the static area in slices, skipping each blacklisted range.
539 for (start
= data_bottom
; start
= data_bottom
; start
< data_top
; start
= stop
) {
541 if (start
> (void**)blacklisted
[i
].stop
)
543 else if (start
>= (void**)blacklisted
[i
].start
)
544 { start
= (void**)blacklisted
[i
].stop
; i
++; }
// The next slice to scan runs up to the next blacklisted range, or to
// the end of the static area once the blacklist is exhausted.
547 if (i
< n
) stop
= (void**)blacklisted
[i
].start
;
548 else stop
= data_top
;
550 scan_for_roots (start
, stop
);
554 //////////////////////////////////////////////////////////////////////////////
555 // Method for scanning the heap area
556 //////////////////////////////////////////////////////////////////////////////
// NOTE(review): listing lines are missing inside this method (558, 560,
// 564-565, 568, 573, 577, 582, 585, 587, 591, 594, 596-597, 600, 602-605,
// 608-609) — including, per the surviving comments below, the switch cases
// that skip promoted and newly allocated pages; verify against the
// original source.
557 void BGC::scan_heap_area()
559 scanning_message("non-collectable heap", heap_bottom
, heap_top
);
561 // Scan consecutively.
562 // Caution: addresses between start and stop (not including stop)
563 // must be mapped by the OS.
566 // Scan all pages in the user heap that are NOT owned by this heap.
567 // We'll do it one page at a time for now.
569 void ** P
, ** page_limit
;
570 for (P
= heap_bottom
; P
< heap_top
; P
= page_limit
) {
571 page_limit
= (void**)GC_TRUNC_PAGE_ADDR((Byte
*)P
+ GC_PAGE_SIZE
);
572 if (page_limit
> heap_top
) page_limit
= heap_top
;
574 // Check to make sure that the address is not been currently
575 // used by us or allocated but not used.
576 GCPageId page_id
= GC_PAGE_ID(P
);
578 if (IS_WITHIN_TRACKED_LIMITS(P
)) {
579 switch (PAGE_SPACE_TYPE(page_id
)) {
580 // not allocated page will be scanned
581 case HM::not_allocated
: break;
583 // blacklisted pages will be skipped
584 case HM::blacklisted
: continue;
586 // This page has been allocated... check some more....
588 { GC_ID the_id
= PAGE_GC(page_id
);
589 // unused pages will be skipped.
590 if (the_id
== 0) continue;
592 // pages within this heap will be skipped.
593 if (the_id
== id
) continue;
595 // allocated pages on other heaps will be scanned.
598 // case HM::to_space: continue;
599 // case HM::newly_allocated: continue;
601 // promoted pages and newly allocated pages will be skipped.
606 // The rest of the stuff are scanned
607 scan_for_roots(P
, page_limit
);
611 //////////////////////////////////////////////////////////////////////////////
612 // Method to perform finalization after all promoted
613 // objects have been moved. Objects that hasn't been moved and
614 // is not promoted must be garbage and can be safely finalized.
615 //////////////////////////////////////////////////////////////////////////////
// NOTE(review): listing lines are missing inside this method (617,
// 619-620, 622, 634-637, 639, 644, 646-647, 649-655) — including the
// else-branches of the forwarding test and the closing braces of the
// switch and page loops; verify against the original source.
616 void BGC::do_finalization()
// Finalization is optional; bail out immediately when disabled.
618 if (! should_finalize
) return;
621 // Invoke finalization methods of dying objects.
623 foreach_page_owned_by_collector(page_id
, this) {
624 switch (PAGE_SPACE_TYPE(page_id
)) {
625 case GCHeapManager::from_space
:
626 case GCHeapManager::to_space
:
627 { Byte
* page_addr
= (Byte
*)GC_PAGE_ADDR(page_id
);
628 Byte
* page_limit
= page_addr
+ GC_PAGE_SIZE
;
629 while (page_addr
< page_limit
) {
630 if (GCHeapManager::object_map
.is_marked(page_addr
)) {
631 GCHeader header
= GC_OBJ_HEADER(page_addr
);
632 if (GC_OBJ_IS_FORWARDED(header
)) {
633 // Object has been moved; get its length from the
638 GC_OBJ_FORWARD_ADDR(header
)));
640 // Object hasn't been forwarded.
641 // Finalize all unpromoted objects.
// The destructor is invoked explicitly: storage is reclaimed by the
// collector, not by operator delete.
642 if (! GCHeapManager::live_map
.is_marked(page_addr
)) {
643 ((GCObject
*)page_addr
)->~GCObject();
645 page_addr
+= GC_OBJ_HEADER_LEN(header
);
// No object starts here: advance by one alignment unit and retest.
648 page_addr
+= GC_ALIGNMENT
;
656 //////////////////////////////////////////////////////////////////////////////
657 // Method to scan pages within the scan queue and copy.
658 //////////////////////////////////////////////////////////////////////////////
// NOTE(review): listing lines are missing inside this method (660,
// 663-664, 667-668, 670-671, 680-682, 684, 687-689, 691, 693-695,
// 697-701, 705-708, 713, 720-723, 725, 727, 730-731, 739, 744-745) —
// among them the declaration of the loop index `i`, the increments of
// `objects_traced`, the `page_addr` declaration of the second loop, and
// various closing braces; verify against the original source.
659 void BGC::copy_promoted_pages ()
662 phase
= copying_phase
;
665 // Set up the pages to scan
666 enqueue_pages_to_scan(heap_pointer
, heap_limit
);
669 size_t objects_traced
= 0;
672 // Scan all promoted pages first.
673 for (i
= 0; i
< pages_promoted
; i
++) {
674 Byte
* page_addr
= (Byte
*)GC_PAGE_ADDR(scan_queue
[i
]);
675 Byte
* page_limit
= page_addr
+ GC_PAGE_SIZE
;
676 while (page_addr
< page_limit
) {
677 if (HM::object_map
.is_marked(page_addr
)) {
// Only pinned (live) objects on promoted pages are traced in place.
678 if (HM::live_map
.is_marked(page_addr
)) {
679 ((GCObject
*)page_addr
)->trace(this);
683 page_addr
+= GC_OBJ_HEADER_LEN(GC_OBJ_HEADER(page_addr
));
685 GCHeader header
= GC_OBJ_HEADER(page_addr
);
686 if (GC_OBJ_IS_FORWARDED(header
)) {
690 GC_OBJ_FORWARD_ADDR(header
)));
692 page_addr
+= GC_OBJ_HEADER_LEN(header
);
696 page_addr
+= GC_ALIGNMENT
;
// Internal consistency check: every promoted object should have been
// traced exactly once during the pass above.
702 if (objects_traced
!= objects_promoted
) {
703 cerr
<< "[ Bug: number of objects promoted = " << objects_promoted
704 << " while the number of objects traced = " << objects_traced
709 // Now scan all other pages on the queue.
710 // Since these are new objects they are guaranteed to packed together.
711 // We'll use faster code that avoids searching thru the bitmap bit by bit.
712 for ( ; i
< number_of_pages_to_scan
; i
++) {
714 (Byte
*)GC_PAGE_ADDR(scan_queue
[i
]) + GC_ALIGNMENT
;
715 Byte
* page_limit
= (Byte
*)GC_PAGE_ADDR(scan_limit_queue
[i
]);
716 while (page_addr
< page_limit
) {
717 if (! HM::object_map
.is_marked(page_addr
)) break;
718 ((GCObject
*)page_addr
)->trace(this);
719 page_addr
+= GC_OBJ_HEADER_LEN(GC_OBJ_HEADER(page_addr
));
724 // Scavenge the table of weak pointers.
726 do_weak_pointer_collection();
728 // All objects have now been properly moved to a new space.
729 // Perform proper finalization at this point.
732 // Finally, clear the affected part of the live map in the promoted
733 // pages. All object currently marked as allocated but not alive will
734 // be reset. All live bit will also be cleared in the same process.
735 for (i
= 0; i
< pages_promoted
; i
++) {
736 GCPageId page
= scan_queue
[i
];
737 Byte
* page_addr
= (Byte
*)GC_PAGE_ADDR(page
);
738 Byte
* page_limit
= page_addr
+ GC_PAGE_SIZE
;
740 // copy the live map into the object map.
741 HM::object_map
.copy(page_addr
,page_limit
,HM::live_map
);
742 // clear the live map.
743 HM::live_map
.clear(page_addr
, page_limit
);
747 //////////////////////////////////////////////////////////////////////////////
748 // Accounting method.
749 //////////////////////////////////////////////////////////////////////////////
750 BGC::Statistics
BGC::statistics ()
752 stat
.bytes_used
= heap_size
- (heap_limit
- heap_pointer
);
753 stat
.bytes_managed
= heap_size
;
754 stat
.bytes_free
= heap_limit
- heap_pointer
;
758 //////////////////////////////////////////////////////////////////////////////
759 // Method for printing a heap growth message
760 //////////////////////////////////////////////////////////////////////////////
761 void BGC::heap_growth_message(size_t/*heap_size_now*/, size_t /*growth*/) const
763 if ((verbosity_level
& gc_notify_heap_expansion
) && console
) {
764 size_t total
= heap_size
+ HM::bytes_free();
765 size_t percent
= heap_used
* 100 / total
;
766 (*console
) << "[ GC" << id
<< ": increasing heap... "
767 << heap_used
<< '/' << total
768 << " (" << percent
<< "%) ]\n" << flush
;
772 //////////////////////////////////////////////////////////////////////////////
773 // Method for printing a start collection message
774 //////////////////////////////////////////////////////////////////////////////
775 void BGC::start_collection_message() const
777 if (is_verbose() && console
) {
778 size_t total
= heap_size
+ HM::bytes_free();
779 size_t percent
= heap_used
* 100 / total
;
780 (*console
) << "[ GC" << id
<< ": collecting... "
781 << heap_used
<< '/' << total
782 << " (" << percent
<< "%) ]\n" << flush
;
786 //////////////////////////////////////////////////////////////////////////////
787 // Method for printing a end collection message
788 //////////////////////////////////////////////////////////////////////////////
// NOTE(review): listing lines are missing inside this method (790, 796,
// 798-799, 803-804, 806, 809-810, 812-817) — fragments of the console
// output expressions and the closing braces; verify against the original
// source before editing.
789 void BGC::end_collection_message() const
791 if (is_verbose() && console
) {
792 size_t total
= heap_size
+ HM::bytes_free();
793 size_t percent
= heap_used
* 100 / total
;
794 (*console
) << "[ GC" << id
<< ": done... "
795 << heap_used
<< '/' << total
<< " ("
// Optional extra heap-traffic report: promoted/moved/freed byte counts.
797 if (verbosity_level
& gc_notify_heap_info
)
800 << (pages_promoted
* GC_PAGE_SIZE
) << " promoted, "
801 << bytes_copied
<< " moved, "
802 << (long)(GC_PAGE_SIZE
* pages_removed
- bytes_copied
)
805 (*console
) << " ]\n" << flush
;
// Optional timing report using the statistics gathered in do_collect().
807 if (verbosity_level
& gc_print_collection_time
) {
808 (*console
) << "[ GC" << id
<< ": user time: "
811 << stat
.gc_system_time
819 //////////////////////////////////////////////////////////////////////////////
820 // Create an instance of BGC and make that the default collector.
821 // The users are free to create others if necessary.
822 //////////////////////////////////////////////////////////////////////////////
// NOTE(review): the definition of the `bgc` instance itself (listing line
// 823, presumably `BGC bgc;`) is missing from this extract; it is
// referenced by both statements below.
824 GC
* GC::default_gc
= &bgc
;
// Static initializer object: registers bgc as the default collector
// before user code runs.
826 GC_Initializer::GC_Initializer()
827 { GC::set_default_gc(bgc
); }