// lib-src/gc/markswp.cc  (prop.git; commit: "make it compile with g++-3.3")
//////////////////////////////////////////////////////////////////////////////
// NOTICE:
//
// ADLib, Prop and their related set of tools and documentation are in the
// public domain.  The author(s) of this software reserve no copyrights on
// the source code and any code generated using the tools.  You are encouraged
// to use ADLib and Prop to develop software, in both academic and commercial
// settings, and are welcomed to incorporate any part of ADLib and Prop into
// your programs.
//
// Although you are under no obligation to do so, we strongly recommend that
// you give away all software developed using our tools.
//
// We also ask that credit be given to us when ADLib and/or Prop are used in
// your programs, and that this notice be preserved intact in all the source
// code.
//
// This software is still under development and we welcome (read: crave for)
// any suggestions and help from the users.
//
// Allen Leung (leunga@cs.nyu.edu)
// 1994-1995
//////////////////////////////////////////////////////////////////////////////
#include <assert.h>
#include <iostream>
#include <strstream>
#include <stdlib.h>
#include <string.h>
#include <setjmp.h>
#include <unistd.h>
#include <sys/types.h>
#include <AD/gc/gcconfig.h>   // system configuration
#include <AD/gc/markswp.h>    // Marksweep garbage collector
#include <AD/gc/gcobject.h>   // garbage collectable objects
#include <AD/gc/gcheaps.h>    // the heap manager
#include <AD/gc/gcmacros.h>   // useful macros
#include <AD/gc/gctimer.h>

// The standard <iostream> header puts cerr and flush in namespace std;
// pull in the two names used below so the file builds with g++ 3.3.
using std::cerr;
using std::flush;
//////////////////////////////////////////////////////////////////////////////
// Implementation note: small objects are allocated from a set of
// free lists partitioned by sizes.  We also keep a block of consecutive
// memory for allocation purposes.  When we need a small object, the
// current block of memory is tried first, then the appropriate free list.
// If both are empty we'll try to perform garbage collection
// and/or heap expansion.  No attempt is made to coalesce adjacent
// free memory together, so fragmentation may be a problem in
// degenerate cases.
//
// Large objects are allocated from the heap manager directly.  They
// are rounded up to the page size.
//////////////////////////////////////////////////////////////////////////////
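
//////////////////////////////////////////////////////////////////////////////
// Worked example of the allocation policy (illustrative only; the numbers
// assume GC_ALIGNMENT == 8, so MAX_SMALL_OBJECT_SIZE == 512 * 8 == 4096):
// a request for 40 bytes becomes 40 + sizeof(GCHeader) rounded up to the
// allocation grain, falls below the 4096-byte threshold, and is served by the
// small-object machinery (memory block, then free list); a request for
// 8000 bytes exceeds the threshold and goes straight to the heap manager,
// rounded up to whole pages.
//////////////////////////////////////////////////////////////////////////////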
//////////////////////////////////////////////////////////////////////////////
// Some identifying information.
//////////////////////////////////////////////////////////////////////////////
// #define DEBUG_GC       // no debugging for production use
#define ALGORITHM "Conservative mark/sweep"
#define VERSION   "0.2 (Feb 7th, 1995)"
//////////////////////////////////////////////////////////////////////////////
// Import some types
//////////////////////////////////////////////////////////////////////////////
typedef HM::GCPageId   GCPageId;
typedef HM::PageStatus PageStatus;
//////////////////////////////////////////////////////////////////////////////
// Method that returns a name for this class
//////////////////////////////////////////////////////////////////////////////
const char * MarkSweepGC::my_name() const { return "MarkSweepGC"; }
//////////////////////////////////////////////////////////////////////////////
// Small objects are allocated and deallocated using the free
// lists.  Large objects are allocated by just calling the heap manager.
//////////////////////////////////////////////////////////////////////////////
#define FREE_LIST_SIZE        43
#define MAX_SMALL_OBJECT_SIZE (512 * GC_ALIGNMENT)
//////////////////////////////////////////////////////////////////////////////
// Method to select the free list to use for a given request size.
//////////////////////////////////////////////////////////////////////////////
inline int LEN_TO_FREELIST_INDEX(size_t len)
{
   // Bytes 1 ... 128     -> Indices 0 ... 15
   if (len <= 16 * GC_ALIGNMENT) return (len-1)/GC_ALIGNMENT;
   // Bytes 129 ... 1024  -> Indices 16 ... 29
   if (len <= 128 * GC_ALIGNMENT)
      return 16 + (len - 16 * GC_ALIGNMENT - 1)/GC_ALIGNMENT/8;
   // Bytes 1025 ... 4096 -> Indices 30 ... 41
   return 30 + (len - 128 * GC_ALIGNMENT - 1)/GC_ALIGNMENT/32;
}
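
//////////////////////////////////////////////////////////////////////////////
// Sample mappings (assuming GC_ALIGNMENT == 8, so the byte ranges in the
// comments above apply): len 8 -> index 0, len 128 -> index 15,
// len 136 -> index 16, len 1024 -> index 29, len 1056 -> index 30,
// len 4096 -> index 41.  Each free list therefore holds cells of one fixed
// size, given by index_to_len[] below.
//////////////////////////////////////////////////////////////////////////////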
//////////////////////////////////////////////////////////////////////////////
// Table mapping free list index to the length of a cell.
//////////////////////////////////////////////////////////////////////////////
static size_t index_to_len[FREE_LIST_SIZE];
//////////////////////////////////////////////////////////////////////////////
// Constructor.
//////////////////////////////////////////////////////////////////////////////
MarkSweepGC::MarkSweepGC()
   // Initialize the index_to_len table
{  int i;
   for (i = 0; i < 16; i++)
      index_to_len[i] = (i+1) * GC_ALIGNMENT;
   for (i = 16; i < 30; i++)
      index_to_len[i] = (i-15) * GC_ALIGNMENT * 8 + 16 * GC_ALIGNMENT;
   for (i = 30; i < 42; i++)
      index_to_len[i] = (i-29) * GC_ALIGNMENT * 32 + 128 * GC_ALIGNMENT;
   index_to_len[42] = MAX_SMALL_OBJECT_SIZE;

   for (i = 0; i < 42; i++) {
      assert(LEN_TO_FREELIST_INDEX(index_to_len[i])   == i);
      assert(LEN_TO_FREELIST_INDEX(index_to_len[i]+1) == i+1);
   }

   // Initialize the statistics
   stat.algorithm = ALGORITHM;
   stat.version   = VERSION;
   reset_statistics(stat);

   // Setup the limits
   heap_size         = 0;
   heap_used         = 0;
   heap_free         = 0;
   heap_pointer      = 0;
   heap_limit        = 0;
   initial_heap_size = 128 * 1024;
   min_heap_growth   = 256 * 1024;

   // Expand heap if this ratio is exceeded after collection.
   gc_ratio = 50;

   // Setup the free lists for small objects
   free_list_size = FREE_LIST_SIZE;
   free_lists     = new FreeList * [ FREE_LIST_SIZE ];
   for (size_t i = 0; i < free_list_size; i++)
      free_lists[i] = 0;
}
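
//////////////////////////////////////////////////////////////////////////////
// Note on the table above: the size classes consist of 16 classes spaced one
// alignment unit apart, 14 classes spaced 8 units apart, and 12 classes
// spaced 32 units apart (indices 0-41), plus one extra slot (index 42)
// initialized to MAX_SMALL_OBJECT_SIZE.  The asserts check that
// LEN_TO_FREELIST_INDEX and index_to_len agree at every class boundary.
//////////////////////////////////////////////////////////////////////////////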
//////////////////////////////////////////////////////////////////////////////
// Destructor
//////////////////////////////////////////////////////////////////////////////
MarkSweepGC::~MarkSweepGC()
{
   clear();
   delete [] free_lists;
}
//////////////////////////////////////////////////////////////////////////////
// Method to release all the memory within the collector
//////////////////////////////////////////////////////////////////////////////
void MarkSweepGC::clear()
{
   // Return all the pages back to the heap manager
   HM::release_all_pages(this);

   // Reset the free lists
   for (int i = 0; i < FREE_LIST_SIZE; i++)
      free_lists[i] = 0;

   heap_size    = 0;
   heap_used    = 0;
   heap_free    = 0;
   heap_pointer = 0;
   heap_limit   = 0;
}
//////////////////////////////////////////////////////////////////////////////
// Method that computes the minimal amount of heap expansion
// that should be done.
//////////////////////////////////////////////////////////////////////////////
size_t MarkSweepGC::min_growth()
{  return min_heap_growth;
}
//////////////////////////////////////////////////////////////////////////////
// Method to allocate a new object of a given size
//////////////////////////////////////////////////////////////////////////////
void * MarkSweepGC::m_alloc(size_t n)
{  register size_t bytes = GC_ROUNDUP_SIZE(n + sizeof(GCHeader));

   if (bytes <= MAX_SMALL_OBJECT_SIZE) {

      // Small objects are handled by the free lists.

      int index  = LEN_TO_FREELIST_INDEX(bytes);
      size_t len = index_to_len[index];
      for (;;) {
         register FreeList * cell;
         if (heap_pointer + len < heap_limit) {

            // Try to use the new memory block first

            register void * new_obj = GC_OBJ_OBJECT_ADDR(heap_pointer);
            GC_OBJ_SET_HEADER(new_obj, len);
            HM::get_object_map().mark(new_obj);
            heap_pointer += len;
            heap_used    += len;
            return new_obj;
         } else if ((cell = free_lists[index])) {

            // Then try free lists

            free_lists[index] = cell->next;
            register void * new_obj = cell;
            HM::get_object_map().mark(new_obj);
            memset(new_obj, 0, n);
            heap_used += len;
            return new_obj;
         } else {
            if (heap_size > 0 && HM::bytes_free() <= len) {

               // Try garbage collection first

               collect();

               // Expand heap if gc_ratio is reached

               if (heap_used >= heap_size * gc_ratio / 100)
                  grow_heap(bytes);
            } else {

               // Expand heap as a last resort.

               grow_heap(bytes);
            }
         }
      }
   } else {

      // Large objects are allocated by calling the heap manager directly.
      // Hopefully most of the objects are small.

      size_t bytes_gotten;
      bytes += GC_ALIGNMENT;
      void * new_memory = HM::allocate_pages(this, bytes, bytes, bytes_gotten);
      if (new_memory == 0) {
         cerr << "[ GC" << id << ": unable to allocate "
              << bytes << " bytes! ]\n" << flush;
         exit (1);
      }
      heap_size += bytes_gotten;
      heap_used += bytes_gotten;
      register void * new_obj = (Byte*)new_memory + GC_ALIGNMENT;
      GC_OBJ_SET_HEADER(new_obj, bytes_gotten);
      HM::get_object_map().mark(new_obj);
      return new_obj;
   }
}
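
//////////////////////////////////////////////////////////////////////////////
// Illustrative use (not part of this file; the class name MyNode and the
// overloaded operator new are hypothetical):
//
//    static MarkSweepGC node_heap;
//    struct MyNode : GCObject {
//       MyNode * left, * right;
//       void * operator new (size_t n) { return node_heap.m_alloc(n); }
//    };
//
// Every block handed out by m_alloc is preceded by a GCHeader and has its
// starting address recorded in the heap manager's object map, which is what
// lets the conservative scanners below recognize interior pointers.
//////////////////////////////////////////////////////////////////////////////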
//////////////////////////////////////////////////////////////////////////////
// Method to deallocate an object manually.
// Note: this method tries to catch errors involving deallocation.
//////////////////////////////////////////////////////////////////////////////
void MarkSweepGC::free(void * obj)
{  if (! GC_IS_A_POINTER(obj)) return;
   if ( HM::is_mapped(obj) &&
        HM::page_gc(obj) == id &&
        HM::get_object_map().is_marked(obj)
      ) {
      // clean up properly
      HM::get_object_map().unmark(obj);
      size_t obj_len = GC_OBJ_HEADER_LEN(GC_OBJ_HEADER(obj));
      memset(GC_OBJ_HEADER_ADDR(obj), 0, obj_len);

      // return object to free list
      // ...
      if (obj_len <= MAX_SMALL_OBJECT_SIZE) {
         int index = LEN_TO_FREELIST_INDEX(obj_len);
         ((FreeList*)obj)->next = free_lists[index];
         free_lists[index] = (FreeList*)obj;
         heap_used -= obj_len;
      } else {
         HM::deallocate_pages
            (this, GC_PAGE_ID(obj), (obj_len + GC_PAGE_SIZE - 1)/GC_PAGE_SIZE);
         heap_size -= obj_len;
         heap_used -= obj_len;
      }
   } else {
      cerr << "[ GC" << id << ": trying to free unallocated object at "
           << obj << " ]\n";
   }
}
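
//////////////////////////////////////////////////////////////////////////////
// Note: explicit deallocation is optional with this collector; anything not
// freed here is reclaimed automatically by collect()/sweep_pages() once it
// becomes unreachable.  Freeing a pointer that this heap did not hand out is
// detected (the object map has no mark for it) and only produces the warning
// above.
//////////////////////////////////////////////////////////////////////////////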
//////////////////////////////////////////////////////////////////////////////
// Method for expanding the heap.
// We try to expand the heap only if the number of bytes requested
// is not immediately available.
//////////////////////////////////////////////////////////////////////////////
void MarkSweepGC::grow_heap (size_t bytes)
{  size_t bytes_gotten;
   void * new_memory =
      HM::allocate_pages
         ( this, bytes,
           (heap_size == 0 ? initial_heap_size : min_heap_growth),
           bytes_gotten);
   if (new_memory == 0) {
      cerr << "[ GC" << id << ": unable to allocate " << bytes << " bytes! ]\n"
           << flush;
      exit (1);
   }

   // Update statistics
   heap_size += bytes_gotten;
   stat.number_of_heap_expansions++;

   // Add memory into the new memory queue.
   heap_pointer = (Byte*)new_memory + GC_ALIGNMENT - sizeof(GCHeader);
   heap_limit   = (Byte*)new_memory + bytes_gotten;

   heap_expansion_message();
}
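
//////////////////////////////////////////////////////////////////////////////
// The [heap_pointer, heap_limit) window set up above is the bump-allocation
// block used by m_alloc for small objects: each small allocation advances
// heap_pointer by its cell size until the block is exhausted, after which the
// free lists, a collection, or another grow_heap() call must supply memory.
// (The GC_ALIGNMENT - sizeof(GCHeader) offset appears intended to place the
// first object, which sits just past its GCHeader, at
// new_memory + GC_ALIGNMENT.)
//////////////////////////////////////////////////////////////////////////////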
//////////////////////////////////////////////////////////////////////////////
// Method for tracing an object pointer.
// This marks objects currently in the heap.
//////////////////////////////////////////////////////////////////////////////
GCObject * MarkSweepGC::trace (GCObject * ptr)
{
   if (! GC_IS_A_POINTER(ptr)) return ptr;

   // Trace out of heap objects if necessary
   if (! HM::is_mapped(ptr) || HM::page_gc(ptr) != id) {
#ifdef GC_HAS_CROSS_HEAP_POINTERS
#ifdef DEBUG_GC
      assert(HM::traversed_map.is_within_bounds(ptr));
#endif
      if (! HM::traversed_map.is_marked(ptr)) {
         HM::traversed_map.mark(ptr);
         ptr->trace(this);
      }
#endif
      return ptr;
   }

   // Get the starting address of the object
   void * obj_ptr = HM::get_object_map().starting_addr(ptr);

   // If it is already marked live just return.
   if (HM::get_live_map().is_marked(obj_ptr)) return ptr;

   // Otherwise, mark the current object as live and trace all subobjects.
   HM::get_live_map().mark(obj_ptr);
   ptr->trace(this);
   return ptr;
}
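
//////////////////////////////////////////////////////////////////////////////
// Sketch of the expected callback protocol (hypothetical class; the real
// interface is defined by GCObject in <AD/gc/gcobject.h>): a collectable
// object reports its pointer fields by passing each one back through trace(),
// e.g.
//
//    // void MyNode::trace(GC * gc)
//    // {  left  = (MyNode*)gc->trace(left);
//    //    right = (MyNode*)gc->trace(right);
//    // }
//
// For this mark/sweep collector the returned pointer is always the argument
// itself; the return value matters only for collectors that move objects.
//////////////////////////////////////////////////////////////////////////////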
//////////////////////////////////////////////////////////////////////////////
// Method for starting the garbage collection
//////////////////////////////////////////////////////////////////////////////
void MarkSweepGC::collect(int level)
{  do_collect(level);
}
//////////////////////////////////////////////////////////////////////////////
// Method that performs the actual garbage collection
//////////////////////////////////////////////////////////////////////////////
void MarkSweepGC::do_collect(int /*level*/)
{
   ///////////////////////////////////////////////////////////////////////////
   // Flush the registers onto the stack; setup the stack and heap limits.
   ///////////////////////////////////////////////////////////////////////////
   jmp_buf reg_roots;   // flushed registers
   flush_registers();
   // if (_setjmp(reg_roots)) _longjmp(reg_roots,1);
   if (setjmp(reg_roots)) longjmp(reg_roots,1);
   if (is_downward_stack) {
      stack_top = (void**)((Byte*)reg_roots);
   } else {
      stack_top = (void**)((Byte*)reg_roots + sizeof(reg_roots));
   }
   get_heap_top();

   ///////////////////////////////////////////////////////////////////////////
   // Print a message
   ///////////////////////////////////////////////////////////////////////////
   start_collection_message();

   ///////////////////////////////////////////////////////////////////////////
   // Get start time
   ///////////////////////////////////////////////////////////////////////////
#ifdef GC_USE_TIMER
   GCTimer user_time_at_start;
   GCTimer system_time_at_start;
   user_time_at_start.get_user_time();
   system_time_at_start.get_system_time();
#endif

   ///////////////////////////////////////////////////////////////////////////
   // Mark the unused part of the heap space as newly allocated so that it
   // will be skipped during root scanning.
   ///////////////////////////////////////////////////////////////////////////
   HM::mark_space(GC_ROUNDUP_PAGE_ADDR(heap_pointer), heap_limit,
                  HM::newly_allocated);

#ifdef GC_HAS_CROSS_HEAP_POINTERS
   HM::traversed_map.grow(HM::lo_mapped(), HM::hi_mapped());
#endif

   ///////////////////////////////////////////////////////////////////////////
   // Mark phase:
   ///////////////////////////////////////////////////////////////////////////
   scan_stack_area ();
   scan_static_area ();
   scan_heap_area ();
   scan_user_roots ();

   ///////////////////////////////////////////////////////////////////////////
   // Scavenge any and all weak pointers.
   ///////////////////////////////////////////////////////////////////////////
   do_weak_pointer_collection();

   ///////////////////////////////////////////////////////////////////////////
   // Sweep phase:
   // Now scan over all the pages owned by ourselves and
   // reconstruct the free list.
   ///////////////////////////////////////////////////////////////////////////
   sweep_pages ();   // reconstruct free list and reset bitmaps

   ///////////////////////////////////////////////////////////////////////////
   // Mark the unused part of the heap space as the from-space again.
   ///////////////////////////////////////////////////////////////////////////
   HM::mark_space(GC_ROUNDUP_PAGE_ADDR(heap_pointer), heap_limit,
                  HM::from_space);

   ///////////////////////////////////////////////////////////////////////////
   // Update statistics
   ///////////////////////////////////////////////////////////////////////////
   stat.number_of_collections++;

   ///////////////////////////////////////////////////////////////////////////
   // Compute time spent
   ///////////////////////////////////////////////////////////////////////////
#ifdef GC_USE_TIMER
   GCTimer user_time_at_end;
   GCTimer system_time_at_end;
   user_time_at_end.get_user_time();
   system_time_at_end.get_system_time();
   stat.gc_user_time          = user_time_at_end - user_time_at_start;
   stat.gc_system_time        = system_time_at_end - system_time_at_start;
   stat.total_gc_user_time   += stat.gc_user_time;
   stat.total_gc_system_time += stat.gc_system_time;
#endif

   ///////////////////////////////////////////////////////////////////////////
   // Print a message
   ///////////////////////////////////////////////////////////////////////////
   end_collection_message();

   ///////////////////////////////////////////////////////////////////////////
   // Clean up
   ///////////////////////////////////////////////////////////////////////////
#ifdef GC_HAS_CROSS_HEAP_POINTERS
   HM::traversed_map.clear();
#endif
}
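
//////////////////////////////////////////////////////////////////////////////
// How the register flush works (descriptive note): flush_registers() and the
// setjmp(reg_roots) call above spill the machine registers, including the
// callee-saved ones, into the reg_roots buffer on the current stack frame.
// Setting stack_top to one end of that buffer therefore makes the register
// contents part of the stack range handed to scan_for_roots(), so pointers
// that live only in registers are still treated as roots.  The longjmp branch
// is never taken; it presumably just keeps the setjmp from being optimized
// away.
//////////////////////////////////////////////////////////////////////////////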
//////////////////////////////////////////////////////////////////////////////
// Method for scanning a consecutive block of memory for roots.
//////////////////////////////////////////////////////////////////////////////
inline void MarkSweepGC::scan_for_roots (void ** start, void ** stop)
{
   // Scan consecutively.
   // Caution: addresses between start and stop (not including stop)
   // must be mapped by the OS.
   for (register void ** P = (void**)GC_TRUNC_ADDR(start); P < stop; P++) {
      register void * ptr = *P;

      // Try to interpret each cell as a pointer and see if it
      // falls within any pages of this collectable heap.
      if (! HM::is_mapped(ptr)) continue;
      if (HM::page_gc(ptr) != id) continue;

      // If so, locate the starting address (the pointer may be
      // an interior pointer) and trace the object.
      Byte * obj = (Byte*)HM::get_object_map().starting_addr(ptr);
      if (obj == 0) continue;
      if (HM::page_gc(obj) != id) continue;

      // Get the header of the object.
      // If it is a forwarded header, then it is not really a live object
      // at all, but one left over from a previous collection phase that
      // has not been unmarked.  This can happen because we are lazy and
      // only unmark objects one page at a time, so objects that fall
      // halfway between promoted and unpromoted pages and that are
      // actually moved will not have their live bit reset.
      // We'll reset the object now and continue.
      GCHeader header = GC_OBJ_HEADER(obj);

      // Now check whether the pointer actually lies within the
      // boundary of the object.
      Byte * limit = obj + GC_OBJ_HEADER_LEN(header) - sizeof(GCHeader);

      if ((Byte*)ptr >= limit) continue;   // Nope.

      // Finally, we've found an object.  Trace and mark it.
      if (HM::get_live_map().is_marked(obj)) continue;
      HM::get_live_map().mark(obj);        // mark it as live
      ((GCObject*)obj)->trace(this);       // trace it
   }
}
//////////////////////////////////////////////////////////////////////////////
// Method for scanning the stack area
//////////////////////////////////////////////////////////////////////////////
void MarkSweepGC::scan_stack_area()
{
   if (is_downward_stack) {
      // stack grows to lower addresses
      scanning_message("registers and stack", stack_top, stack_bottom);
      scan_for_roots (stack_top, stack_bottom);
   } else {
      scanning_message("registers and stack", stack_bottom, stack_top);
      scan_for_roots (stack_bottom, stack_top);
   }
}
//////////////////////////////////////////////////////////////////////////////
// Method for scanning the static area.
// We'll have to take into account all the static data used
// by the heap manager.
//////////////////////////////////////////////////////////////////////////////
void MarkSweepGC::scan_static_area()
{
   scanning_message("static data area", data_bottom, data_top);
   size_t n;
   register const HM::AddrRange * blacklisted = HM::blacklisted_data(n);
   register size_t i = 0;
   register void ** start = data_bottom;
   register void ** stop  = data_top;
   for (start = data_bottom; start < data_top; start = stop) {
      while (i < n) {
         if (start > (void**)blacklisted[i].stop)
            { i++; }
         else if (start >= (void**)blacklisted[i].start)
            { start = (void**)blacklisted[i].stop; i++; }
         else break;
      }
      if (i < n) stop = (void**)blacklisted[i].start;
      else       stop = data_top;
      if (start < stop)
         scan_for_roots (start, stop);
   }
}
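
//////////////////////////////////////////////////////////////////////////////
// Illustration of the blacklist handling above (addresses are made up):
// with data_bottom = 0x1000, data_top = 0x5000 and one blacklisted range
// [0x2000, 0x3000), the loop scans [0x1000, 0x2000), skips the blacklisted
// range, and then scans [0x3000, 0x5000).  The blacklisted ranges are
// presumably the heap manager's own static data (cf. HM::blacklisted_data),
// which should not be scanned for user roots.
//////////////////////////////////////////////////////////////////////////////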
//////////////////////////////////////////////////////////////////////////////
// Method for scanning the heap area
//////////////////////////////////////////////////////////////////////////////
void MarkSweepGC::scan_heap_area()
{
   scanning_message("non-collectable heap", heap_bottom, heap_top);

   // Scan consecutively.
   // Caution: addresses between start and stop (not including stop)
   // must be mapped by the OS.

   // Scan all pages in the user heap that are NOT owned by this heap.
   // We'll do it one page at a time for now.

   register void ** P, ** page_limit;
   for (P = heap_bottom; P < heap_top; P = page_limit) {
      page_limit = (void**)GC_TRUNC_PAGE_ADDR((Byte*)P + GC_PAGE_SIZE);
      if (page_limit > heap_top) page_limit = heap_top;

      // Check to make sure that the address is not currently being
      // used by us, or allocated but not used.
      register GCPageId page_id = GC_PAGE_ID(P);

      if (HM::is_tracked(P)) {
         switch (HM::page_status(page_id)) {
            // pages that are not allocated will be scanned
            case HM::not_allocated: break;

            // blacklisted pages will be skipped
            case HM::blacklisted: continue;

            // This page has been allocated... check some more....
            case HM::from_space:
               {  register GC_ID the_id = HM::page_gc(page_id);
                  // unused pages will be skipped.
                  if (the_id == 0) continue;

                  // pages within this heap will be skipped.
                  if (the_id == id) continue;

                  // allocated pages on other heaps will be scanned.
               }  break;

            // case HM::to_space:        continue;
            // case HM::newly_allocated: continue;

            // promoted pages and newly allocated pages will be skipped.
            default: continue;
         }
      }

      // Everything else is scanned.
      scan_for_roots(P, page_limit);
   }
}
//////////////////////////////////////////////////////////////////////////////
// Method to sweep pages in the heap and reconstruct the free list.
//////////////////////////////////////////////////////////////////////////////
void MarkSweepGC::sweep_pages()
{
   foreach_page_owned_by_collector(p, this) {
      if (HM::page_status(p) != HM::from_space) continue;
      register Byte * page_start = (Byte*)GC_PAGE_ADDR(p);
      register Byte * page_addr  = page_start;
      register Byte * page_limit = page_addr + GC_PAGE_SIZE;
      while (page_addr < page_limit) {
         if (HM::get_object_map().is_marked(page_addr)) {
            if (! HM::get_live_map().is_marked(page_addr)) {
               // Finalize the dying object.
               HM::get_object_map().unmark(page_addr);
               if (should_finalize)
                  ((GCObject*)page_addr)->~GCObject();

               // Now reclaim the dying object.
               size_t obj_len = GC_OBJ_HEADER_LEN(GC_OBJ_HEADER(page_addr));
               if (obj_len <= MAX_SMALL_OBJECT_SIZE) {
                  int index = LEN_TO_FREELIST_INDEX(obj_len);
#ifdef DEBUG_GC
                  // clear freed storage to catch bugs.
                  memset(page_addr, 0, obj_len - sizeof(GCHeader));
#endif
                  ((FreeList*)page_addr)->next = free_lists[index];
                  free_lists[index] = (FreeList*)page_addr;
                  heap_used -= obj_len;
               } else {
                  HM::deallocate_pages
                     (this, GC_PAGE_ID(page_addr),
                      (obj_len + GC_PAGE_SIZE - 1) / GC_PAGE_SIZE);
                  heap_size -= obj_len;
                  heap_used -= obj_len;
               }
               page_addr += obj_len;
            } else {
               page_addr += GC_OBJ_HEADER_LEN(GC_OBJ_HEADER(page_addr));
            }
         } else {
            page_addr += GC_ALIGNMENT;
         }
      }

      // Reset bitmap
      HM::get_live_map().clear (page_start, page_limit);
   }
}
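
//////////////////////////////////////////////////////////////////////////////
// Bitmap roles during the sweep above (descriptive summary): the object map
// marks the starting address of every allocated object, while the live map is
// set only for objects reached during the mark phase.  The sweep therefore
// distinguishes three cases at each address: object start but not live
// (reclaim it), object start and live (skip over its full length), and no
// object start (advance by one alignment unit).  The live map is cleared one
// page at a time so it starts out empty for the next collection.
//////////////////////////////////////////////////////////////////////////////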
//////////////////////////////////////////////////////////////////////////////
// Accounting method.
//////////////////////////////////////////////////////////////////////////////
MarkSweepGC::Statistics MarkSweepGC::statistics ()
{
   stat.bytes_used    = heap_used;
   stat.bytes_free    = heap_size - heap_used;
   stat.bytes_managed = heap_size;
   return stat;
}
//////////////////////////////////////////////////////////////////////////////
// Heap expansion message
//////////////////////////////////////////////////////////////////////////////
void MarkSweepGC::heap_expansion_message()
{
   if ((verbosity_level & gc_notify_heap_expansion) && console) {
      (*console) << "[ GC" << id << ": increasing heap... "
                 << heap_used << '/'
                 << (heap_size + HM::bytes_free()) << " ]\n";
   }
}
//////////////////////////////////////////////////////////////////////////////
// Start collection message
//////////////////////////////////////////////////////////////////////////////
void MarkSweepGC::start_collection_message()
{  if (is_verbose() && console) {
      (*console) << "[ GC" << id << ": collecting... "
                 << heap_used << '/'
                 << (heap_size + HM::bytes_free()) << " ]\n";
   }
}
//////////////////////////////////////////////////////////////////////////////
// End collection message
//////////////////////////////////////////////////////////////////////////////
void MarkSweepGC::end_collection_message()
{  if (is_verbose() && console) {
      (*console) << "[ GC" << id << ": done... "
                 << heap_used << '/'
                 << (heap_size + HM::bytes_free()) << " ]\n";
   }
#ifdef GC_USE_TIMER
   if ((verbosity_level & gc_print_collection_time) && console) {
      (*console) << "[ user time: " << stat.gc_user_time
                 << " system time: " << stat.gc_system_time << " ]\n";
   }
#endif
}