Fix LDC, LDC_W, and INSTANCEOF opcodes, more debugging
[jamvm-avr32-jem.git] / src / alloc.c
blob aaf22d34c9a008e2a70b5a44956459966b2b7ba4
1 /*
2 * Copyright (C) 2003, 2004, 2005, 2006, 2007
3 * Robert Lougher <rob@lougher.org.uk>.
5 * This file is part of JamVM.
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2,
10 * or (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
22 #include <unistd.h>
23 #include <string.h>
24 #include <stdlib.h>
25 #include <sys/time.h>
26 #include <sys/mman.h>
27 #include <errno.h>
28 #include <limits.h>
30 #include "jam.h"
31 #include "alloc.h"
32 #include "thread.h"
33 #include "lock.h"
35 /* Trace GC heap mark/sweep phases - useful for debugging heap
36 * corruption */
37 #ifdef TRACEGC
38 #define TRACE_GC(fmt, ...) jam_printf(fmt, ## __VA_ARGS__)
39 #else
40 #define TRACE_GC(fmt, ...)
41 #endif
43 /* Trace GC Compaction phase */
44 #ifdef TRACECOMPACT
45 #define TRACE_COMPACT(fmt, ...) jam_printf(fmt, ## __VA_ARGS__)
46 #else
47 #define TRACE_COMPACT(fmt, ...)
48 #endif
50 /* Trace class, object and array allocation */
51 #ifdef TRACEALLOC
52 #define TRACE_ALLOC(fmt, ...) jam_printf(fmt, ## __VA_ARGS__)
53 #else
54 #define TRACE_ALLOC(fmt, ...)
55 #endif
57 /* Trace object finalization */
58 #ifdef TRACEFNLZ
59 #define TRACE_FNLZ(fmt, ...) jam_printf(fmt, ## __VA_ARGS__)
60 #else
61 #define TRACE_FNLZ(fmt, ...)
62 #endif
64 /* Object alignment */
65 #define OBJECT_GRAIN 8
67 /* Bits used within the chunk header (see also alloc.h) */
68 #define ALLOC_BIT 1
69 #define SPECIAL_BIT 4
70 #define HAS_HASHCODE_BIT (1<<31)
71 #define HASHCODE_TAKEN_BIT (1<<30)
73 #define HDR_FLAGS_MASK ~(ALLOC_BIT|FLC_BIT|SPECIAL_BIT| \
74 HAS_HASHCODE_BIT|HASHCODE_TAKEN_BIT)
76 /* Macros for getting values from the chunk header */
77 #define HEADER(ptr) *((uintptr_t*)ptr)
78 #define HDR_SIZE(hdr) (hdr & HDR_FLAGS_MASK)
79 #define HDR_ALLOCED(hdr) (hdr & ALLOC_BIT)
80 #define HDR_THREADED(hdr) ((hdr & (ALLOC_BIT|FLC_BIT)) == FLC_BIT)
81 #define HDR_SPECIAL_OBJ(hdr) (hdr & SPECIAL_BIT)
82 #define HDR_HASHCODE_TAKEN(hdr) (hdr & HASHCODE_TAKEN_BIT)
83 #define HDR_HAS_HASHCODE(hdr) (hdr & HAS_HASHCODE_BIT)
85 /* Macro to mark an object as "special" by setting the special
86 bit in the block header. These are treated differently by GC */
87 #define SET_SPECIAL_OB(ob) { \
88 uintptr_t *hdr_addr = HDR_ADDRESS(ob); \
89 *hdr_addr |= SPECIAL_BIT; \
92 /* 1 word header format
93 31 210
94 -------------------------------------------
95 | block size | |
96 -------------------------------------------
97 ^ has hashcode bit ^ alloc bit
98 ^ hashcode taken bit ^ flc bit
99 ^ special bit */
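/* Illustrative note (not part of the original source): decoding a header
   word under the layout above. Assuming FLC_BIT is bit 1 as the diagram
   shows (its actual value lives in alloc.h), a header of 0x31 describes an
   allocated, non-special block of 48 bytes:

       HDR_SIZE(0x31)         == 0x30   -- flag bits masked off by HDR_FLAGS_MASK
       HDR_ALLOCED(0x31)      != 0      -- ALLOC_BIT (bit 0) set
       HDR_SPECIAL_OBJ(0x31)  == 0      -- SPECIAL_BIT (bit 2) clear
       HDR_HAS_HASHCODE(0x31) == 0      -- bit 31 clear

   Because chunk sizes are always a multiple of OBJECT_GRAIN, the low bits
   are free to carry ALLOC_BIT, FLC_BIT and SPECIAL_BIT. */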
102 static int verbosegc;
103 static int compact_override;
104 static int compact_value;
106 /* Format of an unallocated chunk */
107 typedef struct chunk {
108 uintptr_t header;
109 struct chunk *next;
110 } Chunk;
112 /* The free list head, and next allocation pointer */
113 static Chunk *freelist;
114 static Chunk **chunkpp = &freelist;
116 /* Heap limits */
117 static char *heapbase;
118 static char *heaplimit;
119 static char *heapmax;
121 static unsigned long heapfree;
123 /* The mark bit array, used for marking objects during
124 the mark phase. Allocated on start-up. */
125 static unsigned int *markBits;
126 static int markBitSize;
128 /* List holding objects which need to be finalized */
129 static Object **has_finaliser_list = NULL;
130 static int has_finaliser_count = 0;
131 static int has_finaliser_size = 0;
133 /* Compaction needs to know which object references are
134 conservative (i.e. they merely look like references). The objects
135 can't be moved in case they aren't really references. */
136 static Object **conservative_roots = NULL;
137 static int conservative_root_count = 0;
139 /* Above list is transformed into a hashtable before compaction */
140 static uintptr_t *con_roots_hashtable;
141 static int con_roots_hashtable_size;
143 /* List holding object references from outside the heap
144 which have been registered with the GC. Unregistered
145 references (outside of known structures) will not be
146 scanned or threaded during GC/Compaction */
147 static Object ***registered_refs = NULL;
148 static int registered_refs_count = 0;
150 /* The circular list holding finalized objects waiting for
151 their finalizer to be run by the finalizer thread */
152 static Object **run_finaliser_list = NULL;
153 static int run_finaliser_start = 0;
154 static int run_finaliser_end = 0;
155 static int run_finaliser_size = 0;
157 /* The circular list holding references to be enqueued
158 by the reference handler thread */
159 static Object **reference_list = NULL;
160 static int reference_start = 0;
161 static int reference_end = 0;
162 static int reference_size = 0;
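/* Illustrative note (not part of the original source): both arrays above are
   used as circular buffers. As the ADD_TO_OBJECT_LIST and
   PROCESS_OBJECT_LIST macros later in this file show, "start == size &&
   end == 0" is the empty/drained state, pending entries occupy the index
   range [start, end) modulo size, and the buffer grows by LIST_INCREMENT
   slots whenever start catches up with end. */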
164 /* Flags set during GC if a thread needs to be woken up. The
165 notification is done after the world is resumed, to remove
166 the use of "unsafe" calls while threads are suspended. */
167 static int notify_reference_thread;
168 static int notify_finaliser_thread;
170 /* Internal locks protecting the GC lists and heap */
171 static VMLock heap_lock;
172 static VMLock has_fnlzr_lock;
173 static VMLock registered_refs_lock;
174 static VMWaitLock run_finaliser_lock;
175 static VMWaitLock reference_lock;
177 /* A pointer to the finalizer thread. */
178 static Thread *finalizer_thread;
180 /* Pre-allocated OutOfMemoryError */
181 static Object *oom;
183 /* Cached primitive type array classes -- used to speed up
184 primitive array allocation */
185 static Class *bool_array_class = NULL, *byte_array_class = NULL;
186 static Class *char_array_class = NULL, *short_array_class = NULL;
187 static Class *int_array_class = NULL, *float_array_class = NULL;
188 static Class *double_array_class = NULL, *long_array_class = NULL;
190 /* Field offsets and method table indexes used for finalization and
191 reference handling. Cached in class.c */
192 extern int ref_referent_offset;
193 extern int ref_queue_offset;
194 extern int finalize_mtbl_idx;
195 extern int enqueue_mtbl_idx;
196 extern int ldr_vmdata_offset;
198 /* During GC all threads are suspended. To safeguard against a
199 suspended thread holding the malloc-lock, free cannot be called
200 within the GC to free native memory associated with "special
201 objects". Instead, frees are chained together into a pending
202 list, and are freed once all threads are resumed. Note, we
203 cannot wrap malloc/realloc/free within the VM as this does
204 not guard against "invisible" malloc/realloc/free within libc
205 calls and JNI methods */
206 static void **pending_free_list = NULL;
207 void freePendingFrees();
209 /* Similarly, allocation for internal lists within GC cannot use
210 memory from the system heap. The following functions provide
211 the same API, but allocate memory via mmap */
212 void *gcMemRealloc(void *addr, int new_size);
213 void *gcMemMalloc(int size);
214 void gcMemFree(void *addr);
216 /* Cached system page size (used in above functions) */
217 static int sys_page_size;
219 /* The possible ways in which a reference may be marked in
220 the mark bit array */
221 #define HARD_MARK 3
222 #define FINALIZER_MARK 2
223 #define PHANTOM_MARK 1
225 #define LIST_INCREMENT 100
227 #define LOG_BYTESPERMARK LOG_OBJECT_GRAIN /* 1 mark entry for every OBJECT_GRAIN bytes of heap */
228 #define BITSPERMARK 2
229 #define LOG_BITSPERMARK 1
230 #define LOG_MARKSIZEBITS 5
231 #define MARKSIZEBITS 32
233 /* Macros for manipulating the mark bit array */
235 #define MARKENTRY(ptr) ((((char*)ptr)-heapbase)>>(LOG_BYTESPERMARK+LOG_MARKSIZEBITS-LOG_BITSPERMARK))
236 #define MARKOFFSET(ptr) ((((((char*)ptr)-heapbase)>>LOG_BYTESPERMARK)& \
237 ((MARKSIZEBITS>>LOG_BITSPERMARK)-1))<<LOG_BITSPERMARK)
238 #define MARK(ptr,mark) markBits[MARKENTRY(ptr)]|=mark<<MARKOFFSET(ptr);
240 #define SET_MARK(ptr,mark) markBits[MARKENTRY(ptr)]=(markBits[MARKENTRY(ptr)]& \
241 ~(((1<<BITSPERMARK)-1)<<MARKOFFSET(ptr)))|mark<<MARKOFFSET(ptr)
243 #define IS_MARKED(ptr) ((markBits[MARKENTRY(ptr)]>>MARKOFFSET(ptr))&((1<<BITSPERMARK)-1))
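/* Illustrative note (not part of the original source): a worked example of
   the mark-bit arithmetic, assuming LOG_OBJECT_GRAIN is 3 (OBJECT_GRAIN 8).
   Each unsigned int of markBits then covers 128 bytes of heap (16 grains x
   2 bits). For an object at heapbase + 0x48:

       MARKENTRY(ptr)  == 0x48 >> (3 + 5 - 1)      == 0    (word index)
       MARKOFFSET(ptr) == ((0x48 >> 3) & 15) << 1  == 18   (bit offset)

   so MARK(ptr, HARD_MARK) ORs 3 into bits 19..18 of markBits[0] and
   IS_MARKED(ptr) reads the same two bits back. */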
245 #define IS_HARD_MARKED(ptr) (IS_MARKED(ptr) == HARD_MARK)
246 #define IS_PHANTOM_MARKED(ptr) (IS_MARKED(ptr) == PHANTOM_MARK)
248 #define IS_OBJECT(ptr) (((char*)ptr) > heapbase) && \
249 (((char*)ptr) < heaplimit) && \
250 !(((uintptr_t)ptr)&(OBJECT_GRAIN-1))
252 #define MIN_OBJECT_SIZE ((sizeof(Object)+HEADER_SIZE+OBJECT_GRAIN-1)&~(OBJECT_GRAIN-1))
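/* Illustrative note (not part of the original source): on a hypothetical
   32-bit target with sizeof(Object) == 8 and HEADER_SIZE == 4,
   MIN_OBJECT_SIZE is (8 + 4 + 7) & ~7 == 16 -- the smallest chunk that can
   hold a header plus an Object, rounded up to OBJECT_GRAIN. The sweep and
   compact phases below only put chunks of at least this size back on the
   freelist. */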
254 void allocMarkBits() {
255 int no_of_bits = (heaplimit-heapbase)>>(LOG_BYTESPERMARK-LOG_BITSPERMARK);
256 markBitSize = (no_of_bits+MARKSIZEBITS-1)>>LOG_MARKSIZEBITS;
258 markBits = (unsigned int *) sysMalloc(markBitSize*sizeof(*markBits));
260 TRACE_GC("Allocated mark bits - size is %d\n", markBitSize);
263 void clearMarkBits() {
264 memset(markBits, 0, markBitSize*sizeof(*markBits));
267 void initialiseAlloc(InitArgs *args) {
269 #ifdef USE_MALLOC
270 /* Don't use mmap - malloc max heap size */
271 char *mem = (char*)malloc(args->max_heap);
272 args->min_heap = args->max_heap;
273 if(mem == NULL) {
274 #else
275 char *mem = (char*)mmap(0, args->max_heap, PROT_READ|PROT_WRITE,
276 MAP_PRIVATE|MAP_ANON, -1, 0);
277 if(mem == MAP_FAILED) {
278 #endif
279 perror("Aborting the VM -- couldn't allocate the heap");
280 exitVM(1);
283 /* Align heapbase so that start of heap + HEADER_SIZE is object aligned */
284 heapbase = (char*)(((uintptr_t)mem+HEADER_SIZE+OBJECT_GRAIN-1)&~(OBJECT_GRAIN-1))-HEADER_SIZE;
286 /* Ensure size of heap is multiple of OBJECT_GRAIN */
287 heaplimit = heapbase+((args->min_heap-(heapbase-mem))&~(OBJECT_GRAIN-1));
289 heapmax = heapbase+((args->max_heap-(heapbase-mem))&~(OBJECT_GRAIN-1));
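/* Illustrative note (not part of the original source): a worked example of
   the alignment above, assuming HEADER_SIZE is 4 on a 32-bit target. If the
   allocation returned mem == 0x10003 then

       heapbase == ((0x10003 + 4 + 7) & ~7) - 4 == 0x10004

   so heapbase + HEADER_SIZE == 0x10008 is OBJECT_GRAIN aligned, and every
   object body (chunk address + HEADER_SIZE) stays 8-byte aligned. */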
291 /* Set initial free-list to one block covering entire heap */
292 freelist = (Chunk*)heapbase;
293 freelist->header = heapfree = heaplimit-heapbase;
294 freelist->next = NULL;
296 TRACE_GC("Alloced heap size %p\n", heaplimit-heapbase);
297 allocMarkBits();
299 /* Initialise GC locks */
300 initVMLock(heap_lock);
301 initVMLock(has_fnlzr_lock);
302 initVMLock(registered_refs_lock);
303 initVMWaitLock(run_finaliser_lock);
304 initVMWaitLock(reference_lock);
306 /* Cache system page size -- used for internal GC lists */
307 sys_page_size = getpagesize();
309 /* Set verbose option from initialisation arguments */
310 verbosegc = args->verbosegc;
313 /* ------------------------- MARK PHASE ------------------------- */
315 /* Forward declaration */
316 void markChildren(Object *ob, int mark, int mark_soft_refs);
318 int isMarked(Object *ob) {
319 return ob != NULL && IS_MARKED(ob);
322 void markObject(Object *object, int mark, int mark_soft_refs) {
323 if(object != NULL && mark > IS_MARKED(object))
324 markChildren(object, mark, mark_soft_refs);
327 void markRoot(Object *object) {
328 if(object != NULL)
329 MARK(object, HARD_MARK);
332 void markConservativeRoot(Object *object) {
333 if(object == NULL)
334 return;
336 MARK(object, HARD_MARK);
338 if((conservative_root_count % LIST_INCREMENT) == 0) {
339 int new_size = conservative_root_count + LIST_INCREMENT;
340 conservative_roots = gcMemRealloc(conservative_roots,
341 new_size * sizeof(Object *));
343 conservative_roots[conservative_root_count++] = object;
346 void freeConservativeRoots() {
347 gcMemFree(conservative_roots);
348 conservative_roots = NULL;
349 conservative_root_count = 0;
352 void scanThread(Thread *thread) {
353 ExecEnv *ee = thread->ee;
354 Frame *frame = ee->last_frame;
355 uintptr_t *end, *slot;
357 TRACE_GC("Scanning stacks for thread 0x%x id %d\n", thread, thread->id);
359 /* Mark the java.lang.Thread object */
360 markConservativeRoot(ee->thread);
362 /* Mark any pending exception raised on this thread */
363 markConservativeRoot(ee->exception);
365 /* Scan the thread's C stack and mark all references */
366 slot = (uintptr_t*)getStackTop(thread);
367 end = (uintptr_t*)getStackBase(thread);
369 for(; slot < end; slot++)
370 if(IS_OBJECT(*slot)) {
371 Object *ob = (Object*)*slot;
372 TRACE_GC("Found C stack ref @%p object ref is %p\n", slot, ob);
373 markConservativeRoot(ob);
376 /* Scan the thread's Java stack and mark all references */
377 slot = frame->ostack + frame->mb->max_stack;
379 while(frame->prev != NULL) {
380 if(frame->mb != NULL) {
381 TRACE_GC("Scanning %s.%s\n", CLASS_CB(frame->mb->class)->name, frame->mb->name);
382 TRACE_GC("lvars @%p ostack @%p\n", frame->lvars, frame->ostack);
384 /* Mark the method's defining class. This should always
385 be reachable otherwise, but mark to be safe */
386 markConservativeRoot((Object*)frame->mb->class);
389 end = frame->ostack;
391 for(; slot >= end; slot--)
392 if(IS_OBJECT(*slot)) {
393 Object *ob = (Object*)*slot;
394 TRACE_GC("Found Java stack ref @%p object ref is %p\n", slot, ob);
395 markConservativeRoot(ob);
398 slot -= sizeof(Frame)/sizeof(uintptr_t);
399 frame = frame->prev;
403 void markClassData(Class *class, int mark, int mark_soft_refs) {
404 ClassBlock *cb = CLASS_CB(class);
405 ConstantPool *cp = &cb->constant_pool;
406 FieldBlock *fb = cb->fields;
407 int i;
409 TRACE_GC("Marking class %s\n", cb->name);
411 /* Recursively mark the class's classloader */
412 if(cb->class_loader != NULL && mark > IS_MARKED(cb->class_loader))
413 markChildren(cb->class_loader, mark, mark_soft_refs);
415 TRACE_GC("Marking static fields for class %s\n", cb->name);
417 /* Static fields are initialised to default values during
418 preparation (done in the link phase). Therefore, don't
419 scan if the class hasn't been linked */
420 if(cb->state >= CLASS_LINKED)
421 for(i = 0; i < cb->fields_count; i++, fb++)
422 if((fb->access_flags & ACC_STATIC) &&
423 ((*fb->type == 'L') || (*fb->type == '['))) {
424 Object *ob = (Object *)fb->static_value;
425 TRACE_GC("Field %s %s object @%p\n", fb->name, fb->type, ob);
426 if(ob != NULL && mark > IS_MARKED(ob))
427 markChildren(ob, mark, mark_soft_refs);
430 TRACE_GC("Marking constant pool resolved strings for class %s\n", cb->name);
432 /* Scan the constant pool and mark all resolved string references */
433 for(i = 1; i < cb->constant_pool_count; i++)
434 if(CP_TYPE(cp, i) == CONSTANT_ResolvedString) {
435 Object *string = (Object *)CP_INFO(cp, i);
436 TRACE_GC("Resolved String @ constant pool idx %d @%p\n", i, string);
437 if(mark > IS_MARKED(string))
438 markChildren(string, mark, mark_soft_refs);
442 void markChildren(Object *ob, int mark, int mark_soft_refs) {
443 Class *class = ob->class;
444 ClassBlock *cb = CLASS_CB(class);
446 SET_MARK(ob, mark);
448 if(class == NULL)
449 return;
451 if(mark > IS_MARKED(class))
452 markChildren((Object*)class, mark, mark_soft_refs);
454 if(cb->name[0] == '[') {
455 if((cb->name[1] == 'L') || (cb->name[1] == '[')) {
456 Object **body = ARRAY_DATA(ob);
457 int len = ARRAY_LEN(ob);
458 int i;
459 TRACE_GC("Scanning Array object @%p class is %s len is %d\n", ob, cb->name, len);
461 for(i = 0; i < len; i++) {
462 Object *ob = *body++;
463 TRACE_GC("Object at index %d is @%p\n", i, ob);
465 if(ob != NULL && mark > IS_MARKED(ob))
466 markChildren(ob, mark, mark_soft_refs);
468 } else {
469 TRACE_GC("Array object @%p class is %s - Not Scanning...\n", ob, cb->name);
471 } else {
472 uintptr_t *body = INST_DATA(ob);
473 int i;
475 if(IS_CLASS_CLASS(cb)) {
476 TRACE_GC("Found class object @%p name is %s\n", ob, CLASS_CB(ob)->name);
477 markClassData((Class*)ob, mark, mark_soft_refs);
478 } else
479 if(IS_CLASS_LOADER(cb)) {
480 TRACE_GC("Mark found class loader object @%p class %s\n", ob, cb->name);
481 markLoaderClasses(ob, mark, mark_soft_refs);
482 } else
483 if(IS_VMTHROWABLE(cb)) {
484 TRACE_GC("Mark found VMThrowable object @%p\n", ob);
485 markVMThrowable(ob, mark, mark_soft_refs);
486 } else
487 if(IS_REFERENCE(cb)) {
488 Object *referent = (Object *)body[ref_referent_offset];
490 TRACE_GC("Mark found Reference object @%p class %s flags %d referent %p\n",
491 ob, cb->name, cb->flags, referent);
493 if(!IS_WEAK_REFERENCE(cb) && referent != NULL) {
494 int ref_mark = IS_MARKED(referent);
495 int new_mark;
497 if(IS_PHANTOM_REFERENCE(cb))
498 new_mark = PHANTOM_MARK;
499 else
500 if(!IS_SOFT_REFERENCE(cb) || mark_soft_refs)
501 new_mark = mark;
502 else
503 new_mark = 0;
505 if(new_mark > ref_mark) {
506 TRACE_GC("Marking referent object @%p mark %d ref_mark %d new_mark %d\n",
507 referent, mark, ref_mark, new_mark);
508 markChildren(referent, new_mark, mark_soft_refs);
513 TRACE_GC("Scanning object @%p class is %s\n", ob, cb->name);
515 /* The reference offsets table consists of a list of start and
516 end offsets corresponding to the references within the object's
517 instance data. Scan the list, and mark all references. */
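/* Illustrative note (not part of the original source): for a hypothetical
   instance layout

       slot 0: Object *a;   slot 1: int i;
       slot 2: Object *b;   slot 3: Object *c;

   the table would hold the two ranges {start 0, end 1} and {start 2, end 4},
   so only slots 0, 2 and 3 of the instance data are scanned as references. */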
519 for(i = 0; i < cb->refs_offsets_size; i++) {
520 int offset = cb->refs_offsets_table[i].start;
521 int end = cb->refs_offsets_table[i].end;
523 for(; offset < end; offset++) {
524 Object *ob = (Object *)body[offset];
525 TRACE_GC("Offset %d reference @%p\n", offset, ob);
527 if(ob != NULL && mark > IS_MARKED(ob))
528 markChildren(ob, mark, mark_soft_refs);
534 #define ADD_TO_OBJECT_LIST(list, ob) \
536 if(list##_start == list##_end) { \
537 list##_end = list##_size; \
538 list##_start = list##_size += LIST_INCREMENT; \
539 list##_list = (Object**)gcMemRealloc(list##_list, \
540 list##_size*sizeof(Object*)); \
542 list##_end = list##_end%list##_size; \
543 list##_list[list##_end++] = ob; \
546 #define ITERATE_OBJECT_LIST(list, action) \
548 int i; \
550 if(list##_end > list##_start) \
551 for(i = list##_start; i < list##_end; i++) { \
552 action(list##_list[i]); \
553 } else { \
554 for(i = list##_start; i < list##_size; i++) { \
555 action(list##_list[i]); \
557 for(i = 0; i < list##_end; i++) { \
558 action(list##_list[i]); \
563 static void doMark(Thread *self, int mark_soft_refs) {
564 char *ptr;
565 int i, j;
567 clearMarkBits();
569 if(oom) MARK(oom, HARD_MARK);
570 markBootClasses();
571 markJNIGlobalRefs();
572 scanThreads();
574 /* All roots should now be marked. Scan the heap and recursively
575 mark all marked objects - once the heap has been scanned all
576 reachable objects should be marked */
578 for(ptr = heapbase; ptr < heaplimit;) {
579 uintptr_t hdr = HEADER(ptr);
580 uintptr_t size = HDR_SIZE(hdr);
582 #ifdef DEBUG
583 jam_printf("Block @%p size %d alloced %d\n", ptr, size, HDR_ALLOCED(hdr));
584 #endif
586 if(HDR_ALLOCED(hdr)) {
587 Object *ob = (Object*)(ptr+HEADER_SIZE);
589 if(IS_HARD_MARKED(ob))
590 markChildren(ob, HARD_MARK, mark_soft_refs);
593 /* Skip to next block */
594 ptr += size;
597 /* Now all reachable objects are marked. All other objects are garbage.
598 Any object with a finalizer which is unmarked, however, must have its
599 finalizer run before collecting. Scan the has_finaliser list and move
600 all unmarked objects to the run_finaliser list. This ensures that
601 finalizers are run only once, even if finalization resurrects the
602 object, as objects are only added to the has_finaliser list on
603 creation */
605 for(i = 0, j = 0; i < has_finaliser_count; i++) {
606 Object *ob = has_finaliser_list[i];
608 if(!IS_HARD_MARKED(ob)) {
609 ADD_TO_OBJECT_LIST(run_finaliser, ob);
610 } else
611 has_finaliser_list[j++] = ob;
614 /* After scanning, j holds how many finalizers are left */
616 if(j != has_finaliser_count) {
617 has_finaliser_count = j;
619 /* Extra finalizers to be run, so we need to signal the
620 finalizer thread in case it needs waking up. */
621 notify_finaliser_thread = TRUE;
624 /* Mark the objects waiting to be finalized. We must mark them
625 as they, and all objects they reference, cannot be deleted until the
626 finalizer has run. Note, this includes objects just added,
627 and objects that were already on the list - they were found
628 to be garbage on a previous gc but we haven't got round to
629 finalizing them yet. */
631 #define RUN_MARK(element) \
632 markChildren(element, FINALIZER_MARK, mark_soft_refs)
634 ITERATE_OBJECT_LIST(run_finaliser, RUN_MARK);
636 /* There may be references still waiting to be enqueued by the
637 reference handler (from a previous GC). Remove them if
638 they're now unreachable as they will be collected */
640 #define CLEAR_UNMARKED(element) \
641 if(element && !IS_MARKED(element)) element = NULL
643 ITERATE_OBJECT_LIST(reference, CLEAR_UNMARKED);
645 /* Scan the interned string hash table and remove
646 any entries that are unmarked */
647 freeInternedStrings();
650 /* ------------------------- SWEEP PHASE ------------------------- */
652 int handleMarkedSpecial(Object *ob) {
653 ClassBlock *cb = CLASS_CB(ob->class);
654 int cleared = FALSE;
656 if(IS_REFERENCE(cb)) {
657 Object *referent = (Object *)INST_DATA(ob)[ref_referent_offset];
659 if(referent != NULL) {
660 int ref_mark = IS_MARKED(referent);
662 TRACE_GC("FREE: found Reference Object @%p class %s flags %d referent %x mark %d\n",
663 ob, cb->name, cb->flags, referent, ref_mark);
665 if(IS_PHANTOM_REFERENCE(cb)) {
666 if(ref_mark != PHANTOM_MARK)
667 goto out;
668 } else {
669 if(ref_mark == HARD_MARK)
670 goto out;
672 TRACE_GC("FREE: Clearing the referent field.\n");
673 INST_DATA(ob)[ref_referent_offset] = 0;
674 cleared = TRUE;
677 /* If the reference has a queue, add it to the list for enqueuing
678 by the Reference Handler thread. */
680 if((Object *)INST_DATA(ob)[ref_queue_offset] != NULL) {
681 TRACE_GC("FREE: Adding to list for enqueuing.\n");
683 ADD_TO_OBJECT_LIST(reference, ob);
684 notify_reference_thread = TRUE;
688 out:
689 return cleared;
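/* Illustrative summary (not part of the original source), restating the
   checks above: a phantom reference keeps its referent unless the referent
   is exactly phantom-marked, whereas a soft or weak reference keeps its
   referent only while it is hard (strongly) marked. When the referent is
   cleared and the reference has a queue, the reference object is queued for
   the Reference Handler thread to enqueue. */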
692 void handleUnmarkedSpecial(Object *ob) {
693 if(IS_CLASS(ob)) {
694 if(verbosegc) {
695 ClassBlock *cb = CLASS_CB(ob);
696 if(!IS_CLASS_DUP(cb))
697 jam_printf("<GC: Unloading class %s>\n", cb->name);
699 freeClassData(ob);
700 } else
701 if(IS_CLASS_LOADER(CLASS_CB(ob->class))) {
702 TRACE_GC("FREE: Freeing class loader object %p\n", ob);
703 unloadClassLoaderDlls(ob);
704 freeClassLoaderData(ob);
705 } else
706 if(IS_VMTHREAD(CLASS_CB(ob->class))) {
707 /* Free the native thread structure (see comment
708 in detachThread (thread.c)) */
709 TRACE_GC("FREE: Freeing native thread for VMThread object %p\n", ob);
710 gcPendingFree(threadSelf0(ob));
714 static uintptr_t doSweep(Thread *self) {
715 char *ptr;
716 Chunk newlist;
717 Chunk *curr = NULL, *last = &newlist;
719 /* Will hold the size of the largest free chunk
720 after scanning */
721 uintptr_t largest = 0;
723 /* Variables used to store verbose gc info */
724 uintptr_t marked = 0, unmarked = 0, freed = 0, cleared = 0;
726 /* Amount of free heap is re-calculated during scan */
727 heapfree = 0;
729 /* Scan the heap and free all unmarked objects by reconstructing
730 the freelist. Add all free chunks and unmarked objects and
731 merge adjacent free chunks into contiguous areas */
733 for(ptr = heapbase; ptr < heaplimit; ) {
734 uintptr_t hdr = HEADER(ptr);
735 uintptr_t size = HDR_SIZE(hdr);
736 Object *ob;
738 if(HDR_ALLOCED(hdr)) {
739 ob = (Object*)(ptr+HEADER_SIZE);
741 if(IS_MARKED(ob))
742 goto marked;
744 freed += size;
745 unmarked++;
747 if(HDR_SPECIAL_OBJ(hdr) && ob->class != NULL)
748 handleUnmarkedSpecial(ob);
750 TRACE_GC("FREE: Freeing ob @%p class %s - start of block\n", ob,
751 ob->class ? CLASS_CB(ob->class)->name : "?");
753 else
754 TRACE_GC("FREE: Unalloced block @%p size %d - start of block\n", ptr, size);
756 curr = (Chunk *) ptr;
758 /* Clear any set flag bits within the header */
759 curr->header &= HDR_FLAGS_MASK;
761 /* Scan the next chunks - while they are
762 free, merge them onto the first free
763 chunk */
765 for(;;) {
766 ptr += size;
768 if(ptr>=heaplimit)
769 goto out_last_free;
771 hdr = HEADER(ptr);
772 size = HDR_SIZE(hdr);
773 if(HDR_ALLOCED(hdr)) {
774 ob = (Object*)(ptr+HEADER_SIZE);
776 if(IS_MARKED(ob))
777 break;
779 freed += size;
780 unmarked++;
782 if(HDR_SPECIAL_OBJ(hdr) && ob->class != NULL)
783 handleUnmarkedSpecial(ob);
785 TRACE_GC("FREE: Freeing object @%p class %s - merging onto block @%p\n",
786 ob, ob->class ? CLASS_CB(ob->class)->name : "?", curr);
789 else
790 TRACE_GC("FREE: unalloced block @%p size %d - merging onto block @%p\n", ptr, size, curr);
791 curr->header += size;
794 /* Scanned to the next marked object - see
795 if it's the largest so far */
796 if(curr->header > largest)
797 largest = curr->header;
799 /* Add onto total count of free chunks */
800 heapfree += curr->header;
802 /* Add chunk onto the freelist only if it's
803 large enough to hold an object */
804 if(curr->header >= MIN_OBJECT_SIZE) {
805 last->next = curr;
806 last = curr;
809 marked:
810 marked++;
812 if(HDR_SPECIAL_OBJ(hdr) && ob->class != NULL && handleMarkedSpecial(ob))
813 cleared++;
815 /* Skip to next block */
816 ptr += size;
818 if(ptr >= heaplimit)
819 goto out_last_marked;
822 out_last_free:
824 /* Last chunk is free - need to check if
825 largest */
826 if(curr->header > largest)
827 largest = curr->header;
829 heapfree += curr->header;
831 /* Add chunk onto the freelist only if it's
832 large enough to hold an object */
833 if(curr->header >= MIN_OBJECT_SIZE) {
834 last->next = curr;
835 last = curr;
838 out_last_marked:
840 /* We've now reconstructed the freelist, set freelist
841 pointer to new list */
842 last->next = NULL;
843 freelist = newlist.next;
845 /* Reset next allocation block to beginning of list -
846 this leads to a search - use largest instead? */
847 chunkpp = &freelist;
849 #ifdef DEBUG
851 Chunk *c;
852 for(c = freelist; c != NULL; c = c->next)
853 jam_printf("Chunk @%p size: %d\n", c, c->header);
855 #endif
857 if(verbosegc) {
858 long long size = heaplimit-heapbase;
859 long long pcnt_used = ((long long)heapfree)*100/size;
860 jam_printf("<GC: Allocated objects: %lld>\n", (long long)marked);
861 jam_printf("<GC: Freed %lld object(s) using %lld bytes",
862 (long long)unmarked, (long long)freed);
863 if(cleared)
864 jam_printf(", cleared %lld reference(s)", (long long)cleared);
865 jam_printf(">\n<GC: Largest block is %lld total free is %lld out of %lld (%lld%%)>\n",
866 (long long)largest, (long long)heapfree, size, pcnt_used);
869 /* Return the size of the largest free chunk in heap - this
870 is the largest allocation request that can be satisfied */
872 return largest;
875 /* ------------------------- COMPACT PHASE ------------------------- */
877 void threadReference(Object **ref) {
878 Object *ob = *ref;
879 uintptr_t *hdr = HDR_ADDRESS(ob);
880 TRACE_COMPACT("Threading ref addr %p object ref %p link %p\n", ref, ob, *hdr);
882 *ref = (Object*)*hdr;
883 *hdr = ((uintptr_t)ref | FLC_BIT);
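/* Illustrative note (not part of the original source): a worked example of
   the threading scheme (Jonkers-style threaded compaction). Suppose object
   O has header word H (ALLOC_BIT set) and two slots r1 and r2 both hold O.
   After threadReference(&r1) then threadReference(&r2):

       O's header == (uintptr_t)&r2 | FLC_BIT
       r2         == (Object*)((uintptr_t)&r1 | FLC_BIT)
       r1         == (Object*)H

   i.e. the header now heads a linked list running through every slot that
   referenced O, terminated by the original header word (recognisable
   because its ALLOC_BIT is set). unthreadHeader() below walks this list,
   writes O's new address into each slot and finally restores H. */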
886 void unthreadHeader(uintptr_t *hdr_addr, Object *new_addr) {
887 uintptr_t hdr = *hdr_addr;
889 TRACE_COMPACT("Unthreading header address %p new addr %p\n", hdr_addr, new_addr);
891 while(HDR_THREADED(hdr)) {
892 uintptr_t *ref_addr = (uintptr_t*)(hdr & ~0x3);
894 TRACE_COMPACT("updating ref address %p\n", ref_addr);
895 hdr = *ref_addr;
896 *ref_addr = (uintptr_t)new_addr;
899 TRACE_COMPACT("Replacing original header contents %p\n", hdr);
900 *hdr_addr = hdr;
903 static void threadObjectLists() {
904 int i;
906 for(i = 0; i < has_finaliser_count; i++)
907 threadReference(&has_finaliser_list[i]);
909 #define THREAD_REFS(element) \
910 if(element) threadReference(&element)
912 ITERATE_OBJECT_LIST(run_finaliser, THREAD_REFS);
913 ITERATE_OBJECT_LIST(reference, THREAD_REFS);
916 void addConservativeRoots2Hash() {
917 int i;
919 for(i = 1; i < conservative_root_count; i <<= 1);
920 con_roots_hashtable_size = i << 1;
922 con_roots_hashtable = gcMemMalloc(con_roots_hashtable_size * sizeof(uintptr_t));
923 memset(con_roots_hashtable, 0, con_roots_hashtable_size * sizeof(uintptr_t));
925 for(i = 0; i < conservative_root_count; i++) {
926 uintptr_t data = ((uintptr_t)conservative_roots[i]) >> LOG_BYTESPERMARK;
927 int index = data & (con_roots_hashtable_size-1);
929 TRACE_COMPACT("Adding conservative root %p\n", conservative_roots[i]);
931 while(con_roots_hashtable[index] && con_roots_hashtable[index] != data)
932 index = (index + 1) & (con_roots_hashtable_size-1);
934 con_roots_hashtable[index] = data;
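/* Illustrative note (not part of the original source): the sizing loop above
   takes the smallest power of two not less than conservative_root_count and
   doubles it, e.g. 100 roots give a 256-entry table, so the open-addressed
   table probed by IS_CONSERVATIVE_ROOT is never more than half full and its
   linear probing stays short. Keys are heap addresses shifted right by
   LOG_BYTESPERMARK, so a genuine root never collides with the zero value
   used to mark empty slots. */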
938 void registerStaticObjectRefLocked(Object **ref) {
939 Thread *self = threadSelf();
941 disableSuspend(self);
942 lockVMLock(registered_refs_lock, self);
944 registerStaticObjectRef(ref);
946 unlockVMLock(registered_refs_lock, self);
947 enableSuspend(self);
950 void registerStaticObjectRef(Object **ref) {
951 if((registered_refs_count % LIST_INCREMENT) == 0) {
952 int new_size = registered_refs_count + LIST_INCREMENT;
953 registered_refs = sysRealloc(registered_refs,
954 new_size * sizeof(Object **));
956 registered_refs[registered_refs_count++] = ref;
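/* Illustrative usage sketch (not part of the original source); the names
   below are hypothetical. Native code that caches a heap object in static
   storage registers the address of its reference once, so it is scanned and
   threaded during GC/compaction (see the comment above the registered_refs
   declaration); the pre-allocated OutOfMemoryError in initialiseGC is
   registered in exactly this way:

       static Object *cached_ob;          // reference held outside the heap

       void initCache(Class *clazz) {
           cached_ob = allocObject(clazz);
           registerStaticObjectRefLocked(&cached_ob);
       }
*/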
959 void threadRegisteredReferences() {
960 int i;
962 for(i = 0; i < registered_refs_count; i++)
963 if(*registered_refs[i] != NULL)
964 threadReference(registered_refs[i]);
967 #define IS_CONSERVATIVE_ROOT(ob) \
968 ({ \
969 uintptr_t data = ((uintptr_t)ob) >> LOG_BYTESPERMARK; \
970 int index = data & (con_roots_hashtable_size-1); \
972 while(con_roots_hashtable[index] && con_roots_hashtable[index] != data) \
973 index = (index + 1) & (con_roots_hashtable_size-1); \
974 con_roots_hashtable[index]; \
977 #define ADD_CHUNK_TO_FREELIST(start, end) \
979 Chunk *curr = (Chunk *) start; \
980 curr->header = end - start; \
982 if(curr->header >= MIN_OBJECT_SIZE) { \
983 last->next = curr; \
984 last = curr; \
987 if(curr->header > largest) \
988 largest = curr->header; \
990 /* Add onto total count of free chunks */ \
991 heapfree += curr->header; \
994 int compactSlideBlock(char *block_addr, char *new_addr) {
995 uintptr_t hdr = HEADER(block_addr);
996 uintptr_t size = HDR_SIZE(hdr);
998 /* Slide the object down the heap. Use memcpy if
999 the areas don't overlap as it should be faster */
1000 if(new_addr + size <= block_addr)
1001 memcpy(new_addr, block_addr, size);
1002 else
1003 memmove(new_addr, block_addr, size);
1005 /* If the objects hashCode (address) has been taken we must
1006 maintain the same value after the object has been moved */
1007 if(HDR_HASHCODE_TAKEN(hdr)) {
1008 uintptr_t *hdr_addr = &HEADER(new_addr);
1009 uintptr_t *hash_addr = (uintptr_t*)(new_addr + size);
1011 TRACE_COMPACT("Adding hashCode to object %p\n", block_addr + HEADER_SIZE);
1013 /* Add the original address onto the end of the object */
1014 *hash_addr = (uintptr_t)(block_addr + HEADER_SIZE);
1015 *hdr_addr &= ~HASHCODE_TAKEN_BIT;
1016 *hdr_addr |= HAS_HASHCODE_BIT;
1017 *hdr_addr += OBJECT_GRAIN;
1018 return TRUE;
1021 return FALSE;
1024 void threadClassData(Class *class, Class *new_addr) {
1025 ClassBlock *cb = CLASS_CB(class);
1026 ConstantPool *cp = &cb->constant_pool;
1027 FieldBlock *fb = cb->fields;
1028 int i;
1030 TRACE_COMPACT("Threading class %s @%p\n", cb->name, class);
1032 if(cb->class_loader != NULL)
1033 threadReference(&cb->class_loader);
1035 if(cb->super != NULL)
1036 threadReference((Object**)&cb->super);
1038 for(i = 0; i < cb->interfaces_count; i++)
1039 if(cb->interfaces[i] != NULL)
1040 threadReference((Object**)&cb->interfaces[i]);
1042 if(IS_ARRAY(cb))
1043 threadReference((Object**)&cb->element_class);
1045 for(i = 0; i < cb->imethod_table_size; i++)
1046 threadReference((Object**)&cb->imethod_table[i].interface);
1048 TRACE_COMPACT("Threading static fields for class %s\n", cb->name);
1050 /* If the class has not been linked its
1051 statics will not be initialised */
1052 if(cb->state >= CLASS_LINKED)
1053 for(i = 0; i < cb->fields_count; i++, fb++)
1054 if((fb->access_flags & ACC_STATIC) &&
1055 ((*fb->type == 'L') || (*fb->type == '['))) {
1056 Object **ob = (Object **)&fb->static_value;
1057 TRACE_COMPACT("Field %s %s object @%p\n", fb->name, fb->type, *ob);
1058 if(*ob != NULL)
1059 threadReference(ob);
1062 TRACE_COMPACT("Threading constant pool references for class %s\n", cb->name);
1064 for(i = 1; i < cb->constant_pool_count; i++)
1065 if(CP_TYPE(cp, i) == CONSTANT_ResolvedClass || CP_TYPE(cp, i) == CONSTANT_ResolvedString) {
1066 TRACE_COMPACT("Constant pool ref idx %d type %d object @%p\n", i, CP_TYPE(cp, i), CP_INFO(cp, i));
1067 threadReference((Object**)&(CP_INFO(cp, i)));
1070 /* Don't bother threading the references to the class from within the
1071 classes own method and field blocks. As we know the new address we
1072 can update the address now. */
1074 for(i = 0; i < cb->fields_count; i++)
1075 cb->fields[i].class = new_addr;
1077 for(i = 0; i < cb->methods_count; i++)
1078 cb->methods[i].class = new_addr;
1081 int threadChildren(Object *ob, Object *new_addr) {
1082 Class *class = ob->class;
1083 ClassBlock *cb = CLASS_CB(class);
1084 int cleared = FALSE;
1086 if(class == NULL)
1087 return FALSE;
1089 if(cb->name[0] == '[') {
1090 if((cb->name[1] == 'L') || (cb->name[1] == '[')) {
1091 Object **body = ARRAY_DATA(ob);
1092 int len = ARRAY_LEN(ob);
1093 int i;
1094 TRACE_COMPACT("Scanning Array object @%p class is %s len is %d\n", ob, cb->name, len);
1096 for(i = 0; i < len; i++, body++) {
1097 TRACE_COMPACT("Object at index %d is @%p\n", i, *body);
1099 if(*body != NULL)
1100 threadReference(body);
1102 } else {
1103 TRACE_COMPACT("Array object @%p class is %s - not Scanning...\n", ob, cb->name);
1105 } else {
1106 Object **body = (Object**)INST_DATA(ob);
1107 int i;
1109 if(IS_CLASS_CLASS(cb)) {
1110 TRACE_COMPACT("Found class object @%p name is %s\n", ob, CLASS_CB(ob)->name);
1111 threadClassData((Class*)ob, (Class*)new_addr);
1112 } else
1113 if(IS_CLASS_LOADER(cb)) {
1114 TRACE_COMPACT("Found class loader object @%p class %s\n", ob, cb->name);
1115 threadLoaderClasses(ob);
1116 } else
1117 if(IS_REFERENCE(cb)) {
1118 Object **referent = &body[ref_referent_offset];
1120 if(*referent != NULL) {
1121 int ref_mark = IS_MARKED(*referent);
1123 TRACE_GC("Found Reference Object @%p class %s flags %d referent %x mark %d\n",
1124 ob, cb->name, cb->flags, *referent, ref_mark);
1126 if(IS_PHANTOM_REFERENCE(cb)) {
1127 if(ref_mark != PHANTOM_MARK)
1128 goto out;
1129 } else {
1130 if(ref_mark == HARD_MARK)
1131 goto out;
1133 TRACE_GC("Clearing the referent field.\n");
1134 *referent = 0;
1135 cleared = TRUE;
1138 /* If the reference has a queue, add it to the list for enqueuing
1139 by the Reference Handler thread. */
1141 if(body[ref_queue_offset] != NULL) {
1142 TRACE_GC("Adding to list for enqueuing.\n");
1144 ADD_TO_OBJECT_LIST(reference, new_addr);
1145 notify_reference_thread = TRUE;
1147 out:
1148 if(!cleared)
1149 threadReference(referent);
1153 TRACE_COMPACT("Scanning object @%p class is %s\n", ob, cb->name);
1155 /* The reference offsets table consists of a list of start and
1156 end offsets corresponding to the references within the object's
1157 instance data. Scan the list, and thread all references. */
1159 for(i = 0; i < cb->refs_offsets_size; i++) {
1160 int offset = cb->refs_offsets_table[i].start;
1161 int end = cb->refs_offsets_table[i].end;
1163 for(; offset < end; offset++) {
1164 Object **ob = (Object **)&body[offset];
1165 TRACE_COMPACT("Offset %d reference @%p\n", offset, *ob);
1167 if(*ob != NULL)
1168 threadReference(ob);
1173 /* Finally thread the object's class reference */
1174 threadReference((Object**)&ob->class);
1176 return cleared;
1179 uintptr_t doCompact() {
1180 char *ptr, *new_addr;
1181 Chunk newlist;
1182 Chunk *last = &newlist;
1184 /* Will hold the size of the largest free chunk
1185 after scanning */
1186 uintptr_t largest = 0;
1188 /* Variables used to store verbose gc info */
1189 uintptr_t marked = 0, unmarked = 0, freed = 0, cleared = 0, moved = 0;
1191 /* Amount of free heap is re-calculated during scan */
1192 heapfree = 0;
1194 /* Transform conservative root list into
1195 hash table for faster searching */
1196 addConservativeRoots2Hash();
1198 TRACE_COMPACT("COMPACT THREADING ROOTS\n");
1200 /* Thread object references from outside of the heap */
1201 threadObjectLists();
1202 threadRegisteredReferences();
1203 threadBootClasses();
1204 threadMonitorCache();
1205 threadInternedStrings();
1206 threadLiveClassLoaderDlls();
1208 TRACE_COMPACT("COMPACT PHASE ONE\n");
1210 /* First phase scans the heap, threads each objects references
1211 and updates forward references to each object */
1212 for(new_addr = ptr = heapbase; ptr < heaplimit;) {
1213 uintptr_t hdr = HEADER(ptr);
1214 uintptr_t size = HDR_SIZE(hdr);
1215 Object *ob;
1217 if(HDR_THREADED(hdr)) {
1218 ob = (Object*)(ptr+HEADER_SIZE);
1220 if(IS_CONSERVATIVE_ROOT(ob))
1221 new_addr = ptr;
1223 /* Unthread forward references to the object */
1224 unthreadHeader((uintptr_t*)ptr, (Object*)(new_addr+HEADER_SIZE));
1226 /* Header is now unthreaded -- re-read it, and the size */
1227 hdr = HEADER(ptr);
1228 size = HDR_SIZE(hdr);
1230 goto marked_phase1;
1233 if(HDR_ALLOCED(hdr)) {
1234 ob = (Object*)(ptr+HEADER_SIZE);
1236 if(IS_MARKED(ob)) {
1237 if(IS_CONSERVATIVE_ROOT(ob))
1238 new_addr = ptr;
1240 marked_phase1:
1241 marked++;
1243 /* Thread references within the object */
1244 if(threadChildren(ob, (Object*)(new_addr+HEADER_SIZE)))
1245 cleared++;
1247 if(new_addr != ptr && HDR_HASHCODE_TAKEN(hdr))
1248 new_addr += OBJECT_GRAIN;
1250 new_addr += size;
1251 goto next;
1254 if(HDR_SPECIAL_OBJ(hdr) && ob->class != NULL)
1255 handleUnmarkedSpecial(ob);
1257 freed += size;
1258 unmarked++;
1261 next:
1262 /* Skip to next block */
1263 ptr += size;
1266 TRACE_COMPACT("COMPACT PHASE TWO\n");
1268 /* Second phase rescans the heap, updates backwards references
1269 to each object, and then moves them. */
1270 for(new_addr = ptr = heapbase; ptr < heaplimit;) {
1271 uintptr_t hdr = HEADER(ptr);
1272 uintptr_t size = HDR_SIZE(hdr);
1273 Object *ob;
1275 if(HDR_THREADED(hdr)) {
1276 ob = (Object*)(ptr+HEADER_SIZE);
1278 if(IS_CONSERVATIVE_ROOT(ob) && new_addr != ptr) {
1279 ADD_CHUNK_TO_FREELIST(new_addr, ptr);
1280 new_addr = ptr;
1283 /* Unthread backward references to the object */
1284 unthreadHeader((uintptr_t*)ptr, (Object*)(new_addr+HEADER_SIZE));
1286 /* Header is now unthreaded -- re-read */
1287 hdr = HEADER(ptr);
1288 size = HDR_SIZE(hdr);
1290 goto marked_phase2;
1293 if(HDR_ALLOCED(hdr)) {
1294 ob = (Object*)(ptr+HEADER_SIZE);
1296 if(IS_MARKED(ob)) {
1297 if(IS_CONSERVATIVE_ROOT(ob) && new_addr != ptr) {
1298 ADD_CHUNK_TO_FREELIST(new_addr, ptr);
1299 new_addr = ptr;
1302 marked_phase2:
1303 /* Move the object to the new address */
1304 if(new_addr != ptr) {
1305 TRACE_COMPACT("Moving object from %p to %p.\n", ob, new_addr+HEADER_SIZE);
1307 if(compactSlideBlock(ptr, new_addr))
1308 new_addr += OBJECT_GRAIN;
1310 moved++;
1313 new_addr += size;
1317 /* Skip to next block */
1318 ptr += size;
1321 if(new_addr != heaplimit)
1322 ADD_CHUNK_TO_FREELIST(new_addr, heaplimit);
1324 /* We've now reconstructed the freelist, set freelist
1325 pointer to new list */
1326 last->next = NULL;
1327 freelist = newlist.next;
1329 /* Reset next allocation block to beginning of list */
1330 chunkpp = &freelist;
1332 /* Free conservative roots hash table */
1333 gcMemFree(con_roots_hashtable);
1335 if(verbosegc) {
1336 long long size = heaplimit-heapbase;
1337 long long pcnt_used = ((long long)heapfree)*100/size;
1338 jam_printf("<GC: Allocated objects: %lld>\n", (long long)marked);
1339 jam_printf("<GC: Freed %lld object(s) using %lld bytes",
1340 (long long)unmarked, (long long)freed);
1341 if(cleared)
1342 jam_printf(", cleared %lld reference(s)", (long long)cleared);
1343 jam_printf(">\n<GC: Moved %lld objects, largest block is %lld total free is %lld out of %lld (%lld%%)>\n",
1344 (long long)moved, (long long)largest, (long long)heapfree, size, pcnt_used);
1347 /* Return the size of the largest free chunk in heap - this
1348 is the largest allocation request that can be satisfied */
1350 return largest;
1353 void expandHeap(int min) {
1354 Chunk *chunk, *new;
1355 uintptr_t delta;
1357 if(verbosegc)
1358 jam_printf("<GC: Expanding heap - minimum needed is %d>\n", min);
1360 delta = (heaplimit-heapbase)/2;
1361 delta = delta < min ? min : delta;
1363 if((heaplimit + delta) > heapmax)
1364 delta = heapmax - heaplimit;
1366 /* Ensure new region is multiple of object grain in size */
1368 delta = (delta&~(OBJECT_GRAIN-1));
1370 if(verbosegc)
1371 jam_printf("<GC: Expanding heap by %lld bytes>\n", (long long)delta);
1373 new = (Chunk*)heaplimit;
1374 new->header = delta;
1375 new->next = NULL;
1377 if(freelist != NULL) {
1378 /* The freelist is in address order - find the last
1379 free chunk and add the new area to the end. */
1381 for(chunk = freelist; chunk->next != NULL; chunk = chunk->next);
1382 chunk->next = new;
1383 } else
1384 freelist = new;
1386 heaplimit += delta;
1387 heapfree += delta;
1389 /* The heap has increased in size - need to reallocate
1390 the mark bits to cover new area */
1392 sysFree(markBits);
1393 allocMarkBits();
1397 /* ------------------------- GARBAGE COLLECT ------------------------- */
1399 static void getTime(struct timeval *tv) {
1400 gettimeofday(tv, 0);
1403 static long endTime(struct timeval *start) {
1404 struct timeval end;
1405 int secs, usecs;
1407 getTime(&end);
1408 usecs = end.tv_usec - start->tv_usec;
1409 secs = end.tv_sec - start->tv_sec;
1411 return secs * 1000000 + usecs;
1414 unsigned long gc0(int mark_soft_refs, int compact) {
1416 Thread *self = threadSelf();
1417 uintptr_t largest;
1419 /* Override compact if compaction has been specified
1420 on the command line */
1421 if(compact_override)
1422 compact = compact_value;
1424 /* Reset flags. Will be set during GC if a thread needs
1425 to be woken up */
1426 notify_finaliser_thread = notify_reference_thread = FALSE;
1428 /* Grab locks associated with the suspension blocked
1429 regions. This ensures all threads have suspended
1430 or gone to sleep, and cannot modify a list or obtain
1431 a reference after the reference scans */
1433 /* Potential threads adding a newly created object */
1434 lockVMLock(has_fnlzr_lock, self);
1436 /* Held by the finaliser thread */
1437 lockVMWaitLock(run_finaliser_lock, self);
1439 /* Held by the reference handler thread */
1440 lockVMWaitLock(reference_lock, self);
1442 /* Stop the world */
1443 disableSuspend(self);
1444 suspendAllThreads(self);
1446 if(verbosegc) {
1447 struct timeval start;
1448 float mark_time;
1449 float scan_time;
1451 getTime(&start);
1452 doMark(self, mark_soft_refs);
1453 mark_time = endTime(&start)/1000000.0;
1455 getTime(&start);
1456 largest = compact ? doCompact() : doSweep(self);
1457 scan_time = endTime(&start)/1000000.0;
1459 jam_printf("<GC: Mark took %f seconds, %s took %f seconds>\n",
1460 mark_time, compact ? "compact" : "scan", scan_time);
1461 } else {
1462 doMark(self, mark_soft_refs);
1463 largest = compact ? doCompact() : doSweep(self);
1466 /* Restart the world */
1467 resumeAllThreads(self);
1468 enableSuspend(self);
1470 /* Notify the finaliser thread if new finalisers
1471 need to be run */
1472 if(notify_finaliser_thread)
1473 notifyAllVMWaitLock(run_finaliser_lock, self);
1475 /* Notify the reference thread if new references
1476 have been enqueued */
1477 if(notify_reference_thread)
1478 notifyAllVMWaitLock(reference_lock, self);
1480 /* Release the locks */
1481 unlockVMLock(has_fnlzr_lock, self);
1482 unlockVMWaitLock(reference_lock, self);
1483 unlockVMWaitLock(run_finaliser_lock, self);
1485 freeConservativeRoots();
1486 freePendingFrees();
1488 return largest;
1491 void gc1() {
1492 Thread *self;
1493 disableSuspend(self = threadSelf());
1494 lockVMLock(heap_lock, self);
1495 enableSuspend(self);
1496 gc0(TRUE, FALSE);
1497 unlockVMLock(heap_lock, self);
1500 /* ------------------------- FINALISATION ------------------------- */
1502 /* Run all outstanding finalizers. Finalizers are only run by the
1503 finalizer thread, so the current thread waits for the finalizer
1504 to finish. Although the JLS allows arbitrary threads to run
1505 finalizers, this is inherently dangerous as locks may be held,
1506 leading to deadlock. */
1508 #define TIMEOUT 100 /* milliseconds */
1510 static void runFinalizers0(Thread *self, int max_wait) {
1511 int i, size, old_size;
1513 /* If this is the finalizer thread we've been called
1514 from within a finalizer -- don't wait for ourselves! */
1515 if(self == finalizer_thread)
1516 return;
1518 lockVMWaitLock(run_finaliser_lock, self);
1520 /* Wait for the finalizer thread to finish running all
1521 outstanding finalizers. Rare possibility that a finalizer
1522 may try to grab a lock we're holding. To avoid deadlock
1523 use a timeout and give up if the finalizer's made no
1524 forward progress. */
1526 old_size = run_finaliser_size + 1;
1528 for(i = 0; i < max_wait/TIMEOUT; i++) {
1529 size = run_finaliser_end - run_finaliser_start;
1530 if(size <= 0)
1531 size += run_finaliser_size;
1533 if(size == 0 || size >= old_size)
1534 break;
1536 old_size = size;
1537 timedWaitVMWaitLock(run_finaliser_lock, self, TIMEOUT);
1540 unlockVMWaitLock(run_finaliser_lock, self);
1543 /* Called by VMRuntime.runFinalization() -- runFinalizers0
1544 is entered with suspension disabled. */
1546 void runFinalizers() {
1547 Thread *self = threadSelf();
1548 disableSuspend(self);
1549 runFinalizers0(self, 100000);
1550 enableSuspend(self);
1554 /* ------------------------- GC HELPER THREADS ------------------------- */
1556 /* The async gc loop. It sleeps for 1 second and
1557 * calls gc if the system's idle and the heap's
1558 * changed */
1560 void asyncGCThreadLoop(Thread *self) {
1561 for(;;) {
1562 threadSleep(self, 1000, 0);
1563 if(systemIdle(self))
1564 gc1();
1568 #define PROCESS_OBJECT_LIST(list, method_idx, verbose_message, self, stack_top) \
1570 disableSuspend0(self, stack_top); \
1571 lockVMWaitLock(list##_lock, self); \
1573 for(;;) { \
1574 waitVMWaitLock(list##_lock, self); \
1576 if((list##_start == list##_size) && (list##_end == 0)) \
1577 continue; \
1579 if(verbosegc) { \
1580 int diff = list##_end - list##_start; \
1581 jam_printf(verbose_message, diff > 0 ? diff : diff + list##_size); \
1584 do { \
1585 Object *ob; \
1586 list##_start %= list##_size; \
1587 if((ob = list##_list[list##_start]) == NULL) \
1588 continue; \
1590 unlockVMWaitLock(list##_lock, self); \
1591 enableSuspend(self); \
1593 /* Run the process method */ \
1594 executeMethod(ob, CLASS_CB(ob->class)->method_table[method_idx]); \
1596 /* Should be nothing interesting on stack or in \
1597 * registers so use same stack top as thread start. */ \
1599 disableSuspend0(self, stack_top); \
1600 lockVMWaitLock(list##_lock, self); \
1602 /* Clear any exceptions - exceptions thrown in finalizers are \
1603 silently ignored */ \
1605 clearException(); \
1606 } while(++list##_start != list##_end); \
1608 list##_start = list##_size; \
1609 list##_end = 0; \
1611 notifyAllVMWaitLock(list##_lock, self); \
1615 /* The finalizer thread waits for notification
1616 * of new finalizers (by the thread doing gc)
1617 * and then runs them */
1619 void finalizerThreadLoop(Thread *self) {
1620 finalizer_thread = self;
1621 PROCESS_OBJECT_LIST(run_finaliser, finalize_mtbl_idx,
1622 "<GC: running %d finalisers>\n", self, &self);
1625 /* The reference handler thread waits for notification
1626 by the GC of new reference objects, and enqueues
1627 them */
1629 void referenceHandlerThreadLoop(Thread *self) {
1630 PROCESS_OBJECT_LIST(reference, enqueue_mtbl_idx,
1631 "<GC: enqueuing %d references>\n", self, &self);
1634 void initialiseGC(InitArgs *args) {
1635 /* Pre-allocate an OutOfMemoryError exception object - we throw it
1636 * when we're really low on heap space, and can create FA... */
1638 MethodBlock *init;
1639 Class *oom_clazz = findSystemClass("java/lang/OutOfMemoryError");
1640 if(exceptionOccurred()) {
1641 printException();
1642 exitVM(1);
1645 /* Initialize it */
1646 init = lookupMethod(oom_clazz, "<init>", "(Ljava/lang/String;)V");
1647 oom = allocObject(oom_clazz);
1648 registerStaticObjectRef(&oom);
1650 executeMethod(oom, init, NULL);
1652 /* Create and start VM threads for the reference handler and finalizer */
1653 createVMThread("Finalizer", finalizerThreadLoop);
1654 createVMThread("Reference Handler", referenceHandlerThreadLoop);
1656 /* Create and start VM thread for asynchronous GC */
1657 if(!args->noasyncgc)
1658 createVMThread("Async GC", asyncGCThreadLoop);
1660 /* GC will use mark-sweep or mark-compact as appropriate, but this
1661 can be changed via the command line */
1662 compact_override = args->compact_specified;
1663 compact_value = args->do_compact;
1667 /* ------------------------- ALLOCATION ROUTINES ------------------------- */
1669 void *gcMalloc(int length) {
1670 /* The state determines what action to take in the event of
1671 allocation failure. The states go up in seriousness,
1672 and are visible to other threads */
1673 static enum { gc, run_finalizers, throw_oom } state = gc;
1675 int n = (length + HEADER_SIZE + OBJECT_GRAIN - 1) & ~(OBJECT_GRAIN - 1);
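/* Illustrative note (not part of the original source): with a HEADER_SIZE
   of 4, a request for 13 bytes becomes n = (13 + 4 + 7) & ~7 == 24, i.e.
   payload plus header rounded up to the next OBJECT_GRAIN multiple. */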
1676 uintptr_t largest;
1677 Chunk *found;
1678 Thread *self;
1679 #ifdef TRACEALLOC
1680 int tries;
1681 #endif
1683 /* See comment below */
1684 char *ret_addr;
1686 /* Grab the heap lock, hopefully without having to
1687 wait for it to avoid disabling suspension */
1688 self = threadSelf();
1689 if(!tryLockVMLock(heap_lock, self)) {
1690 disableSuspend(self);
1691 lockVMLock(heap_lock, self);
1692 enableSuspend(self);
1695 /* Scan freelist looking for a chunk big enough to
1696 satisfy allocation request */
1698 for(;;) {
1699 #ifdef TRACEALLOC
1700 tries = 0;
1701 #endif
1702 while(*chunkpp) {
1703 uintptr_t len = (*chunkpp)->header;
1705 if(len == n) {
1706 found = *chunkpp;
1707 *chunkpp = found->next;
1708 goto got_it;
1711 if(len > n) {
1712 Chunk *rem;
1713 unsigned int rem_len;
1714 found = *chunkpp;
1716 rem_len = len - n;
1718 /* Chain the remainder onto the freelist only
1719 if it's large enough to hold an object */
1720 if(rem_len >= MIN_OBJECT_SIZE) {
1721 rem = (Chunk*)((char*)found + n);
1722 rem->header = rem_len;
1723 rem->next = found->next;
1724 *chunkpp = rem;
1725 } else
1726 *chunkpp = found->next;
1728 goto got_it;
1730 chunkpp = &(*chunkpp)->next;
1731 #ifdef TRACEALLOC
1732 tries++;
1733 #endif
1736 if(verbosegc)
1737 jam_printf("<GC: Alloc attempt for %d bytes failed.>\n", n);
1739 switch(state) {
1741 case gc:
1742 /* Normal failure. Do a garbage-collection and retry
1743 allocation if the largest block satisfies the request.
1744 Attempt to ensure heap is at least 25% free, to stop
1745 rapid gc cycles */
1746 largest = gc0(TRUE, FALSE);
1748 if(n <= largest && (heapfree * 4 >= (heaplimit - heapbase)))
1749 break;
1751 /* We fall through into the next state, but we need to set
1752 the state as it will be visible to other threads */
1753 state = run_finalizers;
1755 case run_finalizers:
1756 /* Before expanding heap try to run outstanding finalizers.
1757 If gc found new finalizers, this gives the finalizer chance
1758 to run them */
1759 unlockVMLock(heap_lock, self);
1760 disableSuspend(self);
1762 if(verbosegc)
1763 jam_printf("<GC: Waiting for finalizers to be run.>\n");
1765 runFinalizers0(self, 200);
1766 lockVMLock(heap_lock, self);
1767 enableSuspend(self);
1769 if(state != run_finalizers)
1770 break;
1772 /* Retry gc, but this time compact the heap rather than just
1773 sweeping it */
1774 largest = gc0(TRUE, TRUE);
1775 if(n <= largest && (heapfree * 4 >= (heaplimit - heapbase))) {
1776 state = gc;
1777 break;
1780 /* Still not freed enough memory so try to expand the heap.
1781 Note we retry allocation even if the heap couldn't be
1782 expanded sufficiently -- there's a chance gc may merge
1783 adjacent blocks together at the top of the heap */
1784 if(heaplimit < heapmax) {
1785 expandHeap(n);
1786 state = gc;
1787 break;
1790 if(verbosegc)
1791 jam_printf("<GC: Heap at maximum already. Clearing Soft References>\n");
1793 /* Can't expand the heap any more. Try GC again but this time
1794 clearing all soft references. Note we succeed if we can satisfy
1795 the request -- we may have been able to all along, but with
1796 nothing spare. We may thrash, but it's better than throwing OOM */
1797 largest = gc0(FALSE, TRUE);
1798 if(n <= largest) {
1799 state = gc;
1800 break;
1803 if(verbosegc)
1804 jam_printf("<GC: completely out of heap space - throwing OutOfMemoryError>\n");
1806 state = throw_oom;
1807 unlockVMLock(heap_lock, self);
1808 signalException("java/lang/OutOfMemoryError", NULL);
1809 return NULL;
1810 break;
1812 case throw_oom:
1813 /* Already throwing an OutOfMemoryError in some thread. In both
1814 * cases, throw an already prepared OOM (no stacktrace). Could have a
1815 * per-thread flag, so we try to throw a new OOM in each thread, but
1816 * if we're this low on memory I doubt it'll make much difference.
1819 if(verbosegc)
1820 jam_printf("<GC: completely out of heap space - throwing prepared OutOfMemoryError>\n");
1822 state = gc;
1823 unlockVMLock(heap_lock, self);
1824 setException(oom);
1825 return NULL;
1826 break;
1830 got_it:
1831 #ifdef TRACEALLOC
1832 jam_printf("<ALLOC: took %d tries to find block.>\n", tries);
1833 #endif
1835 heapfree -= n;
1837 /* Mark found chunk as allocated */
1838 found->header = n | ALLOC_BIT;
1840 /* Found is a block pointer - if we unlock now, small window
1841 * where new object ref is not held and will therefore be gc'ed.
1842 * Setup ret_addr before unlocking to prevent this.
1845 ret_addr = ((char*)found)+HEADER_SIZE;
1846 memset(ret_addr, 0, n-HEADER_SIZE);
1847 unlockVMLock(heap_lock, self);
1849 return ret_addr;
1852 /* Object allocation routines */
1854 #define ADD_FINALIZED_OBJECT(ob) \
1856 Thread *self; \
1857 disableSuspend(self = threadSelf()); \
1858 lockVMLock(has_fnlzr_lock, self); \
1859 TRACE_FNLZ(("Object @%p type %s has a finalize method...\n", \
1860 ob, CLASS_CB(ob->class)->name)); \
1861 if(has_finaliser_count == has_finaliser_size) { \
1862 has_finaliser_size += LIST_INCREMENT; \
1863 has_finaliser_list = (Object**)sysRealloc(has_finaliser_list, \
1864 has_finaliser_size*sizeof(Object*));\
1867 has_finaliser_list[has_finaliser_count++] = ob; \
1868 unlockVMLock(has_fnlzr_lock, self); \
1869 enableSuspend(self); \
1872 Object *allocObject(Class *class) {
1873 ClassBlock *cb = CLASS_CB(class);
1874 int size = cb->object_size * sizeof(uintptr_t);
1875 Object *ob = (Object *)gcMalloc(size + sizeof(Object));
1877 if(ob != NULL) {
1878 ob->class = class;
1880 /* If the object needs finalising add it to the
1881 has finaliser list */
1883 if(IS_FINALIZED(cb))
1884 ADD_FINALIZED_OBJECT(ob);
1886 /* If the object is an instance of a special class
1887 mark it by setting the bit in the chunk header */
1889 if(IS_SPECIAL(cb))
1890 SET_SPECIAL_OB(ob);
1892 TRACE_ALLOC("<ALLOC: allocated %s object @%p>\n", cb->name, ob);
1895 return ob;
1898 Object *allocArray(Class *class, int size, int el_size) {
1899 Object *ob;
1901 /* Special check to protect against integer overflow */
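/* Illustrative note (not part of the original source): the guard below
   rejects any length for which size * el_size + sizeof(u4) + sizeof(Object)
   would exceed INT_MAX; e.g. a requested length of 0x20000000 with el_size 8
   is refused before the size passed to gcMalloc could wrap around. */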
1902 if(size > (INT_MAX - sizeof(u4) - sizeof(Object)) / el_size) {
1903 signalException("java/lang/OutOfMemoryError", NULL);
1904 return NULL;
1907 ob = (Object *)gcMalloc(size * el_size + sizeof(u4) + sizeof(Object));
1909 if(ob != NULL) {
1910 ob->class = class;
1911 ARRAY_LEN(ob) = size;
1912 TRACE_ALLOC("<ALLOC: allocated %s array object @%p>\n", CLASS_CB(class)->name, ob);
1915 return ob;
1918 Object *allocTypeArray(int type, int size) {
1919 Class *class;
1920 int el_size;
1922 if(size < 0) {
1923 signalException("java/lang/NegativeArraySizeException", NULL);
1924 return NULL;
1927 switch(type) {
1928 case T_BOOLEAN:
1929 if(bool_array_class == NULL) {
1930 bool_array_class = findArrayClass("[Z");
1931 registerStaticClassRefLocked(&bool_array_class);
1933 class = bool_array_class;
1934 el_size = 1;
1935 break;
1937 case T_BYTE:
1938 if(byte_array_class == NULL) {
1939 byte_array_class = findArrayClass("[B");
1940 registerStaticClassRefLocked(&byte_array_class);
1942 class = byte_array_class;
1943 el_size = 1;
1944 break;
1946 case T_CHAR:
1947 if(char_array_class == NULL) {
1948 char_array_class = findArrayClass("[C");
1949 registerStaticClassRefLocked(&char_array_class);
1951 class = char_array_class;
1952 el_size = 2;
1953 break;
1955 case T_SHORT:
1956 if(short_array_class == NULL) {
1957 short_array_class = findArrayClass("[S");
1958 registerStaticClassRefLocked(&short_array_class);
1960 class = short_array_class;
1961 el_size = 2;
1962 break;
1964 case T_INT:
1965 if(int_array_class == NULL) {
1966 int_array_class = findArrayClass("[I");
1967 registerStaticClassRefLocked(&int_array_class);
1969 class = int_array_class;
1970 el_size = 4;
1971 break;
1973 case T_FLOAT:
1974 if(float_array_class == NULL) {
1975 float_array_class = findArrayClass("[F");
1976 registerStaticClassRefLocked(&float_array_class);
1978 class = float_array_class;
1979 el_size = 4;
1980 break;
1982 case T_DOUBLE:
1983 if(double_array_class == NULL) {
1984 double_array_class = findArrayClass("[D");
1985 registerStaticClassRefLocked(&double_array_class);
1987 class = double_array_class;
1988 el_size = 8;
1989 break;
1991 case T_LONG:
1992 if(long_array_class == NULL) {
1993 long_array_class = findArrayClass("[J");
1994 registerStaticClassRefLocked(&long_array_class);
1996 class = long_array_class;
1997 el_size = 8;
1998 break;
2000 default:
2001 jam_printf("Invalid array type %d - aborting VM...\n", type);
2002 exit(0);
2005 if(class == NULL)
2006 return NULL;
2008 return allocArray(class, size, el_size);
2011 Object *allocMultiArray(Class *array_class, int dim, intptr_t *count) {
2013 int i;
2014 Object *array;
2015 char *element_name = CLASS_CB(array_class)->name + 1;
2017 if(dim > 1) {
2018 Class *aclass = findArrayClassFromClass(element_name, array_class);
2019 Object **body;
2021 array = allocArray(array_class, *count, sizeof(Object*));
2023 if(array == NULL)
2024 return NULL;
2026 body = ARRAY_DATA(array);
2027 for(i = 0; i < *count; i++)
2028 if((*body++ = allocMultiArray(aclass, dim - 1, count + 1)) == NULL)
2029 return NULL;
2030 } else {
2031 int el_size;
2033 switch(*element_name) {
2034 case 'B':
2035 case 'Z':
2036 el_size = 1;
2037 break;
2039 case 'C':
2040 case 'S':
2041 el_size = 2;
2042 break;
2044 case 'I':
2045 case 'F':
2046 el_size = 4;
2047 break;
2049 case 'L':
2050 el_size = sizeof(Object*);
2051 break;
2053 default:
2054 el_size = 8;
2055 break;
2057 array = allocArray(array_class, *count, el_size);
2060 return array;
2063 Class *allocClass() {
2064 Class *class = (Class*)gcMalloc(sizeof(ClassBlock)+sizeof(Class));
2066 if(class != NULL) {
2067 SET_SPECIAL_OB(class);
2068 TRACE_ALLOC("<ALLOC: allocated class object @%p>\n", class);
2071 return class;
2074 Object *cloneObject(Object *ob) {
2075 uintptr_t hdr = *HDR_ADDRESS(ob);
2076 int size = HDR_SIZE(hdr)-HEADER_SIZE;
2077 Object *clone;
2079 /* If the object stores its original address the actual object
2080 data size will be smaller than the size recorded in the header */
2081 if(HDR_HAS_HASHCODE(hdr))
2082 size -= OBJECT_GRAIN;
2084 clone = (Object*)gcMalloc(size);
2086 if(clone != NULL) {
2087 memcpy(clone, ob, size);
2089 /* We will also have copied the objects lock word */
2090 clone->lock = 0;
2092 if(IS_FINALIZED(CLASS_CB(clone->class)))
2093 ADD_FINALIZED_OBJECT(clone);
2095 if(HDR_SPECIAL_OBJ(hdr)) {
2096 SET_SPECIAL_OB(clone);
2098 /* Safety. If it's a classloader, clear native
2099 pointer to class table */
2100 if(IS_CLASS_LOADER(CLASS_CB(clone->class)))
2101 INST_DATA(clone)[ldr_vmdata_offset] = 0;
2104 TRACE_ALLOC("<ALLOC: cloned object @%p clone @%p>\n", ob, clone);
2107 return clone;
2110 uintptr_t getObjectHashcode(Object *ob) {
2111 uintptr_t *hdr_addr = HDR_ADDRESS(ob);
2113 /* If the object has had its hashCode taken and then
2114 it has been moved it will store the original value
2115 (see compactSlideBlock) */
2116 if(HDR_HAS_HASHCODE(*hdr_addr)) {
2117 uintptr_t *hash_addr = (uintptr_t *)((char*)hdr_addr +
2118 HDR_SIZE(*hdr_addr) - OBJECT_GRAIN);
2119 return *hash_addr;
2122 /* Mark that the hashCode has been taken, in case
2123 compaction later moves it */
2124 *hdr_addr |= HASHCODE_TAKEN_BIT;
2125 return (uintptr_t) ob;
2129 /* ------- Routines to retrieve snapshot of heap status -------- */
2131 unsigned long freeHeapMem() {
2132 return heapfree;
2135 unsigned long totalHeapMem() {
2136 return heaplimit-heapbase;
2139 unsigned long maxHeapMem() {
2140 return heapmax-heapbase;
2143 //FIXME: we need the "heapbase" information to identify valid JamVM objects
2144 // in JEM. How can we do this more securely?
2145 void *getHeapBase() {
2146 return heapbase;
2150 /* ------ Allocation routines for internal GC lists ------- */
2152 /* These use mmap to avoid deadlock with threads
2153 suspended while holding the malloc lock */
2155 void *gcMemMalloc(int n) {
2156 int size = n + sizeof(int);
2157 int *mem = mmap(0, size, PROT_READ|PROT_WRITE,
2158 MAP_PRIVATE|MAP_ANON, -1, 0);
2160 if(mem == MAP_FAILED) {
2161 jam_fprintf(stderr, "Mmap failed - aborting VM...\n");
2162 exitVM(1);
2165 *mem++ = size;
2166 return mem;
2169 void *gcMemRealloc(void *addr, int size) {
2170 if(addr == NULL)
2171 return gcMemMalloc(size);
2172 else {
2173 int *mem = addr;
2174 int old_size = *--mem;
2175 int new_size = size + sizeof(int);
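/* Illustrative note (not part of the original source): mmap hands out whole
   pages, so when old_size and new_size divide to the same quotient by the
   page size the existing mapping is simply relabelled with the new size;
   e.g. with 4096-byte pages, growing a 3000-byte allocation to 3500 bytes
   (both including the length word) touches nothing but the stored size. */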
2177 if(old_size/sys_page_size == new_size/sys_page_size) {
2178 *mem = new_size;
2179 return addr;
2180 } else {
2181 int copy_size = new_size > old_size ? old_size : new_size;
2182 void *new_mem = gcMemMalloc(size);
2184 memcpy(new_mem, addr, copy_size - sizeof(int));
2185 munmap(mem, old_size);
2187 return new_mem;
2192 void gcMemFree(void *addr) {
2193 if(addr != NULL) {
2194 int *mem = addr;
2195 int size = *--mem;
2196 munmap(mem, size);
2200 /* Delayed free, for use while in GC to avoid deadlock with
2201 threads suspended while holding the malloc lock. This
2202 simply chains the pointers into a linked-list */
2204 void gcPendingFree(void *addr) {
2205 if(addr != NULL) {
2206 *(void**)addr = pending_free_list;
2207 pending_free_list = addr;
2211 void freePendingFrees() {
2212 while(pending_free_list) {
2213 void *next = *pending_free_list;
2214 sysFree(pending_free_list);
2215 pending_free_list = next;
2220 /* ------ Allocation from system heap ------- */
2222 void *sysMalloc(int size) {
2223 int n = size < sizeof(void*) ? sizeof(void*) : size;
2224 void *mem = malloc(n);
2226 if(mem == NULL && n != 0) {
2227 jam_fprintf(stderr, "Malloc failed - aborting VM...\n");
2228 exitVM(1);
2231 return mem;
2234 void *sysRealloc(void *addr, int size) {
2235 void *mem = realloc(addr, size);
2237 if(mem == NULL) {
2238 jam_fprintf(stderr, "Realloc failed - aborting VM...\n");
2239 exitVM(1);
2242 return mem;
2245 void sysFree(void *addr) {
2246 free(addr);