boehm-gc/test.c
1 /*
2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
6 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
7 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
9 * Permission is hereby granted to use or copy this program
10 * for any purpose, provided the above notices are retained on all copies.
11 * Permission to modify the code and to distribute modified code is granted,
12 * provided the above notices are retained, and a notice that the code was
13  * modified is included with the above copyright notice. */
15 /* An incomplete test for the garbage collector. */
16 /* Some more obscure entry points are not tested at all. */
18 # undef GC_BUILD
20 # if defined(mips) && defined(SYSTYPE_BSD43)
21 /* MIPS RISCOS 4 */
22 # else
23 # include <stdlib.h>
24 # endif
25 # include <stdio.h>
26 # include <assert.h> /* Not normally used, but handy for debugging. */
27 # include "gc.h"
28 # include "gc_typed.h"
29 # include "gc_priv.h" /* For output, locking, and some statistics */
30 # include "gcconfig.h"
32 # ifdef MSWIN32
33 # include <windows.h>
34 # endif
36 # ifdef PCR
37 # include "th/PCR_ThCrSec.h"
38 # include "th/PCR_Th.h"
39 # undef GC_printf0
40 # define GC_printf0 printf
41 # undef GC_printf1
42 # define GC_printf1 printf
43 # endif
45 # ifdef SOLARIS_THREADS
46 # include <thread.h>
47 # include <synch.h>
48 # endif
50 # if defined(IRIX_THREADS) || defined(LINUX_THREADS) || defined(HPUX_THREADS)
51 # include <pthread.h>
52 # endif
54 # ifdef WIN32_THREADS
55 # include <process.h>
56 static CRITICAL_SECTION incr_cs;
57 # endif
59 # ifdef AMIGA
60 long __stack = 200000;
61 # endif
63 # define FAIL (void)abort()
65 /* AT_END may be defined to exercise the interior pointer test */
66 /* if the collector is configured with ALL_INTERIOR_POINTERS. */
67 /* As it stands, this test should succeed with either */
68 /* configuration. In the FIND_LEAK configuration, it should */
69 /* find lots of leaks, since we free almost nothing. */
71 struct SEXPR {
72 struct SEXPR * sexpr_car;
73 struct SEXPR * sexpr_cdr;
77 typedef struct SEXPR * sexpr;
79 # define INT_TO_SEXPR(x) ((sexpr)(unsigned long)(x))
81 # undef nil
82 # define nil (INT_TO_SEXPR(0))
83 # define car(x) ((x) -> sexpr_car)
84 # define cdr(x) ((x) -> sexpr_cdr)
85 # define is_nil(x) ((x) == nil)
88 int extra_count = 0; /* Amount of space wasted in cons node */
90 /* Silly implementation of Lisp cons. Intentionally wastes lots of space */
91 /* to test collector. */
92 sexpr cons (x, y)
93 sexpr x;
94 sexpr y;
96 register sexpr r;
97 register int *p;
98 register int my_extra = extra_count;
100 r = (sexpr) GC_MALLOC_STUBBORN(sizeof(struct SEXPR) + my_extra);
101 if (r == 0) {
102 (void)GC_printf0("Out of memory\n");
103 exit(1);
105 for (p = (int *)r;
106 ((char *)p) < ((char *)r) + my_extra + sizeof(struct SEXPR); p++) {
107 if (*p) {
108 (void)GC_printf1("Found nonzero at 0x%lx - allocator is broken\n",
109 (unsigned long)p);
110 FAIL;
112 *p = 13;
114 # ifdef AT_END
115 r = (sexpr)((char *)r + (my_extra & ~7));
116 # endif
117 r -> sexpr_car = x;
118 r -> sexpr_cdr = y;
119 my_extra++;
120 if ( my_extra >= 5000 ) {
121 extra_count = 0;
122 } else {
123 extra_count = my_extra;
125 GC_END_STUBBORN_CHANGE((char *)r);
126 return(r);
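/* Like cons, but uses plain GC_MALLOC and wastes no extra space. */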
129 sexpr small_cons (x, y)
130 sexpr x;
131 sexpr y;
133 register sexpr r;
135 r = (sexpr) GC_MALLOC(sizeof(struct SEXPR));
136 if (r == 0) {
137 (void)GC_printf0("Out of memory\n");
138 exit(1);
140 r -> sexpr_car = x;
141 r -> sexpr_cdr = y;
142 return(r);
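/* Like small_cons, but the node is uncollectable and the cdr is stored */
/* in complemented (disguised) form. */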
145 sexpr small_cons_uncollectable (x, y)
146 sexpr x;
147 sexpr y;
149 register sexpr r;
151 r = (sexpr) GC_MALLOC_UNCOLLECTABLE(sizeof(struct SEXPR));
152 if (r == 0) {
153 (void)GC_printf0("Out of memory\n");
154 exit(1);
156 r -> sexpr_car = x;
157 r -> sexpr_cdr = (sexpr)(~(unsigned long)y);
158 return(r);
161 #ifdef GC_GCJ_SUPPORT
163 #include "gc_mark.h"
164 #include "dbg_mlc.h"
165 #include "include/gc_gcj.h"
167 /* The following struct emulates the vtable in gcj. */
168 /* This assumes the default value of MARK_DESCR_OFFSET. */
169 struct fake_vtable {
170 void * dummy; /* class pointer in real gcj. */
171 size_t descr;
174 struct fake_vtable gcj_class_struct1 = { 0, sizeof(struct SEXPR)
175 + sizeof(struct fake_vtable *) };
176 /* Length-based descriptor. */
177 struct fake_vtable gcj_class_struct2 =
178 { 0, (3l << (CPP_WORDSZ - 3)) | DS_BITMAP};
179 /* Bitmap based descriptor. */
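/* Mark procedure for the fake gcj objects: pushes the cdr and car of */
/* the emulated cons cell onto the mark stack. */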
181 struct ms_entry * fake_gcj_mark_proc(word * addr,
182 struct ms_entry *mark_stack_ptr,
183 struct ms_entry *mark_stack_limit,
184 word env )
186 sexpr x;
187 if (1 == env) {
188 /* Object allocated with debug allocator. */
189 addr = (word *)USR_PTR_FROM_BASE(addr);
191 x = (sexpr)(addr + 1); /* Skip the vtable pointer. */
192 /* We could just call PUSH_CONTENTS directly here. But any real */
193 /* client would try to filter out the obvious misses. */
194 if (0 != x -> sexpr_cdr) {
195 PUSH_CONTENTS((ptr_t)(x -> sexpr_cdr), mark_stack_ptr,
196 mark_stack_limit, &(x -> sexpr_cdr), exit1);
198 if ((ptr_t)(x -> sexpr_car) > GC_least_plausible_heap_addr) {
199 PUSH_CONTENTS((ptr_t)(x -> sexpr_car), mark_stack_ptr,
200 mark_stack_limit, &(x -> sexpr_car), exit2);
202 return(mark_stack_ptr);
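/* Allocate a gcj-style cons cell, alternating between the length-based */
/* and the bitmap-based descriptor. */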
205 sexpr gcj_cons(x, y)
206 sexpr x;
207 sexpr y;
209 GC_word * r;
210 sexpr result;
211 static int count = 0;
213 if (++count & 1) {
214 r = (GC_word *) GC_GCJ_FAST_MALLOC(3, &gcj_class_struct1);
215 } else {
216 r = (GC_word *) GC_GCJ_MALLOC(sizeof(struct SEXPR)
217 + sizeof(struct fake_vtable*),
218 &gcj_class_struct2);
220 if (r == 0) {
221 (void)GC_printf0("Out of memory\n");
222 exit(1);
224 result = (sexpr)(r + 1);
225 result -> sexpr_car = x;
226 result -> sexpr_cdr = y;
227 return(result);
229 #endif
231 /* Return reverse(x) concatenated with y */
232 sexpr reverse1(x, y)
233 sexpr x, y;
235 if (is_nil(x)) {
236 return(y);
237 } else {
238 return( reverse1(cdr(x), cons(car(x), y)) );
242 sexpr reverse(x)
243 sexpr x;
245 return( reverse1(x, nil) );
248 sexpr ints(low, up)
249 int low, up;
251 if (low > up) {
252 return(nil);
253 } else {
254 return(small_cons(small_cons(INT_TO_SEXPR(low), nil), ints(low+1, up)));
258 #ifdef GC_GCJ_SUPPORT
259 /* Return reverse(x) concatenated with y */
260 sexpr gcj_reverse1(x, y)
261 sexpr x, y;
263 if (is_nil(x)) {
264 return(y);
265 } else {
266 return( gcj_reverse1(cdr(x), gcj_cons(car(x), y)) );
270 sexpr gcj_reverse(x)
271 sexpr x;
273 return( gcj_reverse1(x, nil) );
276 sexpr gcj_ints(low, up)
277 int low, up;
279 if (low > up) {
280 return(nil);
281 } else {
282 return(gcj_cons(gcj_cons(INT_TO_SEXPR(low), nil), gcj_ints(low+1, up)));
285 #endif /* GC_GCJ_SUPPORT */
287 /* To check uncollectable allocation we build lists with disguised cdr */
288 /* pointers, and make sure they don't go away. */
289 sexpr uncollectable_ints(low, up)
290 int low, up;
292 if (low > up) {
293 return(nil);
294 } else {
295 return(small_cons_uncollectable(small_cons(INT_TO_SEXPR(low), nil),
296 uncollectable_ints(low+1, up)));
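/* Verify that list, as built by ints(), holds the integers low through up. */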
300 void check_ints(list, low, up)
301 sexpr list;
302 int low, up;
304 if ((int)(GC_word)(car(car(list))) != low) {
305 (void)GC_printf0(
306 "List reversal produced incorrect list - collector is broken\n");
307 FAIL;
309 if (low == up) {
310 if (cdr(list) != nil) {
311 (void)GC_printf0("List too long - collector is broken\n");
312 FAIL;
314 } else {
315 check_ints(cdr(list), low+1, up);
319 # define UNCOLLECTABLE_CDR(x) (sexpr)(~(unsigned long)(cdr(x)))
321 void check_uncollectable_ints(list, low, up)
322 sexpr list;
323 int low, up;
325 assert(GC_is_marked(list));
326 if ((int)(GC_word)(car(car(list))) != low) {
327 (void)GC_printf0(
328 "Uncollectable list corrupted - collector is broken\n");
329 FAIL;
331 if (low == up) {
332 if (UNCOLLECTABLE_CDR(list) != nil) {
333 (void)GC_printf0("Uncollectable list too long - collector is broken\n");
334 FAIL;
336 } else {
337 check_uncollectable_ints(UNCOLLECTABLE_CDR(list), low+1, up);
341 /* Not used, but useful for debugging: */
342 void print_int_list(x)
343 sexpr x;
345 if (is_nil(x)) {
346 (void)GC_printf0("NIL\n");
347 } else {
348 (void)GC_printf1("(%ld)", (long)(car(car(x))));
349 if (!is_nil(cdr(x))) {
350 (void)GC_printf0(", ");
351 (void)print_int_list(cdr(x));
352 } else {
353 (void)GC_printf0("\n");
358 /* Try to force a to be strangely aligned */
359 struct {
360 char dummy;
361 sexpr aa;
362 } A;
363 #define a A.aa
366 /* A tiny list reversal test to check thread creation. */
368 #ifdef THREADS
370 # ifdef WIN32_THREADS
371 unsigned __stdcall tiny_reverse_test(void * arg)
372 # else
373 void * tiny_reverse_test(void * arg)
374 # endif
376 check_ints(reverse(reverse(ints(1,10))), 1, 10);
377 return 0;
380 # if defined(IRIX_THREADS) || defined(LINUX_THREADS) \
381 || defined(SOLARIS_PTHREADS) || defined(HPUX_THREADS)
382 void fork_a_thread()
384 pthread_t t;
385 int code;
386 if ((code = pthread_create(&t, 0, tiny_reverse_test, 0)) != 0) {
387 (void)GC_printf1("Small thread creation failed %lu\n",
388 (unsigned long)code);
389 FAIL;
391 if ((code = pthread_join(t, 0)) != 0) {
392 (void)GC_printf1("Small thread join failed %lu\n",
393 (unsigned long)code);
394 FAIL;
398 # elif defined(WIN32_THREADS)
399 void fork_a_thread()
401 unsigned thread_id;
402 HANDLE h;
403 h = (HANDLE)_beginthreadex(NULL, 0, tiny_reverse_test,
404 0, 0, &thread_id);
405 if (h == (HANDLE)-1) {
406 (void)GC_printf1("Small thread creation failed %lu\n",
407 (unsigned long)GetLastError());
408 FAIL;
410 if (WaitForSingleObject(h, INFINITE) != WAIT_OBJECT_0) {
411 (void)GC_printf1("Small thread wait failed %lu\n",
412 (unsigned long)GetLastError());
413 FAIL;
417 /* # elif defined(SOLARIS_THREADS) */
419 # else
421 # define fork_a_thread()
423 # endif
425 #else
427 # define fork_a_thread()
429 #endif
432 /* Repeatedly reverse lists built out of very different sized cons cells. */
433 /* Check that we didn't lose anything. */
435 void reverse_test()
437 int i;
438 sexpr b;
439 sexpr c;
440 sexpr d;
441 sexpr e;
442 sexpr *f, *g, *h;
443 # if defined(MSWIN32) || defined(MACOS)
444 /* Win32S only allows 128K stacks */
445 # define BIG 1000
446 # else
447 # if defined PCR
448 /* PCR default stack is 100K. Stack frames are up to 120 bytes. */
449 # define BIG 700
450 # else
451 # define BIG 4500
452 # endif
453 # endif
455 A.dummy = 17;
456 a = ints(1, 49);
457 b = ints(1, 50);
458 c = ints(1, BIG);
459 d = uncollectable_ints(1, 100);
460 e = uncollectable_ints(1, 1);
461 /* Check that realloc updates object descriptors correctly */
462 f = (sexpr *)GC_MALLOC(4 * sizeof(sexpr));
463 f = (sexpr *)GC_REALLOC((GC_PTR)f, 6 * sizeof(sexpr));
464 f[5] = ints(1,17);
465 g = (sexpr *)GC_MALLOC(513 * sizeof(sexpr));
466 g = (sexpr *)GC_REALLOC((GC_PTR)g, 800 * sizeof(sexpr));
467 g[799] = ints(1,18);
468 h = (sexpr *)GC_MALLOC(1025 * sizeof(sexpr));
469 h = (sexpr *)GC_REALLOC((GC_PTR)h, 2000 * sizeof(sexpr));
470 # ifdef GC_GCJ_SUPPORT
471 h[1999] = gcj_ints(1,200);
472 h[1999] = gcj_reverse(h[1999]);
473 # else
474 h[1999] = ints(1,200);
475 # endif
476 /* Try to force some collections and reuse of small list elements */
477 for (i = 0; i < 10; i++) {
478 (void)ints(1, BIG);
480 /* Superficially test interior pointer recognition on stack */
481 c = (sexpr)((char *)c + sizeof(char *));
482 d = (sexpr)((char *)d + sizeof(char *));
484 # ifdef __STDC__
485 GC_FREE((void *)e);
486 # else
487 GC_FREE((char *)e);
488 # endif
489 check_ints(b,1,50);
490 check_ints(a,1,49);
491 for (i = 0; i < 50; i++) {
492 check_ints(b,1,50);
493 b = reverse(reverse(b));
495 check_ints(b,1,50);
496 check_ints(a,1,49);
497 for (i = 0; i < 60; i++) {
498 if (i % 10 == 0) fork_a_thread();
499 /* This maintains the invariant that a always points to a list of */
500 /* 49 integers. Thus this is thread safe without locks, */
501 /* assuming atomic pointer assignments. */
502 a = reverse(reverse(a));
503 # if !defined(AT_END) && !defined(THREADS)
504 /* This is not thread safe, since realloc explicitly deallocates */
505 if (i & 1) {
506 a = (sexpr)GC_REALLOC((GC_PTR)a, 500);
507 } else {
508 a = (sexpr)GC_REALLOC((GC_PTR)a, 8200);
510 # endif
512 check_ints(a,1,49);
513 check_ints(b,1,50);
514 c = (sexpr)((char *)c - sizeof(char *));
515 d = (sexpr)((char *)d - sizeof(char *));
516 check_ints(c,1,BIG);
517 check_uncollectable_ints(d, 1, 100);
518 check_ints(f[5], 1,17);
519 check_ints(g[799], 1,18);
520 # ifdef GC_GCJ_SUPPORT
521 h[1999] = gcj_reverse(h[1999]);
522 # endif
523 check_ints(h[1999], 1,200);
524 # ifndef THREADS
525 a = 0;
526 # endif
527 b = c = 0;
531 /* The rest of this builds balanced binary trees, checks that they don't */
532 /* disappear, and tests finalization. */
534 typedef struct treenode {
535 int level;
536 struct treenode * lchild;
537 struct treenode * rchild;
538 } tn;
540 int finalizable_count = 0;
541 int finalized_count = 0;
542 VOLATILE int dropped_something = 0;
544 # ifdef __STDC__
545 void finalizer(void * obj, void * client_data)
546 # else
547 void finalizer(obj, client_data)
548 char * obj;
549 char * client_data;
550 # endif
552 tn * t = (tn *)obj;
554 # ifdef PCR
555 PCR_ThCrSec_EnterSys();
556 # endif
557 # ifdef SOLARIS_THREADS
558 static mutex_t incr_lock;
559 mutex_lock(&incr_lock);
560 # endif
561 # if defined(IRIX_THREADS) || defined(LINUX_THREADS) || defined(HPUX_THREADS)
562 static pthread_mutex_t incr_lock = PTHREAD_MUTEX_INITIALIZER;
563 pthread_mutex_lock(&incr_lock);
564 # endif
565 # ifdef WIN32_THREADS
566 EnterCriticalSection(&incr_cs);
567 # endif
568 if ((int)(GC_word)client_data != t -> level) {
569 (void)GC_printf0("Wrong finalization data - collector is broken\n");
570 FAIL;
572 finalized_count++;
573 # ifdef PCR
574 PCR_ThCrSec_ExitSys();
575 # endif
576 # ifdef SOLARIS_THREADS
577 mutex_unlock(&incr_lock);
578 # endif
579 # if defined(IRIX_THREADS) || defined(LINUX_THREADS) || defined(HPUX_THREADS)
580 pthread_mutex_unlock(&incr_lock);
581 # endif
582 # ifdef WIN32_THREADS
583 LeaveCriticalSection(&incr_cs);
584 # endif
587 size_t counter = 0;
589 # define MAX_FINALIZED 8000
591 # if !defined(MACOS)
592 GC_FAR GC_word live_indicators[MAX_FINALIZED] = {0};
593 #else
594 /* Too big for THINK_C; have to allocate it dynamically. */
595 GC_word *live_indicators = 0;
596 #endif
598 int live_indicators_count = 0;
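/* Build a balanced binary tree of depth n, occasionally swapping subtrees */
/* and registering finalizers and disappearing links on the nodes. */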
600 tn * mktree(n)
601 int n;
603 tn * result = (tn *)GC_MALLOC(sizeof(tn));
605 #if defined(MACOS)
606 /* get around static data limitations. */
607 if (!live_indicators)
608 live_indicators =
609 (GC_word*)NewPtrClear(MAX_FINALIZED * sizeof(GC_word));
610 if (!live_indicators) {
611 (void)GC_printf0("Out of memory\n");
612 exit(1);
614 #endif
615 if (n == 0) return(0);
616 if (result == 0) {
617 (void)GC_printf0("Out of memory\n");
618 exit(1);
620 result -> level = n;
621 result -> lchild = mktree(n-1);
622 result -> rchild = mktree(n-1);
623 if (counter++ % 17 == 0 && n >= 2) {
624 tn * tmp = result -> lchild -> rchild;
626 result -> lchild -> rchild = result -> rchild -> lchild;
627 result -> rchild -> lchild = tmp;
629 if (counter++ % 119 == 0) {
630 int my_index;
633 # ifdef PCR
634 PCR_ThCrSec_EnterSys();
635 # endif
636 # ifdef SOLARIS_THREADS
637 static mutex_t incr_lock;
638 mutex_lock(&incr_lock);
639 # endif
640 # if defined(IRIX_THREADS) || defined(LINUX_THREADS) \
641 || defined(HPUX_THREADS)
642 static pthread_mutex_t incr_lock = PTHREAD_MUTEX_INITIALIZER;
643 pthread_mutex_lock(&incr_lock);
644 # endif
645 # ifdef WIN32_THREADS
646 EnterCriticalSection(&incr_cs);
647 # endif
648 /* Losing a count here causes an erroneous report of failure. */
649 finalizable_count++;
650 my_index = live_indicators_count++;
651 # ifdef PCR
652 PCR_ThCrSec_ExitSys();
653 # endif
654 # ifdef SOLARIS_THREADS
655 mutex_unlock(&incr_lock);
656 # endif
657 # if defined(IRIX_THREADS) || defined(LINUX_THREADS) \
658 || defined(HPUX_THREADS)
659 pthread_mutex_unlock(&incr_lock);
660 # endif
661 # ifdef WIN32_THREADS
662 LeaveCriticalSection(&incr_cs);
663 # endif
666 GC_REGISTER_FINALIZER((GC_PTR)result, finalizer, (GC_PTR)(GC_word)n,
667 (GC_finalization_proc *)0, (GC_PTR *)0);
668 if (my_index >= MAX_FINALIZED) {
669 GC_printf0("live_indicators overflowed\n");
670 FAIL;
672 live_indicators[my_index] = 13;
673 if (GC_GENERAL_REGISTER_DISAPPEARING_LINK(
674 (GC_PTR *)(&(live_indicators[my_index])),
675 (GC_PTR)result) != 0) {
676 GC_printf0("GC_general_register_disappearing_link failed\n");
677 FAIL;
679 if (GC_unregister_disappearing_link(
680 (GC_PTR *)
681 (&(live_indicators[my_index]))) == 0) {
682 GC_printf0("GC_unregister_disappearing_link failed\n");
683 FAIL;
685 if (GC_GENERAL_REGISTER_DISAPPEARING_LINK(
686 (GC_PTR *)(&(live_indicators[my_index])),
687 (GC_PTR)result) != 0) {
688 GC_printf0("GC_general_register_disappearing_link failed 2\n");
689 FAIL;
692 return(result);
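/* Check that the tree rooted at t still has the expected structure of */
/* depth n, allocating a bit of garbage along the way. */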
695 void chktree(t,n)
696 tn *t;
697 int n;
699 if (n == 0 && t != 0) {
700 (void)GC_printf0("Clobbered a leaf - collector is broken\n");
701 FAIL;
703 if (n == 0) return;
704 if (t -> level != n) {
705 (void)GC_printf1("Lost a node at level %lu - collector is broken\n",
706 (unsigned long)n);
707 FAIL;
709 if (counter++ % 373 == 0) (void) GC_MALLOC(counter%5001);
710 chktree(t -> lchild, n-1);
711 if (counter++ % 73 == 0) (void) GC_MALLOC(counter%373);
712 chktree(t -> rchild, n-1);
715 # if defined(SOLARIS_THREADS) && !defined(_SOLARIS_PTHREADS)
716 thread_key_t fl_key;
718 void * alloc8bytes()
720 # if defined(SMALL_CONFIG) || defined(GC_DEBUG)
721 return(GC_MALLOC(8));
722 # else
723 void ** my_free_list_ptr;
724 void * my_free_list;
726 if (thr_getspecific(fl_key, (void **)(&my_free_list_ptr)) != 0) {
727 (void)GC_printf0("thr_getspecific failed\n");
728 FAIL;
730 if (my_free_list_ptr == 0) {
731 my_free_list_ptr = GC_NEW_UNCOLLECTABLE(void *);
732 if (thr_setspecific(fl_key, my_free_list_ptr) != 0) {
733 (void)GC_printf0("thr_setspecific failed\n");
734 FAIL;
737 my_free_list = *my_free_list_ptr;
738 if (my_free_list == 0) {
739 my_free_list = GC_malloc_many(8);
740 if (my_free_list == 0) {
741 (void)GC_printf0("alloc8bytes out of memory\n");
742 FAIL;
745 *my_free_list_ptr = GC_NEXT(my_free_list);
746 GC_NEXT(my_free_list) = 0;
747 return(my_free_list);
748 # endif
751 #else
753 # if defined(_SOLARIS_PTHREADS) || defined(IRIX_THREADS) \
754 || defined(LINUX_THREADS) || defined(HPUX_THREADS)
755 pthread_key_t fl_key;
757 void * alloc8bytes()
759 # ifdef SMALL_CONFIG
760 return(GC_malloc(8));
761 # else
762 void ** my_free_list_ptr;
763 void * my_free_list;
765 my_free_list_ptr = (void **)pthread_getspecific(fl_key);
766 if (my_free_list_ptr == 0) {
767 my_free_list_ptr = GC_NEW_UNCOLLECTABLE(void *);
768 if (pthread_setspecific(fl_key, my_free_list_ptr) != 0) {
769 (void)GC_printf0("pthread_setspecific failed\n");
770 FAIL;
773 my_free_list = *my_free_list_ptr;
774 if (my_free_list == 0) {
775 my_free_list = GC_malloc_many(8);
776 if (my_free_list == 0) {
777 (void)GC_printf0("alloc8bytes out of memory\n");
778 FAIL;
781 *my_free_list_ptr = GC_NEXT(my_free_list);
782 GC_NEXT(my_free_list) = 0;
783 return(my_free_list);
784 # endif
787 # else
788 # define alloc8bytes() GC_MALLOC_ATOMIC(8)
789 # endif
790 #endif
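/* Allocate n bytes in 8-byte chunks, using per-thread free lists where */
/* the platform supports them. */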
792 void alloc_small(n)
793 int n;
795 register int i;
797 for (i = 0; i < n; i += 8) {
798 if (alloc8bytes() == 0) {
799 (void)GC_printf0("Out of memory\n");
800 FAIL;
805 # if defined(THREADS) && defined(GC_DEBUG)
806 # define TREE_HEIGHT 15
807 # else
808 # define TREE_HEIGHT 16
809 # endif
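/* Build and check trees of various heights, exercising finalization and */
/* lots of small allocations. */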
810 void tree_test()
812 tn * root;
813 register int i;
815 root = mktree(TREE_HEIGHT);
816 alloc_small(5000000);
817 chktree(root, TREE_HEIGHT);
818 if (finalized_count && ! dropped_something) {
819 (void)GC_printf0("Premature finalization - collector is broken\n");
820 FAIL;
822 dropped_something = 1;
823 GC_noop(root); /* Root needs to remain live until */
824 /* dropped_something is set. */
825 root = mktree(TREE_HEIGHT);
826 chktree(root, TREE_HEIGHT);
827 for (i = TREE_HEIGHT; i >= 0; i--) {
828 root = mktree(i);
829 chktree(root, i);
831 alloc_small(5000000);
834 unsigned n_tests = 0;
836 GC_word bm_huge[10] = {
837 0xffffffff,
838 0xffffffff,
839 0xffffffff,
840 0xffffffff,
841 0xffffffff,
842 0xffffffff,
843 0xffffffff,
844 0xffffffff,
845 0xffffffff,
846 0x00ffffff,
850 /* A very simple test of explicitly typed allocation */
851 void typed_test()
853 GC_word * old, * new;
854 GC_word bm3 = 0x3;
855 GC_word bm2 = 0x2;
856 GC_word bm_large = 0xf7ff7fff;
857 GC_descr d1 = GC_make_descriptor(&bm3, 2);
858 GC_descr d2 = GC_make_descriptor(&bm2, 2);
859 # ifndef LINT
860 GC_descr dummy = GC_make_descriptor(&bm_large, 32);
861 # endif
862 GC_descr d3 = GC_make_descriptor(&bm_large, 32);
863 GC_descr d4 = GC_make_descriptor(bm_huge, 320);
864 GC_word * x = (GC_word *)GC_malloc_explicitly_typed(2000, d4);
865 register int i;
867 old = 0;
868 for (i = 0; i < 4000; i++) {
869 new = (GC_word *) GC_malloc_explicitly_typed(4 * sizeof(GC_word), d1);
870 if (0 != new[0] || 0 != new[1]) {
871 GC_printf0("Bad initialization by GC_malloc_explicitly_typed\n");
872 FAIL;
874 new[0] = 17;
875 new[1] = (GC_word)old;
876 old = new;
877 new = (GC_word *) GC_malloc_explicitly_typed(4 * sizeof(GC_word), d2);
878 new[0] = 17;
879 new[1] = (GC_word)old;
880 old = new;
881 new = (GC_word *) GC_malloc_explicitly_typed(33 * sizeof(GC_word), d3);
882 new[0] = 17;
883 new[1] = (GC_word)old;
884 old = new;
885 new = (GC_word *) GC_calloc_explicitly_typed(4, 2 * sizeof(GC_word),
886 d1);
887 new[0] = 17;
888 new[1] = (GC_word)old;
889 old = new;
890 if (i & 0xff) {
891 new = (GC_word *) GC_calloc_explicitly_typed(7, 3 * sizeof(GC_word),
892 d2);
893 } else {
894 new = (GC_word *) GC_calloc_explicitly_typed(1001,
895 3 * sizeof(GC_word),
896 d2);
897 if (0 != new[0] || 0 != new[1]) {
898 GC_printf0("Bad initialization by GC_calloc_explicitly_typed\n");
899 FAIL;
902 new[0] = 17;
903 new[1] = (GC_word)old;
904 old = new;
906 for (i = 0; i < 20000; i++) {
907 if (new[0] != 17) {
908 (void)GC_printf1("typed alloc failed at %lu\n",
909 (unsigned long)i);
910 FAIL;
912 new[0] = 0;
913 old = new;
914 new = (GC_word *)(old[1]);
916 GC_gcollect();
917 GC_noop(x);
920 int fail_count = 0;
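/* Print procedure installed below to count the expected GC_is_visible / */
/* GC_is_valid_displacement failures. */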
922 #ifndef __STDC__
923 /*ARGSUSED*/
924 void fail_proc1(x)
925 GC_PTR x;
927 fail_count++;
930 #else
932 /*ARGSUSED*/
933 void fail_proc1(GC_PTR x)
935 fail_count++;
938 #endif /* __STDC__ */
940 #ifdef THREADS
941 # define TEST_FAIL_COUNT(n) 1
942 #else
943 # define TEST_FAIL_COUNT(n) (fail_count >= (n))
944 #endif
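/* Exercise the basic interface (GC_size, GC_base, displacement and */
/* visibility checks), then run the list, typed-allocation, and tree tests. */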
946 void run_one_test()
948 char *x;
949 # ifdef LINT
950 char *y = 0;
951 # else
952 char *y = (char *)(size_t)fail_proc1;
953 # endif
954 DCL_LOCK_STATE;
956 # ifdef FIND_LEAK
957 (void)GC_printf0(
958 "This test program is not designed for leak detection mode\n");
959 (void)GC_printf0("Expect lots of problems.\n");
960 # endif
961 if (GC_size(GC_malloc(7)) != 8 &&
962 GC_size(GC_malloc(7)) != MIN_WORDS * sizeof(GC_word)
963 || GC_size(GC_malloc(15)) != 16) {
964 (void)GC_printf0("GC_size produced unexpected results\n");
965 FAIL;
967 if (GC_size(GC_malloc(0)) != MIN_WORDS * sizeof(GC_word)) {
968 (void)GC_printf0("GC_malloc(0) failed\n");
969 FAIL;
971 if (GC_size(GC_malloc_uncollectable(0)) != MIN_WORDS * sizeof(GC_word)) {
972 (void)GC_printf0("GC_malloc_uncollectable(0) failed\n");
973 FAIL;
975 GC_FREE(0);
976 GC_is_valid_displacement_print_proc = fail_proc1;
977 GC_is_visible_print_proc = fail_proc1;
978 x = GC_malloc(16);
979 if (GC_base(x + 13) != x) {
980 (void)GC_printf0("GC_base(heap ptr) produced incorrect result\n");
981 FAIL;
983 # ifndef PCR
984 if (GC_base(y) != 0) {
985 (void)GC_printf0("GC_base(fn_ptr) produced incorrect result\n");
986 FAIL;
988 # endif
989 if (GC_same_obj(x+5, x) != x + 5) {
990 (void)GC_printf0("GC_same_obj produced incorrect result\n");
991 FAIL;
993 if (GC_is_visible(y) != y || GC_is_visible(x) != x) {
994 (void)GC_printf0("GC_is_visible produced incorrect result\n");
995 FAIL;
997 if (!TEST_FAIL_COUNT(1)) {
998 # if !(defined(RS6000) || defined(POWERPC) || defined(IA64))
999 /* On RS6000s, function pointers point to a descriptor in the */
1000 /* data segment, so there should have been no failures. */
1001 (void)GC_printf0("GC_is_visible produced wrong failure indication\n");
1002 FAIL;
1003 # endif
1005 if (GC_is_valid_displacement(y) != y
1006 || GC_is_valid_displacement(x) != x
1007 || GC_is_valid_displacement(x + 3) != x + 3) {
1008 (void)GC_printf0(
1009 "GC_is_valid_displacement produced incorrect result\n");
1010 FAIL;
1012 # ifndef ALL_INTERIOR_POINTERS
1013 # if defined(RS6000) || defined(POWERPC)
1014 if (!TEST_FAIL_COUNT(1)) {
1015 # else
1016 if (!TEST_FAIL_COUNT(2)) {
1017 # endif
1018 (void)GC_printf0("GC_is_valid_displacement produced wrong failure indication\n");
1019 FAIL;
1021 # endif
1022 /* Test floating point alignment */
1023 *(double *)GC_MALLOC(sizeof(double)) = 1.0;
1024 *(double *)GC_MALLOC(sizeof(double)) = 1.0;
1025 # ifdef GC_GCJ_SUPPORT
1026 GC_REGISTER_DISPLACEMENT(sizeof(struct fake_vtable *));
1027 GC_init_gcj_malloc(0, (void *)fake_gcj_mark_proc);
1028 # endif
1029 /* Repeated list reversal test. */
1030 reverse_test();
1031 # ifdef PRINTSTATS
1032 GC_printf0("-------------Finished reverse_test\n");
1033 # endif
1034 typed_test();
1035 # ifdef PRINTSTATS
1036 GC_printf0("-------------Finished typed_test\n");
1037 # endif
1038 tree_test();
1039 LOCK();
1040 n_tests++;
1041 UNLOCK();
1042 /* GC_printf1("Finished %x\n", pthread_self()); */
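/* Force final collections, then sanity-check the finalization counts, */
/* total allocation, and final heap size. */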
1045 void check_heap_stats()
1047 unsigned long max_heap_sz;
1048 register int i;
1049 int still_live;
1050 int late_finalize_count = 0;
1052 if (sizeof(char *) > 4) {
1053 max_heap_sz = 15000000;
1054 } else {
1055 max_heap_sz = 11000000;
1057 # ifdef GC_DEBUG
1058 max_heap_sz *= 2;
1059 # ifdef SPARC
1060 max_heap_sz *= 2;
1061 # endif
1062 # endif
1063 /* Garbage collect repeatedly so that all inaccessible objects */
1064 /* can be finalized. */
1065 while (GC_collect_a_little()) { }
1066 for (i = 0; i < 16; i++) {
1067 GC_gcollect();
1068 late_finalize_count += GC_invoke_finalizers();
1070 (void)GC_printf1("Completed %lu tests\n", (unsigned long)n_tests);
1071 (void)GC_printf2("Finalized %lu/%lu objects - ",
1072 (unsigned long)finalized_count,
1073 (unsigned long)finalizable_count);
1074 # ifdef FINALIZE_ON_DEMAND
1075 if (finalized_count != late_finalize_count) {
1076 (void)GC_printf0("Demand finalization error\n");
1077 FAIL;
1079 # endif
1080 if (finalized_count > finalizable_count
1081 || finalized_count < finalizable_count/2) {
1082 (void)GC_printf0("finalization is probably broken\n");
1083 FAIL;
1084 } else {
1085 (void)GC_printf0("finalization is probably ok\n");
1087 still_live = 0;
1088 for (i = 0; i < MAX_FINALIZED; i++) {
1089 if (live_indicators[i] != 0) {
1090 still_live++;
1093 i = finalizable_count - finalized_count - still_live;
1094 if (0 != i) {
1095 (void)GC_printf2
1096 ("%lu disappearing links remain and %lu more objects were not finalized\n",
1097 (unsigned long) still_live, (unsigned long)i);
1098 if (i > 10) {
1099 GC_printf0("\tVery suspicious!\n");
1100 } else {
1101 GC_printf0("\tSlightly suspicious, but probably OK.\n");
1104 (void)GC_printf1("Total number of bytes allocated is %lu\n",
1105 (unsigned long)
1106 WORDS_TO_BYTES(GC_words_allocd + GC_words_allocd_before_gc));
1107 (void)GC_printf1("Final heap size is %lu bytes\n",
1108 (unsigned long)GC_get_heap_size());
1109 if (WORDS_TO_BYTES(GC_words_allocd + GC_words_allocd_before_gc)
1110 < 33500000*n_tests) {
1111 (void)GC_printf0("Incorrect execution - missed some allocations\n");
1112 FAIL;
1114 if (GC_get_heap_size() > max_heap_sz*n_tests) {
1115 (void)GC_printf0("Unexpected heap growth - collector may be broken\n");
1116 FAIL;
1118 (void)GC_printf0("Collector appears to work\n");
1121 #if defined(MACOS)
1122 void SetMinimumStack(long minSize)
1124 long newApplLimit;
1126 if (minSize > LMGetDefltStack())
1128 newApplLimit = (long) GetApplLimit()
1129 - (minSize - LMGetDefltStack());
1130 SetApplLimit((Ptr) newApplLimit);
1131 MaxApplZone();
1135 #define cMinStackSpace (512L * 1024L)
1137 #endif
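/* Warning handler that treats any collector warning as a test failure. */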
1139 #ifdef __STDC__
1140 void warn_proc(char *msg, GC_word p)
1141 #else
1142 void warn_proc(msg, p)
1143 char *msg;
1144 GC_word p;
1145 #endif
1147 GC_printf1(msg, (unsigned long)p);
1148 FAIL;
1152 #if !defined(PCR) && !defined(SOLARIS_THREADS) && !defined(WIN32_THREADS) \
1153 && !defined(IRIX_THREADS) && !defined(LINUX_THREADS) \
1154 && !defined(HPUX_THREADS) || defined(LINT)
1155 #if defined(MSWIN32) && !defined(__MINGW32__)
1156 int APIENTRY WinMain(HINSTANCE instance, HINSTANCE prev, LPSTR cmd, int n)
1157 #else
1158 int main()
1159 #endif
1161 # if defined(DJGPP)
1162 int dummy;
1163 # endif
1164 n_tests = 0;
1166 # if defined(DJGPP)
1167 /* No good way to determine stack base from library; do it */
1168 /* manually on this platform. */
1169 GC_stackbottom = (GC_PTR)(&dummy);
1170 # endif
1171 # if defined(MACOS)
1172 /* Make sure we have lots and lots of stack space. */
1173 SetMinimumStack(cMinStackSpace);
1174 /* Cheat and let stdio initialize toolbox for us. */
1175 printf("Testing GC Macintosh port.\n");
1176 # endif
1177 GC_INIT(); /* Only needed if gc is dynamic library. */
1178 (void) GC_set_warn_proc(warn_proc);
1179 # if defined(MPROTECT_VDB) || defined(PROC_VDB)
1180 GC_enable_incremental();
1181 (void) GC_printf0("Switched to incremental mode\n");
1182 # if defined(MPROTECT_VDB)
1183 (void)GC_printf0("Emulating dirty bits with mprotect/signals\n");
1184 # else
1185 (void)GC_printf0("Reading dirty bits from /proc\n");
1186 # endif
1187 # endif
1188 run_one_test();
1189 check_heap_stats();
1190 (void)fflush(stdout);
1191 # ifdef LINT
1192 /* Entry points we should be testing, but aren't. */
1193 /* Some can be tested by defining GC_DEBUG at the top of this file */
1194 /* This is a bit SunOS4 specific. */
1195 GC_noop(GC_expand_hp, GC_add_roots, GC_clear_roots,
1196 GC_register_disappearing_link,
1197 GC_register_finalizer_ignore_self,
1198 GC_debug_register_displacement,
1199 GC_print_obj, GC_debug_change_stubborn,
1200 GC_debug_end_stubborn_change, GC_debug_malloc_uncollectable,
1201 GC_debug_free, GC_debug_realloc, GC_generic_malloc_words_small,
1202 GC_init, GC_make_closure, GC_debug_invoke_finalizer,
1203 GC_page_was_ever_dirty, GC_is_fresh,
1204 GC_malloc_ignore_off_page, GC_malloc_atomic_ignore_off_page,
1205 GC_set_max_heap_size, GC_get_bytes_since_gc,
1206 GC_pre_incr, GC_post_incr);
1207 # endif
1208 # ifdef MSWIN32
1209 GC_win32_free_heap();
1210 # endif
1211 return(0);
1213 # endif
1215 #ifdef WIN32_THREADS
1217 unsigned __stdcall thr_run_one_test(void *arg)
1219 run_one_test();
1220 return 0;
1223 #define NTEST 2
1225 int APIENTRY WinMain(HINSTANCE instance, HINSTANCE prev, LPSTR cmd, int n)
1227 # if NTEST > 0
1228 HANDLE h[NTEST];
1229 # endif
1230 int i;
1231 unsigned thread_id;
1232 # if 0
1233 GC_enable_incremental();
1234 # endif
1235 InitializeCriticalSection(&incr_cs);
1236 (void) GC_set_warn_proc(warn_proc);
1237 # if NTEST > 0
1238 for (i = 0; i < NTEST; i++) {
1239 h[i] = (HANDLE)_beginthreadex(NULL, 0, thr_run_one_test, 0, 0, &thread_id);
1240 if (h[i] == (HANDLE)-1) {
1241 (void)GC_printf1("Thread creation failed %lu\n", (unsigned long)GetLastError());
1242 FAIL;
1245 # endif /* NTEST > 0 */
1246 run_one_test();
1247 # if NTEST > 0
1248 for (i = 0; i < NTEST; i++) {
1249 if (WaitForSingleObject(h[i], INFINITE) != WAIT_OBJECT_0) {
1250 (void)GC_printf1("Thread wait failed %lu\n", (unsigned long)GetLastError());
1251 FAIL;
1254 # endif /* NTEST > 0 */
1255 check_heap_stats();
1256 (void)fflush(stdout);
1257 return(0);
1260 #endif /* WIN32_THREADS */
1263 #ifdef PCR
1264 test()
1266 PCR_Th_T * th1;
1267 PCR_Th_T * th2;
1268 int code;
1270 n_tests = 0;
1271 /* GC_enable_incremental(); */
1272 (void) GC_set_warn_proc(warn_proc);
1273 th1 = PCR_Th_Fork(run_one_test, 0);
1274 th2 = PCR_Th_Fork(run_one_test, 0);
1275 run_one_test();
1276 if (PCR_Th_T_Join(th1, &code, NIL, PCR_allSigsBlocked, PCR_waitForever)
1277 != PCR_ERes_okay || code != 0) {
1278 (void)GC_printf0("Thread 1 failed\n");
1280 if (PCR_Th_T_Join(th2, &code, NIL, PCR_allSigsBlocked, PCR_waitForever)
1281 != PCR_ERes_okay || code != 0) {
1282 (void)GC_printf0("Thread 2 failed\n");
1284 check_heap_stats();
1285 (void)fflush(stdout);
1286 return(0);
1288 #endif
1290 #if defined(SOLARIS_THREADS) || defined(IRIX_THREADS) \
1291 || defined(HPUX_THREADS) || defined(LINUX_THREADS)
1292 void * thr_run_one_test(void * arg)
1294 run_one_test();
1295 return(0);
1298 #ifdef GC_DEBUG
1299 # define GC_free GC_debug_free
1300 #endif
1302 #ifdef SOLARIS_THREADS
1303 main()
1305 thread_t th1;
1306 thread_t th2;
1307 int code;
1309 n_tests = 0;
1310 GC_INIT(); /* Only needed if gc is dynamic library. */
1311 GC_enable_incremental();
1312 (void) GC_set_warn_proc(warn_proc);
1313 if (thr_keycreate(&fl_key, GC_free) != 0) {
1314 (void)GC_printf1("Key creation failed %lu\n", (unsigned long)code);
1315 FAIL;
1317 if ((code = thr_create(0, 1024*1024, thr_run_one_test, 0, 0, &th1)) != 0) {
1318 (void)GC_printf1("Thread 1 creation failed %lu\n", (unsigned long)code);
1319 FAIL;
1321 if ((code = thr_create(0, 1024*1024, thr_run_one_test, 0, THR_NEW_LWP, &th2)) != 0) {
1322 (void)GC_printf1("Thread 2 creation failed %lu\n", (unsigned long)code);
1323 FAIL;
1325 run_one_test();
1326 if ((code = thr_join(th1, 0, 0)) != 0) {
1327 (void)GC_printf1("Thread 1 failed %lu\n", (unsigned long)code);
1328 FAIL;
1330 if (thr_join(th2, 0, 0) != 0) {
1331 (void)GC_printf1("Thread 2 failed %lu\n", (unsigned long)code);
1332 FAIL;
1334 check_heap_stats();
1335 (void)fflush(stdout);
1336 return(0);
1338 #else /* pthreads */
1339 main()
1341 pthread_t th1;
1342 pthread_t th2;
1343 pthread_attr_t attr;
1344 int code;
1346 # ifdef IRIX_THREADS
1347 /* Force a larger stack to be preallocated, */
1348 /* since the initial one can't always grow later. */
1349 *((volatile char *)&code - 1024*1024) = 0; /* Require 1 Mb */
1350 # endif /* IRIX_THREADS */
1351 pthread_attr_init(&attr);
1352 # if defined(IRIX_THREADS) || defined(HPUX_THREADS)
1353 pthread_attr_setstacksize(&attr, 1000000);
1354 # endif
1355 n_tests = 0;
1356 # ifdef MPROTECT_VDB
1357 GC_enable_incremental();
1358 (void) GC_printf0("Switched to incremental mode\n");
1359 (void) GC_printf0("Emulating dirty bits with mprotect/signals\n");
1360 # endif
1361 (void) GC_set_warn_proc(warn_proc);
1362 if ((code = pthread_key_create(&fl_key, 0)) != 0) {
1363 (void)GC_printf1("Key creation failed %lu\n", (unsigned long)code);
1364 FAIL;
1366 if ((code = pthread_create(&th1, &attr, thr_run_one_test, 0)) != 0) {
1367 (void)GC_printf1("Thread 1 creation failed %lu\n", (unsigned long)code);
1368 FAIL;
1370 if ((code = pthread_create(&th2, &attr, thr_run_one_test, 0)) != 0) {
1371 (void)GC_printf1("Thread 2 creation failed %lu\n", (unsigned long)code);
1372 FAIL;
1374 run_one_test();
1375 if ((code = pthread_join(th1, 0)) != 0) {
1376 (void)GC_printf1("Thread 1 failed %lu\n", (unsigned long)code);
1377 FAIL;
1379 if (pthread_join(th2, 0) != 0) {
1380 (void)GC_printf1("Thread 2 failed %lu\n", (unsigned long)code);
1381 FAIL;
1383 check_heap_stats();
1384 (void)fflush(stdout);
1385 pthread_attr_destroy(&attr);
1386 GC_printf1("Completed %d collections\n", GC_gc_no);
1387 return(0);
1389 #endif /* pthreads */
1390 #endif /* SOLARIS_THREADS || IRIX_THREADS || LINUX_THREADS || HPUX_THREADS */