/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/* An incomplete test for the garbage collector.          */
/* Some more obscure entry points are not tested at all.  */

# if defined(mips) && defined(SYSTYPE_BSD43)

# include <assert.h>   /* Not normally used, but handy for debugging. */

# include "gc_typed.h"
# include "gc_priv.h"  /* For output, locking, and some statistics */
# include "gcconfig.h"

# include "th/PCR_ThCrSec.h"
# include "th/PCR_Th.h"

# define GC_printf0 printf
# define GC_printf1 printf
# ifdef SOLARIS_THREADS

# if defined(IRIX_THREADS) || defined(LINUX_THREADS) || defined(HPUX_THREADS)

static CRITICAL_SECTION incr_cs;

long __stack = 200000;

# define FAIL (void)abort()
/* AT_END may be defined to exercise the interior pointer test      */
/* if the collector is configured with ALL_INTERIOR_POINTERS.       */
/* As it stands, this test should succeed with either               */
/* configuration.  In the FIND_LEAK configuration, it should        */
/* find lots of leaks, since we free almost nothing.                */
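/* A minimal sketch (not part of the original test) of what the interior-   */
/* pointer configuration changes: with ALL_INTERIOR_POINTERS a pointer into */
/* the middle of an object keeps the whole object alive, while without it   */
/* only pointers to the base (or to registered displacements) are           */
/* recognized.  The function and variable names below are hypothetical.     */
#if 0
static void interior_pointer_sketch(void)
{
    char * base = (char *)GC_MALLOC(100);
    char * mid = base + 50;        /* interior pointer only */

    base = 0;
    GC_gcollect();
    /* With ALL_INTERIOR_POINTERS the object is still reachable through  */
    /* mid; otherwise the collector is free to reclaim it.               */
    GC_noop(mid);                  /* keep mid live up to this point */
}
#endif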
struct SEXPR {
    struct SEXPR * sexpr_car;
    struct SEXPR * sexpr_cdr;
};

typedef struct SEXPR * sexpr;

# define INT_TO_SEXPR(x) ((sexpr)(unsigned long)(x))

# define nil (INT_TO_SEXPR(0))
# define car(x) ((x) -> sexpr_car)
# define cdr(x) ((x) -> sexpr_cdr)
# define is_nil(x) ((x) == nil)
int extra_count = 0;        /* Amount of space wasted in cons node */

/* Silly implementation of Lisp cons.  Intentionally wastes lots of space */
/* to test collector.                                                     */
    register int my_extra = extra_count;

    r = (sexpr) GC_MALLOC_STUBBORN(sizeof(struct SEXPR) + my_extra);

        (void)GC_printf0("Out of memory\n");

         ((char *)p) < ((char *)r) + my_extra + sizeof(struct SEXPR); p++) {

            (void)GC_printf1("Found nonzero at 0x%lx - allocator is broken\n",

    r = (sexpr)((char *)r + (my_extra & ~7));

    if (my_extra >= 5000) {

        extra_count = my_extra;

    GC_END_STUBBORN_CHANGE((char *)r);
sexpr small_cons (x, y)

    r = (sexpr) GC_MALLOC(sizeof(struct SEXPR));

        (void)GC_printf0("Out of memory\n");

sexpr small_cons_uncollectable (x, y)

    r = (sexpr) GC_MALLOC_UNCOLLECTABLE(sizeof(struct SEXPR));

        (void)GC_printf0("Out of memory\n");

    r -> sexpr_cdr = (sexpr)(~(unsigned long)y);
#ifdef GC_GCJ_SUPPORT

#include "include/gc_gcj.h"

/* The following struct emulates the vtable in gcj.            */
/* This assumes the default value of MARK_DESCR_OFFSET.        */

    void * dummy;       /* class pointer in real gcj. */

struct fake_vtable gcj_class_struct1 = { 0, sizeof(struct SEXPR)
                                            + sizeof(struct fake_vtable *) };
                        /* length based descriptor. */
struct fake_vtable gcj_class_struct2 =
                        { 0, (3l << (CPP_WORDSZ - 3)) | DS_BITMAP };
                        /* Bitmap based descriptor. */
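/* Added explanation (not in the original source), roughly speaking: the    */
/* second vtable word is the GC mark descriptor.  A length-based descriptor */
/* simply tells the collector how many bytes of the object to scan          */
/* conservatively for pointers, while a bitmap-based descriptor uses its    */
/* high-order bits to say which of the first few words may contain          */
/* pointers, so the collector can skip the rest.                            */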
struct ms_entry * fake_gcj_mark_proc(word * addr,
                                     struct ms_entry *mark_stack_ptr,
                                     struct ms_entry *mark_stack_limit,

        /* Object allocated with debug allocator. */
        addr = (word *)USR_PTR_FROM_BASE(addr);

    x = (sexpr)(addr + 1);      /* Skip the vtable pointer. */
    /* We could just call PUSH_CONTENTS directly here.  But any real */
    /* client would try to filter out the obvious misses.            */
    if (0 != x -> sexpr_cdr) {
        PUSH_CONTENTS((ptr_t)(x -> sexpr_cdr), mark_stack_ptr,
                      mark_stack_limit, &(x -> sexpr_cdr), exit1);

    if ((ptr_t)(x -> sexpr_car) > GC_least_plausible_heap_addr) {
        PUSH_CONTENTS((ptr_t)(x -> sexpr_car), mark_stack_ptr,
                      mark_stack_limit, &(x -> sexpr_car), exit2);

    return(mark_stack_ptr);
    static int count = 0;

        r = (GC_word *) GC_GCJ_FAST_MALLOC(3, &gcj_class_struct1);

        r = (GC_word *) GC_GCJ_MALLOC(sizeof(struct SEXPR)
                                      + sizeof(struct fake_vtable *),

        (void)GC_printf0("Out of memory\n");

    result = (sexpr)(r + 1);
    result -> sexpr_car = x;
    result -> sexpr_cdr = y;
/* Return reverse(x) concatenated with y */

    return( reverse1(cdr(x), cons(car(x), y)) );

    return( reverse1(x, nil) );

    return(small_cons(small_cons(INT_TO_SEXPR(low), nil), ints(low+1, up)));
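/* Added note (not in the original source): ints(1, 3), for example, builds */
/* the list ((1) (2) (3)) -- each element is itself a one-cell list whose   */
/* car is the small integer disguised as a pointer via INT_TO_SEXPR, which  */
/* is why check_ints() compares car(car(list)) against the expected value.  */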
#ifdef GC_GCJ_SUPPORT
/* Return reverse(x) concatenated with y */
sexpr gcj_reverse1(x, y)

    return( gcj_reverse1(cdr(x), gcj_cons(car(x), y)) );

    return( gcj_reverse1(x, nil) );

sexpr gcj_ints(low, up)

    return(gcj_cons(gcj_cons(INT_TO_SEXPR(low), nil), gcj_ints(low+1, up)));

#endif /* GC_GCJ_SUPPORT */
/* To check uncollectable allocation we build lists with disguised cdr */
/* pointers, and make sure they don't go away.                         */
sexpr uncollectable_ints(low, up)

    return(small_cons_uncollectable(small_cons(INT_TO_SEXPR(low), nil),
                                    uncollectable_ints(low+1, up)));
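/* Added note (not in the original source): the uncollectable nodes store   */
/* their cdr bit-complemented (see small_cons_uncollectable), so the        */
/* conservative scan never sees those links as pointers.  The list survives */
/* only because every node was allocated with GC_MALLOC_UNCOLLECTABLE;      */
/* check_uncollectable_ints() recovers the links with UNCOLLECTABLE_CDR.    */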
void check_ints(list, low, up)

    if ((int)(GC_word)(car(car(list))) != low) {

            "List reversal produced incorrect list - collector is broken\n");

    if (cdr(list) != nil) {
        (void)GC_printf0("List too long - collector is broken\n");

    check_ints(cdr(list), low+1, up);
# define UNCOLLECTABLE_CDR(x) (sexpr)(~(unsigned long)(cdr(x)))

void check_uncollectable_ints(list, low, up)

    assert(GC_is_marked(list));
    if ((int)(GC_word)(car(car(list))) != low) {

            "Uncollectable list corrupted - collector is broken\n");

    if (UNCOLLECTABLE_CDR(list) != nil) {
        (void)GC_printf0("Uncollectable list too long - collector is broken\n");

    check_uncollectable_ints(UNCOLLECTABLE_CDR(list), low+1, up);
/* Not used, but useful for debugging: */
void print_int_list(x)

        (void)GC_printf0("NIL\n");

        (void)GC_printf1("(%ld)", (long)(car(car(x))));
        if (!is_nil(cdr(x))) {
            (void)GC_printf0(", ");
            (void)print_int_list(cdr(x));

        (void)GC_printf0("\n");

/* Try to force a to be strangely aligned */
/* A tiny list reversal test to check thread creation. */
# ifdef WIN32_THREADS
    unsigned __stdcall tiny_reverse_test(void * arg)

    void * tiny_reverse_test(void * arg)

    check_ints(reverse(reverse(ints(1,10))), 1, 10);

# if defined(IRIX_THREADS) || defined(LINUX_THREADS) \
     || defined(SOLARIS_PTHREADS) || defined(HPUX_THREADS)

    if ((code = pthread_create(&t, 0, tiny_reverse_test, 0)) != 0) {
        (void)GC_printf1("Small thread creation failed %lu\n",
                         (unsigned long)code);

    if ((code = pthread_join(t, 0)) != 0) {
        (void)GC_printf1("Small thread join failed %lu\n",
                         (unsigned long)code);

# elif defined(WIN32_THREADS)

    h = (HANDLE)_beginthreadex(NULL, 0, tiny_reverse_test,

    if (h == (HANDLE)-1) {
        (void)GC_printf1("Small thread creation failed %lu\n",
                         (unsigned long)GetLastError());

    if (WaitForSingleObject(h, INFINITE) != WAIT_OBJECT_0) {
        (void)GC_printf1("Small thread wait failed %lu\n",
                         (unsigned long)GetLastError());

/* # elif defined(SOLARIS_THREADS) */

# define fork_a_thread()

# define fork_a_thread()
/*
 * Repeatedly reverse lists built out of very different sized cons cells.
 * Check that we didn't lose anything.
 */

# if defined(MSWIN32) || defined(MACOS)
    /* Win32S only allows 128K stacks */

    /* PCR default stack is 100K.  Stack frames are up to 120 bytes. */
    d = uncollectable_ints(1, 100);
    e = uncollectable_ints(1, 1);
    /* Check that realloc updates object descriptors correctly */
    f = (sexpr *)GC_MALLOC(4 * sizeof(sexpr));
    f = (sexpr *)GC_REALLOC((GC_PTR)f, 6 * sizeof(sexpr));

    g = (sexpr *)GC_MALLOC(513 * sizeof(sexpr));
    g = (sexpr *)GC_REALLOC((GC_PTR)g, 800 * sizeof(sexpr));

    h = (sexpr *)GC_MALLOC(1025 * sizeof(sexpr));
    h = (sexpr *)GC_REALLOC((GC_PTR)h, 2000 * sizeof(sexpr));
#   ifdef GC_GCJ_SUPPORT
      h[1999] = gcj_ints(1,200);
      h[1999] = gcj_reverse(h[1999]);

      h[1999] = ints(1,200);
    /* Try to force some collections and reuse of small list elements */
    for (i = 0; i < 10; i++) {

    /* Superficially test interior pointer recognition on stack */
    c = (sexpr)((char *)c + sizeof(char *));
    d = (sexpr)((char *)d + sizeof(char *));

    for (i = 0; i < 50; i++) {

        b = reverse(reverse(b));

    for (i = 0; i < 60; i++) {
        if (i % 10 == 0) fork_a_thread();
        /* This maintains the invariant that a always points to a list */
        /* of 49 integers.  Thus this is thread safe without locks,    */
        /* assuming atomic pointer assignments.                        */
        a = reverse(reverse(a));
#       if !defined(AT_END) && !defined(THREADS)
          /* This is not thread safe, since realloc explicitly deallocates */
          a = (sexpr)GC_REALLOC((GC_PTR)a, 500);

          a = (sexpr)GC_REALLOC((GC_PTR)a, 8200);

    c = (sexpr)((char *)c - sizeof(char *));
    d = (sexpr)((char *)d - sizeof(char *));

    check_uncollectable_ints(d, 1, 100);
    check_ints(f[5], 1,17);
    check_ints(g[799], 1,18);
#   ifdef GC_GCJ_SUPPORT
      h[1999] = gcj_reverse(h[1999]);

    check_ints(h[1999], 1,200);
/*
 * The rest of this builds balanced binary trees, checks that they don't
 * disappear, and tests finalization.
 */
typedef struct treenode {
    int level;
    struct treenode * lchild;
    struct treenode * rchild;
} tn;

int finalizable_count = 0;
int finalized_count = 0;
VOLATILE int dropped_something = 0;
void finalizer(void * obj, void * client_data)

void finalizer(obj, client_data)

    PCR_ThCrSec_EnterSys();

#   ifdef SOLARIS_THREADS
      static mutex_t incr_lock;
      mutex_lock(&incr_lock);

#   if defined(IRIX_THREADS) || defined(LINUX_THREADS) || defined(HPUX_THREADS)
      static pthread_mutex_t incr_lock = PTHREAD_MUTEX_INITIALIZER;
      pthread_mutex_lock(&incr_lock);

#   ifdef WIN32_THREADS
      EnterCriticalSection(&incr_cs);

    if ((int)(GC_word)client_data != t -> level) {
        (void)GC_printf0("Wrong finalization data - collector is broken\n");

    PCR_ThCrSec_ExitSys();

#   ifdef SOLARIS_THREADS
      mutex_unlock(&incr_lock);

#   if defined(IRIX_THREADS) || defined(LINUX_THREADS) || defined(HPUX_THREADS)
      pthread_mutex_unlock(&incr_lock);

#   ifdef WIN32_THREADS
      LeaveCriticalSection(&incr_cs);
# define MAX_FINALIZED 8000

  GC_FAR GC_word live_indicators[MAX_FINALIZED] = {0};

  /* Too big for THINK_C; we have to allocate it dynamically. */
  GC_word *live_indicators = 0;

int live_indicators_count = 0;
    tn * result = (tn *)GC_MALLOC(sizeof(tn));

    /* get around static data limitations. */
    if (!live_indicators)

            (GC_word *)NewPtrClear(MAX_FINALIZED * sizeof(GC_word));
    if (!live_indicators) {
        (void)GC_printf0("Out of memory\n");

    if (n == 0) return(0);

        (void)GC_printf0("Out of memory\n");

    result -> lchild = mktree(n-1);
    result -> rchild = mktree(n-1);
    if (counter++ % 17 == 0 && n >= 2) {
        tn * tmp = result -> lchild -> rchild;

        result -> lchild -> rchild = result -> rchild -> lchild;
        result -> rchild -> lchild = tmp;

    if (counter++ % 119 == 0) {
        PCR_ThCrSec_EnterSys();

#       ifdef SOLARIS_THREADS
          static mutex_t incr_lock;
          mutex_lock(&incr_lock);

#       if defined(IRIX_THREADS) || defined(LINUX_THREADS) \
           || defined(HPUX_THREADS)
          static pthread_mutex_t incr_lock = PTHREAD_MUTEX_INITIALIZER;
          pthread_mutex_lock(&incr_lock);

#       ifdef WIN32_THREADS
          EnterCriticalSection(&incr_cs);

        /* Losing a count here causes an erroneous report of failure. */

        my_index = live_indicators_count++;

        PCR_ThCrSec_ExitSys();

#       ifdef SOLARIS_THREADS
          mutex_unlock(&incr_lock);

#       if defined(IRIX_THREADS) || defined(LINUX_THREADS) \
           || defined(HPUX_THREADS)
          pthread_mutex_unlock(&incr_lock);

#       ifdef WIN32_THREADS
          LeaveCriticalSection(&incr_cs);
        GC_REGISTER_FINALIZER((GC_PTR)result, finalizer, (GC_PTR)(GC_word)n,
                              (GC_finalization_proc *)0, (GC_PTR *)0);
        if (my_index >= MAX_FINALIZED) {
            GC_printf0("live_indicators overflowed\n");

        live_indicators[my_index] = 13;
        if (GC_GENERAL_REGISTER_DISAPPEARING_LINK(
                (GC_PTR *)(&(live_indicators[my_index])),
                (GC_PTR)result) != 0) {
            GC_printf0("GC_general_register_disappearing_link failed\n");

        if (GC_unregister_disappearing_link(
                (&(live_indicators[my_index]))) == 0) {
            GC_printf0("GC_unregister_disappearing_link failed\n");

        if (GC_GENERAL_REGISTER_DISAPPEARING_LINK(
                (GC_PTR *)(&(live_indicators[my_index])),
                (GC_PTR)result) != 0) {
            GC_printf0("GC_general_register_disappearing_link failed 2\n");
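        /* Added note (not in the original source): each registered        */
        /* disappearing link arranges for live_indicators[my_index] to be  */
        /* cleared automatically when the tree node it refers to is        */
        /* reclaimed.  check_heap_stats() later counts the slots that are  */
        /* still nonzero to estimate how many finalizable objects remain   */
        /* live, and compares that with the finalization counts.           */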
    if (n == 0 && t != 0) {
        (void)GC_printf0("Clobbered a leaf - collector is broken\n");

    if (t -> level != n) {
        (void)GC_printf1("Lost a node at level %lu - collector is broken\n",

    if (counter++ % 373 == 0) (void) GC_MALLOC(counter%5001);
    chktree(t -> lchild, n-1);
    if (counter++ % 73 == 0) (void) GC_MALLOC(counter%373);
    chktree(t -> rchild, n-1);
# if defined(SOLARIS_THREADS) && !defined(_SOLARIS_PTHREADS)

#   if defined(SMALL_CONFIG) || defined(GC_DEBUG)
      return(GC_MALLOC(8));

      void ** my_free_list_ptr;

      if (thr_getspecific(fl_key, (void **)(&my_free_list_ptr)) != 0) {
          (void)GC_printf0("thr_getspecific failed\n");

      if (my_free_list_ptr == 0) {
          my_free_list_ptr = GC_NEW_UNCOLLECTABLE(void *);
          if (thr_setspecific(fl_key, my_free_list_ptr) != 0) {
              (void)GC_printf0("thr_setspecific failed\n");

      my_free_list = *my_free_list_ptr;
      if (my_free_list == 0) {
          my_free_list = GC_malloc_many(8);
          if (my_free_list == 0) {
              (void)GC_printf0("alloc8bytes out of memory\n");

      *my_free_list_ptr = GC_NEXT(my_free_list);
      GC_NEXT(my_free_list) = 0;
      return(my_free_list);
# if defined(_SOLARIS_PTHREADS) || defined(IRIX_THREADS) \
     || defined(LINUX_THREADS) || defined(HPUX_THREADS)
pthread_key_t fl_key;

      return(GC_malloc(8));

      void ** my_free_list_ptr;

      my_free_list_ptr = (void **)pthread_getspecific(fl_key);
      if (my_free_list_ptr == 0) {
          my_free_list_ptr = GC_NEW_UNCOLLECTABLE(void *);
          if (pthread_setspecific(fl_key, my_free_list_ptr) != 0) {
              (void)GC_printf0("pthread_setspecific failed\n");

      my_free_list = *my_free_list_ptr;
      if (my_free_list == 0) {
          my_free_list = GC_malloc_many(8);
          if (my_free_list == 0) {
              (void)GC_printf0("alloc8bytes out of memory\n");

      *my_free_list_ptr = GC_NEXT(my_free_list);
      GC_NEXT(my_free_list) = 0;
      return(my_free_list);

# define alloc8bytes() GC_MALLOC_ATOMIC(8)
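/* Added note (not in the original source): in the threaded configurations  */
/* above, alloc8bytes() keeps a per-thread free list in thread-specific     */
/* data.  GC_malloc_many(8) hands back a whole chain of 8-byte objects      */
/* linked through GC_NEXT(), so most allocations simply pop the thread's    */
/* own list and only occasionally take the allocation lock to refill it.    */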
    for (i = 0; i < n; i += 8) {
        if (alloc8bytes() == 0) {
            (void)GC_printf0("Out of memory\n");

# if defined(THREADS) && defined(GC_DEBUG)
#   define TREE_HEIGHT 15
# else
#   define TREE_HEIGHT 16
    root = mktree(TREE_HEIGHT);
    alloc_small(5000000);
    chktree(root, TREE_HEIGHT);
    if (finalized_count && ! dropped_something) {
        (void)GC_printf0("Premature finalization - collector is broken\n");

    dropped_something = 1;
    GC_noop(root);      /* Root needs to remain live until    */
                        /* dropped_something is set.          */
    root = mktree(TREE_HEIGHT);
    chktree(root, TREE_HEIGHT);
    for (i = TREE_HEIGHT; i >= 0; i--) {

    alloc_small(5000000);
unsigned n_tests = 0;

GC_word bm_huge[10] = {
/* A very simple test of explicitly typed allocation */

    GC_word * old, * new;

    GC_word bm_large = 0xf7ff7fff;
    GC_descr d1 = GC_make_descriptor(&bm3, 2);
    GC_descr d2 = GC_make_descriptor(&bm2, 2);

    GC_descr dummy = GC_make_descriptor(&bm_large, 32);

    GC_descr d3 = GC_make_descriptor(&bm_large, 32);
    GC_descr d4 = GC_make_descriptor(bm_huge, 320);
    GC_word * x = (GC_word *)GC_malloc_explicitly_typed(2000, d4);
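    /* A minimal sketch (not part of the original test) of how a typed-     */
    /* allocation descriptor is built: GC_make_descriptor() takes a bitmap  */
    /* in which bit i set means "word i of the object may hold a pointer",  */
    /* and the resulting descriptor is passed to                            */
    /* GC_malloc_explicitly_typed() so the collector scans only those       */
    /* words.  The struct and variable names below are hypothetical.        */
#if 0
    {
        struct two_words { void * link; GC_word tag; };
        GC_word two_words_bitmap = 0x1;   /* only word 0 holds a pointer */
        GC_descr two_words_descr = GC_make_descriptor(&two_words_bitmap, 2);
        struct two_words * p = (struct two_words *)
            GC_malloc_explicitly_typed(sizeof(struct two_words),
                                       two_words_descr);
    }
#endif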
    for (i = 0; i < 4000; i++) {
        new = (GC_word *) GC_malloc_explicitly_typed(4 * sizeof(GC_word), d1);
        if (0 != new[0] || 0 != new[1]) {
            GC_printf0("Bad initialization by GC_malloc_explicitly_typed\n");

        new[1] = (GC_word)old;

        new = (GC_word *) GC_malloc_explicitly_typed(4 * sizeof(GC_word), d2);

        new[1] = (GC_word)old;

        new = (GC_word *) GC_malloc_explicitly_typed(33 * sizeof(GC_word), d3);

        new[1] = (GC_word)old;

        new = (GC_word *) GC_calloc_explicitly_typed(4, 2 * sizeof(GC_word),

        new[1] = (GC_word)old;

        new = (GC_word *) GC_calloc_explicitly_typed(7, 3 * sizeof(GC_word),

        new = (GC_word *) GC_calloc_explicitly_typed(1001,

        if (0 != new[0] || 0 != new[1]) {
            GC_printf0("Bad initialization by GC_malloc_explicitly_typed\n");

        new[1] = (GC_word)old;

    for (i = 0; i < 20000; i++) {

            (void)GC_printf1("typed alloc failed at %lu\n",

        new = (GC_word *)(old[1]);
void fail_proc1(GC_PTR x)

#endif /* __STDC__ */

#   define TEST_FAIL_COUNT(n) 1

#   define TEST_FAIL_COUNT(n) (fail_count >= (n))

    char *y = (char *)(size_t)fail_proc1;

            "This test program is not designed for leak detection mode\n");
        (void)GC_printf0("Expect lots of problems.\n");
    if (GC_size(GC_malloc(7)) != 8 &&
        GC_size(GC_malloc(7)) != MIN_WORDS * sizeof(GC_word)
        || GC_size(GC_malloc(15)) != 16) {
        (void)GC_printf0("GC_size produced unexpected results\n");

    if (GC_size(GC_malloc(0)) != MIN_WORDS * sizeof(GC_word)) {
        (void)GC_printf0("GC_malloc(0) failed\n");

    if (GC_size(GC_malloc_uncollectable(0)) != MIN_WORDS * sizeof(GC_word)) {
        (void)GC_printf0("GC_malloc_uncollectable(0) failed\n");
    GC_is_valid_displacement_print_proc = fail_proc1;
    GC_is_visible_print_proc = fail_proc1;

    if (GC_base(x + 13) != x) {
        (void)GC_printf0("GC_base(heap ptr) produced incorrect result\n");

    if (GC_base(y) != 0) {
        (void)GC_printf0("GC_base(fn_ptr) produced incorrect result\n");

    if (GC_same_obj(x+5, x) != x + 5) {
        (void)GC_printf0("GC_same_obj produced incorrect result\n");

    if (GC_is_visible(y) != y || GC_is_visible(x) != x) {
        (void)GC_printf0("GC_is_visible produced incorrect result\n");

    if (!TEST_FAIL_COUNT(1)) {
#       if !(defined(RS6000) || defined(POWERPC) || defined(IA64))
          /* On RS6000s function pointers point to a descriptor in the */
          /* data segment, so there should have been no failures.      */
          (void)GC_printf0("GC_is_visible produced wrong failure indication\n");
    if (GC_is_valid_displacement(y) != y
        || GC_is_valid_displacement(x) != x
        || GC_is_valid_displacement(x + 3) != x + 3) {

            "GC_is_valid_displacement produced incorrect result\n");

#   ifndef ALL_INTERIOR_POINTERS
#     if defined(RS6000) || defined(POWERPC)
        if (!TEST_FAIL_COUNT(1)) {

        if (!TEST_FAIL_COUNT(2)) {

          (void)GC_printf0("GC_is_valid_displacement produced wrong failure indication\n");

    /* Test floating point alignment */
    *(double *)GC_MALLOC(sizeof(double)) = 1.0;
    *(double *)GC_MALLOC(sizeof(double)) = 1.0;
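    /* Added note (not in the original source): the two stores above are a  */
    /* quick sanity check that GC_MALLOC returns memory sufficiently        */
    /* aligned for a double; on platforms with strict alignment rules a     */
    /* misaligned allocator would fault or corrupt the value here.          */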
#   ifdef GC_GCJ_SUPPORT
      GC_REGISTER_DISPLACEMENT(sizeof(struct fake_vtable *));
      GC_init_gcj_malloc(0, (void *)fake_gcj_mark_proc);

    /* Repeated list reversal test. */

    GC_printf0("-------------Finished reverse_test\n");

    GC_printf0("-------------Finished typed_test\n");

    /* GC_printf1("Finished %x\n", pthread_self()); */
void check_heap_stats()

    unsigned long max_heap_sz;

    int late_finalize_count = 0;

    if (sizeof(char *) > 4) {
        max_heap_sz = 15000000;

        max_heap_sz = 11000000;
    /* Garbage collect repeatedly so that all inaccessible objects */
    /* can be finalized.                                           */
    while (GC_collect_a_little()) { }
    for (i = 0; i < 16; i++) {

        late_finalize_count += GC_invoke_finalizers();

    (void)GC_printf1("Completed %lu tests\n", (unsigned long)n_tests);
    (void)GC_printf2("Finalized %lu/%lu objects - ",
                     (unsigned long)finalized_count,
                     (unsigned long)finalizable_count);
#   ifdef FINALIZE_ON_DEMAND
      if (finalized_count != late_finalize_count) {
          (void)GC_printf0("Demand finalization error\n");

    if (finalized_count > finalizable_count
        || finalized_count < finalizable_count/2) {
        (void)GC_printf0("finalization is probably broken\n");

        (void)GC_printf0("finalization is probably ok\n");

    for (i = 0; i < MAX_FINALIZED; i++) {
        if (live_indicators[i] != 0) {

    i = finalizable_count - finalized_count - still_live;

        ("%lu disappearing links remain and %lu more objects were not finalized\n",
         (unsigned long) still_live, (unsigned long)i);

        GC_printf0("\tVery suspicious!\n");

        GC_printf0("\tSlightly suspicious, but probably OK.\n");
    (void)GC_printf1("Total number of bytes allocated is %lu\n",
                     WORDS_TO_BYTES(GC_words_allocd + GC_words_allocd_before_gc));
    (void)GC_printf1("Final heap size is %lu bytes\n",
                     (unsigned long)GC_get_heap_size());
    if (WORDS_TO_BYTES(GC_words_allocd + GC_words_allocd_before_gc)
        < 33500000*n_tests) {
        (void)GC_printf0("Incorrect execution - missed some allocations\n");

    if (GC_get_heap_size() > max_heap_sz*n_tests) {
        (void)GC_printf0("Unexpected heap growth - collector may be broken\n");

    (void)GC_printf0("Collector appears to work\n");
void SetMinimumStack(long minSize)

    if (minSize > LMGetDefltStack())

        newApplLimit = (long) GetApplLimit()
                       - (minSize - LMGetDefltStack());
        SetApplLimit((Ptr) newApplLimit);

#define cMinStackSpace (512L * 1024L)
void warn_proc(char *msg, GC_word p)

void warn_proc(msg, p)

    GC_printf1(msg, (unsigned long)p);
#if !defined(PCR) && !defined(SOLARIS_THREADS) && !defined(WIN32_THREADS) \
    && !defined(IRIX_THREADS) && !defined(LINUX_THREADS) \
    && !defined(HPUX_THREADS) || defined(LINT)
#if defined(MSWIN32) && !defined(__MINGW32__)
  int APIENTRY WinMain(HINSTANCE instance, HINSTANCE prev, LPSTR cmd, int n)

    /* No good way to determine stack base from library; do it */
    /* manually on this platform.                              */
    GC_stackbottom = (GC_PTR)(&dummy);

    /* Make sure we have lots and lots of stack space. */
    SetMinimumStack(cMinStackSpace);
    /* Cheat and let stdio initialize toolbox for us. */
    printf("Testing GC Macintosh port.\n");

    GC_INIT();  /* Only needed if gc is a dynamic library. */
    (void) GC_set_warn_proc(warn_proc);
#   if defined(MPROTECT_VDB) || defined(PROC_VDB)
      GC_enable_incremental();
      (void) GC_printf0("Switched to incremental mode\n");
#     if defined(MPROTECT_VDB)
        (void)GC_printf0("Emulating dirty bits with mprotect/signals\n");

        (void)GC_printf0("Reading dirty bits from /proc\n");

    (void)fflush(stdout);
    /* Entry points we should be testing, but aren't.                    */
    /* Some can be tested by defining GC_DEBUG at the top of this file.  */
    /* This is a bit SunOS4 specific.                                    */
    GC_noop(GC_expand_hp, GC_add_roots, GC_clear_roots,
            GC_register_disappearing_link,
            GC_register_finalizer_ignore_self,
            GC_debug_register_displacement,
            GC_print_obj, GC_debug_change_stubborn,
            GC_debug_end_stubborn_change, GC_debug_malloc_uncollectable,
            GC_debug_free, GC_debug_realloc, GC_generic_malloc_words_small,
            GC_init, GC_make_closure, GC_debug_invoke_finalizer,
            GC_page_was_ever_dirty, GC_is_fresh,
            GC_malloc_ignore_off_page, GC_malloc_atomic_ignore_off_page,
            GC_set_max_heap_size, GC_get_bytes_since_gc,
            GC_pre_incr, GC_post_incr);

    GC_win32_free_heap();
#ifdef WIN32_THREADS

unsigned __stdcall thr_run_one_test(void *arg)

int APIENTRY WinMain(HINSTANCE instance, HINSTANCE prev, LPSTR cmd, int n)

    GC_enable_incremental();

    InitializeCriticalSection(&incr_cs);
    (void) GC_set_warn_proc(warn_proc);

    for (i = 0; i < NTEST; i++) {
        h[i] = (HANDLE)_beginthreadex(NULL, 0, thr_run_one_test, 0, 0, &thread_id);
        if (h[i] == (HANDLE)-1) {
            (void)GC_printf1("Thread creation failed %lu\n",
                             (unsigned long)GetLastError());

#   endif /* NTEST > 0 */

    for (i = 0; i < NTEST; i++) {
        if (WaitForSingleObject(h[i], INFINITE) != WAIT_OBJECT_0) {
            (void)GC_printf1("Thread wait failed %lu\n",
                             (unsigned long)GetLastError());

#   endif /* NTEST > 0 */

    (void)fflush(stdout);

#endif /* WIN32_THREADS */
    /* GC_enable_incremental(); */
    (void) GC_set_warn_proc(warn_proc);
    th1 = PCR_Th_Fork(run_one_test, 0);
    th2 = PCR_Th_Fork(run_one_test, 0);

    if (PCR_Th_T_Join(th1, &code, NIL, PCR_allSigsBlocked, PCR_waitForever)
        != PCR_ERes_okay || code != 0) {
        (void)GC_printf0("Thread 1 failed\n");

    if (PCR_Th_T_Join(th2, &code, NIL, PCR_allSigsBlocked, PCR_waitForever)
        != PCR_ERes_okay || code != 0) {
        (void)GC_printf0("Thread 2 failed\n");

    (void)fflush(stdout);
#if defined(SOLARIS_THREADS) || defined(IRIX_THREADS) \
    || defined(HPUX_THREADS) || defined(LINUX_THREADS)
void * thr_run_one_test(void * arg)

#   define GC_free GC_debug_free

#ifdef SOLARIS_THREADS

    GC_INIT();  /* Only needed if gc is a dynamic library. */
    GC_enable_incremental();
    (void) GC_set_warn_proc(warn_proc);
    if (thr_keycreate(&fl_key, GC_free) != 0) {
        (void)GC_printf1("Key creation failed %lu\n", (unsigned long)code);

    if ((code = thr_create(0, 1024*1024, thr_run_one_test, 0, 0, &th1)) != 0) {
        (void)GC_printf1("Thread 1 creation failed %lu\n", (unsigned long)code);

    if ((code = thr_create(0, 1024*1024, thr_run_one_test, 0, THR_NEW_LWP, &th2)) != 0) {
        (void)GC_printf1("Thread 2 creation failed %lu\n", (unsigned long)code);

    if ((code = thr_join(th1, 0, 0)) != 0) {
        (void)GC_printf1("Thread 1 failed %lu\n", (unsigned long)code);

    if (thr_join(th2, 0, 0) != 0) {
        (void)GC_printf1("Thread 2 failed %lu\n", (unsigned long)code);

    (void)fflush(stdout);
#else /* pthreads */

    pthread_attr_t attr;

#   ifdef IRIX_THREADS
      /* Force a larger stack to be preallocated,        */
      /* since the initial one can't always grow later.  */
      *((volatile char *)&code - 1024*1024) = 0;  /* Require 1 Mb */
#   endif /* IRIX_THREADS */
    pthread_attr_init(&attr);
#   if defined(IRIX_THREADS) || defined(HPUX_THREADS)
      pthread_attr_setstacksize(&attr, 1000000);

#   ifdef MPROTECT_VDB
      GC_enable_incremental();
      (void) GC_printf0("Switched to incremental mode\n");
      (void) GC_printf0("Emulating dirty bits with mprotect/signals\n");

    (void) GC_set_warn_proc(warn_proc);
    if ((code = pthread_key_create(&fl_key, 0)) != 0) {
        (void)GC_printf1("Key creation failed %lu\n", (unsigned long)code);

    if ((code = pthread_create(&th1, &attr, thr_run_one_test, 0)) != 0) {
        (void)GC_printf1("Thread 1 creation failed %lu\n", (unsigned long)code);

    if ((code = pthread_create(&th2, &attr, thr_run_one_test, 0)) != 0) {
        (void)GC_printf1("Thread 2 creation failed %lu\n", (unsigned long)code);

    if ((code = pthread_join(th1, 0)) != 0) {
        (void)GC_printf1("Thread 1 failed %lu\n", (unsigned long)code);

    if (pthread_join(th2, 0) != 0) {
        (void)GC_printf1("Thread 2 failed %lu\n", (unsigned long)code);

    (void)fflush(stdout);
    pthread_attr_destroy(&attr);
    GC_printf1("Completed %lu collections\n", (unsigned long)GC_gc_no);

#endif /* pthreads */
#endif /* SOLARIS_THREADS || IRIX_THREADS || LINUX_THREADS || HPUX_THREADS */