/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

/* An incomplete test for the garbage collector.          */
/* Some more obscure entry points are not tested at all.  */
# if defined(mips) && defined(SYSTYPE_BSD43)
    /* MIPS RISCOS 4 */
# else
#   include <stdlib.h>
# endif
# include <stdio.h>
# include <assert.h>        /* Not normally used, but handy for debugging. */
# include "gc.h"
# include "gc_typed.h"
# include "gc_priv.h"       /* For output, locking, and some statistics */
# include "gcconfig.h"

# ifdef MSWIN32
#   include <windows.h>
# endif

# ifdef PCR
#   include "th/PCR_ThCrSec.h"
#   include "th/PCR_Th.h"
#   undef GC_printf0
#   define GC_printf0 printf
#   undef GC_printf1
#   define GC_printf1 printf
# endif

# ifdef SOLARIS_THREADS
#   include <thread.h>
#   include <synch.h>
# endif

# if defined(IRIX_THREADS) || defined(LINUX_THREADS) || defined(HPUX_THREADS)
#   include <pthread.h>
# endif

# ifdef WIN32_THREADS
#   include <process.h>
    static CRITICAL_SECTION incr_cs;
# endif

# ifdef AMIGA
   long __stack = 200000;
# endif

# define FAIL (void)abort()
/* AT_END may be defined to exercise the interior pointer test       */
/* if the collector is configured with ALL_INTERIOR_POINTERS.        */
/* As it stands, this test should succeed with either                */
/* configuration.  In the FIND_LEAK configuration, it should         */
/* find lots of leaks, since we free almost nothing.                 */
struct SEXPR {
    struct SEXPR * sexpr_car;
    struct SEXPR * sexpr_cdr;
};

typedef struct SEXPR * sexpr;

# define INT_TO_SEXPR(x) ((sexpr)(unsigned long)(x))

# undef nil
# define nil (INT_TO_SEXPR(0))
# define car(x) ((x) -> sexpr_car)
# define cdr(x) ((x) -> sexpr_cdr)
# define is_nil(x) ((x) == nil)
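
/* Note (descriptive): small integers are stored directly in car fields via */
/* INT_TO_SEXPR, i.e. as non-pointer values disguised as sexpr pointers,    */
/* and nil is just the disguised integer 0.                                 */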
int extra_count = 0;        /* Amount of space wasted in cons node */

/* Silly implementation of Lisp cons. Intentionally wastes lots of space */
/* to test collector.                                                    */
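/* Each cons cell is padded with extra_count bytes (the whole object is     */
/* checked to be zero-filled by the allocator, then scribbled on) and is    */
/* allocated as a "stubborn" object, so GC_END_STUBBORN_CHANGE is called    */
/* once it has been initialized.                                            */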
sexpr cons (x, y)
sexpr x;
sexpr y;
{
    register sexpr r;
    register int *p;
    register int my_extra = extra_count;

    r = (sexpr) GC_MALLOC_STUBBORN(sizeof(struct SEXPR) + my_extra);
    if (r == 0) {
        (void)GC_printf0("Out of memory\n");
        exit(1);
    }
    for (p = (int *)r;
         ((char *)p) < ((char *)r) + my_extra + sizeof(struct SEXPR); p++) {
        if (*p) {
            (void)GC_printf1("Found nonzero at 0x%lx - allocator is broken\n",
                             (unsigned long)p);
            FAIL;
        }
        *p = 13;
    }
#   ifdef AT_END
        r = (sexpr)((char *)r + (my_extra & ~7));
#   endif
    r -> sexpr_car = x;
    r -> sexpr_cdr = y;
    my_extra++;
    if ( my_extra >= 5000 ) {
        extra_count = 0;
    } else {
        extra_count = my_extra;
    }
    GC_END_STUBBORN_CHANGE((char *)r);
    return(r);
}
sexpr small_cons (x, y)
sexpr x;
sexpr y;
{
    register sexpr r;

    r = (sexpr) GC_MALLOC(sizeof(struct SEXPR));
    if (r == 0) {
        (void)GC_printf0("Out of memory\n");
        exit(1);
    }
    r -> sexpr_car = x;
    r -> sexpr_cdr = y;
    return(r);
}

sexpr small_cons_uncollectable (x, y)
sexpr x;
sexpr y;
{
    register sexpr r;

    r = (sexpr) GC_MALLOC_UNCOLLECTABLE(sizeof(struct SEXPR));
    assert(GC_is_marked(r));
    if (r == 0) {
        (void)GC_printf0("Out of memory\n");
        exit(1);
    }
    r -> sexpr_car = x;
    r -> sexpr_cdr = (sexpr)(~(unsigned long)y);
    return(r);
}
/* Return reverse(x) concatenated with y */
sexpr reverse1(x, y)
sexpr x, y;
{
    if (is_nil(x)) {
        return(y);
    } else {
        return( reverse1(cdr(x), cons(car(x), y)) );
    }
}

sexpr reverse(x)
sexpr x;
{
    return( reverse1(x, nil) );
}

sexpr ints(low, up)
int low, up;
{
    if (low > up) {
        return(nil);
    } else {
        return(small_cons(small_cons(INT_TO_SEXPR(low), nil), ints(low+1, up)));
    }
}
/* To check uncollectable allocation we build lists with disguised cdr  */
/* pointers, and make sure they don't go away.                          */
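/* The cdr of each uncollectable node is stored bit-complemented (see       */
/* small_cons_uncollectable and UNCOLLECTABLE_CDR), so the collector cannot */
/* trace it; the nodes survive only because uncollectable objects are never */
/* reclaimed automatically.                                                 */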
sexpr uncollectable_ints(low, up)
int low, up;
{
    if (low > up) {
        return(nil);
    } else {
        return(small_cons_uncollectable(small_cons(INT_TO_SEXPR(low), nil),
               uncollectable_ints(low+1, up)));
    }
}

void check_ints(list, low, up)
sexpr list;
int low, up;
{
    if ((int)(GC_word)(car(car(list))) != low) {
        (void)GC_printf0(
           "List reversal produced incorrect list - collector is broken\n");
        FAIL;
    }
    if (low == up) {
        if (cdr(list) != nil) {
            (void)GC_printf0("List too long - collector is broken\n");
            FAIL;
        }
    } else {
        check_ints(cdr(list), low+1, up);
    }
}
# define UNCOLLECTABLE_CDR(x) (sexpr)(~(unsigned long)(cdr(x)))

void check_uncollectable_ints(list, low, up)
sexpr list;
int low, up;
{
    assert(GC_is_marked(list));
    if ((int)(GC_word)(car(car(list))) != low) {
        (void)GC_printf0(
           "Uncollectable list corrupted - collector is broken\n");
        FAIL;
    }
    if (low == up) {
        if (UNCOLLECTABLE_CDR(list) != nil) {
            (void)GC_printf0("Uncollectable list too long - collector is broken\n");
            FAIL;
        }
    } else {
        check_uncollectable_ints(UNCOLLECTABLE_CDR(list), low+1, up);
    }
}
/* Not used, but useful for debugging: */
void print_int_list(x)
sexpr x;
{
    if (is_nil(x)) {
        (void)GC_printf0("NIL\n");
    } else {
        (void)GC_printf1("(%ld)", (long)(car(car(x))));
        if (!is_nil(cdr(x))) {
            (void)GC_printf0(", ");
            (void)print_int_list(cdr(x));
        } else {
            (void)GC_printf0("\n");
        }
    }
}
/* Try to force a to be strangely aligned */
struct {
  char dummy;
  sexpr aa;
} A;
#define a A.aa
/*
 * A tiny list reversal test to check thread creation.
 */
#ifdef THREADS

# ifdef WIN32_THREADS
    unsigned __stdcall tiny_reverse_test(void * arg)
# else
    void * tiny_reverse_test(void * arg)
# endif
{
    check_ints(reverse(reverse(ints(1,10))), 1, 10);
    return 0;
}

# if defined(IRIX_THREADS) || defined(LINUX_THREADS) \
     || defined(SOLARIS_PTHREADS) || defined(HPUX_THREADS)
    void fork_a_thread()
    {
      pthread_t t;
      int code;
      if ((code = pthread_create(&t, 0, tiny_reverse_test, 0)) != 0) {
        (void)GC_printf1("Small thread creation failed %lu\n",
                         (unsigned long)code);
        FAIL;
      }
      if ((code = pthread_join(t, 0)) != 0) {
        (void)GC_printf1("Small thread join failed %lu\n",
                         (unsigned long)code);
        FAIL;
      }
    }

# elif defined(WIN32_THREADS)
    void fork_a_thread()
    {
        unsigned thread_id;
        HANDLE h;
        h = (HANDLE)_beginthreadex(NULL, 0, tiny_reverse_test,
                                   0, 0, &thread_id);
        if (h == (HANDLE)-1) {
            (void)GC_printf1("Small thread creation failed %lu\n",
                             (unsigned long)GetLastError());
            FAIL;
        }
        if (WaitForSingleObject(h, INFINITE) != WAIT_OBJECT_0) {
            (void)GC_printf1("Small thread wait failed %lu\n",
                             (unsigned long)GetLastError());
            FAIL;
        }
    }

/* # elif defined(SOLARIS_THREADS) */

# else

#   define fork_a_thread()

# endif

#else

# define fork_a_thread()

#endif
/*
 * Repeatedly reverse lists built out of very different sized cons cells.
 * Check that we didn't lose anything.
 */
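/* Besides plain reversal, this also exercises GC_REALLOC (the arrays f, g  */
/* and h below), interior pointers into the lists c and d, and explicit     */
/* deallocation of an uncollectable list via GC_FREE.                       */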
void reverse_test()
{
    int i;
    sexpr b;
    sexpr c;
    sexpr d;
    sexpr e;
    sexpr *f, *g, *h;
#   if defined(MSWIN32) || defined(MACOS)
      /* Win32S only allows 128K stacks */
#     define BIG 1000
#   else
#     if defined PCR
        /* PCR default stack is 100K.  Stack frames are up to 120 bytes. */
#       define BIG 700
#     else
#       define BIG 4500
#     endif
#   endif

    A.dummy = 17;
    a = ints(1, 49);
    b = ints(1, 50);
    c = ints(1, BIG);
    d = uncollectable_ints(1, 100);
    e = uncollectable_ints(1, 1);
    /* Check that realloc updates object descriptors correctly */
    f = (sexpr *)GC_MALLOC(4 * sizeof(sexpr));
    f = (sexpr *)GC_REALLOC((GC_PTR)f, 6 * sizeof(sexpr));
    f[5] = ints(1,17);
    g = (sexpr *)GC_MALLOC(513 * sizeof(sexpr));
    g = (sexpr *)GC_REALLOC((GC_PTR)g, 800 * sizeof(sexpr));
    g[799] = ints(1,18);
    h = (sexpr *)GC_MALLOC(1025 * sizeof(sexpr));
    h = (sexpr *)GC_REALLOC((GC_PTR)h, 2000 * sizeof(sexpr));
    h[1999] = ints(1,19);
    /* Try to force some collections and reuse of small list elements */
    for (i = 0; i < 10; i++) {
        (void)ints(1, BIG);
    }
    /* Superficially test interior pointer recognition on stack */
    c = (sexpr)((char *)c + sizeof(char *));
    d = (sexpr)((char *)d + sizeof(char *));

#   ifdef __STDC__
        GC_FREE((void *)e);
#   else
        GC_FREE((char *)e);
#   endif
    check_ints(b,1,50);
    check_ints(a,1,49);
    for (i = 0; i < 50; i++) {
        check_ints(b,1,50);
        b = reverse(reverse(b));
    }
    check_ints(b,1,50);
    check_ints(a,1,49);
    for (i = 0; i < 60; i++) {
        if (i % 10 == 0) fork_a_thread();
        /* This maintains the invariant that a always points to a list of */
        /* 49 integers.  Thus this is thread safe without locks,          */
        /* assuming atomic pointer assignments.                           */
        a = reverse(reverse(a));
#       if !defined(AT_END) && !defined(THREADS)
          /* This is not thread safe, since realloc explicitly deallocates */
          if (i & 1) {
            a = (sexpr)GC_REALLOC((GC_PTR)a, 500);
          } else {
            a = (sexpr)GC_REALLOC((GC_PTR)a, 8200);
          }
#       endif
    }
    check_ints(a,1,49);
    check_ints(b,1,50);
    c = (sexpr)((char *)c - sizeof(char *));
    d = (sexpr)((char *)d - sizeof(char *));
    check_ints(c,1,BIG);
    check_uncollectable_ints(d, 1, 100);
    check_ints(f[5], 1,17);
    check_ints(g[799], 1,18);
    check_ints(h[1999], 1,19);
#   ifndef THREADS
        a = 0;
#   endif
    b = c = 0;
}
/*
 * The rest of this builds balanced binary trees, checks that they don't
 * disappear, and tests finalization.
 */
typedef struct treenode {
    int level;
    struct treenode * lchild;
    struct treenode * rchild;
} tn;
int finalizable_count = 0;
int finalized_count = 0;
VOLATILE int dropped_something = 0;

# ifdef __STDC__
  void finalizer(void * obj, void * client_data)
# else
  void finalizer(obj, client_data)
  char * obj;
  char * client_data;
# endif
{
  tn * t = (tn *)obj;

# ifdef PCR
    PCR_ThCrSec_EnterSys();
# endif
# ifdef SOLARIS_THREADS
    static mutex_t incr_lock;
    mutex_lock(&incr_lock);
# endif
# if defined(IRIX_THREADS) || defined(LINUX_THREADS) || defined(HPUX_THREADS)
    static pthread_mutex_t incr_lock = PTHREAD_MUTEX_INITIALIZER;
    pthread_mutex_lock(&incr_lock);
# endif
# ifdef WIN32_THREADS
    EnterCriticalSection(&incr_cs);
# endif
  if ((int)(GC_word)client_data != t -> level) {
    (void)GC_printf0("Wrong finalization data - collector is broken\n");
    FAIL;
  }
  finalized_count++;
# ifdef PCR
    PCR_ThCrSec_ExitSys();
# endif
# ifdef SOLARIS_THREADS
    mutex_unlock(&incr_lock);
# endif
# if defined(IRIX_THREADS) || defined(LINUX_THREADS) || defined(HPUX_THREADS)
    pthread_mutex_unlock(&incr_lock);
# endif
# ifdef WIN32_THREADS
    LeaveCriticalSection(&incr_cs);
# endif
}
size_t counter = 0;

# define MAX_FINALIZED 8000

# if !defined(MACOS)
  GC_FAR GC_word live_indicators[MAX_FINALIZED] = {0};
#else
  /* Too big for THINK_C; have to allocate it dynamically. */
  GC_word *live_indicators = 0;
#endif

int live_indicators_count = 0;
tn * mktree(n)
int n;
{
    tn * result = (tn *)GC_MALLOC(sizeof(tn));

#if defined(MACOS)
        /* get around static data limitations. */
        if (!live_indicators)
                live_indicators =
                    (GC_word*)NewPtrClear(MAX_FINALIZED * sizeof(GC_word));
        if (!live_indicators) {
            (void)GC_printf0("Out of memory\n");
            exit(1);
        }
#endif
    if (n == 0) return(0);
    if (result == 0) {
        (void)GC_printf0("Out of memory\n");
        exit(1);
    }
    result -> level = n;
    result -> lchild = mktree(n-1);
    result -> rchild = mktree(n-1);
    if (counter++ % 17 == 0 && n >= 2) {
        tn * tmp = result -> lchild -> rchild;

        result -> lchild -> rchild = result -> rchild -> lchild;
        result -> rchild -> lchild = tmp;
    }
    if (counter++ % 119 == 0) {
        int my_index;

        {
#         ifdef PCR
            PCR_ThCrSec_EnterSys();
#         endif
#         ifdef SOLARIS_THREADS
            static mutex_t incr_lock;
            mutex_lock(&incr_lock);
#         endif
#         if defined(IRIX_THREADS) || defined(LINUX_THREADS) \
             || defined(HPUX_THREADS)
            static pthread_mutex_t incr_lock = PTHREAD_MUTEX_INITIALIZER;
            pthread_mutex_lock(&incr_lock);
#         endif
#         ifdef WIN32_THREADS
            EnterCriticalSection(&incr_cs);
#         endif
            /* Losing a count here causes erroneous report of failure. */
            finalizable_count++;
            my_index = live_indicators_count++;
#         ifdef PCR
            PCR_ThCrSec_ExitSys();
#         endif
#         ifdef SOLARIS_THREADS
            mutex_unlock(&incr_lock);
#         endif
#         if defined(IRIX_THREADS) || defined(LINUX_THREADS) \
             || defined(HPUX_THREADS)
            pthread_mutex_unlock(&incr_lock);
#         endif
#         ifdef WIN32_THREADS
            LeaveCriticalSection(&incr_cs);
#         endif
        }
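        /* Register a finalizer whose client data is the node's level, mark */
        /* the node live in live_indicators[], and register a disappearing  */
        /* link so that entry is cleared once the node becomes unreachable. */
        /* The unregister/re-register pair below simply exercises           */
        /* GC_unregister_disappearing_link.                                 */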
        GC_REGISTER_FINALIZER((GC_PTR)result, finalizer, (GC_PTR)(GC_word)n,
                              (GC_finalization_proc *)0, (GC_PTR *)0);
        if (my_index >= MAX_FINALIZED) {
            GC_printf0("live_indicators overflowed\n");
            FAIL;
        }
        live_indicators[my_index] = 13;
        if (GC_GENERAL_REGISTER_DISAPPEARING_LINK(
            (GC_PTR *)(&(live_indicators[my_index])),
            (GC_PTR)result) != 0) {
                GC_printf0("GC_general_register_disappearing_link failed\n");
                FAIL;
        }
        if (GC_unregister_disappearing_link(
                (GC_PTR *)
                   (&(live_indicators[my_index]))) == 0) {
            GC_printf0("GC_unregister_disappearing_link failed\n");
            FAIL;
        }
        if (GC_GENERAL_REGISTER_DISAPPEARING_LINK(
            (GC_PTR *)(&(live_indicators[my_index])),
            (GC_PTR)result) != 0) {
                GC_printf0("GC_general_register_disappearing_link failed 2\n");
                FAIL;
        }
    }
    return(result);
}
void chktree(t,n)
tn *t;
int n;
{
    if (n == 0 && t != 0) {
        (void)GC_printf0("Clobbered a leaf - collector is broken\n");
        FAIL;
    }
    if (n == 0) return;
    if (t -> level != n) {
        (void)GC_printf1("Lost a node at level %lu - collector is broken\n",
                         (unsigned long)n);
        FAIL;
    }
    if (counter++ % 373 == 0) (void) GC_MALLOC(counter%5001);
    chktree(t -> lchild, n-1);
    if (counter++ % 73 == 0) (void) GC_MALLOC(counter%373);
    chktree(t -> rchild, n-1);
}
# if defined(SOLARIS_THREADS) && !defined(_SOLARIS_PTHREADS)
thread_key_t fl_key;
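
/* Thread-local 8-byte allocator: GC_malloc_many returns a chain of 8-byte */
/* objects linked through their first word; the chain head is kept in a    */
/* thread-specific slot and one object is handed out (via GC_NEXT) per     */
/* call.                                                                   */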
void * alloc8bytes()
{
# if defined(SMALL_CONFIG) || defined(GC_DEBUG)
    return(GC_MALLOC(8));
# else
    void ** my_free_list_ptr;
    void * my_free_list;

    if (thr_getspecific(fl_key, (void **)(&my_free_list_ptr)) != 0) {
        (void)GC_printf0("thr_getspecific failed\n");
        FAIL;
    }
    if (my_free_list_ptr == 0) {
        my_free_list_ptr = GC_NEW_UNCOLLECTABLE(void *);
        if (thr_setspecific(fl_key, my_free_list_ptr) != 0) {
            (void)GC_printf0("thr_setspecific failed\n");
            FAIL;
        }
    }
    my_free_list = *my_free_list_ptr;
    if (my_free_list == 0) {
        my_free_list = GC_malloc_many(8);
        if (my_free_list == 0) {
            (void)GC_printf0("alloc8bytes out of memory\n");
            FAIL;
        }
    }
    *my_free_list_ptr = GC_NEXT(my_free_list);
    GC_NEXT(my_free_list) = 0;
    return(my_free_list);
# endif
}
#else

# if defined(_SOLARIS_PTHREADS) || defined(IRIX_THREADS) \
     || defined(LINUX_THREADS) || defined(HPUX_THREADS)
pthread_key_t fl_key;

void * alloc8bytes()
{
# ifdef SMALL_CONFIG
    return(GC_malloc(8));
# else
    void ** my_free_list_ptr;
    void * my_free_list;

    my_free_list_ptr = (void **)pthread_getspecific(fl_key);
    if (my_free_list_ptr == 0) {
        my_free_list_ptr = GC_NEW_UNCOLLECTABLE(void *);
        if (pthread_setspecific(fl_key, my_free_list_ptr) != 0) {
            (void)GC_printf0("pthread_setspecific failed\n");
            FAIL;
        }
    }
    my_free_list = *my_free_list_ptr;
    if (my_free_list == 0) {
        my_free_list = GC_malloc_many(8);
        if (my_free_list == 0) {
            (void)GC_printf0("alloc8bytes out of memory\n");
            FAIL;
        }
    }
    *my_free_list_ptr = GC_NEXT(my_free_list);
    GC_NEXT(my_free_list) = 0;
    return(my_free_list);
# endif
}

# else
#   define alloc8bytes() GC_MALLOC_ATOMIC(8)
# endif
#endif
void alloc_small(n)
int n;
{
    register int i;

    for (i = 0; i < n; i += 8) {
        if (alloc8bytes() == 0) {
            (void)GC_printf0("Out of memory\n");
            FAIL;
        }
    }
}
# if defined(THREADS) && defined(GC_DEBUG)
#   define TREE_HEIGHT 15
# else
#   define TREE_HEIGHT 16
# endif
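/* mktree(n) builds a complete binary tree with 2^n - 1 nodes, so       */
/* TREE_HEIGHT 16 corresponds to 65535 heap-allocated tree nodes.       */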
void tree_test()
{
    tn * root;
    register int i;

    root = mktree(TREE_HEIGHT);
    alloc_small(5000000);
    chktree(root, TREE_HEIGHT);
    if (finalized_count && ! dropped_something) {
        (void)GC_printf0("Premature finalization - collector is broken\n");
        FAIL;
    }
    dropped_something = 1;
    GC_noop(root);      /* Root needs to remain live until      */
                        /* dropped_something is set.            */
    root = mktree(TREE_HEIGHT);
    chktree(root, TREE_HEIGHT);
    for (i = TREE_HEIGHT; i >= 0; i--) {
        root = mktree(i);
        chktree(root, i);
    }
    alloc_small(5000000);
}
unsigned n_tests = 0;

GC_word bm_huge[10] = {
    0xffffffff,
    0xffffffff,
    0xffffffff,
    0xffffffff,
    0xffffffff,
    0xffffffff,
    0xffffffff,
    0xffffffff,
    0xffffffff,
    0x00ffffff,
};

/* A very simple test of explicitly typed allocation */
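/* GC_make_descriptor(bm, n) builds a type descriptor for an object of n    */
/* words in which word i may hold a pointer iff bit i of the bitmap is set. */
/* Thus bm_huge (assuming 32-bit GC_words) describes a 320-word object      */
/* whose first 312 words are treated as possible pointers.                  */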
void typed_test()
{
    GC_word * old, * new;
    GC_word bm3 = 0x3;
    GC_word bm2 = 0x2;
    GC_word bm_large = 0xf7ff7fff;
    GC_descr d1 = GC_make_descriptor(&bm3, 2);
    GC_descr d2 = GC_make_descriptor(&bm2, 2);
#   ifndef LINT
      GC_descr dummy = GC_make_descriptor(&bm_large, 32);
#   endif
    GC_descr d3 = GC_make_descriptor(&bm_large, 32);
    GC_descr d4 = GC_make_descriptor(bm_huge, 320);
    GC_word * x = (GC_word *)GC_malloc_explicitly_typed(2000, d4);
    register int i;

    old = 0;
    for (i = 0; i < 4000; i++) {
        new = (GC_word *) GC_malloc_explicitly_typed(4 * sizeof(GC_word), d1);
        new[0] = 17;
        new[1] = (GC_word)old;
        old = new;
        new = (GC_word *) GC_malloc_explicitly_typed(4 * sizeof(GC_word), d2);
        new[0] = 17;
        new[1] = (GC_word)old;
        old = new;
        new = (GC_word *) GC_malloc_explicitly_typed(33 * sizeof(GC_word), d3);
        new[0] = 17;
        new[1] = (GC_word)old;
        old = new;
        new = (GC_word *) GC_calloc_explicitly_typed(4, 2 * sizeof(GC_word),
                                                     d1);
        new[0] = 17;
        new[1] = (GC_word)old;
        old = new;
        if (i & 0xff) {
          new = (GC_word *) GC_calloc_explicitly_typed(7, 3 * sizeof(GC_word),
                                                       d2);
        } else {
          new = (GC_word *) GC_calloc_explicitly_typed(1001,
                                                       3 * sizeof(GC_word),
                                                       d2);
        }
        new[0] = 17;
        new[1] = (GC_word)old;
        old = new;
    }
    for (i = 0; i < 20000; i++) {
        if (new[0] != 17) {
            (void)GC_printf1("typed alloc failed at %lu\n",
                             (unsigned long)i);
            FAIL;
        }
        new[0] = 0;
        old = new;
        new = (GC_word *)(old[1]);
    }
    GC_gcollect();
    GC_noop(x);
}
int fail_count = 0;

#ifndef __STDC__
/*ARGSUSED*/
void fail_proc1(x)
GC_PTR x;
{
    fail_count++;
}

#else

/*ARGSUSED*/
void fail_proc1(GC_PTR x)
{
    fail_count++;
}

#endif /* __STDC__ */

#ifdef THREADS
#   define TEST_FAIL_COUNT(n) 1
#else
#   define TEST_FAIL_COUNT(n) (fail_count >= (n))
#endif
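/* fail_proc1 is installed below as the print proc for GC_is_visible and    */
/* GC_is_valid_displacement, so the failures those checks are expected to   */
/* report are simply counted in fail_count.  With THREADS the count is not  */
/* reliable, hence TEST_FAIL_COUNT is then always true.                     */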
void run_one_test()
{
    char *x;
#   ifdef LINT
        char *y = 0;
#   else
        char *y = (char *)(size_t)fail_proc1;
#   endif
    DCL_LOCK_STATE;

#   ifdef FIND_LEAK
        (void)GC_printf0(
            "This test program is not designed for leak detection mode\n");
        (void)GC_printf0("Expect lots of problems.\n");
#   endif
    if (GC_size(GC_malloc(7)) != 8
        || GC_size(GC_malloc(15)) != 16) {
        (void)GC_printf0("GC_size produced unexpected results\n");
        FAIL;
    }
    if (GC_size(GC_malloc(0)) != 4 && GC_size(GC_malloc(0)) != 8) {
        (void)GC_printf0("GC_malloc(0) failed\n");
        FAIL;
    }
    if (GC_size(GC_malloc_uncollectable(0)) != 4
        && GC_size(GC_malloc_uncollectable(0)) != 8) {
        (void)GC_printf0("GC_malloc_uncollectable(0) failed\n");
        FAIL;
    }
    GC_FREE(0);
    GC_is_valid_displacement_print_proc = fail_proc1;
    GC_is_visible_print_proc = fail_proc1;
    x = GC_malloc(16);
    if (GC_base(x + 13) != x) {
        (void)GC_printf0("GC_base(heap ptr) produced incorrect result\n");
        FAIL;
    }
#   ifndef PCR
      if (GC_base(y) != 0) {
        (void)GC_printf0("GC_base(fn_ptr) produced incorrect result\n");
        FAIL;
      }
#   endif
    if (GC_same_obj(x+5, x) != x + 5) {
        (void)GC_printf0("GC_same_obj produced incorrect result\n");
        FAIL;
    }
    if (GC_is_visible(y) != y || GC_is_visible(x) != x) {
        (void)GC_printf0("GC_is_visible produced incorrect result\n");
        FAIL;
    }
    if (!TEST_FAIL_COUNT(1)) {
#       if !(defined(RS6000) || defined(POWERPC) || defined(IA64))
          /* On RS6000s function pointers point to a descriptor in the */
          /* data segment, so there should have been no failures.      */
          (void)GC_printf0("GC_is_visible produced wrong failure indication\n");
          FAIL;
#       endif
    }
    if (GC_is_valid_displacement(y) != y
        || GC_is_valid_displacement(x) != x
        || GC_is_valid_displacement(x + 3) != x + 3) {
        (void)GC_printf0(
            "GC_is_valid_displacement produced incorrect result\n");
        FAIL;
    }
#   ifndef ALL_INTERIOR_POINTERS
#     if defined(RS6000) || defined(POWERPC)
      if (!TEST_FAIL_COUNT(1)) {
#     else
      if (!TEST_FAIL_COUNT(2)) {
#     endif
        (void)GC_printf0("GC_is_valid_displacement produced wrong failure indication\n");
        FAIL;
      }
#   endif
    /* Test floating point alignment */
    *(double *)GC_MALLOC(sizeof(double)) = 1.0;
    *(double *)GC_MALLOC(sizeof(double)) = 1.0;
    /* Repeated list reversal test. */
    reverse_test();
#   ifdef PRINTSTATS
        GC_printf0("-------------Finished reverse_test\n");
#   endif
    typed_test();
#   ifdef PRINTSTATS
        GC_printf0("-------------Finished typed_test\n");
#   endif
    tree_test();
    LOCK();
    n_tests++;
    UNLOCK();
    /* GC_printf1("Finished %x\n", pthread_self()); */
}
void check_heap_stats()
{
    unsigned long max_heap_sz;
    register int i;
    int still_live;
    int late_finalize_count = 0;

    if (sizeof(char *) > 4) {
        max_heap_sz = 15000000;
    } else {
        max_heap_sz = 11000000;
    }
#   ifdef GC_DEBUG
        max_heap_sz *= 2;
#       ifdef SPARC
            max_heap_sz *= 2;
#       endif
#   endif
    /* Garbage collect repeatedly so that all inaccessible objects */
    /* can be finalized.                                           */
    while (GC_collect_a_little()) { }
    for (i = 0; i < 16; i++) {
        GC_gcollect();
        late_finalize_count += GC_invoke_finalizers();
    }
    (void)GC_printf1("Completed %lu tests\n", (unsigned long)n_tests);
    (void)GC_printf2("Finalized %lu/%lu objects - ",
                     (unsigned long)finalized_count,
                     (unsigned long)finalizable_count);
#   ifdef FINALIZE_ON_DEMAND
        if (finalized_count != late_finalize_count) {
            (void)GC_printf0("Demand finalization error\n");
            FAIL;
        }
#   endif
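    /* Not every registered object need have been finalized: some may be    */
    /* conservatively retained, e.g. via stale stack or register contents.  */
    /* The heuristic below therefore only requires that at least half of    */
    /* them were.                                                           */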
    if (finalized_count > finalizable_count
        || finalized_count < finalizable_count/2) {
        (void)GC_printf0("finalization is probably broken\n");
        FAIL;
    } else {
        (void)GC_printf0("finalization is probably ok\n");
    }
    still_live = 0;
    for (i = 0; i < MAX_FINALIZED; i++) {
        if (live_indicators[i] != 0) {
            still_live++;
        }
    }
    i = finalizable_count - finalized_count - still_live;
    if (0 != i) {
        (void)GC_printf2
            ("%lu disappearing links remain and %lu more objects were not finalized\n",
             (unsigned long) still_live, (unsigned long)i);
        if (i > 10) {
            GC_printf0("\tVery suspicious!\n");
        } else {
            GC_printf0("\tSlightly suspicious, but probably OK.\n");
        }
    }
    (void)GC_printf1("Total number of bytes allocated is %lu\n",
                     (unsigned long)
                        WORDS_TO_BYTES(GC_words_allocd + GC_words_allocd_before_gc));
    (void)GC_printf1("Final heap size is %lu bytes\n",
                     (unsigned long)GC_get_heap_size());
    if (WORDS_TO_BYTES(GC_words_allocd + GC_words_allocd_before_gc)
        < 33500000*n_tests) {
        (void)GC_printf0("Incorrect execution - missed some allocations\n");
        FAIL;
    }
    if (GC_get_heap_size() > max_heap_sz*n_tests) {
        (void)GC_printf0("Unexpected heap growth - collector may be broken\n");
        FAIL;
    }
    (void)GC_printf0("Collector appears to work\n");
}
#if defined(MACOS)
void SetMinimumStack(long minSize)
{
        long newApplLimit;

        if (minSize > LMGetDefltStack())
        {
                newApplLimit = (long) GetApplLimit()
                                - (minSize - LMGetDefltStack());
                SetApplLimit((Ptr) newApplLimit);
                MaxApplZone();
        }
}

#define cMinStackSpace (512L * 1024L)

#endif
#ifdef __STDC__
  void warn_proc(char *msg, GC_word p)
#else
  void warn_proc(msg, p)
  char *msg;
  GC_word p;
#endif
{
    GC_printf1(msg, (unsigned long)p);
    FAIL;
}
#if !defined(PCR) && !defined(SOLARIS_THREADS) && !defined(WIN32_THREADS) \
  && !defined(IRIX_THREADS) && !defined(LINUX_THREADS) \
  && !defined(HPUX_THREADS) || defined(LINT)
#ifdef MSWIN32
  int APIENTRY WinMain(HINSTANCE instance, HINSTANCE prev, LPSTR cmd, int n)
#else
  int main()
#endif
{
#   if defined(DJGPP)
        int dummy;
#   endif
    n_tests = 0;

#   if defined(DJGPP)
        /* No good way to determine stack base from library; do it */
        /* manually on this platform.                              */
        GC_stackbottom = (GC_PTR)(&dummy);
#   endif
#   if defined(MACOS)
        /* Make sure we have lots and lots of stack space. */
        SetMinimumStack(cMinStackSpace);
        /* Cheat and let stdio initialize toolbox for us.  */
        printf("Testing GC Macintosh port.\n");
#   endif
    GC_INIT();  /* Only needed if gc is dynamic library. */
    (void) GC_set_warn_proc(warn_proc);
#   if defined(MPROTECT_VDB) || defined(PROC_VDB)
      GC_enable_incremental();
      (void) GC_printf0("Switched to incremental mode\n");
#     if defined(MPROTECT_VDB)
        (void)GC_printf0("Emulating dirty bits with mprotect/signals\n");
#     else
        (void)GC_printf0("Reading dirty bits from /proc\n");
#     endif
#   endif
    run_one_test();
    check_heap_stats();
    (void)fflush(stdout);
#   ifdef LINT
        /* Entry points we should be testing, but aren't.                  */
        /* Some can be tested by defining GC_DEBUG at the top of this file */
        /* This is a bit SunOS4 specific.                                  */
        GC_noop(GC_expand_hp, GC_add_roots, GC_clear_roots,
                GC_register_disappearing_link,
                GC_register_finalizer_ignore_self,
                GC_debug_register_displacement,
                GC_print_obj, GC_debug_change_stubborn,
                GC_debug_end_stubborn_change, GC_debug_malloc_uncollectable,
                GC_debug_free, GC_debug_realloc, GC_generic_malloc_words_small,
                GC_init, GC_make_closure, GC_debug_invoke_finalizer,
                GC_page_was_ever_dirty, GC_is_fresh,
                GC_malloc_ignore_off_page, GC_malloc_atomic_ignore_off_page,
                GC_set_max_heap_size, GC_get_bytes_since_gc,
                GC_pre_incr, GC_post_incr);
#   endif
#   ifdef MSWIN32
      GC_win32_free_heap();
#   endif
    return(0);
}
# endif
#ifdef WIN32_THREADS

unsigned __stdcall thr_run_one_test(void *arg)
{
    run_one_test();
    return 0;
}

#define NTEST 2
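/* The main thread runs the test as well, so NTEST extra threads give      */
/* NTEST + 1 concurrent runs of run_one_test; check_heap_stats scales its  */
/* allocation and heap-size limits by the resulting n_tests.               */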
int APIENTRY WinMain(HINSTANCE instance, HINSTANCE prev, LPSTR cmd, int n)
{
# if NTEST > 0
   HANDLE h[NTEST];
# endif
  int i;
  unsigned thread_id;
# if 0
    GC_enable_incremental();
# endif
  InitializeCriticalSection(&incr_cs);
  (void) GC_set_warn_proc(warn_proc);
  for (i = 0; i < NTEST; i++) {
    h[i] = (HANDLE)_beginthreadex(NULL, 0, thr_run_one_test, 0, 0, &thread_id);
    if (h[i] == (HANDLE)-1) {
      (void)GC_printf1("Thread creation failed %lu\n", (unsigned long)GetLastError());
      FAIL;
    }
  }
  run_one_test();
  for (i = 0; i < NTEST; i++)
    if (WaitForSingleObject(h[i], INFINITE) != WAIT_OBJECT_0) {
      (void)GC_printf1("Thread wait failed %lu\n", (unsigned long)GetLastError());
      FAIL;
    }
  check_heap_stats();
  (void)fflush(stdout);
  return(0);
}

#endif /* WIN32_THREADS */
#ifdef PCR
test()
{
    PCR_Th_T * th1;
    PCR_Th_T * th2;
    int code;

    n_tests = 0;
    /* GC_enable_incremental(); */
    (void) GC_set_warn_proc(warn_proc);
    th1 = PCR_Th_Fork(run_one_test, 0);
    th2 = PCR_Th_Fork(run_one_test, 0);
    run_one_test();
    if (PCR_Th_T_Join(th1, &code, NIL, PCR_allSigsBlocked, PCR_waitForever)
        != PCR_ERes_okay || code != 0) {
        (void)GC_printf0("Thread 1 failed\n");
    }
    if (PCR_Th_T_Join(th2, &code, NIL, PCR_allSigsBlocked, PCR_waitForever)
        != PCR_ERes_okay || code != 0) {
        (void)GC_printf0("Thread 2 failed\n");
    }
    check_heap_stats();
    (void)fflush(stdout);
    return(0);
}
#endif
#if defined(SOLARIS_THREADS) || defined(IRIX_THREADS) \
 || defined(HPUX_THREADS) || defined(LINUX_THREADS)
void * thr_run_one_test(void * arg)
{
    run_one_test();
    return(0);
}

#ifdef GC_DEBUG
#  define GC_free GC_debug_free
#endif
#ifdef SOLARIS_THREADS
main()
{
    thread_t th1;
    thread_t th2;
    int code;

    n_tests = 0;
    GC_INIT();  /* Only needed if gc is dynamic library. */
    GC_enable_incremental();
    (void) GC_set_warn_proc(warn_proc);
    if ((code = thr_keycreate(&fl_key, GC_free)) != 0) {
        (void)GC_printf1("Key creation failed %lu\n", (unsigned long)code);
        FAIL;
    }
    if ((code = thr_create(0, 1024*1024, thr_run_one_test, 0, 0, &th1)) != 0) {
        (void)GC_printf1("Thread 1 creation failed %lu\n", (unsigned long)code);
        FAIL;
    }
    if ((code = thr_create(0, 1024*1024, thr_run_one_test, 0, THR_NEW_LWP, &th2)) != 0) {
        (void)GC_printf1("Thread 2 creation failed %lu\n", (unsigned long)code);
        FAIL;
    }
    run_one_test();
    if ((code = thr_join(th1, 0, 0)) != 0) {
        (void)GC_printf1("Thread 1 failed %lu\n", (unsigned long)code);
        FAIL;
    }
    if ((code = thr_join(th2, 0, 0)) != 0) {
        (void)GC_printf1("Thread 2 failed %lu\n", (unsigned long)code);
        FAIL;
    }
    check_heap_stats();
    (void)fflush(stdout);
    return(0);
}
#else /* pthreads */
main()
{
    pthread_t th1;
    pthread_t th2;
    pthread_attr_t attr;
    int code;

#   ifdef IRIX_THREADS
        /* Force a larger stack to be preallocated, since the */
        /* initial one can't always grow later.               */
        *((volatile char *)&code - 1024*1024) = 0;      /* Require 1 Mb */
#   endif /* IRIX_THREADS */
    pthread_attr_init(&attr);
#   if defined(IRIX_THREADS) || defined(HPUX_THREADS)
        pthread_attr_setstacksize(&attr, 1000000);
#   endif
    n_tests = 0;
#   ifdef MPROTECT_VDB
        GC_enable_incremental();
        (void) GC_printf0("Switched to incremental mode\n");
        (void) GC_printf0("Emulating dirty bits with mprotect/signals\n");
#   endif
    (void) GC_set_warn_proc(warn_proc);
    if ((code = pthread_key_create(&fl_key, 0)) != 0) {
        (void)GC_printf1("Key creation failed %lu\n", (unsigned long)code);
        FAIL;
    }
    if ((code = pthread_create(&th1, &attr, thr_run_one_test, 0)) != 0) {
        (void)GC_printf1("Thread 1 creation failed %lu\n", (unsigned long)code);
        FAIL;
    }
    if ((code = pthread_create(&th2, &attr, thr_run_one_test, 0)) != 0) {
        (void)GC_printf1("Thread 2 creation failed %lu\n", (unsigned long)code);
        FAIL;
    }
    run_one_test();
    if ((code = pthread_join(th1, 0)) != 0) {
        (void)GC_printf1("Thread 1 failed %lu\n", (unsigned long)code);
        FAIL;
    }
    if ((code = pthread_join(th2, 0)) != 0) {
        (void)GC_printf1("Thread 2 failed %lu\n", (unsigned long)code);
        FAIL;
    }
    check_heap_stats();
    (void)fflush(stdout);
    pthread_attr_destroy(&attr);
    GC_printf1("Completed %lu collections\n", (unsigned long)GC_gc_no);
    return(0);
}
#endif /* pthreads */
#endif /* SOLARIS_THREADS || IRIX_THREADS || LINUX_THREADS || HPUX_THREADS */