2003-07-04 Toon Moene <toon@moene.indiv.nluug.nl>
[official-gcc.git] / gcc / ggc-common.c
blob3163c15f83b775dace288b3611a19451b311e03a
1 /* Simple garbage collection for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003
3 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
22 /* Generic garbage collection (GC) functions and data, not specific to
23 any particular GC implementation. */
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "hashtab.h"
29 #include "ggc.h"
30 #include "toplev.h"
31 #include "params.h"
33 #ifdef HAVE_SYS_RESOURCE_H
34 # include <sys/resource.h>
35 #endif
37 #ifdef HAVE_MMAP_FILE
38 # include <sys/mman.h>
39 # ifdef HAVE_MINCORE
40 /* This is on Solaris. */
41 # include <sys/types.h>
42 # endif
43 #endif
45 #ifndef MAP_FAILED
46 # define MAP_FAILED ((void *)-1)
47 #endif
49 #ifdef ENABLE_VALGRIND_CHECKING
50 # ifdef HAVE_MEMCHECK_H
51 # include <memcheck.h>
52 # else
53 # include <valgrind.h>
54 # endif
55 #else
56 /* Avoid #ifdef:s when we can help it. */
57 #define VALGRIND_DISCARD(x)
58 #endif
/* Statistics about the allocation.  Non-NULL only while a collection
   requested by ggc_print_common_statistics is in progress.  */
static ggc_statistics *ggc_stats;

struct traversal_state;

/* Forward declarations for the static helpers defined below.  */
static int ggc_htab_delete (void **, void *);
static hashval_t saving_htab_hash (const void *);
static int saving_htab_eq (const void *, const void *);
static int call_count (void **, void *);
static int call_alloc (void **, void *);
static int compare_ptr_data (const void *, const void *);
static void relocate_ptrs (void *, void *);
static void write_pch_globals (const struct ggc_root_tab * const *tab,
			       struct traversal_state *state);
static double ggc_rlimit_bound (double);
76 /* Maintain global roots that are preserved during GC. */
78 /* Process a slot of an htab by deleting it if it has not been marked. */
80 static int
81 ggc_htab_delete (void **slot, void *info)
83 const struct ggc_cache_tab *r = (const struct ggc_cache_tab *) info;
85 if (! (*r->marked_p) (*slot))
86 htab_clear_slot (*r->base, slot);
87 else
88 (*r->cb) (*slot);
90 return 1;
/* Iterate through all registered roots and mark each element.  */

void
ggc_mark_roots (void)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  const struct ggc_cache_tab *const *ct;
  const struct ggc_cache_tab *cti;
  size_t i;

  /* Deletable roots are simply wiped at every collection rather than
     marked.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Mark every element of every ordinary root table; each element is
     a pointer located STRIDE * I bytes past the table's base.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	(*rti->cb)(*(void **)((char *)rti->base + rti->stride * i));

  ggc_mark_stringpool ();

  /* Now scan all hash tables that have objects which are to be deleted if
     they are not already marked.  The table itself and its entries array
     are marked so they survive; unmarked entries are cleared by
     ggc_htab_delete.  */
  for (ct = gt_ggc_cache_rtab; *ct; ct++)
    for (cti = *ct; cti->base != NULL; cti++)
      if (*cti->base)
	{
	  ggc_set_mark (*cti->base);
	  htab_traverse_noresize (*cti->base, ggc_htab_delete, (void *) cti);
	  ggc_set_mark ((*cti->base)->entries);
	}
}
/* Allocate SIZE bytes of GC-managed memory and zero-fill the block
   before returning it.  */

void *
ggc_alloc_cleared (size_t size)
{
  void *block = ggc_alloc (size);

  memset (block, 0, size);
  return block;
}
/* Resize a block of GC memory, possibly re-allocating it.  X may be
   NULL, in which case this behaves like ggc_alloc.  If the underlying
   allocation already has room for SIZE bytes, X itself is returned;
   otherwise the old contents are copied to a fresh block and the old
   block is left for the collector to reclaim.  */

void *
ggc_realloc (void *x, size_t size)
{
  void *r;
  size_t old_size;

  if (x == NULL)
    return ggc_alloc (size);

  /* NOTE: this is the size of the containing pool object, which may be
     larger than the size originally requested for X.  */
  old_size = ggc_get_size (x);
  if (size <= old_size)
    {
      /* Mark the unwanted memory as unaccessible.  We also need to make
	 the "new" size accessible, since ggc_get_size returns the size of
	 the pool, not the size of the individually allocated object, the
	 size which was previously made accessible.  Unfortunately, we
	 don't know that previously allocated size.  Without that
	 knowledge we have to lose some initialization-tracking for the
	 old parts of the object.  An alternative is to mark the whole
	 old_size as reachable, but that would lose tracking of writes
	 after the end of the object (by small offsets).  Discard the
	 handle to avoid handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) x + size,
						old_size - size));
      VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (x, size));
      return x;
    }

  r = ggc_alloc (size);

  /* Since ggc_get_size returns the size of the pool, not the size of the
     individually allocated object, we'd access parts of the old object
     that were marked invalid with the memcpy below.  We lose a bit of the
     initialization-tracking since some of it may be uninitialized.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (x, old_size));

  memcpy (r, x, old_size);

  /* The old object is not supposed to be used anymore.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (x, old_size));

  return r;
}
/* Like ggc_alloc_cleared, but performs a multiplication: allocate a
   zeroed block of S1 * S2 bytes.  */

void *
ggc_calloc (size_t s1, size_t s2)
{
  /* Guard against overflow in the size computation; an overflowing
     product would silently wrap and allocate a too-small block,
     leading to out-of-bounds writes by the caller.  */
  if (s2 != 0 && s1 > (size_t) -1 / s2)
    abort ();
  return ggc_alloc_cleared (s1 * s2);
}
/* These are for splay_tree_new_ggc.  */

/* Allocation callback: hand out SZ bytes of GC memory.  NL is the
   splay tree's allocation cookie and must be NULL here.  */
void *
ggc_splay_alloc (int sz, void *nl)
{
  if (nl == NULL)
    return ggc_alloc (sz);
  abort ();
}
197 void
198 ggc_splay_dont_free (void * x ATTRIBUTE_UNUSED, void *nl)
200 if (nl != NULL)
201 abort ();
/* Print statistics that are independent of the collector in use.  */

/* Scale a byte count X to a human-friendly magnitude: bytes below
   10K, kilobytes below 10M, megabytes otherwise.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
/* Unit suffix matching SCALE's choice: ' ', 'k' or 'M'.  */
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
/* Gather collector-independent statistics into STATS by running one
   collection with the global ggc_stats hook installed.  STREAM is
   currently unused.  */
void
ggc_print_common_statistics (FILE *stream ATTRIBUTE_UNUSED,
			     ggc_statistics *stats)
{
  /* Set the pointer so that during collection we will actually gather
     the statistics.  */
  ggc_stats = stats;

  /* Then do one collection to fill in the statistics.  */
  ggc_collect ();

  /* At present, we don't really gather any interesting statistics.  */

  /* Don't gather statistics any more.  */
  ggc_stats = NULL;
}
/* Functions for saving and restoring GCable memory to disk.  */

/* Hash table mapping each registered object to its ptr_data record.  */
static htab_t saving_htab;

/* Bookkeeping for one GC object destined for the PCH file.  */
struct ptr_data
{
  void *obj;			/* The object's current address.  */
  void *note_ptr_cookie;	/* Cookie passed back to NOTE_PTR_FN.  */
  gt_note_pointers note_ptr_fn;	/* Walks the pointers inside OBJ.  */
  gt_handle_reorder reorder_fn;	/* Optional pre-write reorder hook.  */
  size_t size;			/* Size of OBJ in bytes.  */
  void *new_addr;		/* Address OBJ will occupy in the PCH
				   image (set during allocation).  */
};

/* Hash a pointer by dropping the low bits, which are usually zero
   because of alignment.  */
#define POINTER_HASH(x) (hashval_t)((long)x >> 3)
245 /* Register an object in the hash table. */
248 gt_pch_note_object (void *obj, void *note_ptr_cookie,
249 gt_note_pointers note_ptr_fn)
251 struct ptr_data **slot;
253 if (obj == NULL || obj == (void *) 1)
254 return 0;
256 slot = (struct ptr_data **)
257 htab_find_slot_with_hash (saving_htab, obj, POINTER_HASH (obj),
258 INSERT);
259 if (*slot != NULL)
261 if ((*slot)->note_ptr_fn != note_ptr_fn
262 || (*slot)->note_ptr_cookie != note_ptr_cookie)
263 abort ();
264 return 0;
267 *slot = xcalloc (sizeof (struct ptr_data), 1);
268 (*slot)->obj = obj;
269 (*slot)->note_ptr_fn = note_ptr_fn;
270 (*slot)->note_ptr_cookie = note_ptr_cookie;
271 if (note_ptr_fn == gt_pch_p_S)
272 (*slot)->size = strlen (obj) + 1;
273 else
274 (*slot)->size = ggc_get_size (obj);
275 return 1;
/* Record REORDER_FN as the reorder hook for OBJ, which must already
   have been registered via gt_pch_note_object with the same
   NOTE_PTR_COOKIE.  Aborts if OBJ was never registered or the cookie
   does not match.  */

void
gt_pch_note_reorder (void *obj, void *note_ptr_cookie,
		     gt_handle_reorder reorder_fn)
{
  struct ptr_data *data;

  /* NULL and (void *)1 are pointer sentinels, never real objects.  */
  if (obj == NULL || obj == (void *) 1)
    return;

  data = htab_find_with_hash (saving_htab, obj, POINTER_HASH (obj));
  if (data == NULL
      || data->note_ptr_cookie != note_ptr_cookie)
    abort ();

  data->reorder_fn = reorder_fn;
}
297 /* Hash and equality functions for saving_htab, callbacks for htab_create. */
299 static hashval_t
300 saving_htab_hash (const void *p)
302 return POINTER_HASH (((struct ptr_data *)p)->obj);
305 static int
306 saving_htab_eq (const void *p1, const void *p2)
308 return ((struct ptr_data *)p1)->obj == p2;
/* Handy state for the traversal functions.  */

struct traversal_state
{
  FILE *f;			/* The PCH file being written.  */
  struct ggc_pch_data *d;	/* Collector-specific PCH state.  */
  size_t count;			/* Number of registered objects.  */
  struct ptr_data **ptrs;	/* All registered objects, later sorted
				   by their new (PCH-image) address.  */
  size_t ptrs_i;		/* Next free index into PTRS.  */
};
322 /* Callbacks for htab_traverse. */
324 static int
325 call_count (void **slot, void *state_p)
327 struct ptr_data *d = (struct ptr_data *)*slot;
328 struct traversal_state *state = (struct traversal_state *)state_p;
330 ggc_pch_count_object (state->d, d->obj, d->size);
331 state->count++;
332 return 1;
335 static int
336 call_alloc (void **slot, void *state_p)
338 struct ptr_data *d = (struct ptr_data *)*slot;
339 struct traversal_state *state = (struct traversal_state *)state_p;
341 d->new_addr = ggc_pch_alloc_object (state->d, d->obj, d->size);
342 state->ptrs[state->ptrs_i++] = d;
343 return 1;
346 /* Callback for qsort. */
348 static int
349 compare_ptr_data (const void *p1_p, const void *p2_p)
351 struct ptr_data *p1 = *(struct ptr_data *const *)p1_p;
352 struct ptr_data *p2 = *(struct ptr_data *const *)p2_p;
353 return (((size_t)p1->new_addr > (size_t)p2->new_addr)
354 - ((size_t)p1->new_addr < (size_t)p2->new_addr));
/* Callbacks for note_ptr_fn.  */

/* Replace the pointer stored at PTR_P with the address its target
   will have in the PCH image.  Aborts if the target was never
   registered via gt_pch_note_object.  */
static void
relocate_ptrs (void *ptr_p, void *state_p)
{
  void **ptr = (void **)ptr_p;
  struct traversal_state *state ATTRIBUTE_UNUSED
    = (struct traversal_state *)state_p;
  struct ptr_data *result;

  /* NULL and (void *)1 are sentinels and pass through unchanged.  */
  if (*ptr == NULL || *ptr == (void *)1)
    return;

  result = htab_find_with_hash (saving_htab, *ptr, POINTER_HASH (*ptr));
  if (result == NULL)
    abort ();
  *ptr = result->new_addr;
}
376 /* Write out, after relocation, the pointers in TAB. */
377 static void
378 write_pch_globals (const struct ggc_root_tab * const *tab,
379 struct traversal_state *state)
381 const struct ggc_root_tab *const *rt;
382 const struct ggc_root_tab *rti;
383 size_t i;
385 for (rt = tab; *rt; rt++)
386 for (rti = *rt; rti->base != NULL; rti++)
387 for (i = 0; i < rti->nelt; i++)
389 void *ptr = *(void **)((char *)rti->base + rti->stride * i);
390 struct ptr_data *new_ptr;
391 if (ptr == NULL || ptr == (void *)1)
393 if (fwrite (&ptr, sizeof (void *), 1, state->f)
394 != 1)
395 fatal_error ("can't write PCH file: %m");
397 else
399 new_ptr = htab_find_with_hash (saving_htab, ptr,
400 POINTER_HASH (ptr));
401 if (fwrite (&new_ptr->new_addr, sizeof (void *), 1, state->f)
402 != 1)
403 fatal_error ("can't write PCH file: %m");
/* Hold the information we need to mmap the file back in.  */

struct mmap_info
{
  size_t offset;	/* File offset of the object area; padded so
			   that it is page aligned.  */
  size_t size;		/* Size in bytes of the object area.  */
  void *preferred_base;	/* Address the data was laid out for; mapping
			   anywhere else requires relocation.  */
};
417 /* Write out the state of the compiler to F. */
419 void
420 gt_pch_save (FILE *f)
422 const struct ggc_root_tab *const *rt;
423 const struct ggc_root_tab *rti;
424 size_t i;
425 struct traversal_state state;
426 char *this_object = NULL;
427 size_t this_object_size = 0;
428 struct mmap_info mmi;
429 size_t page_size = getpagesize();
431 gt_pch_save_stringpool ();
433 saving_htab = htab_create (50000, saving_htab_hash, saving_htab_eq, free);
435 for (rt = gt_ggc_rtab; *rt; rt++)
436 for (rti = *rt; rti->base != NULL; rti++)
437 for (i = 0; i < rti->nelt; i++)
438 (*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));
440 for (rt = gt_pch_cache_rtab; *rt; rt++)
441 for (rti = *rt; rti->base != NULL; rti++)
442 for (i = 0; i < rti->nelt; i++)
443 (*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));
445 /* Prepare the objects for writing, determine addresses and such. */
446 state.f = f;
447 state.d = init_ggc_pch();
448 state.count = 0;
449 htab_traverse (saving_htab, call_count, &state);
451 mmi.size = ggc_pch_total_size (state.d);
453 /* Try to arrange things so that no relocation is necessary,
454 but don't try very hard. On most platforms, this will always work,
455 and on the rest it's a lot of work to do better. */
456 #if HAVE_MMAP_FILE
457 mmi.preferred_base = mmap (NULL, mmi.size,
458 PROT_READ | PROT_WRITE, MAP_PRIVATE,
459 fileno (state.f), 0);
460 if (mmi.preferred_base == (void *) MAP_FAILED)
461 mmi.preferred_base = NULL;
462 else
463 munmap (mmi.preferred_base, mmi.size);
464 #else /* HAVE_MMAP_FILE */
465 mmi.preferred_base = NULL;
466 #endif /* HAVE_MMAP_FILE */
468 ggc_pch_this_base (state.d, mmi.preferred_base);
470 state.ptrs = xmalloc (state.count * sizeof (*state.ptrs));
471 state.ptrs_i = 0;
472 htab_traverse (saving_htab, call_alloc, &state);
473 qsort (state.ptrs, state.count, sizeof (*state.ptrs), compare_ptr_data);
475 /* Write out all the scalar variables. */
476 for (rt = gt_pch_scalar_rtab; *rt; rt++)
477 for (rti = *rt; rti->base != NULL; rti++)
478 if (fwrite (rti->base, rti->stride, 1, f) != 1)
479 fatal_error ("can't write PCH file: %m");
481 /* Write out all the global pointers, after translation. */
482 write_pch_globals (gt_ggc_rtab, &state);
483 write_pch_globals (gt_pch_cache_rtab, &state);
485 ggc_pch_prepare_write (state.d, state.f);
487 /* Pad the PCH file so that the mmapped area starts on a page boundary. */
489 long o;
490 o = ftell (state.f) + sizeof (mmi);
491 if (o == -1)
492 fatal_error ("can't get position in PCH file: %m");
493 mmi.offset = page_size - o % page_size;
494 if (mmi.offset == page_size)
495 mmi.offset = 0;
496 mmi.offset += o;
498 if (fwrite (&mmi, sizeof (mmi), 1, state.f) != 1)
499 fatal_error ("can't write PCH file: %m");
500 if (mmi.offset != 0
501 && fseek (state.f, mmi.offset, SEEK_SET) != 0)
502 fatal_error ("can't write padding to PCH file: %m");
504 /* Actually write out the objects. */
505 for (i = 0; i < state.count; i++)
507 if (this_object_size < state.ptrs[i]->size)
509 this_object_size = state.ptrs[i]->size;
510 this_object = xrealloc (this_object, this_object_size);
512 memcpy (this_object, state.ptrs[i]->obj, state.ptrs[i]->size);
513 if (state.ptrs[i]->reorder_fn != NULL)
514 state.ptrs[i]->reorder_fn (state.ptrs[i]->obj,
515 state.ptrs[i]->note_ptr_cookie,
516 relocate_ptrs, &state);
517 state.ptrs[i]->note_ptr_fn (state.ptrs[i]->obj,
518 state.ptrs[i]->note_ptr_cookie,
519 relocate_ptrs, &state);
520 ggc_pch_write_object (state.d, state.f, state.ptrs[i]->obj,
521 state.ptrs[i]->new_addr, state.ptrs[i]->size);
522 if (state.ptrs[i]->note_ptr_fn != gt_pch_p_S)
523 memcpy (state.ptrs[i]->obj, this_object, state.ptrs[i]->size);
525 ggc_pch_finish (state.d, state.f);
526 gt_pch_fixup_stringpool ();
528 free (state.ptrs);
529 htab_delete (saving_htab);
532 /* Read the state of the compiler back in from F. */
534 void
535 gt_pch_restore (FILE *f)
537 const struct ggc_root_tab *const *rt;
538 const struct ggc_root_tab *rti;
539 size_t i;
540 struct mmap_info mmi;
541 void *addr;
543 /* Delete any deletable objects. This makes ggc_pch_read much
544 faster, as it can be sure that no GCable objects remain other
545 than the ones just read in. */
546 for (rt = gt_ggc_deletable_rtab; *rt; rt++)
547 for (rti = *rt; rti->base != NULL; rti++)
548 memset (rti->base, 0, rti->stride);
550 /* Read in all the scalar variables. */
551 for (rt = gt_pch_scalar_rtab; *rt; rt++)
552 for (rti = *rt; rti->base != NULL; rti++)
553 if (fread (rti->base, rti->stride, 1, f) != 1)
554 fatal_error ("can't read PCH file: %m");
556 /* Read in all the global pointers, in 6 easy loops. */
557 for (rt = gt_ggc_rtab; *rt; rt++)
558 for (rti = *rt; rti->base != NULL; rti++)
559 for (i = 0; i < rti->nelt; i++)
560 if (fread ((char *)rti->base + rti->stride * i,
561 sizeof (void *), 1, f) != 1)
562 fatal_error ("can't read PCH file: %m");
564 for (rt = gt_pch_cache_rtab; *rt; rt++)
565 for (rti = *rt; rti->base != NULL; rti++)
566 for (i = 0; i < rti->nelt; i++)
567 if (fread ((char *)rti->base + rti->stride * i,
568 sizeof (void *), 1, f) != 1)
569 fatal_error ("can't read PCH file: %m");
571 if (fread (&mmi, sizeof (mmi), 1, f) != 1)
572 fatal_error ("can't read PCH file: %m");
574 #if HAVE_MMAP_FILE
575 addr = mmap (mmi.preferred_base, mmi.size,
576 PROT_READ | PROT_WRITE, MAP_PRIVATE,
577 fileno (f), mmi.offset);
579 #if HAVE_MINCORE
580 if (addr != mmi.preferred_base)
582 size_t page_size = getpagesize();
583 char one_byte;
585 if (addr != (void *) MAP_FAILED)
586 munmap (addr, mmi.size);
588 /* We really want to be mapped at mmi.preferred_base
589 so we're going to resort to MAP_FIXED. But before,
590 make sure that we can do so without destroying a
591 previously mapped area, by looping over all pages
592 that would be affected by the fixed mapping. */
593 errno = 0;
595 for (i = 0; i < mmi.size; i+= page_size)
596 if (mincore ((char *)mmi.preferred_base + i, page_size, (void *)&one_byte) == -1
597 && errno == ENOMEM)
598 continue; /* The page is not mapped. */
599 else
600 break;
602 if (i >= mmi.size)
603 addr = mmap (mmi.preferred_base, mmi.size,
604 PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED,
605 fileno (f), mmi.offset);
607 #endif /* HAVE_MINCORE */
609 #else /* HAVE_MMAP_FILE */
610 addr = MAP_FAILED;
611 #endif /* HAVE_MMAP_FILE */
612 if (addr == (void *) MAP_FAILED)
614 addr = xmalloc (mmi.size);
615 if (fseek (f, mmi.offset, SEEK_SET) != 0
616 || fread (&mmi, mmi.size, 1, f) != 1)
617 fatal_error ("can't read PCH file: %m");
619 else if (fseek (f, mmi.offset + mmi.size, SEEK_SET) != 0)
620 fatal_error ("can't read PCH file: %m");
622 ggc_pch_read (f, addr);
624 if (addr != mmi.preferred_base)
626 for (rt = gt_ggc_rtab; *rt; rt++)
627 for (rti = *rt; rti->base != NULL; rti++)
628 for (i = 0; i < rti->nelt; i++)
630 char **ptr = (char **)((char *)rti->base + rti->stride * i);
631 if (*ptr != NULL)
632 *ptr += (size_t)addr - (size_t)mmi.preferred_base;
635 for (rt = gt_pch_cache_rtab; *rt; rt++)
636 for (rti = *rt; rti->base != NULL; rti++)
637 for (i = 0; i < rti->nelt; i++)
639 char **ptr = (char **)((char *)rti->base + rti->stride * i);
640 if (*ptr != NULL)
641 *ptr += (size_t)addr - (size_t)mmi.preferred_base;
644 sorry ("had to relocate PCH");
647 gt_pch_restore_stringpool ();
/* Modify the bound based on rlimits.  Keep the smallest number found:
   LIMIT is clamped by whichever of RLIMIT_RSS, RLIMIT_DATA and
   RLIMIT_AS are available and set to a finite soft limit.  */
static double
ggc_rlimit_bound (double limit)
{
#if defined(HAVE_GETRLIMIT)
  struct rlimit rlim;

  /* Clamp LIMIT by the soft limit of resource RES, when it is both
     readable and finite.  */
# define GGC_CLAMP_BY_RLIMIT(RES)			\
  do							\
    {							\
      if (getrlimit (RES, &rlim) == 0			\
	  && rlim.rlim_cur != (rlim_t) RLIM_INFINITY	\
	  && rlim.rlim_cur < limit)			\
	limit = rlim.rlim_cur;				\
    }							\
  while (0)

# ifdef RLIMIT_RSS
  GGC_CLAMP_BY_RLIMIT (RLIMIT_RSS);
# endif
# ifdef RLIMIT_DATA
  GGC_CLAMP_BY_RLIMIT (RLIMIT_DATA);
# endif
# ifdef RLIMIT_AS
  GGC_CLAMP_BY_RLIMIT (RLIMIT_AS);
# endif
# undef GGC_CLAMP_BY_RLIMIT
#endif /* HAVE_GETRLIMIT */

  return limit;
}
/* Heuristic to set a default for GGC_MIN_EXPAND: the percentage the
   heap must grow by before triggering a collection, computed as
   30% + 70% * (RAM/1GB) and thus bounded to [30, 100].

   The extracted source had lost the function's return-type line,
   leaving an implicit-int declaration that is invalid C99; the
   explicit `int' is restored here (the value is consumed as an
   integer parameter value).  */

int
ggc_min_expand_heuristic (void)
{
  double min_expand = physmem_total();

  /* Adjust for rlimits.  */
  min_expand = ggc_rlimit_bound (min_expand);

  /* The heuristic is a percentage equal to 30% + 70%*(RAM/1GB), yielding
     a lower bound of 30% and an upper bound of 100% (when RAM >= 1GB).  */
  min_expand /= 1024*1024*1024;
  min_expand *= 70;
  min_expand = MIN (min_expand, 70);
  min_expand += 30;

  return min_expand;
}
/* Heuristic to set a default for GGC_MIN_HEAPSIZE, in kilobytes:
   RAM/8, clamped to [4M, 128M].

   The extracted source had lost the function's return-type line,
   leaving an implicit-int declaration that is invalid C99; the
   explicit `int' is restored here (the value is consumed as an
   integer parameter value).  */

int
ggc_min_heapsize_heuristic (void)
{
  double min_heap_kbytes = physmem_total();

  /* Adjust for rlimits.  */
  min_heap_kbytes = ggc_rlimit_bound (min_heap_kbytes);

  min_heap_kbytes /= 1024; /* convert to Kbytes.  */

  /* The heuristic is RAM/8, with a lower bound of 4M and an upper
     bound of 128M (when RAM >= 1GB).  */
  min_heap_kbytes /= 8;
  min_heap_kbytes = MAX (min_heap_kbytes, 4 * 1024);
  min_heap_kbytes = MIN (min_heap_kbytes, 128 * 1024);

  return min_heap_kbytes;
}
/* Install heuristic defaults for the GC tuning parameters, unless the
   compiler was configured to collect at every opportunity (in which
   case the thresholds are irrelevant).  */
void
init_ggc_heuristics (void)
{
#ifndef ENABLE_GC_ALWAYS_COLLECT
  set_param_value ("ggc-min-expand", ggc_min_expand_heuristic());
  set_param_value ("ggc-min-heapsize", ggc_min_heapsize_heuristic());
#endif
}