1 /* Simple garbage collection for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003
3 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
22 /* Generic garbage collection (GC) functions and data, not specific to
23 any particular GC implementation. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "hashtab.h"
#include "ggc.h"
#include "toplev.h"
#include "params.h"
#include "hosthooks.h"

#ifdef HAVE_SYS_RESOURCE_H
# include <sys/resource.h>
#endif

#ifdef HAVE_MMAP_FILE
# include <sys/mman.h>
# ifdef HAVE_MINCORE
/* This is on Solaris.  */
#  include <sys/types.h>
# endif
#endif

#ifndef MAP_FAILED
# define MAP_FAILED ((void *)-1)
#endif

#ifdef ENABLE_VALGRIND_CHECKING
# ifdef HAVE_VALGRIND_MEMCHECK_H
#  include <valgrind/memcheck.h>
# elif defined HAVE_MEMCHECK_H
#  include <memcheck.h>
# else
#  include <valgrind.h>
# endif
#else
/* Avoid #ifdef:s when we can help it.  */
#define VALGRIND_DISCARD(x)
#endif
63 /* Statistics about the allocation. */
64 static ggc_statistics
*ggc_stats
;
66 struct traversal_state
;
68 static int ggc_htab_delete (void **, void *);
69 static hashval_t
saving_htab_hash (const void *);
70 static int saving_htab_eq (const void *, const void *);
71 static int call_count (void **, void *);
72 static int call_alloc (void **, void *);
73 static int compare_ptr_data (const void *, const void *);
74 static void relocate_ptrs (void *, void *);
75 static void write_pch_globals (const struct ggc_root_tab
* const *tab
,
76 struct traversal_state
*state
);
77 static double ggc_rlimit_bound (double);
79 /* Maintain global roots that are preserved during GC. */
81 /* Process a slot of an htab by deleting it if it has not been marked. */
84 ggc_htab_delete (void **slot
, void *info
)
86 const struct ggc_cache_tab
*r
= (const struct ggc_cache_tab
*) info
;
88 if (! (*r
->marked_p
) (*slot
))
89 htab_clear_slot (*r
->base
, slot
);
96 /* Iterate through all registered roots and mark each element. */
101 const struct ggc_root_tab
*const *rt
;
102 const struct ggc_root_tab
*rti
;
103 const struct ggc_cache_tab
*const *ct
;
104 const struct ggc_cache_tab
*cti
;
107 for (rt
= gt_ggc_deletable_rtab
; *rt
; rt
++)
108 for (rti
= *rt
; rti
->base
!= NULL
; rti
++)
109 memset (rti
->base
, 0, rti
->stride
);
111 for (rt
= gt_ggc_rtab
; *rt
; rt
++)
112 for (rti
= *rt
; rti
->base
!= NULL
; rti
++)
113 for (i
= 0; i
< rti
->nelt
; i
++)
114 (*rti
->cb
)(*(void **)((char *)rti
->base
+ rti
->stride
* i
));
116 ggc_mark_stringpool ();
118 /* Now scan all hash tables that have objects which are to be deleted if
119 they are not already marked. */
120 for (ct
= gt_ggc_cache_rtab
; *ct
; ct
++)
121 for (cti
= *ct
; cti
->base
!= NULL
; cti
++)
124 ggc_set_mark (*cti
->base
);
125 htab_traverse_noresize (*cti
->base
, ggc_htab_delete
, (void *) cti
);
126 ggc_set_mark ((*cti
->base
)->entries
);
/* Allocate a block of memory of SIZE bytes from the GC heap, then
   clear it.  Returns the zeroed block.  */

void *
ggc_alloc_cleared (size_t size)
{
  void *buf = ggc_alloc (size);
  memset (buf, 0, size);
  return buf;
}
/* Resize a block of memory X to SIZE bytes, possibly re-allocating it.
   A NULL X behaves like ggc_alloc (SIZE).  If the underlying pool is
   already big enough, the same pointer is returned; otherwise the
   contents are copied to a fresh allocation.  */

void *
ggc_realloc (void *x, size_t size)
{
  void *r;
  size_t old_size;

  if (x == NULL)
    return ggc_alloc (size);

  old_size = ggc_get_size (x);
  if (size <= old_size)
    {
      /* Mark the unwanted memory as unaccessible.  We also need to make
	 the "new" size accessible, since ggc_get_size returns the size of
	 the pool, not the size of the individually allocated object, the
	 size which was previously made accessible.  Unfortunately, we
	 don't know that previously allocated size.  Without that
	 knowledge we have to lose some initialization-tracking for the
	 old parts of the object.  An alternative is to mark the whole
	 old_size as reachable, but that would lose tracking of writes
	 after the end of the object (by small offsets).  Discard the
	 handle to avoid handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) x + size,
						old_size - size));
      VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (x, size));
      return x;
    }

  r = ggc_alloc (size);

  /* Since ggc_get_size returns the size of the pool, not the size of the
     individually allocated object, we'd access parts of the old object
     that were marked invalid with the memcpy below.  We lose a bit of the
     initialization-tracking since some of it may be uninitialized.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (x, old_size));

  memcpy (r, x, old_size);

  /* The old object is not supposed to be used anymore.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (x, old_size));

  return r;
}
/* Like ggc_alloc_cleared, but performs a multiplication.
   NOTE(review): no overflow check on S1 * S2 — callers are expected to
   pass sane sizes; confirm against upstream usage before tightening.  */

void *
ggc_calloc (size_t s1, size_t s2)
{
  return ggc_alloc_cleared (s1 * s2);
}
/* These are for splay_tree_new_ggc.  Allocate SZ bytes from the GC
   heap; NL must be NULL (the splay-tree allocator passes no state).  */

void *
ggc_splay_alloc (int sz, void *nl)
{
  if (nl != NULL)
    abort ();
  return ggc_alloc (sz);
}
201 ggc_splay_dont_free (void * x ATTRIBUTE_UNUSED
, void *nl
)
207 /* Print statistics that are independent of the collector in use. */
208 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
210 : ((x) < 1024*1024*10 \
212 : (x) / (1024*1024))))
213 #define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
216 ggc_print_common_statistics (FILE *stream ATTRIBUTE_UNUSED
,
217 ggc_statistics
*stats
)
219 /* Set the pointer so that during collection we will actually gather
223 /* Then do one collection to fill in the statistics. */
226 /* At present, we don't really gather any interesting statistics. */
228 /* Don't gather statistics any more. */
232 /* Functions for saving and restoring GCable memory to disk. */
234 static htab_t saving_htab
;
239 void *note_ptr_cookie
;
240 gt_note_pointers note_ptr_fn
;
241 gt_handle_reorder reorder_fn
;
246 #define POINTER_HASH(x) (hashval_t)((long)x >> 3)
248 /* Register an object in the hash table. */
251 gt_pch_note_object (void *obj
, void *note_ptr_cookie
,
252 gt_note_pointers note_ptr_fn
)
254 struct ptr_data
**slot
;
256 if (obj
== NULL
|| obj
== (void *) 1)
259 slot
= (struct ptr_data
**)
260 htab_find_slot_with_hash (saving_htab
, obj
, POINTER_HASH (obj
),
264 if ((*slot
)->note_ptr_fn
!= note_ptr_fn
265 || (*slot
)->note_ptr_cookie
!= note_ptr_cookie
)
270 *slot
= xcalloc (sizeof (struct ptr_data
), 1);
272 (*slot
)->note_ptr_fn
= note_ptr_fn
;
273 (*slot
)->note_ptr_cookie
= note_ptr_cookie
;
274 if (note_ptr_fn
== gt_pch_p_S
)
275 (*slot
)->size
= strlen (obj
) + 1;
277 (*slot
)->size
= ggc_get_size (obj
);
281 /* Register an object in the hash table. */
284 gt_pch_note_reorder (void *obj
, void *note_ptr_cookie
,
285 gt_handle_reorder reorder_fn
)
287 struct ptr_data
*data
;
289 if (obj
== NULL
|| obj
== (void *) 1)
292 data
= htab_find_with_hash (saving_htab
, obj
, POINTER_HASH (obj
));
294 || data
->note_ptr_cookie
!= note_ptr_cookie
)
297 data
->reorder_fn
= reorder_fn
;
300 /* Hash and equality functions for saving_htab, callbacks for htab_create. */
303 saving_htab_hash (const void *p
)
305 return POINTER_HASH (((struct ptr_data
*)p
)->obj
);
309 saving_htab_eq (const void *p1
, const void *p2
)
311 return ((struct ptr_data
*)p1
)->obj
== p2
;
/* Handy state for the traversal functions.  */

struct traversal_state
{
  FILE *f;			/* The PCH file being written.  */
  struct ggc_pch_data *d;	/* Collector-specific PCH state.  */
  size_t count;			/* Number of objects to write.  */
  struct ptr_data **ptrs;	/* All objects, sorted by new address.  */
  size_t ptrs_i;		/* Fill index into PTRS.  */
};
325 /* Callbacks for htab_traverse. */
328 call_count (void **slot
, void *state_p
)
330 struct ptr_data
*d
= (struct ptr_data
*)*slot
;
331 struct traversal_state
*state
= (struct traversal_state
*)state_p
;
333 ggc_pch_count_object (state
->d
, d
->obj
, d
->size
, d
->note_ptr_fn
== gt_pch_p_S
);
339 call_alloc (void **slot
, void *state_p
)
341 struct ptr_data
*d
= (struct ptr_data
*)*slot
;
342 struct traversal_state
*state
= (struct traversal_state
*)state_p
;
344 d
->new_addr
= ggc_pch_alloc_object (state
->d
, d
->obj
, d
->size
, d
->note_ptr_fn
== gt_pch_p_S
);
345 state
->ptrs
[state
->ptrs_i
++] = d
;
349 /* Callback for qsort. */
352 compare_ptr_data (const void *p1_p
, const void *p2_p
)
354 struct ptr_data
*p1
= *(struct ptr_data
*const *)p1_p
;
355 struct ptr_data
*p2
= *(struct ptr_data
*const *)p2_p
;
356 return (((size_t)p1
->new_addr
> (size_t)p2
->new_addr
)
357 - ((size_t)p1
->new_addr
< (size_t)p2
->new_addr
));
360 /* Callbacks for note_ptr_fn. */
363 relocate_ptrs (void *ptr_p
, void *state_p
)
365 void **ptr
= (void **)ptr_p
;
366 struct traversal_state
*state ATTRIBUTE_UNUSED
367 = (struct traversal_state
*)state_p
;
368 struct ptr_data
*result
;
370 if (*ptr
== NULL
|| *ptr
== (void *)1)
373 result
= htab_find_with_hash (saving_htab
, *ptr
, POINTER_HASH (*ptr
));
376 *ptr
= result
->new_addr
;
379 /* Write out, after relocation, the pointers in TAB. */
381 write_pch_globals (const struct ggc_root_tab
* const *tab
,
382 struct traversal_state
*state
)
384 const struct ggc_root_tab
*const *rt
;
385 const struct ggc_root_tab
*rti
;
388 for (rt
= tab
; *rt
; rt
++)
389 for (rti
= *rt
; rti
->base
!= NULL
; rti
++)
390 for (i
= 0; i
< rti
->nelt
; i
++)
392 void *ptr
= *(void **)((char *)rti
->base
+ rti
->stride
* i
);
393 struct ptr_data
*new_ptr
;
394 if (ptr
== NULL
|| ptr
== (void *)1)
396 if (fwrite (&ptr
, sizeof (void *), 1, state
->f
)
398 fatal_error ("can't write PCH file: %m");
402 new_ptr
= htab_find_with_hash (saving_htab
, ptr
,
404 if (fwrite (&new_ptr
->new_addr
, sizeof (void *), 1, state
->f
)
406 fatal_error ("can't write PCH file: %m");
/* Hold the information we need to mmap the file back in.  */

struct mmap_info
{
  size_t offset;		/* File offset of the mmapped area.  */
  size_t size;			/* Size of the mmapped area.  */
  void *preferred_base;		/* Address the area was written for.  */
};
420 /* Write out the state of the compiler to F. */
423 gt_pch_save (FILE *f
)
425 const struct ggc_root_tab
*const *rt
;
426 const struct ggc_root_tab
*rti
;
428 struct traversal_state state
;
429 char *this_object
= NULL
;
430 size_t this_object_size
= 0;
431 struct mmap_info mmi
;
432 size_t page_size
= getpagesize();
434 gt_pch_save_stringpool ();
436 saving_htab
= htab_create (50000, saving_htab_hash
, saving_htab_eq
, free
);
438 for (rt
= gt_ggc_rtab
; *rt
; rt
++)
439 for (rti
= *rt
; rti
->base
!= NULL
; rti
++)
440 for (i
= 0; i
< rti
->nelt
; i
++)
441 (*rti
->pchw
)(*(void **)((char *)rti
->base
+ rti
->stride
* i
));
443 for (rt
= gt_pch_cache_rtab
; *rt
; rt
++)
444 for (rti
= *rt
; rti
->base
!= NULL
; rti
++)
445 for (i
= 0; i
< rti
->nelt
; i
++)
446 (*rti
->pchw
)(*(void **)((char *)rti
->base
+ rti
->stride
* i
));
448 /* Prepare the objects for writing, determine addresses and such. */
450 state
.d
= init_ggc_pch();
452 htab_traverse (saving_htab
, call_count
, &state
);
454 mmi
.size
= ggc_pch_total_size (state
.d
);
456 /* Try to arrange things so that no relocation is necessary, but
457 don't try very hard. On most platforms, this will always work,
458 and on the rest it's a lot of work to do better.
459 (The extra work goes in HOST_HOOKS_GT_PCH_GET_ADDRESS and
460 HOST_HOOKS_GT_PCH_USE_ADDRESS.) */
461 mmi
.preferred_base
= host_hooks
.gt_pch_get_address (mmi
.size
);
464 if (mmi
.preferred_base
== NULL
)
466 mmi
.preferred_base
= mmap (NULL
, mmi
.size
,
467 PROT_READ
| PROT_WRITE
, MAP_PRIVATE
,
468 fileno (state
.f
), 0);
469 if (mmi
.preferred_base
== (void *) MAP_FAILED
)
470 mmi
.preferred_base
= NULL
;
472 munmap (mmi
.preferred_base
, mmi
.size
);
474 #endif /* HAVE_MMAP_FILE */
476 ggc_pch_this_base (state
.d
, mmi
.preferred_base
);
478 state
.ptrs
= xmalloc (state
.count
* sizeof (*state
.ptrs
));
480 htab_traverse (saving_htab
, call_alloc
, &state
);
481 qsort (state
.ptrs
, state
.count
, sizeof (*state
.ptrs
), compare_ptr_data
);
483 /* Write out all the scalar variables. */
484 for (rt
= gt_pch_scalar_rtab
; *rt
; rt
++)
485 for (rti
= *rt
; rti
->base
!= NULL
; rti
++)
486 if (fwrite (rti
->base
, rti
->stride
, 1, f
) != 1)
487 fatal_error ("can't write PCH file: %m");
489 /* Write out all the global pointers, after translation. */
490 write_pch_globals (gt_ggc_rtab
, &state
);
491 write_pch_globals (gt_pch_cache_rtab
, &state
);
493 ggc_pch_prepare_write (state
.d
, state
.f
);
495 /* Pad the PCH file so that the mmapped area starts on a page boundary. */
498 o
= ftell (state
.f
) + sizeof (mmi
);
500 fatal_error ("can't get position in PCH file: %m");
501 mmi
.offset
= page_size
- o
% page_size
;
502 if (mmi
.offset
== page_size
)
506 if (fwrite (&mmi
, sizeof (mmi
), 1, state
.f
) != 1)
507 fatal_error ("can't write PCH file: %m");
509 && fseek (state
.f
, mmi
.offset
, SEEK_SET
) != 0)
510 fatal_error ("can't write padding to PCH file: %m");
512 /* Actually write out the objects. */
513 for (i
= 0; i
< state
.count
; i
++)
515 if (this_object_size
< state
.ptrs
[i
]->size
)
517 this_object_size
= state
.ptrs
[i
]->size
;
518 this_object
= xrealloc (this_object
, this_object_size
);
520 memcpy (this_object
, state
.ptrs
[i
]->obj
, state
.ptrs
[i
]->size
);
521 if (state
.ptrs
[i
]->reorder_fn
!= NULL
)
522 state
.ptrs
[i
]->reorder_fn (state
.ptrs
[i
]->obj
,
523 state
.ptrs
[i
]->note_ptr_cookie
,
524 relocate_ptrs
, &state
);
525 state
.ptrs
[i
]->note_ptr_fn (state
.ptrs
[i
]->obj
,
526 state
.ptrs
[i
]->note_ptr_cookie
,
527 relocate_ptrs
, &state
);
528 ggc_pch_write_object (state
.d
, state
.f
, state
.ptrs
[i
]->obj
,
529 state
.ptrs
[i
]->new_addr
, state
.ptrs
[i
]->size
, state
.ptrs
[i
]->note_ptr_fn
== gt_pch_p_S
);
530 if (state
.ptrs
[i
]->note_ptr_fn
!= gt_pch_p_S
)
531 memcpy (state
.ptrs
[i
]->obj
, this_object
, state
.ptrs
[i
]->size
);
533 ggc_pch_finish (state
.d
, state
.f
);
534 gt_pch_fixup_stringpool ();
537 htab_delete (saving_htab
);
540 /* Read the state of the compiler back in from F. */
543 gt_pch_restore (FILE *f
)
545 const struct ggc_root_tab
*const *rt
;
546 const struct ggc_root_tab
*rti
;
548 struct mmap_info mmi
;
552 /* Delete any deletable objects. This makes ggc_pch_read much
553 faster, as it can be sure that no GCable objects remain other
554 than the ones just read in. */
555 for (rt
= gt_ggc_deletable_rtab
; *rt
; rt
++)
556 for (rti
= *rt
; rti
->base
!= NULL
; rti
++)
557 memset (rti
->base
, 0, rti
->stride
);
559 /* Read in all the scalar variables. */
560 for (rt
= gt_pch_scalar_rtab
; *rt
; rt
++)
561 for (rti
= *rt
; rti
->base
!= NULL
; rti
++)
562 if (fread (rti
->base
, rti
->stride
, 1, f
) != 1)
563 fatal_error ("can't read PCH file: %m");
565 /* Read in all the global pointers, in 6 easy loops. */
566 for (rt
= gt_ggc_rtab
; *rt
; rt
++)
567 for (rti
= *rt
; rti
->base
!= NULL
; rti
++)
568 for (i
= 0; i
< rti
->nelt
; i
++)
569 if (fread ((char *)rti
->base
+ rti
->stride
* i
,
570 sizeof (void *), 1, f
) != 1)
571 fatal_error ("can't read PCH file: %m");
573 for (rt
= gt_pch_cache_rtab
; *rt
; rt
++)
574 for (rti
= *rt
; rti
->base
!= NULL
; rti
++)
575 for (i
= 0; i
< rti
->nelt
; i
++)
576 if (fread ((char *)rti
->base
+ rti
->stride
* i
,
577 sizeof (void *), 1, f
) != 1)
578 fatal_error ("can't read PCH file: %m");
580 if (fread (&mmi
, sizeof (mmi
), 1, f
) != 1)
581 fatal_error ("can't read PCH file: %m");
583 if (host_hooks
.gt_pch_use_address (mmi
.preferred_base
, mmi
.size
))
588 mmap_result
= mmap (mmi
.preferred_base
, mmi
.size
,
589 PROT_READ
| PROT_WRITE
, MAP_PRIVATE
| MAP_FIXED
,
590 fileno (f
), mmi
.offset
);
592 /* The file might not be mmap-able. */
593 needs_read
= mmap_result
== (void *) MAP_FAILED
;
595 /* Sanity check for broken MAP_FIXED. */
596 if (! needs_read
&& mmap_result
!= mmi
.preferred_base
)
601 addr
= mmi
.preferred_base
;
606 addr
= mmap (mmi
.preferred_base
, mmi
.size
,
607 PROT_READ
| PROT_WRITE
, MAP_PRIVATE
,
608 fileno (f
), mmi
.offset
);
611 if (addr
!= mmi
.preferred_base
)
613 size_t page_size
= getpagesize();
616 if (addr
!= (void *) MAP_FAILED
)
617 munmap (addr
, mmi
.size
);
619 /* We really want to be mapped at mmi.preferred_base
620 so we're going to resort to MAP_FIXED. But before,
621 make sure that we can do so without destroying a
622 previously mapped area, by looping over all pages
623 that would be affected by the fixed mapping. */
626 for (i
= 0; i
< mmi
.size
; i
+= page_size
)
627 if (mincore ((char *)mmi
.preferred_base
+ i
, page_size
,
628 (void *)&one_byte
) == -1
630 continue; /* The page is not mapped. */
635 addr
= mmap (mmi
.preferred_base
, mmi
.size
,
636 PROT_READ
| PROT_WRITE
, MAP_PRIVATE
| MAP_FIXED
,
637 fileno (f
), mmi
.offset
);
639 #endif /* HAVE_MINCORE */
641 needs_read
= addr
== (void *) MAP_FAILED
;
643 #else /* HAVE_MMAP_FILE */
645 #endif /* HAVE_MMAP_FILE */
647 addr
= xmalloc (mmi
.size
);
652 if (fseek (f
, mmi
.offset
, SEEK_SET
) != 0
653 || fread (&mmi
, mmi
.size
, 1, f
) != 1)
654 fatal_error ("can't read PCH file: %m");
656 else if (fseek (f
, mmi
.offset
+ mmi
.size
, SEEK_SET
) != 0)
657 fatal_error ("can't read PCH file: %m");
659 ggc_pch_read (f
, addr
);
661 if (addr
!= mmi
.preferred_base
)
663 for (rt
= gt_ggc_rtab
; *rt
; rt
++)
664 for (rti
= *rt
; rti
->base
!= NULL
; rti
++)
665 for (i
= 0; i
< rti
->nelt
; i
++)
667 char **ptr
= (char **)((char *)rti
->base
+ rti
->stride
* i
);
669 *ptr
+= (size_t)addr
- (size_t)mmi
.preferred_base
;
672 for (rt
= gt_pch_cache_rtab
; *rt
; rt
++)
673 for (rti
= *rt
; rti
->base
!= NULL
; rti
++)
674 for (i
= 0; i
< rti
->nelt
; i
++)
676 char **ptr
= (char **)((char *)rti
->base
+ rti
->stride
* i
);
678 *ptr
+= (size_t)addr
- (size_t)mmi
.preferred_base
;
681 sorry ("had to relocate PCH");
684 gt_pch_restore_stringpool ();
/* Modify the bound LIMIT based on rlimits.  Keep the smallest number
   found among LIMIT and the current soft limits for RSS, data, and
   address space (when getrlimit and the respective RLIMIT_* macros are
   available).  Returns the resulting bound in bytes.  */

static double
ggc_rlimit_bound (double limit)
{
#if defined(HAVE_GETRLIMIT)
  struct rlimit rlim;
# ifdef RLIMIT_RSS
  if (getrlimit (RLIMIT_RSS, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# endif
# ifdef RLIMIT_DATA
  if (getrlimit (RLIMIT_DATA, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# endif
# ifdef RLIMIT_AS
  if (getrlimit (RLIMIT_AS, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# endif
#endif /* HAVE_GETRLIMIT */

  return limit;
}
/* Heuristic to set a default for GGC_MIN_EXPAND: a percentage derived
   from physical RAM (clamped by rlimits), between 30 and 100.  */

int
ggc_min_expand_heuristic (void)
{
  double min_expand = physmem_total();

  /* Adjust for rlimits.  */
  min_expand = ggc_rlimit_bound (min_expand);

  /* The heuristic is a percentage equal to 30% + 70%*(RAM/1GB), yielding
     a lower bound of 30% and an upper bound of 100% (when RAM >= 1GB).  */
  min_expand /= 1024*1024*1024;
  min_expand *= 70;
  min_expand = MIN (min_expand, 70);
  min_expand += 30;

  return min_expand;
}
/* Heuristic to set a default for GGC_MIN_HEAPSIZE, in kilobytes:
   RAM/8 (clamped by rlimits), bounded to [4M, 128M].  */

int
ggc_min_heapsize_heuristic (void)
{
  double min_heap_kbytes = physmem_total();

  /* Adjust for rlimits.  */
  min_heap_kbytes = ggc_rlimit_bound (min_heap_kbytes);

  min_heap_kbytes /= 1024; /* Convert to Kbytes.  */

  /* The heuristic is RAM/8, with a lower bound of 4M and an upper
     bound of 128M (when RAM >= 1GB).  */
  min_heap_kbytes /= 8;
  min_heap_kbytes = MAX (min_heap_kbytes, 4 * 1024);
  min_heap_kbytes = MIN (min_heap_kbytes, 128 * 1024);

  return min_heap_kbytes;
}
/* Install the heuristic defaults for the GC tuning params, unless a
   GC-checking build wants the aggressive defaults instead.  */

void
init_ggc_heuristics (void)
{
#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
  set_param_value ("ggc-min-expand", ggc_min_expand_heuristic());
  set_param_value ("ggc-min-heapsize", ggc_min_heapsize_heuristic());
#endif
}