Merge branch 'master' of ssh://crater.dragonflybsd.org/repository/git/dragonfly
[dragonfly.git] / contrib / gcc-3.4 / gcc / ggc-common.c
blob583059173bea1ab67556d1162a9e0d2b7bf88569
1 /* Simple garbage collection for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003
3 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
22 /* Generic garbage collection (GC) functions and data, not specific to
23 any particular GC implementation. */
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "hashtab.h"
29 #include "ggc.h"
30 #include "toplev.h"
31 #include "params.h"
32 #include "hosthooks.h"
33 #include "hosthooks-def.h"
35 #ifdef HAVE_SYS_RESOURCE_H
36 # include <sys/resource.h>
37 #endif
39 #ifdef HAVE_MMAP_FILE
40 # include <sys/mman.h>
41 # ifdef HAVE_MINCORE
42 /* This is on Solaris. */
43 # include <sys/types.h>
44 # endif
45 #endif
47 #ifndef MAP_FAILED
48 # define MAP_FAILED ((void *)-1)
49 #endif
51 #ifdef ENABLE_VALGRIND_CHECKING
52 # ifdef HAVE_VALGRIND_MEMCHECK_H
53 # include <valgrind/memcheck.h>
54 # elif defined HAVE_MEMCHECK_H
55 # include <memcheck.h>
56 # else
57 # include <valgrind.h>
58 # endif
59 #else
60 /* Avoid #ifdef:s when we can help it. */
61 #define VALGRIND_DISCARD(x)
62 #endif
64 /* Statistics about the allocation. */
65 static ggc_statistics *ggc_stats;
67 struct traversal_state;
69 static int ggc_htab_delete (void **, void *);
70 static hashval_t saving_htab_hash (const void *);
71 static int saving_htab_eq (const void *, const void *);
72 static int call_count (void **, void *);
73 static int call_alloc (void **, void *);
74 static int compare_ptr_data (const void *, const void *);
75 static void relocate_ptrs (void *, void *);
76 static void write_pch_globals (const struct ggc_root_tab * const *tab,
77 struct traversal_state *state);
78 static double ggc_rlimit_bound (double);
80 /* Maintain global roots that are preserved during GC. */
82 /* Process a slot of an htab by deleting it if it has not been marked. */
84 static int
85 ggc_htab_delete (void **slot, void *info)
87 const struct ggc_cache_tab *r = (const struct ggc_cache_tab *) info;
89 if (! (*r->marked_p) (*slot))
90 htab_clear_slot (*r->base, slot);
91 else
92 (*r->cb) (*slot);
94 return 1;
/* Iterate through all registered roots and mark each element.  */

void
ggc_mark_roots (void)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  const struct ggc_cache_tab *const *ct;
  const struct ggc_cache_tab *cti;
  size_t i;

  /* Deletable roots are simply cleared rather than marked, so the
     collector is free to reclaim whatever they pointed to.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Mark every pointer slot of every ordinary root table entry.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	(*rti->cb)(*(void **)((char *)rti->base + rti->stride * i));

  ggc_mark_stringpool ();

  /* Now scan all hash tables that have objects which are to be deleted if
     they are not already marked.  The table itself and its entries array
     are marked so they survive the collection.  */
  for (ct = gt_ggc_cache_rtab; *ct; ct++)
    for (cti = *ct; cti->base != NULL; cti++)
      if (*cti->base)
	{
	  ggc_set_mark (*cti->base);
	  htab_traverse_noresize (*cti->base, ggc_htab_delete, (void *) cti);
	  ggc_set_mark ((*cti->base)->entries);
	}
}
/* Allocate SIZE bytes of GC'd memory and zero-fill the block.  */
void *
ggc_alloc_cleared (size_t size)
{
  void *p = ggc_alloc (size);

  memset (p, 0, size);
  return p;
}
/* Resize a block of memory, possibly re-allocating it.  Returns either
   X itself (when shrinking or keeping the size) or a fresh block holding
   a copy of the old contents.  */
void *
ggc_realloc (void *x, size_t size)
{
  void *r;
  size_t old_size;

  /* Degenerate to a plain allocation when there is nothing to copy.  */
  if (x == NULL)
    return ggc_alloc (size);

  /* NOTE: this is the size of the containing pool object, which may be
     larger than what was originally requested — see the comment below.  */
  old_size = ggc_get_size (x);
  if (size <= old_size)
    {
      /* Mark the unwanted memory as unaccessible.  We also need to make
	 the "new" size accessible, since ggc_get_size returns the size of
	 the pool, not the size of the individually allocated object, the
	 size which was previously made accessible.  Unfortunately, we
	 don't know that previously allocated size.  Without that
	 knowledge we have to lose some initialization-tracking for the
	 old parts of the object.  An alternative is to mark the whole
	 old_size as reachable, but that would lose tracking of writes
	 after the end of the object (by small offsets).  Discard the
	 handle to avoid handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) x + size,
						old_size - size));
      VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (x, size));
      return x;
    }

  r = ggc_alloc (size);

  /* Since ggc_get_size returns the size of the pool, not the size of the
     individually allocated object, we'd access parts of the old object
     that were marked invalid with the memcpy below.  We lose a bit of the
     initialization-tracking since some of it may be uninitialized.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (x, old_size));

  memcpy (r, x, old_size);

  /* The old object is not supposed to be used anymore.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (x, old_size));

  return r;
}
/* Like ggc_alloc_cleared, but performs a multiplication.
   Allocates S1 * S2 zeroed bytes of GC'd memory.  */
void *
ggc_calloc (size_t s1, size_t s2)
{
  /* Guard against multiplication overflow, which would otherwise wrap
     around and silently return a too-small block (cf. calloc, which
     performs the same check).  */
  if (s2 != 0 && s1 > (size_t) -1 / s2)
    abort ();
  return ggc_alloc_cleared (s1 * s2);
}
/* Allocation hook for splay_tree_new_ggc.  NL is the splay tree's
   user-data pointer, which must be NULL here.  */
void *
ggc_splay_alloc (int sz, void *nl)
{
  if (nl)
    abort ();

  return ggc_alloc (sz);
}
201 void
202 ggc_splay_dont_free (void * x ATTRIBUTE_UNUSED, void *nl)
204 if (nl != NULL)
205 abort ();
/* Print statistics that are independent of the collector in use.  */
/* SCALE reduces a byte count X to a human-friendly magnitude (bytes,
   kilobytes, or megabytes); LABEL yields the matching unit suffix
   (' ', 'k', or 'M').  The two must be kept in sync.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
/* Print GC statistics that are independent of the collector in use.
   STREAM is currently unused; STATS is filled in during a forced
   collection.  */
void
ggc_print_common_statistics (FILE *stream ATTRIBUTE_UNUSED,
			     ggc_statistics *stats)
{
  /* Set the pointer so that during collection we will actually gather
     the statistics.  */
  ggc_stats = stats;

  /* Then do one collection to fill in the statistics.  */
  ggc_collect ();

  /* At present, we don't really gather any interesting statistics.  */

  /* Don't gather statistics any more.  */
  ggc_stats = NULL;
}
233 /* Functions for saving and restoring GCable memory to disk. */
235 static htab_t saving_htab;
237 struct ptr_data
239 void *obj;
240 void *note_ptr_cookie;
241 gt_note_pointers note_ptr_fn;
242 gt_handle_reorder reorder_fn;
243 size_t size;
244 void *new_addr;
247 #define POINTER_HASH(x) (hashval_t)((long)x >> 3)
249 /* Register an object in the hash table. */
252 gt_pch_note_object (void *obj, void *note_ptr_cookie,
253 gt_note_pointers note_ptr_fn)
255 struct ptr_data **slot;
257 if (obj == NULL || obj == (void *) 1)
258 return 0;
260 slot = (struct ptr_data **)
261 htab_find_slot_with_hash (saving_htab, obj, POINTER_HASH (obj),
262 INSERT);
263 if (*slot != NULL)
265 if ((*slot)->note_ptr_fn != note_ptr_fn
266 || (*slot)->note_ptr_cookie != note_ptr_cookie)
267 abort ();
268 return 0;
271 *slot = xcalloc (sizeof (struct ptr_data), 1);
272 (*slot)->obj = obj;
273 (*slot)->note_ptr_fn = note_ptr_fn;
274 (*slot)->note_ptr_cookie = note_ptr_cookie;
275 if (note_ptr_fn == gt_pch_p_S)
276 (*slot)->size = strlen (obj) + 1;
277 else
278 (*slot)->size = ggc_get_size (obj);
279 return 1;
282 /* Register an object in the hash table. */
284 void
285 gt_pch_note_reorder (void *obj, void *note_ptr_cookie,
286 gt_handle_reorder reorder_fn)
288 struct ptr_data *data;
290 if (obj == NULL || obj == (void *) 1)
291 return;
293 data = htab_find_with_hash (saving_htab, obj, POINTER_HASH (obj));
294 if (data == NULL
295 || data->note_ptr_cookie != note_ptr_cookie)
296 abort ();
298 data->reorder_fn = reorder_fn;
301 /* Hash and equality functions for saving_htab, callbacks for htab_create. */
303 static hashval_t
304 saving_htab_hash (const void *p)
306 return POINTER_HASH (((struct ptr_data *)p)->obj);
309 static int
310 saving_htab_eq (const void *p1, const void *p2)
312 return ((struct ptr_data *)p1)->obj == p2;
/* Handy state for the traversal functions.  */

struct traversal_state
{
  FILE *f;			/* The PCH file being written.  */
  struct ggc_pch_data *d;	/* Collector-specific PCH state.  */
  size_t count;			/* Number of objects in saving_htab.  */
  struct ptr_data **ptrs;	/* Flat array of all registered objects.  */
  size_t ptrs_i;		/* Fill index into PTRS.  */
};
326 /* Callbacks for htab_traverse. */
328 static int
329 call_count (void **slot, void *state_p)
331 struct ptr_data *d = (struct ptr_data *)*slot;
332 struct traversal_state *state = (struct traversal_state *)state_p;
334 ggc_pch_count_object (state->d, d->obj, d->size, d->note_ptr_fn == gt_pch_p_S);
335 state->count++;
336 return 1;
339 static int
340 call_alloc (void **slot, void *state_p)
342 struct ptr_data *d = (struct ptr_data *)*slot;
343 struct traversal_state *state = (struct traversal_state *)state_p;
345 d->new_addr = ggc_pch_alloc_object (state->d, d->obj, d->size, d->note_ptr_fn == gt_pch_p_S);
346 state->ptrs[state->ptrs_i++] = d;
347 return 1;
350 /* Callback for qsort. */
352 static int
353 compare_ptr_data (const void *p1_p, const void *p2_p)
355 struct ptr_data *p1 = *(struct ptr_data *const *)p1_p;
356 struct ptr_data *p2 = *(struct ptr_data *const *)p2_p;
357 return (((size_t)p1->new_addr > (size_t)p2->new_addr)
358 - ((size_t)p1->new_addr < (size_t)p2->new_addr));
361 /* Callbacks for note_ptr_fn. */
363 static void
364 relocate_ptrs (void *ptr_p, void *state_p)
366 void **ptr = (void **)ptr_p;
367 struct traversal_state *state ATTRIBUTE_UNUSED
368 = (struct traversal_state *)state_p;
369 struct ptr_data *result;
371 if (*ptr == NULL || *ptr == (void *)1)
372 return;
374 result = htab_find_with_hash (saving_htab, *ptr, POINTER_HASH (*ptr));
375 if (result == NULL)
376 abort ();
377 *ptr = result->new_addr;
/* Write out, after relocation, the pointers in TAB.  For each pointer
   slot the value emitted is the address the target will have in the
   restored image; NULL and the dummy value 1 are written unchanged.  */
static void
write_pch_globals (const struct ggc_root_tab * const *tab,
		   struct traversal_state *state)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;

  for (rt = tab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	{
	  void *ptr = *(void **)((char *)rti->base + rti->stride * i);
	  struct ptr_data *new_ptr;
	  if (ptr == NULL || ptr == (void *)1)
	    {
	      if (fwrite (&ptr, sizeof (void *), 1, state->f)
		  != 1)
		fatal_error ("can't write PCH file: %m");
	    }
	  else
	    {
	      /* Every non-trivial global must have been registered in
		 saving_htab by the pchw walk in gt_pch_save.  */
	      new_ptr = htab_find_with_hash (saving_htab, ptr,
					     POINTER_HASH (ptr));
	      if (fwrite (&new_ptr->new_addr, sizeof (void *), 1, state->f)
		  != 1)
		fatal_error ("can't write PCH file: %m");
	    }
	}
}
/* Hold the information we need to mmap the file back in.  Written
   verbatim into the PCH file by gt_pch_save and read back by
   gt_pch_restore.  */

struct mmap_info
{
  size_t offset;	/* Page-aligned file offset of the object data.  */
  size_t size;		/* Total size of the object data in bytes.  */
  void *preferred_base;	/* Address the data was relocated for.  */
};
421 /* Write out the state of the compiler to F. */
423 void
424 gt_pch_save (FILE *f)
426 const struct ggc_root_tab *const *rt;
427 const struct ggc_root_tab *rti;
428 size_t i;
429 struct traversal_state state;
430 char *this_object = NULL;
431 size_t this_object_size = 0;
432 struct mmap_info mmi;
433 size_t page_size = getpagesize();
435 gt_pch_save_stringpool ();
437 saving_htab = htab_create (50000, saving_htab_hash, saving_htab_eq, free);
439 for (rt = gt_ggc_rtab; *rt; rt++)
440 for (rti = *rt; rti->base != NULL; rti++)
441 for (i = 0; i < rti->nelt; i++)
442 (*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));
444 for (rt = gt_pch_cache_rtab; *rt; rt++)
445 for (rti = *rt; rti->base != NULL; rti++)
446 for (i = 0; i < rti->nelt; i++)
447 (*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));
449 /* Prepare the objects for writing, determine addresses and such. */
450 state.f = f;
451 state.d = init_ggc_pch();
452 state.count = 0;
453 htab_traverse (saving_htab, call_count, &state);
455 mmi.size = ggc_pch_total_size (state.d);
457 /* Try to arrange things so that no relocation is necessary, but
458 don't try very hard. On most platforms, this will always work,
459 and on the rest it's a lot of work to do better.
460 (The extra work goes in HOST_HOOKS_GT_PCH_GET_ADDRESS and
461 HOST_HOOKS_GT_PCH_USE_ADDRESS.) */
462 mmi.preferred_base = host_hooks.gt_pch_get_address (mmi.size, fileno (f));
464 ggc_pch_this_base (state.d, mmi.preferred_base);
466 state.ptrs = xmalloc (state.count * sizeof (*state.ptrs));
467 state.ptrs_i = 0;
468 htab_traverse (saving_htab, call_alloc, &state);
469 qsort (state.ptrs, state.count, sizeof (*state.ptrs), compare_ptr_data);
471 /* Write out all the scalar variables. */
472 for (rt = gt_pch_scalar_rtab; *rt; rt++)
473 for (rti = *rt; rti->base != NULL; rti++)
474 if (fwrite (rti->base, rti->stride, 1, f) != 1)
475 fatal_error ("can't write PCH file: %m");
477 /* Write out all the global pointers, after translation. */
478 write_pch_globals (gt_ggc_rtab, &state);
479 write_pch_globals (gt_pch_cache_rtab, &state);
481 ggc_pch_prepare_write (state.d, state.f);
483 /* Pad the PCH file so that the mmapped area starts on a page boundary. */
485 long o;
486 o = ftell (state.f) + sizeof (mmi);
487 if (o == -1)
488 fatal_error ("can't get position in PCH file: %m");
489 mmi.offset = page_size - o % page_size;
490 if (mmi.offset == page_size)
491 mmi.offset = 0;
492 mmi.offset += o;
494 if (fwrite (&mmi, sizeof (mmi), 1, state.f) != 1)
495 fatal_error ("can't write PCH file: %m");
496 if (mmi.offset != 0
497 && fseek (state.f, mmi.offset, SEEK_SET) != 0)
498 fatal_error ("can't write padding to PCH file: %m");
500 /* Actually write out the objects. */
501 for (i = 0; i < state.count; i++)
503 if (this_object_size < state.ptrs[i]->size)
505 this_object_size = state.ptrs[i]->size;
506 this_object = xrealloc (this_object, this_object_size);
508 memcpy (this_object, state.ptrs[i]->obj, state.ptrs[i]->size);
509 if (state.ptrs[i]->reorder_fn != NULL)
510 state.ptrs[i]->reorder_fn (state.ptrs[i]->obj,
511 state.ptrs[i]->note_ptr_cookie,
512 relocate_ptrs, &state);
513 state.ptrs[i]->note_ptr_fn (state.ptrs[i]->obj,
514 state.ptrs[i]->note_ptr_cookie,
515 relocate_ptrs, &state);
516 ggc_pch_write_object (state.d, state.f, state.ptrs[i]->obj,
517 state.ptrs[i]->new_addr, state.ptrs[i]->size,
518 state.ptrs[i]->note_ptr_fn == gt_pch_p_S);
519 if (state.ptrs[i]->note_ptr_fn != gt_pch_p_S)
520 memcpy (state.ptrs[i]->obj, this_object, state.ptrs[i]->size);
522 ggc_pch_finish (state.d, state.f);
523 gt_pch_fixup_stringpool ();
525 free (state.ptrs);
526 htab_delete (saving_htab);
/* Read the state of the compiler back in from F: the mirror image of
   gt_pch_save.  Aborts compilation via fatal_error on any read failure
   or if the saved image cannot be placed at its preferred address.  */

void
gt_pch_restore (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct mmap_info mmi;
  int result;

  /* Delete any deletable objects.  This makes ggc_pch_read much
     faster, as it can be sure that no GCable objects remain other
     than the ones just read in.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Read in all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fread (rti->base, rti->stride, 1, f) != 1)
	fatal_error ("can't read PCH file: %m");

  /* Read in all the global pointers, in 6 easy loops.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	if (fread ((char *)rti->base + rti->stride * i,
		   sizeof (void *), 1, f) != 1)
	  fatal_error ("can't read PCH file: %m");

  for (rt = gt_pch_cache_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	if (fread ((char *)rti->base + rti->stride * i,
		   sizeof (void *), 1, f) != 1)
	  fatal_error ("can't read PCH file: %m");

  if (fread (&mmi, sizeof (mmi), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");

  /* Ask the host hook to place the object data at its preferred
     address: result > 0 means it mapped the file itself, 0 means we
     must read the data into the allocated space, < 0 means failure.  */
  result = host_hooks.gt_pch_use_address (mmi.preferred_base, mmi.size,
					  fileno (f), mmi.offset);
  if (result < 0)
    fatal_error ("had to relocate PCH");
  if (result == 0)
    {
      if (fseek (f, mmi.offset, SEEK_SET) != 0
	  || fread (mmi.preferred_base, mmi.size, 1, f) != 1)
	fatal_error ("can't read PCH file: %m");
    }
  else if (fseek (f, mmi.offset + mmi.size, SEEK_SET) != 0)
    fatal_error ("can't read PCH file: %m");

  ggc_pch_read (f, mmi.preferred_base);

  gt_pch_restore_stringpool ();
}
589 /* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is not present.
590 Select no address whatsoever, and let gt_pch_save choose what it will with
591 malloc, presumably. */
593 void *
594 default_gt_pch_get_address (size_t size ATTRIBUTE_UNUSED,
595 int fd ATTRIBUTE_UNUSED)
597 return NULL;
600 /* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is not present.
601 Allocate SIZE bytes with malloc. Return 0 if the address we got is the
602 same as base, indicating that the memory has been allocated but needs to
603 be read in from the file. Return -1 if the address differs, to relocation
604 of the PCH file would be required. */
607 default_gt_pch_use_address (void *base, size_t size, int fd ATTRIBUTE_UNUSED,
608 size_t offset ATTRIBUTE_UNUSED)
610 void *addr = xmalloc (size);
611 return (addr == base) - 1;
614 #if HAVE_MMAP_FILE
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is present.
   Probe where the kernel would place a SIZE-byte mapping of FD and use
   that as our preferred address.  If the probe fails, return NULL.  */

void *
mmap_gt_pch_get_address (size_t size, int fd)
{
  void *probe = mmap (NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);

  if (probe == (void *) MAP_FAILED)
    return NULL;

  /* Release the probe mapping; gt_pch_use_address re-maps there later.
     There is an inherent race: another mapping could claim the range in
     the meantime, in which case restore falls back to relocation.  */
  munmap (probe, size);

  return probe;
}
/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is present.
   Map SIZE bytes of FD+OFFSET at BASE.  Return 1 if we succeeded at
   mapping the data at BASE, -1 if we couldn't.

   This version assumes that the kernel honors the START operand of mmap
   even without MAP_FIXED if START through START+SIZE are not currently
   mapped with something.  */

int
mmap_gt_pch_use_address (void *base, size_t size, int fd, size_t offset)
{
  void *mapped;

  /* We're called with size == 0 if we're not planning to load a PCH
     file at all.  This allows the hook to free any static space that
     we might have allocated at link time.  */
  if (size == 0)
    return -1;

  mapped = mmap (base, size, PROT_READ | PROT_WRITE, MAP_PRIVATE,
		 fd, offset);

  return mapped == base ? 1 : -1;
}
658 #endif /* HAVE_MMAP_FILE */
/* Modify the bound based on rlimits.  Keep the smallest number found
   among LIMIT and the soft limits on resident set size, data segment
   size, and total address space, whichever of those this host
   provides.  */
static double
ggc_rlimit_bound (double limit)
{
#if defined(HAVE_GETRLIMIT)
  struct rlimit rlim;
# ifdef RLIMIT_RSS
  /* Resident set size.  */
  if (getrlimit (RLIMIT_RSS, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# endif
# ifdef RLIMIT_DATA
  /* Data segment size.  */
  if (getrlimit (RLIMIT_DATA, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# endif
# ifdef RLIMIT_AS
  /* Total address space.  */
  if (getrlimit (RLIMIT_AS, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# endif
#endif /* HAVE_GETRLIMIT */

  return limit;
}
/* Heuristic to set a default for GGC_MIN_EXPAND: a percentage equal to
   30% + 70% * (RAM/1GB), so between 30% (little RAM) and 100% (RAM of
   1GB or more), further bounded by the process rlimits.  */
double
ggc_min_expand_heuristic (void)
{
  /* Start from physical memory, clipped by any resource limits.  */
  double expand = ggc_rlimit_bound (physmem_total ());

  expand /= 1024 * 1024 * 1024;
  expand *= 70;
  expand = MIN (expand, 70);
  expand += 30;

  return expand;
}
/* Heuristic to set a default for GGC_MIN_HEAPSIZE: RAM/8 expressed in
   kilobytes, clamped to the range [4MB, 128MB], further bounded by the
   process rlimits.  */
double
ggc_min_heapsize_heuristic (void)
{
  /* Physical memory clipped by resource limits, converted to Kbytes.  */
  double heap_kbytes = ggc_rlimit_bound (physmem_total ()) / 1024;

  heap_kbytes /= 8;
  heap_kbytes = MAX (heap_kbytes, 4 * 1024);
  heap_kbytes = MIN (heap_kbytes, 128 * 1024);

  return heap_kbytes;
}
728 void
729 init_ggc_heuristics (void)
731 #if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
732 set_param_value ("ggc-min-expand", ggc_min_expand_heuristic());
733 set_param_value ("ggc-min-heapsize", ggc_min_heapsize_heuristic());
734 #endif