/* Simple garbage collection for the GNU compiler.
   Copyright (C) 1999, 2000, 2001, 2002, 2003
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

/* Generic garbage collection (GC) functions and data, not specific to
   any particular GC implementation.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hashtab.h"
#include "ggc.h"
#include "toplev.h"
#include "params.h"
#include "hosthooks.h"

#ifdef HAVE_SYS_RESOURCE_H
# include <sys/resource.h>
#endif

#ifdef HAVE_MMAP_FILE
# include <sys/mman.h>
# ifdef HAVE_MINCORE
/* This is on Solaris.  */
#  include <sys/types.h>
# endif
#endif

#ifndef MAP_FAILED
# define MAP_FAILED ((void *)-1)
#endif

#ifdef ENABLE_VALGRIND_CHECKING
# ifdef HAVE_VALGRIND_MEMCHECK_H
#  include <valgrind/memcheck.h>
# elif defined HAVE_MEMCHECK_H
#  include <memcheck.h>
# else
#  include <valgrind.h>
# endif
#else
/* Avoid #ifdef:s when we can help it.  */
#define VALGRIND_DISCARD(x)
#endif
/* Statistics about the allocation.  */
static ggc_statistics *ggc_stats;

struct traversal_state;

static int ggc_htab_delete (void **, void *);
static hashval_t saving_htab_hash (const void *);
static int saving_htab_eq (const void *, const void *);
static int call_count (void **, void *);
static int call_alloc (void **, void *);
static int compare_ptr_data (const void *, const void *);
static void relocate_ptrs (void *, void *);
static void write_pch_globals (const struct ggc_root_tab * const *tab,
                               struct traversal_state *state);
static double ggc_rlimit_bound (double);
/* Maintain global roots that are preserved during GC.  */

/* Process a slot of an htab by deleting it if it has not been marked.  */

static int
ggc_htab_delete (void **slot, void *info)
{
  const struct ggc_cache_tab *r = (const struct ggc_cache_tab *) info;

  if (! (*r->marked_p) (*slot))
    htab_clear_slot (*r->base, slot);
  else
    (*r->cb) (*slot);

  return 1;
}
/* Iterate through all registered roots and mark each element.  */

void
ggc_mark_roots (void)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  const struct ggc_cache_tab *const *ct;
  const struct ggc_cache_tab *cti;
  size_t i;

  /* Zero out the deletable roots so that they do not keep anything alive.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Mark everything reachable from the ordinary roots.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        (*rti->cb)(*(void **)((char *)rti->base + rti->stride * i));

  ggc_mark_stringpool ();

  /* Now scan all hash tables that have objects which are to be deleted if
     they are not already marked.  */
  for (ct = gt_ggc_cache_rtab; *ct; ct++)
    for (cti = *ct; cti->base != NULL; cti++)
      if (*cti->base)
        {
          ggc_set_mark (*cti->base);
          htab_traverse_noresize (*cti->base, ggc_htab_delete, (void *) cti);
          ggc_set_mark ((*cti->base)->entries);
        }
}
/* Allocate a block of memory, then clear it.  */

void *
ggc_alloc_cleared (size_t size)
{
  void *buf = ggc_alloc (size);
  memset (buf, 0, size);
  return buf;
}
/* Resize a block of memory, possibly re-allocating it.  */

void *
ggc_realloc (void *x, size_t size)
{
  void *r;
  size_t old_size;

  if (x == NULL)
    return ggc_alloc (size);

  old_size = ggc_get_size (x);

  if (size <= old_size)
    {
      /* Mark the unwanted memory as unaccessible.  We also need to make
         the "new" size accessible, since ggc_get_size returns the size of
         the pool, not the size of the individually allocated object, the
         size which was previously made accessible.  Unfortunately, we
         don't know that previously allocated size.  Without that
         knowledge we have to lose some initialization-tracking for the
         old parts of the object.  An alternative is to mark the whole
         old_size as reachable, but that would lose tracking of writes
         after the end of the object (by small offsets).  Discard the
         handle to avoid handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) x + size,
                                                old_size - size));
      VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (x, size));
      return x;
    }

  r = ggc_alloc (size);

  /* Since ggc_get_size returns the size of the pool, not the size of the
     individually allocated object, we'd access parts of the old object
     that were marked invalid with the memcpy below.  We lose a bit of the
     initialization-tracking since some of it may be uninitialized.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (x, old_size));

  memcpy (r, x, old_size);

  /* The old object is not supposed to be used anymore.  */
  ggc_free (x);

  return r;
}
/* Like ggc_alloc_cleared, but performs a multiplication.  */

void *
ggc_calloc (size_t s1, size_t s2)
{
  return ggc_alloc_cleared (s1 * s2);
}
/* These are for splay_tree_new_ggc.  */

void *
ggc_splay_alloc (int sz, void *nl)
{
  if (nl != NULL)
    abort ();
  return ggc_alloc (sz);
}

void
ggc_splay_dont_free (void * x ATTRIBUTE_UNUSED, void *nl)
{
  if (nl != NULL)
    abort ();
}
/* Print statistics that are independent of the collector in use.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
                  ? (x) \
                  : ((x) < 1024*1024*10 \
                     ? (x) / 1024 \
                     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
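/* For example, SCALE (4096) is 4096 with LABEL ' ' (plain bytes),
   SCALE (2000000) is 1953 with LABEL 'k', and SCALE (50000000) is 47
   with LABEL 'M'.  Values stay in the smaller unit until they reach
   ten times it (10k, 10M), so small counts keep their precision.  */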
void
ggc_print_common_statistics (FILE *stream ATTRIBUTE_UNUSED,
                             ggc_statistics *stats)
{
  /* Set the pointer so that during collection we will actually gather
     the statistics.  */
  ggc_stats = stats;

  /* Then do one collection to fill in the statistics.  */
  ggc_collect ();

  /* At present, we don't really gather any interesting statistics.  */

  /* Don't gather statistics any more.  */
  ggc_stats = NULL;
}
/* Functions for saving and restoring GCable memory to disk.  */

static htab_t saving_htab;

struct ptr_data
{
  void *obj;
  void *note_ptr_cookie;
  gt_note_pointers note_ptr_fn;
  gt_handle_reorder reorder_fn;
  size_t size;
  void *new_addr;
};

#define POINTER_HASH(x) (hashval_t)((long)x >> 3)
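/* Objects handed to the collector are normally at least 8-byte
   aligned, so the low bits of the address carry little information;
   shifting them off gives a better hash distribution.  */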
/* Register an object in the hash table.  */

int
gt_pch_note_object (void *obj, void *note_ptr_cookie,
                    gt_note_pointers note_ptr_fn)
{
  struct ptr_data **slot;

  if (obj == NULL || obj == (void *) 1)
    return 0;

  slot = (struct ptr_data **)
    htab_find_slot_with_hash (saving_htab, obj, POINTER_HASH (obj),
                              INSERT);
  if (*slot != NULL)
    {
      if ((*slot)->note_ptr_fn != note_ptr_fn
          || (*slot)->note_ptr_cookie != note_ptr_cookie)
        abort ();
      return 0;
    }

  *slot = xcalloc (sizeof (struct ptr_data), 1);
  (*slot)->obj = obj;
  (*slot)->note_ptr_fn = note_ptr_fn;
  (*slot)->note_ptr_cookie = note_ptr_cookie;
  if (note_ptr_fn == gt_pch_p_S)
    (*slot)->size = strlen (obj) + 1;
  else
    (*slot)->size = ggc_get_size (obj);
  return 1;
}
/* Register the reorder function REORDER_FN for OBJ, which must
   already have been registered with gt_pch_note_object.  */

void
gt_pch_note_reorder (void *obj, void *note_ptr_cookie,
                     gt_handle_reorder reorder_fn)
{
  struct ptr_data *data;

  if (obj == NULL || obj == (void *) 1)
    return;

  data = htab_find_with_hash (saving_htab, obj, POINTER_HASH (obj));
  if (data == NULL
      || data->note_ptr_cookie != note_ptr_cookie)
    abort ();

  data->reorder_fn = reorder_fn;
}
/* Hash and equality functions for saving_htab, callbacks for htab_create.
   Entries are struct ptr_data, but lookups use the raw object pointer
   as the key.  */

static hashval_t
saving_htab_hash (const void *p)
{
  return POINTER_HASH (((struct ptr_data *)p)->obj);
}

static int
saving_htab_eq (const void *p1, const void *p2)
{
  return ((struct ptr_data *)p1)->obj == p2;
}
/* Handy state for the traversal functions.  */

struct traversal_state
{
  FILE *f;
  struct ggc_pch_data *d;
  size_t count;
  struct ptr_data **ptrs;
  size_t ptrs_i;
};
/* Callbacks for htab_traverse.  */

static int
call_count (void **slot, void *state_p)
{
  struct ptr_data *d = (struct ptr_data *)*slot;
  struct traversal_state *state = (struct traversal_state *)state_p;

  ggc_pch_count_object (state->d, d->obj, d->size,
                        d->note_ptr_fn == gt_pch_p_S);
  state->count++;
  return 1;
}

static int
call_alloc (void **slot, void *state_p)
{
  struct ptr_data *d = (struct ptr_data *)*slot;
  struct traversal_state *state = (struct traversal_state *)state_p;

  d->new_addr = ggc_pch_alloc_object (state->d, d->obj, d->size,
                                      d->note_ptr_fn == gt_pch_p_S);
  state->ptrs[state->ptrs_i++] = d;
  return 1;
}
/* Callback for qsort.  */

static int
compare_ptr_data (const void *p1_p, const void *p2_p)
{
  struct ptr_data *p1 = *(struct ptr_data *const *)p1_p;
  struct ptr_data *p2 = *(struct ptr_data *const *)p2_p;
  return (((size_t)p1->new_addr > (size_t)p2->new_addr)
          - ((size_t)p1->new_addr < (size_t)p2->new_addr));
}
/* Callbacks for note_ptr_fn.  */

static void
relocate_ptrs (void *ptr_p, void *state_p)
{
  void **ptr = (void **)ptr_p;
  struct traversal_state *state ATTRIBUTE_UNUSED
    = (struct traversal_state *)state_p;
  struct ptr_data *result;

  if (*ptr == NULL || *ptr == (void *)1)
    return;

  result = htab_find_with_hash (saving_htab, *ptr, POINTER_HASH (*ptr));
  if (result == NULL)
    abort ();
  *ptr = result->new_addr;
}
/* Write out, after relocation, the pointers in TAB.  */
static void
write_pch_globals (const struct ggc_root_tab * const *tab,
                   struct traversal_state *state)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;

  for (rt = tab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        {
          void *ptr = *(void **)((char *)rti->base + rti->stride * i);
          struct ptr_data *new_ptr;
          if (ptr == NULL || ptr == (void *)1)
            {
              if (fwrite (&ptr, sizeof (void *), 1, state->f)
                  != 1)
                fatal_error ("can't write PCH file: %m");
            }
          else
            {
              new_ptr = htab_find_with_hash (saving_htab, ptr,
                                             POINTER_HASH (ptr));
              if (fwrite (&new_ptr->new_addr, sizeof (void *), 1, state->f)
                  != 1)
                fatal_error ("can't write PCH file: %m");
            }
        }
}
/* Hold the information we need to mmap the file back in.  */

struct mmap_info
{
  size_t offset;
  size_t size;
  void *preferred_base;
};

/* Write out the state of the compiler to F.  */
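/* The data is written in several passes: every registered root is
   walked through its pchw callback so that gt_pch_note_object records
   each live object in saving_htab; the objects are then counted and
   assigned their new addresses; the scalar roots and the relocated
   global pointers are written; the file is padded so the object data
   starts on a page boundary; finally each object is copied aside, its
   pointers rewritten in place by relocate_ptrs, written out, and then
   restored from the saved copy.  */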
void
gt_pch_save (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct traversal_state state;
  char *this_object = NULL;
  size_t this_object_size = 0;
  struct mmap_info mmi;
  size_t page_size = getpagesize();

  gt_pch_save_stringpool ();

  saving_htab = htab_create (50000, saving_htab_hash, saving_htab_eq, free);

  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        (*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  for (rt = gt_pch_cache_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        (*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  /* Prepare the objects for writing, determine addresses and such.  */
  state.f = f;
  state.d = init_ggc_pch();
  state.count = 0;
  htab_traverse (saving_htab, call_count, &state);

  mmi.size = ggc_pch_total_size (state.d);

  /* Try to arrange things so that no relocation is necessary, but
     don't try very hard.  On most platforms, this will always work,
     and on the rest it's a lot of work to do better.
     (The extra work goes in HOST_HOOKS_GT_PCH_GET_ADDRESS and
     HOST_HOOKS_GT_PCH_USE_ADDRESS.)  */
  mmi.preferred_base = host_hooks.gt_pch_get_address (mmi.size);

#if HAVE_MMAP_FILE
  if (mmi.preferred_base == NULL)
    {
      mmi.preferred_base = mmap (NULL, mmi.size,
                                 PROT_READ | PROT_WRITE, MAP_PRIVATE,
                                 fileno (state.f), 0);
      if (mmi.preferred_base == (void *) MAP_FAILED)
        mmi.preferred_base = NULL;
      else
        munmap (mmi.preferred_base, mmi.size);
    }
#endif /* HAVE_MMAP_FILE */

  ggc_pch_this_base (state.d, mmi.preferred_base);

  state.ptrs = xmalloc (state.count * sizeof (*state.ptrs));
  state.ptrs_i = 0;
  htab_traverse (saving_htab, call_alloc, &state);
  qsort (state.ptrs, state.count, sizeof (*state.ptrs), compare_ptr_data);
  /* Write out all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fwrite (rti->base, rti->stride, 1, f) != 1)
        fatal_error ("can't write PCH file: %m");

  /* Write out all the global pointers, after translation.  */
  write_pch_globals (gt_ggc_rtab, &state);
  write_pch_globals (gt_pch_cache_rtab, &state);

  ggc_pch_prepare_write (state.d, state.f);

  /* Pad the PCH file so that the mmapped area starts on a page boundary.  */
  {
    long o;
    o = ftell (state.f) + sizeof (mmi);
    if (o == -1)
      fatal_error ("can't get position in PCH file: %m");
    mmi.offset = page_size - o % page_size;
    if (mmi.offset == page_size)
      mmi.offset = 0;
    mmi.offset += o;
  }
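  /* For example, with a 4096-byte page size, if the header data ends at
     file offset 10000 the computation above adds 4096 - 10000 % 4096
     == 2288 bytes of padding, so mmi.offset becomes 12288, the next
     page boundary.  */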
  if (fwrite (&mmi, sizeof (mmi), 1, state.f) != 1)
    fatal_error ("can't write PCH file: %m");
  if (mmi.offset != 0
      && fseek (state.f, mmi.offset, SEEK_SET) != 0)
    fatal_error ("can't write padding to PCH file: %m");

  /* Actually write out the objects.  */
  for (i = 0; i < state.count; i++)
    {
      if (this_object_size < state.ptrs[i]->size)
        {
          this_object_size = state.ptrs[i]->size;
          this_object = xrealloc (this_object, this_object_size);
        }
      memcpy (this_object, state.ptrs[i]->obj, state.ptrs[i]->size);
      if (state.ptrs[i]->reorder_fn != NULL)
        state.ptrs[i]->reorder_fn (state.ptrs[i]->obj,
                                   state.ptrs[i]->note_ptr_cookie,
                                   relocate_ptrs, &state);
      state.ptrs[i]->note_ptr_fn (state.ptrs[i]->obj,
                                  state.ptrs[i]->note_ptr_cookie,
                                  relocate_ptrs, &state);
      ggc_pch_write_object (state.d, state.f, state.ptrs[i]->obj,
                            state.ptrs[i]->new_addr, state.ptrs[i]->size,
                            state.ptrs[i]->note_ptr_fn == gt_pch_p_S);
      if (state.ptrs[i]->note_ptr_fn != gt_pch_p_S)
        memcpy (state.ptrs[i]->obj, this_object, state.ptrs[i]->size);
    }
  ggc_pch_finish (state.d, state.f);
  gt_pch_fixup_stringpool ();

  free (state.ptrs);
  htab_delete (saving_htab);
}
/* Read the state of the compiler back in from F.  */
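/* Restoring mirrors gt_pch_save: the deletable roots are cleared, the
   scalar roots and global pointers are read back, and the mmap_info
   header tells us where the object data lives in the file and at what
   address it was written to run.  We try to mmap (or read) it back at
   that preferred address; if that is impossible the roots are adjusted
   by the displacement and we give up with sorry ().  */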
void
gt_pch_restore (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct mmap_info mmi;
  void *addr;
  bool needs_read;

  /* Delete any deletable objects.  This makes ggc_pch_read much
     faster, as it can be sure that no GCable objects remain other
     than the ones just read in.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Read in all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fread (rti->base, rti->stride, 1, f) != 1)
        fatal_error ("can't read PCH file: %m");

  /* Read in all the global pointers, in 6 easy loops.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        if (fread ((char *)rti->base + rti->stride * i,
                   sizeof (void *), 1, f) != 1)
          fatal_error ("can't read PCH file: %m");

  for (rt = gt_pch_cache_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        if (fread ((char *)rti->base + rti->stride * i,
                   sizeof (void *), 1, f) != 1)
          fatal_error ("can't read PCH file: %m");

  if (fread (&mmi, sizeof (mmi), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");

  if (host_hooks.gt_pch_use_address (mmi.preferred_base, mmi.size))
    {
#if HAVE_MMAP_FILE
      void *mmap_result;

      mmap_result = mmap (mmi.preferred_base, mmi.size,
                          PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED,
                          fileno (f), mmi.offset);

      /* The file might not be mmap-able.  */
      needs_read = mmap_result == (void *) MAP_FAILED;

      /* Sanity check for broken MAP_FIXED.  */
      if (! needs_read && mmap_result != mmi.preferred_base)
        abort ();
#else
      needs_read = true;
#endif
      addr = mmi.preferred_base;
    }
  else
    {
#if HAVE_MMAP_FILE
      addr = mmap (mmi.preferred_base, mmi.size,
                   PROT_READ | PROT_WRITE, MAP_PRIVATE,
                   fileno (f), mmi.offset);

#if HAVE_MINCORE
      if (addr != mmi.preferred_base)
        {
          size_t page_size = getpagesize();
          char one_byte;

          if (addr != (void *) MAP_FAILED)
            munmap (addr, mmi.size);

          /* We really want to be mapped at mmi.preferred_base
             so we're going to resort to MAP_FIXED.  But before,
             make sure that we can do so without destroying a
             previously mapped area, by looping over all pages
             that would be affected by the fixed mapping.  */
          errno = 0;

          for (i = 0; i < mmi.size; i += page_size)
            if (mincore ((char *)mmi.preferred_base + i, page_size,
                         (void *)&one_byte) == -1
                && errno == ENOMEM)
              continue; /* The page is not mapped.  */
            else
              break;

          if (i >= mmi.size)
            addr = mmap (mmi.preferred_base, mmi.size,
                         PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED,
                         fileno (f), mmi.offset);
        }
#endif /* HAVE_MINCORE */

      needs_read = addr == (void *) MAP_FAILED;

#else /* HAVE_MMAP_FILE */
      needs_read = true;
#endif /* HAVE_MMAP_FILE */
      if (needs_read)
        addr = xmalloc (mmi.size);
    }
  if (needs_read)
    {
      /* Read the object data into the buffer we just allocated.  */
      if (fseek (f, mmi.offset, SEEK_SET) != 0
          || fread (addr, mmi.size, 1, f) != 1)
        fatal_error ("can't read PCH file: %m");
    }
  else if (fseek (f, mmi.offset + mmi.size, SEEK_SET) != 0)
    fatal_error ("can't read PCH file: %m");
  ggc_pch_read (f, addr);

  if (addr != mmi.preferred_base)
    {
      for (rt = gt_ggc_rtab; *rt; rt++)
        for (rti = *rt; rti->base != NULL; rti++)
          for (i = 0; i < rti->nelt; i++)
            {
              char **ptr = (char **)((char *)rti->base + rti->stride * i);
              if (*ptr != NULL)
                *ptr += (size_t)addr - (size_t)mmi.preferred_base;
            }

      for (rt = gt_pch_cache_rtab; *rt; rt++)
        for (rti = *rt; rti->base != NULL; rti++)
          for (i = 0; i < rti->nelt; i++)
            {
              char **ptr = (char **)((char *)rti->base + rti->stride * i);
              if (*ptr != NULL)
                *ptr += (size_t)addr - (size_t)mmi.preferred_base;
            }

      sorry ("had to relocate PCH");
    }

  gt_pch_restore_stringpool ();
}
/* Modify the bound based on rlimits.  Keep the smallest number found.  */
static double
ggc_rlimit_bound (double limit)
{
#if defined(HAVE_GETRLIMIT)
  struct rlimit rlim;
# ifdef RLIMIT_RSS
  if (getrlimit (RLIMIT_RSS, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# endif
# ifdef RLIMIT_DATA
  if (getrlimit (RLIMIT_DATA, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# endif
# ifdef RLIMIT_AS
  if (getrlimit (RLIMIT_AS, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# endif
#endif /* HAVE_GETRLIMIT */

  return limit;
}
/* Heuristic to set a default for GGC_MIN_EXPAND.  */
double
ggc_min_expand_heuristic (void)
{
  double min_expand = physmem_total();

  /* Adjust for rlimits.  */
  min_expand = ggc_rlimit_bound (min_expand);

  /* The heuristic is a percentage equal to 30% + 70%*(RAM/1GB), yielding
     a lower bound of 30% and an upper bound of 100% (when RAM >= 1GB).  */
  min_expand /= 1024*1024*1024;
  min_expand *= 70;
  min_expand = MIN (min_expand, 70);
  min_expand += 30;
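  /* For example, a machine with 512MB of usable RAM ends up with
     0.5 * 70 + 30 == 65, i.e. the heap may grow by 65% between
     collections; with 1GB or more the MIN clamp keeps the result
     at 100.  */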
  return min_expand;
}
/* Heuristic to set a default for GGC_MIN_HEAPSIZE.  */
double
ggc_min_heapsize_heuristic (void)
{
  double min_heap_kbytes = physmem_total();

  /* Adjust for rlimits.  */
  min_heap_kbytes = ggc_rlimit_bound (min_heap_kbytes);

  min_heap_kbytes /= 1024; /* Convert to Kbytes.  */

  /* The heuristic is RAM/8, with a lower bound of 4M and an upper
     bound of 128M (when RAM >= 1GB).  */
  min_heap_kbytes /= 8;
  min_heap_kbytes = MAX (min_heap_kbytes, 4 * 1024);
  min_heap_kbytes = MIN (min_heap_kbytes, 128 * 1024);
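  /* For example, 512MB of RAM gives 524288 / 8 == 65536 Kbytes (64MB),
     which falls between the 4MB floor and the 128MB ceiling; 1GB or
     more is clamped to 128MB.  */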
  return min_heap_kbytes;
}
/* Set the default GC parameters from the heuristics above, unless a
   checking configuration wants deterministic collection behavior.  */

void
init_ggc_heuristics (void)
{
#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
  set_param_value ("ggc-min-expand", ggc_min_expand_heuristic());
  set_param_value ("ggc-min-heapsize", ggc_min_heapsize_heuristic());
#endif
}