/* Simple garbage collection for the GNU compiler.
   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008,
   2009, 2010 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Generic garbage collection (GC) functions and data, not specific to
   any particular GC implementation.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hashtab.h"
#include "ggc.h"
#include "ggc-internal.h"
#include "diagnostic-core.h"
#include "toplev.h"
#include "params.h"
#include "hosthooks.h"
#include "hosthooks-def.h"
#include "plugin.h"
#include "vec.h"
#include "timevar.h"

#ifdef HAVE_SYS_RESOURCE_H
# include <sys/resource.h>
#endif

#ifdef HAVE_MMAP_FILE
# include <sys/mman.h>
# ifdef HAVE_MINCORE
/* This is on Solaris.  */
# include <sys/types.h>
# endif
#endif

#ifndef MAP_FAILED
# define MAP_FAILED ((void *)-1)
#endif

/* When set, ggc_collect will do collection.  */
bool ggc_force_collect;

/* When true, protect the contents of the identifier hash table.  */
bool ggc_protect_identifiers = true;

/* Statistics about the allocation.  */
static ggc_statistics *ggc_stats;

struct traversal_state;

static int ggc_htab_delete (void **, void *);
static hashval_t saving_htab_hash (const void *);
static int saving_htab_eq (const void *, const void *);
static int call_count (void **, void *);
static int call_alloc (void **, void *);
static int compare_ptr_data (const void *, const void *);
static void relocate_ptrs (void *, void *);
static void write_pch_globals (const struct ggc_root_tab * const *tab,
			       struct traversal_state *state);

/* Maintain global roots that are preserved during GC.  */

/* Process a slot of an htab by deleting it if it has not been marked.  */

static int
ggc_htab_delete (void **slot, void *info)
{
  const struct ggc_cache_tab *r = (const struct ggc_cache_tab *) info;

  if (! (*r->marked_p) (*slot))
    htab_clear_slot (*r->base, slot);
  else
    (*r->cb) (*slot);

  return 1;
}

/* This extra vector of dynamically registered root_tab-s is used by
   ggc_mark_roots and gives the ability to dynamically add new GGC root
   tables, for instance from some plugins; this vector is on the heap
   since it is used by GGC internally.  */
typedef const struct ggc_root_tab *const_ggc_root_tab_t;
DEF_VEC_P(const_ggc_root_tab_t);
DEF_VEC_ALLOC_P(const_ggc_root_tab_t, heap);
static VEC(const_ggc_root_tab_t, heap) *extra_root_vec;

/* Dynamically register a new GGC root table RT.  This is useful for
   plugins.  */

void
ggc_register_root_tab (const struct ggc_root_tab* rt)
{
  if (rt)
    VEC_safe_push (const_ggc_root_tab_t, heap, extra_root_vec, rt);
}
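
/* A rough usage sketch (the names my_plugin_tree and my_plugin_roots are
   hypothetical): a plugin that keeps a GC-allocated tree alive across
   collections would describe it in a root table and register that table
   once at load time, e.g.

     static tree my_plugin_tree;
     static const struct ggc_root_tab my_plugin_roots[] = {
       { &my_plugin_tree, 1, sizeof (my_plugin_tree),
	 &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node },
       LAST_GGC_ROOT_TAB
     };
     ...
     ggc_register_root_tab (my_plugin_roots);

   ggc_mark_roots then walks the registered table on every collection.  */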

/* This extra vector of dynamically registered cache_tab-s is used by
   ggc_mark_roots and gives the ability to dynamically add new GGC cache
   tables, for instance from some plugins; this vector is on the heap
   since it is used by GGC internally.  */
typedef const struct ggc_cache_tab *const_ggc_cache_tab_t;
DEF_VEC_P(const_ggc_cache_tab_t);
DEF_VEC_ALLOC_P(const_ggc_cache_tab_t, heap);
static VEC(const_ggc_cache_tab_t, heap) *extra_cache_vec;

/* Dynamically register a new GGC cache table CT.  This is useful for
   plugins.  */

void
ggc_register_cache_tab (const struct ggc_cache_tab* ct)
{
  if (ct)
    VEC_safe_push (const_ggc_cache_tab_t, heap, extra_cache_vec, ct);
}
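
/* Unlike a root table, a cache table does not keep its entries alive:
   an entry whose object has not been marked through some other path is
   simply dropped from the hash table during collection (see
   ggc_htab_delete above), while marked entries get their callback run.  */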

/* Scan a hash table that has objects which are to be deleted if they are not
   already marked.  */

static void
ggc_scan_cache_tab (const_ggc_cache_tab_t ctp)
{
  const struct ggc_cache_tab *cti;

  for (cti = ctp; cti->base != NULL; cti++)
    if (*cti->base)
      {
	ggc_set_mark (*cti->base);
	htab_traverse_noresize (*cti->base, ggc_htab_delete,
				CONST_CAST (void *, (const void *)cti));
	ggc_set_mark ((*cti->base)->entries);
      }
}

/* Iterate through all registered roots and mark each element.  */

void
ggc_mark_roots (void)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  const_ggc_root_tab_t rtp;
  const struct ggc_cache_tab *const *ct;
  const_ggc_cache_tab_t ctp;
  size_t i;

  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	(*rti->cb) (*(void **)((char *)rti->base + rti->stride * i));

  for (i = 0; VEC_iterate (const_ggc_root_tab_t, extra_root_vec, i, rtp); i++)
    {
      size_t j;

      for (rti = rtp; rti->base != NULL; rti++)
	for (j = 0; j < rti->nelt; j++)
	  (*rti->cb) (*(void **) ((char *)rti->base + rti->stride * j));
    }

  if (ggc_protect_identifiers)
    ggc_mark_stringpool ();

  /* Now scan all hash tables that have objects which are to be deleted if
     they are not already marked.  */
  for (ct = gt_ggc_cache_rtab; *ct; ct++)
    ggc_scan_cache_tab (*ct);

  for (i = 0; VEC_iterate (const_ggc_cache_tab_t, extra_cache_vec, i, ctp); i++)
    ggc_scan_cache_tab (ctp);

  if (! ggc_protect_identifiers)
    ggc_purge_stringpool ();

  /* Some plugins may call ggc_set_mark from here.  */
  invoke_plugin_callbacks (PLUGIN_GGC_MARKING, NULL);
}

/* Allocate a block of memory, then clear it.  */
void *
ggc_internal_cleared_alloc_stat (size_t size MEM_STAT_DECL)
{
  void *buf = ggc_internal_alloc_stat (size PASS_MEM_STAT);
  memset (buf, 0, size);
  return buf;
}

/* Resize a block of memory, possibly re-allocating it.  */
void *
ggc_realloc_stat (void *x, size_t size MEM_STAT_DECL)
{
  void *r;
  size_t old_size;

  if (x == NULL)
    return ggc_internal_alloc_stat (size PASS_MEM_STAT);

  old_size = ggc_get_size (x);

  if (size <= old_size)
    {
      /* Mark the unwanted memory as inaccessible.  We also need to make
	 the "new" size accessible, since ggc_get_size returns the size of
	 the pool, not the size of the individually allocated object, the
	 size which was previously made accessible.  Unfortunately, we
	 don't know that previously allocated size.  Without that
	 knowledge we have to lose some initialization-tracking for the
	 old parts of the object.  An alternative is to mark the whole
	 old_size as reachable, but that would lose tracking of writes
	 after the end of the object (by small offsets).  Discard the
	 handle to avoid handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) x + size,
						    old_size - size));
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, size));
      return x;
    }

  r = ggc_internal_alloc_stat (size PASS_MEM_STAT);

  /* Since ggc_get_size returns the size of the pool, not the size of the
     individually allocated object, we'd access parts of the old object
     that were marked invalid with the memcpy below.  We lose a bit of the
     initialization-tracking since some of it may be uninitialized.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, old_size));

  memcpy (r, x, old_size);

  /* The old object is not supposed to be used anymore.  */
  ggc_free (x);

  return r;
}

void *
ggc_cleared_alloc_htab_ignore_args (size_t c ATTRIBUTE_UNUSED,
				    size_t n ATTRIBUTE_UNUSED)
{
  gcc_assert (c * n == sizeof (struct htab));
  return ggc_alloc_cleared_htab ();
}

/* TODO: once we actually use type information in GGC, create a new tag
   gt_gcc_ptr_array and use it for pointer arrays.  */
void *
ggc_cleared_alloc_ptr_array_two_args (size_t c, size_t n)
{
  gcc_assert (sizeof (PTR *) == n);
  return ggc_internal_cleared_vec_alloc (sizeof (PTR *), c);
}

/* These are for splay_tree_new_ggc.  */
void *
ggc_splay_alloc (enum gt_types_enum obj_type ATTRIBUTE_UNUSED, int sz,
		 void *nl)
{
  gcc_assert (!nl);
  return ggc_internal_alloc (sz);
}

void
ggc_splay_dont_free (void * x ATTRIBUTE_UNUSED, void *nl)
{
  gcc_assert (!nl);
}

/* Print statistics that are independent of the collector in use.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
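
/* For example, SCALE (2048) is 2048 with LABEL ' ' (printed in bytes),
   while SCALE (2*1024*1024) is 2048 with LABEL 'k'; anything from 10MB
   up is reported in megabytes with LABEL 'M'.  */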

void
ggc_print_common_statistics (FILE *stream ATTRIBUTE_UNUSED,
			     ggc_statistics *stats)
{
  /* Set the pointer so that during collection we will actually gather
     the statistics.  */
  ggc_stats = stats;

  /* Then do one collection to fill in the statistics.  */
  ggc_collect ();

  /* At present, we don't really gather any interesting statistics.  */

  /* Don't gather statistics any more.  */
  ggc_stats = NULL;
}

/* Functions for saving and restoring GCable memory to disk.  */

static htab_t saving_htab;

struct ptr_data
{
  void *obj;
  void *note_ptr_cookie;
  gt_note_pointers note_ptr_fn;
  gt_handle_reorder reorder_fn;
  size_t size;
  void *new_addr;
  enum gt_types_enum type;
};

#define POINTER_HASH(x) (hashval_t)((long)x >> 3)
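
/* The shift by 3 drops the low-order bits of the address, which are
   almost always zero because GC objects are at least 8-byte aligned;
   keeping them would cluster hash values into a fraction of the
   buckets.  */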

/* Register an object in the hash table.  */

int
gt_pch_note_object (void *obj, void *note_ptr_cookie,
		    gt_note_pointers note_ptr_fn,
		    enum gt_types_enum type)
{
  struct ptr_data **slot;

  if (obj == NULL || obj == (void *) 1)
    return 0;

  slot = (struct ptr_data **)
    htab_find_slot_with_hash (saving_htab, obj, POINTER_HASH (obj),
			      INSERT);
  if (*slot != NULL)
    {
      gcc_assert ((*slot)->note_ptr_fn == note_ptr_fn
		  && (*slot)->note_ptr_cookie == note_ptr_cookie);
      return 0;
    }

  *slot = XCNEW (struct ptr_data);
  (*slot)->obj = obj;
  (*slot)->note_ptr_fn = note_ptr_fn;
  (*slot)->note_ptr_cookie = note_ptr_cookie;
  if (note_ptr_fn == gt_pch_p_S)
    (*slot)->size = strlen ((const char *)obj) + 1;
  else
    (*slot)->size = ggc_get_size (obj);
  (*slot)->type = type;
  return 1;
}

/* Note the reorder function REORDER_FN for OBJ, which must already have
   been registered in the hash table by gt_pch_note_object.  */

void
gt_pch_note_reorder (void *obj, void *note_ptr_cookie,
		     gt_handle_reorder reorder_fn)
{
  struct ptr_data *data;

  if (obj == NULL || obj == (void *) 1)
    return;

  data = (struct ptr_data *)
    htab_find_with_hash (saving_htab, obj, POINTER_HASH (obj));
  gcc_assert (data && data->note_ptr_cookie == note_ptr_cookie);

  data->reorder_fn = reorder_fn;
}

/* Hash and equality functions for saving_htab, callbacks for htab_create.  */

static hashval_t
saving_htab_hash (const void *p)
{
  return POINTER_HASH (((const struct ptr_data *)p)->obj);
}

static int
saving_htab_eq (const void *p1, const void *p2)
{
  return ((const struct ptr_data *)p1)->obj == p2;
}
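
/* Note the asymmetry in saving_htab_eq: P1 is a stored ptr_data entry,
   but P2 is the bare object address used as the lookup key, since all
   lookups in saving_htab pass the object pointer itself.  */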

/* Handy state for the traversal functions.  */

struct traversal_state
{
  FILE *f;
  struct ggc_pch_data *d;
  size_t count;
  struct ptr_data **ptrs;
  size_t ptrs_i;
};

/* Callbacks for htab_traverse.  */

static int
call_count (void **slot, void *state_p)
{
  struct ptr_data *d = (struct ptr_data *)*slot;
  struct traversal_state *state = (struct traversal_state *)state_p;

  ggc_pch_count_object (state->d, d->obj, d->size,
			d->note_ptr_fn == gt_pch_p_S,
			d->type);
  state->count++;
  return 1;
}

static int
call_alloc (void **slot, void *state_p)
{
  struct ptr_data *d = (struct ptr_data *)*slot;
  struct traversal_state *state = (struct traversal_state *)state_p;

  d->new_addr = ggc_pch_alloc_object (state->d, d->obj, d->size,
				      d->note_ptr_fn == gt_pch_p_S,
				      d->type);
  state->ptrs[state->ptrs_i++] = d;
  return 1;
}

/* Callback for qsort.  */

static int
compare_ptr_data (const void *p1_p, const void *p2_p)
{
  const struct ptr_data *const p1 = *(const struct ptr_data *const *)p1_p;
  const struct ptr_data *const p2 = *(const struct ptr_data *const *)p2_p;
  return (((size_t)p1->new_addr > (size_t)p2->new_addr)
	  - ((size_t)p1->new_addr < (size_t)p2->new_addr));
}
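
/* The (a > b) - (a < b) form yields exactly -1, 0 or 1 and sidesteps the
   truncation a direct subtraction of the two addresses could suffer once
   narrowed to an int.  */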

/* Callbacks for note_ptr_fn.  */

static void
relocate_ptrs (void *ptr_p, void *state_p)
{
  void **ptr = (void **)ptr_p;
  struct traversal_state *state ATTRIBUTE_UNUSED
    = (struct traversal_state *)state_p;
  struct ptr_data *result;

  if (*ptr == NULL || *ptr == (void *)1)
    return;

  result = (struct ptr_data *)
    htab_find_with_hash (saving_htab, *ptr, POINTER_HASH (*ptr));
  gcc_assert (result);
  *ptr = result->new_addr;
}

/* Write out, after relocation, the pointers in TAB.  */
static void
write_pch_globals (const struct ggc_root_tab * const *tab,
		   struct traversal_state *state)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;

  for (rt = tab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	{
	  void *ptr = *(void **)((char *)rti->base + rti->stride * i);
	  struct ptr_data *new_ptr;
	  if (ptr == NULL || ptr == (void *)1)
	    {
	      if (fwrite (&ptr, sizeof (void *), 1, state->f)
		  != 1)
		fatal_error ("can't write PCH file: %m");
	    }
	  else
	    {
	      new_ptr = (struct ptr_data *)
		htab_find_with_hash (saving_htab, ptr, POINTER_HASH (ptr));
	      if (fwrite (&new_ptr->new_addr, sizeof (void *), 1, state->f)
		  != 1)
		fatal_error ("can't write PCH file: %m");
	    }
	}
}

/* Hold the information we need to mmap the file back in.  */

struct mmap_info
{
  size_t offset;
  size_t size;
  void *preferred_base;
};

/* Write out the state of the compiler to F.  */

void
gt_pch_save (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct traversal_state state;
  char *this_object = NULL;
  size_t this_object_size = 0;
  struct mmap_info mmi;
  const size_t mmap_offset_alignment = host_hooks.gt_pch_alloc_granularity();

  gt_pch_save_stringpool ();

  timevar_push (TV_PCH_PTR_REALLOC);
  saving_htab = htab_create (50000, saving_htab_hash, saving_htab_eq, free);

  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	(*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  for (rt = gt_pch_cache_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	(*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  /* Prepare the objects for writing, determine addresses and such.  */
  state.f = f;
  state.d = init_ggc_pch ();
  state.count = 0;
  htab_traverse (saving_htab, call_count, &state);

  mmi.size = ggc_pch_total_size (state.d);

  /* Try to arrange things so that no relocation is necessary, but
     don't try very hard.  On most platforms, this will always work,
     and on the rest it's a lot of work to do better.
     (The extra work goes in HOST_HOOKS_GT_PCH_GET_ADDRESS and
     HOST_HOOKS_GT_PCH_USE_ADDRESS.)  */
  mmi.preferred_base = host_hooks.gt_pch_get_address (mmi.size, fileno (f));

  ggc_pch_this_base (state.d, mmi.preferred_base);

  state.ptrs = XNEWVEC (struct ptr_data *, state.count);
  state.ptrs_i = 0;

  htab_traverse (saving_htab, call_alloc, &state);
  timevar_pop (TV_PCH_PTR_REALLOC);

  timevar_push (TV_PCH_PTR_SORT);
  qsort (state.ptrs, state.count, sizeof (*state.ptrs), compare_ptr_data);
  timevar_pop (TV_PCH_PTR_SORT);

  /* Write out all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fwrite (rti->base, rti->stride, 1, f) != 1)
	fatal_error ("can't write PCH file: %m");

  /* Write out all the global pointers, after translation.  */
  write_pch_globals (gt_ggc_rtab, &state);
  write_pch_globals (gt_pch_cache_rtab, &state);

  /* Pad the PCH file so that the mmapped area starts on an allocation
     granularity (usually page) boundary.  */
  {
    long o;
    o = ftell (state.f) + sizeof (mmi);
    if (o == -1)
      fatal_error ("can't get position in PCH file: %m");
    mmi.offset = mmap_offset_alignment - o % mmap_offset_alignment;
    if (mmi.offset == mmap_offset_alignment)
      mmi.offset = 0;
    mmi.offset += o;
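
    /* For example, with a 4096-byte granularity, if the data written so
       far plus the mmap_info block ends at file offset o = 10000, then
       10000 % 4096 = 1808 and 4096 - 1808 = 2288 bytes of padding are
       added, so the mmapped area starts at offset 12288, a page
       boundary.  */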
  }

  if (fwrite (&mmi, sizeof (mmi), 1, state.f) != 1)
    fatal_error ("can't write PCH file: %m");
  if (mmi.offset != 0
      && fseek (state.f, mmi.offset, SEEK_SET) != 0)
    fatal_error ("can't write padding to PCH file: %m");

  ggc_pch_prepare_write (state.d, state.f);

  /* Actually write out the objects.  */
  for (i = 0; i < state.count; i++)
    {
      if (this_object_size < state.ptrs[i]->size)
	{
	  this_object_size = state.ptrs[i]->size;
	  this_object = XRESIZEVAR (char, this_object, this_object_size);
	}
      memcpy (this_object, state.ptrs[i]->obj, state.ptrs[i]->size);
      if (state.ptrs[i]->reorder_fn != NULL)
	state.ptrs[i]->reorder_fn (state.ptrs[i]->obj,
				   state.ptrs[i]->note_ptr_cookie,
				   relocate_ptrs, &state);
      state.ptrs[i]->note_ptr_fn (state.ptrs[i]->obj,
				  state.ptrs[i]->note_ptr_cookie,
				  relocate_ptrs, &state);
      ggc_pch_write_object (state.d, state.f, state.ptrs[i]->obj,
			    state.ptrs[i]->new_addr, state.ptrs[i]->size,
			    state.ptrs[i]->note_ptr_fn == gt_pch_p_S);
      if (state.ptrs[i]->note_ptr_fn != gt_pch_p_S)
	memcpy (state.ptrs[i]->obj, this_object, state.ptrs[i]->size);
    }
  ggc_pch_finish (state.d, state.f);
  gt_pch_fixup_stringpool ();

  free (state.ptrs);
  htab_delete (saving_htab);
}

/* Read the state of the compiler back in from F.  */

void
gt_pch_restore (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct mmap_info mmi;
  int result;

  /* Delete any deletable objects.  This makes ggc_pch_read much
     faster, as it can be sure that no GCable objects remain other
     than the ones just read in.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Read in all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fread (rti->base, rti->stride, 1, f) != 1)
	fatal_error ("can't read PCH file: %m");

  /* Read in all the global pointers, in 6 easy loops.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	if (fread ((char *)rti->base + rti->stride * i,
		   sizeof (void *), 1, f) != 1)
	  fatal_error ("can't read PCH file: %m");

  for (rt = gt_pch_cache_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	if (fread ((char *)rti->base + rti->stride * i,
		   sizeof (void *), 1, f) != 1)
	  fatal_error ("can't read PCH file: %m");

  if (fread (&mmi, sizeof (mmi), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");

  result = host_hooks.gt_pch_use_address (mmi.preferred_base, mmi.size,
					  fileno (f), mmi.offset);
  if (result < 0)
    fatal_error ("had to relocate PCH");
  if (result == 0)
    {
      if (fseek (f, mmi.offset, SEEK_SET) != 0
	  || fread (mmi.preferred_base, mmi.size, 1, f) != 1)
	fatal_error ("can't read PCH file: %m");
    }
  else if (fseek (f, mmi.offset + mmi.size, SEEK_SET) != 0)
    fatal_error ("can't read PCH file: %m");

  ggc_pch_read (f, mmi.preferred_base);

  gt_pch_restore_stringpool ();
}

/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is not present.
   Select no address whatsoever, and let gt_pch_save choose what it will with
   malloc, presumably.  */

void *
default_gt_pch_get_address (size_t size ATTRIBUTE_UNUSED,
			    int fd ATTRIBUTE_UNUSED)
{
  return NULL;
}

/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is not present.
   Allocate SIZE bytes with malloc.  Return 0 if the address we got is the
   same as BASE, indicating that the memory has been allocated but needs to
   be read in from the file.  Return -1 if the address differs, so relocation
   of the PCH file would be required.  */

int
default_gt_pch_use_address (void *base, size_t size, int fd ATTRIBUTE_UNUSED,
			    size_t offset ATTRIBUTE_UNUSED)
{
  void *addr = xmalloc (size);
  return (addr == base) - 1;
}
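
/* The expression (addr == base) - 1 is 0 only when xmalloc happens to
   return exactly BASE, in which case the caller reads the PCH data into
   that buffer; in every other case it is -1, which gt_pch_restore reports
   as "had to relocate PCH".  */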

/* Default version of HOST_HOOKS_GT_PCH_ALLOC_GRANULARITY.  Return the
   alignment required for allocating virtual memory.  Usually this is the
   same as pagesize.  */

size_t
default_gt_pch_alloc_granularity (void)
{
  return getpagesize();
}

#if HAVE_MMAP_FILE
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is present.
   We temporarily allocate SIZE bytes, and let the kernel place the data
   wherever it will.  If it worked, that's our spot, if not we're likely
   to be in trouble.  */

void *
mmap_gt_pch_get_address (size_t size, int fd)
{
  void *ret;

  ret = mmap (NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
  if (ret == (void *) MAP_FAILED)
    ret = NULL;
  else
    munmap ((caddr_t) ret, size);

  return ret;
}

/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is present.
   Map SIZE bytes of FD+OFFSET at BASE.  Return 1 if we succeeded at
   mapping the data at BASE, -1 if we couldn't.

   This version assumes that the kernel honors the START operand of mmap
   even without MAP_FIXED if START through START+SIZE are not currently
   mapped with something.  */

int
mmap_gt_pch_use_address (void *base, size_t size, int fd, size_t offset)
{
  void *addr;

  /* We're called with size == 0 if we're not planning to load a PCH
     file at all.  This allows the hook to free any static space that
     we might have allocated at link time.  */
  if (size == 0)
    return -1;

  addr = mmap ((caddr_t) base, size, PROT_READ | PROT_WRITE, MAP_PRIVATE,
	       fd, offset);

  return addr == base ? 1 : -1;
}
#endif /* HAVE_MMAP_FILE */

#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT

/* Modify the bound based on rlimits.  */
static double
ggc_rlimit_bound (double limit)
{
#if defined(HAVE_GETRLIMIT)
  struct rlimit rlim;
# if defined (RLIMIT_AS)
  /* RLIMIT_AS is what POSIX says is the limit on mmap.  Presumably
     any OS which has RLIMIT_AS also has a working mmap that GCC will use.  */
  if (getrlimit (RLIMIT_AS, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# elif defined (RLIMIT_DATA)
  /* ... but some older OSs bound mmap based on RLIMIT_DATA, or we
     might be on an OS that has a broken mmap.  (Others don't bound
     mmap at all, apparently.)  */
  if (getrlimit (RLIMIT_DATA, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit
      /* Darwin has this horribly bogus default setting of
	 RLIMIT_DATA, to 6144Kb.  No-one notices because RLIMIT_DATA
	 appears to be ignored.  Ignore such silliness.  If a limit
	 this small was actually effective for mmap, GCC wouldn't even
	 start up.  */
      && rlim.rlim_cur >= 8 * 1024 * 1024)
    limit = rlim.rlim_cur;
# endif /* RLIMIT_AS or RLIMIT_DATA */
#endif /* HAVE_GETRLIMIT */

  return limit;
}

/* Heuristic to set a default for GGC_MIN_EXPAND.  */
static int
ggc_min_expand_heuristic (void)
{
  double min_expand = physmem_total();

  /* Adjust for rlimits.  */
  min_expand = ggc_rlimit_bound (min_expand);

  /* The heuristic is a percentage equal to 30% + 70%*(RAM/1GB), yielding
     a lower bound of 30% and an upper bound of 100% (when RAM >= 1GB).  */
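  /* For example, with 512MB of physical memory (and no tighter rlimit)
     this works out to 30 + 70 * 0.5 = 65; with 1GB or more the MIN
     below caps it at 30 + 70 = 100.  */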
  min_expand /= 1024*1024*1024;
  min_expand *= 70;
  min_expand = MIN (min_expand, 70);
  min_expand += 30;

  return min_expand;
}

/* Heuristic to set a default for GGC_MIN_HEAPSIZE.  */
static int
ggc_min_heapsize_heuristic (void)
{
  double phys_kbytes = physmem_total();
  double limit_kbytes = ggc_rlimit_bound (phys_kbytes * 2);

  phys_kbytes /= 1024; /* Convert to Kbytes.  */
  limit_kbytes /= 1024;

  /* The heuristic is RAM/8, with a lower bound of 4M and an upper
     bound of 128M (when RAM >= 1GB).  */
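  /* For example, with 2GB of RAM this starts from 2097152 kB / 8 =
     262144 kB and is then clamped to the 128 * 1024 kB upper bound
     below, assuming no tighter rlimit applies.  */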
  phys_kbytes /= 8;

#if defined(HAVE_GETRLIMIT) && defined (RLIMIT_RSS)
  /* Try not to overrun the RSS limit while doing garbage collection.
     The RSS limit is only advisory, so no margin is subtracted.  */
  {
    struct rlimit rlim;
    if (getrlimit (RLIMIT_RSS, &rlim) == 0
	&& rlim.rlim_cur != (rlim_t) RLIM_INFINITY)
      phys_kbytes = MIN (phys_kbytes, rlim.rlim_cur / 1024);
  }
# endif

  /* Don't blindly run over our data limit; do GC at least when the
     *next* GC would be within 20Mb of the limit or within a quarter of
     the limit, whichever is larger.  If GCC does hit the data limit,
     compilation will fail, so this tries to be conservative.  */
  limit_kbytes = MAX (0, limit_kbytes - MAX (limit_kbytes / 4, 20 * 1024));
  limit_kbytes = (limit_kbytes * 100) / (110 + ggc_min_expand_heuristic ());
  phys_kbytes = MIN (phys_kbytes, limit_kbytes);

  phys_kbytes = MAX (phys_kbytes, 4 * 1024);
  phys_kbytes = MIN (phys_kbytes, 128 * 1024);

  return phys_kbytes;
}
#endif
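
/* Install the heuristics above as the default values of the
   ggc-min-expand and ggc-min-heapsize parameters; an explicit --param
   given on the command line is still expected to take precedence.  */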
void
init_ggc_heuristics (void)
{
#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
  set_param_value ("ggc-min-expand", ggc_min_expand_heuristic ());
  set_param_value ("ggc-min-heapsize", ggc_min_heapsize_heuristic ());
#endif
}

#ifdef GATHER_STATISTICS

/* Datastructure used to store per-call-site statistics.  */
struct loc_descriptor
{
  const char *file;
  int line;
  const char *function;
  int times;
  size_t allocated;
  size_t overhead;
  size_t freed;
  size_t collected;
};

/* Hashtable used for statistics.  */
static htab_t loc_hash;

/* Hash table helper functions.  */
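/* hash_descriptor folds the line number into the function pointer's hash
   with a plain OR; the mix is coarse, but collisions only cost lookup
   time, since eq_descriptor compares file, line and function exactly.  */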
static hashval_t
hash_descriptor (const void *p)
{
  const struct loc_descriptor *const d = (const struct loc_descriptor *) p;

  return htab_hash_pointer (d->function) | d->line;
}

static int
eq_descriptor (const void *p1, const void *p2)
{
  const struct loc_descriptor *const d = (const struct loc_descriptor *) p1;
  const struct loc_descriptor *const d2 = (const struct loc_descriptor *) p2;

  return (d->file == d2->file && d->line == d2->line
	  && d->function == d2->function);
}

/* Hashtable converting address of allocated field to loc descriptor.  */
static htab_t ptr_hash;
struct ptr_hash_entry
{
  void *ptr;
  struct loc_descriptor *loc;
  size_t size;
};

/* Hash table helper functions.  */
static hashval_t
hash_ptr (const void *p)
{
  const struct ptr_hash_entry *const d = (const struct ptr_hash_entry *) p;

  return htab_hash_pointer (d->ptr);
}

static int
eq_ptr (const void *p1, const void *p2)
{
  const struct ptr_hash_entry *const p = (const struct ptr_hash_entry *) p1;

  return (p->ptr == p2);
}

/* Return descriptor for given call site, create new one if needed.  */
static struct loc_descriptor *
loc_descriptor (const char *name, int line, const char *function)
{
  struct loc_descriptor loc;
  struct loc_descriptor **slot;

  loc.file = name;
  loc.line = line;
  loc.function = function;
  if (!loc_hash)
    loc_hash = htab_create (10, hash_descriptor, eq_descriptor, NULL);

  slot = (struct loc_descriptor **) htab_find_slot (loc_hash, &loc, INSERT);
  if (*slot)
    return *slot;
  *slot = XCNEW (struct loc_descriptor);
  (*slot)->file = name;
  (*slot)->line = line;
  (*slot)->function = function;
  return *slot;
}

/* Record ALLOCATED and OVERHEAD bytes to descriptor NAME:LINE (FUNCTION).  */
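/* PTR is the address of the newly allocated object; it is remembered in
   PTR_HASH so that ggc_free_overhead and ggc_prune_overhead_list can later
   credit the freed or collected bytes back to this call site.  */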
void
ggc_record_overhead (size_t allocated, size_t overhead, void *ptr,
		     const char *name, int line, const char *function)
{
  struct loc_descriptor *loc = loc_descriptor (name, line, function);
  struct ptr_hash_entry *p = XNEW (struct ptr_hash_entry);
  PTR *slot;

  p->ptr = ptr;
  p->loc = loc;
  p->size = allocated + overhead;
  if (!ptr_hash)
    ptr_hash = htab_create (10, hash_ptr, eq_ptr, NULL);
  slot = htab_find_slot_with_hash (ptr_hash, ptr, htab_hash_pointer (ptr), INSERT);
  gcc_assert (!*slot);
  *slot = p;

  loc->times++;
  loc->allocated += allocated;
  loc->overhead += overhead;
}

/* Helper function for prune_overhead_list.  See if SLOT is still marked and
   remove it from hashtable if it is not.  */
static int
ggc_prune_ptr (void **slot, void *b ATTRIBUTE_UNUSED)
{
  struct ptr_hash_entry *p = (struct ptr_hash_entry *) *slot;
  if (!ggc_marked_p (p->ptr))
    {
      p->loc->collected += p->size;
      htab_clear_slot (ptr_hash, slot);
      free (p);
    }
  return 1;
}

/* After live values have been marked, walk all recorded pointers and see if
   they are still live.  */
void
ggc_prune_overhead_list (void)
{
  htab_traverse (ptr_hash, ggc_prune_ptr, NULL);
}

/* Notice that the pointer has been freed.  */
void
ggc_free_overhead (void *ptr)
{
  PTR *slot = htab_find_slot_with_hash (ptr_hash, ptr, htab_hash_pointer (ptr),
					NO_INSERT);
  struct ptr_hash_entry *p;
  /* The pointer might not be found if a PCH read happened between allocation
     and the ggc_free () call.  FIXME: account memory properly in the presence
     of PCH.  */
  if (!slot)
    return;
  p = (struct ptr_hash_entry *) *slot;
  p->loc->freed += p->size;
  htab_clear_slot (ptr_hash, slot);
  free (p);
}

/* Helper for qsort; sort descriptors by amount of memory consumed.  */
static int
final_cmp_statistic (const void *loc1, const void *loc2)
{
  const struct loc_descriptor *const l1 =
    *(const struct loc_descriptor *const *) loc1;
  const struct loc_descriptor *const l2 =
    *(const struct loc_descriptor *const *) loc2;
  long diff;
  diff = ((long)(l1->allocated + l1->overhead - l1->freed) -
	  (l2->allocated + l2->overhead - l2->freed));
  return diff > 0 ? 1 : diff < 0 ? -1 : 0;
}

/* Helper for qsort; sort descriptors by amount of memory consumed.  */
static int
cmp_statistic (const void *loc1, const void *loc2)
{
  const struct loc_descriptor *const l1 =
    *(const struct loc_descriptor *const *) loc1;
  const struct loc_descriptor *const l2 =
    *(const struct loc_descriptor *const *) loc2;
  long diff;

  diff = ((long)(l1->allocated + l1->overhead - l1->freed - l1->collected) -
	  (l2->allocated + l2->overhead - l2->freed - l2->collected));
  if (diff)
    return diff > 0 ? 1 : diff < 0 ? -1 : 0;
  diff = ((long)(l1->allocated + l1->overhead - l1->freed) -
	  (l2->allocated + l2->overhead - l2->freed));
  return diff > 0 ? 1 : diff < 0 ? -1 : 0;
}

/* Collect array of the descriptors from hashtable.  */
static struct loc_descriptor **loc_array;
static int
add_statistics (void **slot, void *b)
{
  int *n = (int *)b;
  loc_array[*n] = (struct loc_descriptor *) *slot;
  (*n)++;
  return 1;
}

/* Dump per-site memory statistics.  */
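/* In the table printed below, "Garbage" is what the collector reclaimed,
   "Freed" what was released explicitly through ggc_free, "Leak" what is
   still live at the time of the dump (allocated + overhead - freed -
   collected), and "Times" the number of allocations from that source
   location.  */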
#endif
void
dump_ggc_loc_statistics (bool final ATTRIBUTE_UNUSED)
{
#ifdef GATHER_STATISTICS
  int nentries = 0;
  char s[4096];
  size_t collected = 0, freed = 0, allocated = 0, overhead = 0, times = 0;
  int i;

  ggc_force_collect = true;
  ggc_collect ();

  loc_array = XCNEWVEC (struct loc_descriptor *, loc_hash->n_elements);
  fprintf (stderr, "-------------------------------------------------------\n");
  fprintf (stderr, "\n%-48s %10s %10s %10s %10s %10s\n",
	   "source location", "Garbage", "Freed", "Leak", "Overhead", "Times");
  fprintf (stderr, "-------------------------------------------------------\n");
  htab_traverse (loc_hash, add_statistics, &nentries);
  qsort (loc_array, nentries, sizeof (*loc_array),
	 final ? final_cmp_statistic : cmp_statistic);
  for (i = 0; i < nentries; i++)
    {
      struct loc_descriptor *d = loc_array[i];
      allocated += d->allocated;
      times += d->times;
      freed += d->freed;
      collected += d->collected;
      overhead += d->overhead;
    }
  for (i = 0; i < nentries; i++)
    {
      struct loc_descriptor *d = loc_array[i];
      if (d->allocated)
	{
	  const char *s1 = d->file;
	  const char *s2;
	  while ((s2 = strstr (s1, "gcc/")))
	    s1 = s2 + 4;
	  sprintf (s, "%s:%i (%s)", s1, d->line, d->function);
	  s[48] = 0;
	  fprintf (stderr, "%-48s %10li:%4.1f%% %10li:%4.1f%% %10li:%4.1f%% %10li:%4.1f%% %10li\n", s,
		   (long)d->collected,
		   (d->collected) * 100.0 / collected,
		   (long)d->freed,
		   (d->freed) * 100.0 / freed,
		   (long)(d->allocated + d->overhead - d->freed - d->collected),
		   (d->allocated + d->overhead - d->freed - d->collected) * 100.0
		   / (allocated + overhead - freed - collected),
		   (long)d->overhead,
		   d->overhead * 100.0 / overhead,
		   (long)d->times);
	}
    }
  fprintf (stderr, "%-48s %10ld %10ld %10ld %10ld %10ld\n",
	   "Total", (long)collected, (long)freed,
	   (long)(allocated + overhead - freed - collected), (long)overhead,
	   (long)times);
  fprintf (stderr, "%-48s %10s %10s %10s %10s %10s\n",
	   "source location", "Garbage", "Freed", "Leak", "Overhead", "Times");
  fprintf (stderr, "-------------------------------------------------------\n");
  ggc_force_collect = false;
#endif
}