gcc/ggc-common.c
/* Simple garbage collection for the GNU compiler.
   Copyright (C) 1999-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Generic garbage collection (GC) functions and data, not specific to
   any particular GC implementation.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hashtab.h"
#include "ggc.h"
#include "ggc-internal.h"
#include "diagnostic-core.h"
#include "params.h"
#include "hosthooks.h"
#include "hosthooks-def.h"
#include "plugin.h"
#include "vec.h"
#include "timevar.h"
/* When set, ggc_collect will do collection.  */
bool ggc_force_collect;

/* When true, protect the contents of the identifier hash table.  */
bool ggc_protect_identifiers = true;

/* Statistics about the allocation.  */
static ggc_statistics *ggc_stats;

struct traversal_state;

static int ggc_htab_delete (void **, void *);
static hashval_t saving_htab_hash (const void *);
static int saving_htab_eq (const void *, const void *);
static int call_count (void **, void *);
static int call_alloc (void **, void *);
static int compare_ptr_data (const void *, const void *);
static void relocate_ptrs (void *, void *);
static void write_pch_globals (const struct ggc_root_tab * const *tab,
                               struct traversal_state *state);
/* Maintain global roots that are preserved during GC.  */

/* Process a slot of an htab by deleting it if it has not been marked.  */

static int
ggc_htab_delete (void **slot, void *info)
{
  const struct ggc_cache_tab *r = (const struct ggc_cache_tab *) info;

  if (! (*r->marked_p) (*slot))
    htab_clear_slot (*r->base, slot);
  else
    (*r->cb) (*slot);

  return 1;
}
/* This extra vector of dynamically registered root_tab-s is used by
   ggc_mark_roots and gives the ability to dynamically add new GGC root
   tables, for instance from some plugins; this vector is on the heap
   since it is used by GGC internally.  */
typedef const struct ggc_root_tab *const_ggc_root_tab_t;
static vec<const_ggc_root_tab_t> extra_root_vec;

/* Dynamically register a new GGC root table RT.  This is useful for
   plugins.  */

void
ggc_register_root_tab (const struct ggc_root_tab* rt)
{
  if (rt)
    extra_root_vec.safe_push (rt);
}
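/* As an illustration (hypothetical plugin code, not part of this file):
   a plugin that keeps a GC-managed tree alive across collections would
   declare a root table for it and register that table once at load time.
   Cache tables are registered analogously via ggc_register_cache_tab.

       static tree my_plugin_decl;

       static const struct ggc_root_tab my_root_tab[] = {
         { &my_plugin_decl, 1, sizeof (my_plugin_decl),
           &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node },
         LAST_GGC_ROOT_TAB
       };

       ggc_register_root_tab (my_root_tab);     e.g. from plugin_init

   The five initializers correspond to the base, nelt, stride, cb and
   pchw fields of struct ggc_root_tab.  */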
/* This extra vector of dynamically registered cache_tab-s is used by
   ggc_mark_roots and gives the ability to dynamically add new GGC cache
   tables, for instance from some plugins; this vector is on the heap
   since it is used by GGC internally.  */
typedef const struct ggc_cache_tab *const_ggc_cache_tab_t;
static vec<const_ggc_cache_tab_t> extra_cache_vec;

/* Dynamically register a new GGC cache table CT.  This is useful for
   plugins.  */

void
ggc_register_cache_tab (const struct ggc_cache_tab* ct)
{
  if (ct)
    extra_cache_vec.safe_push (ct);
}
/* Scan a hash table that has objects which are to be deleted if they are not
   already marked.  */

static void
ggc_scan_cache_tab (const_ggc_cache_tab_t ctp)
{
  const struct ggc_cache_tab *cti;

  for (cti = ctp; cti->base != NULL; cti++)
    if (*cti->base)
      {
        ggc_set_mark (*cti->base);
        htab_traverse_noresize (*cti->base, ggc_htab_delete,
                                CONST_CAST (void *, (const void *)cti));
        ggc_set_mark ((*cti->base)->entries);
      }
}
/* Mark all the roots in the table RT.  */

static void
ggc_mark_root_tab (const_ggc_root_tab_t rt)
{
  size_t i;

  for ( ; rt->base != NULL; rt++)
    for (i = 0; i < rt->nelt; i++)
      (*rt->cb) (*(void **) ((char *)rt->base + rt->stride * i));
}
/* Iterate through all registered roots and mark each element.  */

void
ggc_mark_roots (void)
{
  const struct ggc_root_tab *const *rt;
  const_ggc_root_tab_t rtp, rti;
  const struct ggc_cache_tab *const *ct;
  const_ggc_cache_tab_t ctp;
  size_t i;

  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  for (rt = gt_ggc_rtab; *rt; rt++)
    ggc_mark_root_tab (*rt);

  FOR_EACH_VEC_ELT (extra_root_vec, i, rtp)
    ggc_mark_root_tab (rtp);

  if (ggc_protect_identifiers)
    ggc_mark_stringpool ();

  /* Now scan all hash tables that have objects which are to be deleted if
     they are not already marked.  */
  for (ct = gt_ggc_cache_rtab; *ct; ct++)
    ggc_scan_cache_tab (*ct);

  FOR_EACH_VEC_ELT (extra_cache_vec, i, ctp)
    ggc_scan_cache_tab (ctp);

  if (! ggc_protect_identifiers)
    ggc_purge_stringpool ();

  /* Some plugins may call ggc_set_mark from here.  */
  invoke_plugin_callbacks (PLUGIN_GGC_MARKING, NULL);
}
/* Allocate a block of memory, then clear it.  */
void *
ggc_internal_cleared_alloc_stat (size_t size MEM_STAT_DECL)
{
  void *buf = ggc_internal_alloc_stat (size PASS_MEM_STAT);
  memset (buf, 0, size);
  return buf;
}
/* Resize a block of memory, possibly re-allocating it.  */
void *
ggc_realloc_stat (void *x, size_t size MEM_STAT_DECL)
{
  void *r;
  size_t old_size;

  if (x == NULL)
    return ggc_internal_alloc_stat (size PASS_MEM_STAT);

  old_size = ggc_get_size (x);

  if (size <= old_size)
    {
      /* Mark the unwanted memory as inaccessible.  We also need to make
         the "new" size accessible, since ggc_get_size returns the size of
         the pool, not the size of the individually allocated object, the
         size which was previously made accessible.  Unfortunately, we
         don't know that previously allocated size.  Without that
         knowledge we have to lose some initialization-tracking for the
         old parts of the object.  An alternative is to mark the whole
         old_size as reachable, but that would lose tracking of writes
         after the end of the object (by small offsets).  Discard the
         handle to avoid handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) x + size,
                                                    old_size - size));
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, size));
      return x;
    }

  r = ggc_internal_alloc_stat (size PASS_MEM_STAT);

  /* Since ggc_get_size returns the size of the pool, not the size of the
     individually allocated object, we'd access parts of the old object
     that were marked invalid with the memcpy below.  We lose a bit of the
     initialization-tracking since some of it may be uninitialized.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, old_size));

  memcpy (r, x, old_size);

  /* The old object is not supposed to be used anymore.  */
  ggc_free (x);

  return r;
}
void *
ggc_cleared_alloc_htab_ignore_args (size_t c ATTRIBUTE_UNUSED,
                                    size_t n ATTRIBUTE_UNUSED)
{
  gcc_assert (c * n == sizeof (struct htab));
  return ggc_alloc_cleared_htab ();
}

/* TODO: once we actually use type information in GGC, create a new tag
   gt_gcc_ptr_array and use it for pointer arrays.  */
void *
ggc_cleared_alloc_ptr_array_two_args (size_t c, size_t n)
{
  gcc_assert (sizeof (PTR *) == n);
  return ggc_internal_cleared_vec_alloc (sizeof (PTR *), c);
}

/* These are for splay_tree_new_ggc.  */
void *
ggc_splay_alloc (int sz, void *nl)
{
  gcc_assert (!nl);
  return ggc_internal_alloc (sz);
}

void
ggc_splay_dont_free (void * x ATTRIBUTE_UNUSED, void *nl)
{
  gcc_assert (!nl);
}
/* Print statistics that are independent of the collector in use.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
                  ? (x) \
                  : ((x) < 1024*1024*10 \
                     ? (x) / 1024 \
                     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
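/* A quick worked example of the two macros above: values below 10k are
   printed as-is, values below 10M in kilobytes, anything larger in
   megabytes.  So SCALE (5000) is 5000 with LABEL ' ', SCALE (2000000)
   is 1953 with LABEL 'k', and SCALE (300000000) is 286 with LABEL 'M'
   (integer division throughout).  */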
void
ggc_print_common_statistics (FILE *stream ATTRIBUTE_UNUSED,
                             ggc_statistics *stats)
{
  /* Set the pointer so that during collection we will actually gather
     the statistics.  */
  ggc_stats = stats;

  /* Then do one collection to fill in the statistics.  */
  ggc_collect ();

  /* At present, we don't really gather any interesting statistics.  */

  /* Don't gather statistics any more.  */
  ggc_stats = NULL;
}
/* Functions for saving and restoring GCable memory to disk.  */

static htab_t saving_htab;

struct ptr_data
{
  void *obj;
  void *note_ptr_cookie;
  gt_note_pointers note_ptr_fn;
  gt_handle_reorder reorder_fn;
  size_t size;
  void *new_addr;
};

#define POINTER_HASH(x) (hashval_t)((intptr_t)x >> 3)
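/* A note on POINTER_HASH: GC-allocated objects are (at least on common
   hosts) aligned to eight bytes or more, so the low three bits of their
   addresses carry little or no information; shifting them out avoids
   clustering the hash values of consecutively allocated objects.  */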
/* Register an object in the hash table.  */

int
gt_pch_note_object (void *obj, void *note_ptr_cookie,
                    gt_note_pointers note_ptr_fn)
{
  struct ptr_data **slot;

  if (obj == NULL || obj == (void *) 1)
    return 0;

  slot = (struct ptr_data **)
    htab_find_slot_with_hash (saving_htab, obj, POINTER_HASH (obj),
                              INSERT);
  if (*slot != NULL)
    {
      gcc_assert ((*slot)->note_ptr_fn == note_ptr_fn
                  && (*slot)->note_ptr_cookie == note_ptr_cookie);
      return 0;
    }

  *slot = XCNEW (struct ptr_data);
  (*slot)->obj = obj;
  (*slot)->note_ptr_fn = note_ptr_fn;
  (*slot)->note_ptr_cookie = note_ptr_cookie;
  if (note_ptr_fn == gt_pch_p_S)
    (*slot)->size = strlen ((const char *)obj) + 1;
  else
    (*slot)->size = ggc_get_size (obj);
  return 1;
}
/* Register a reordering function for an object already in the hash
   table.  */

void
gt_pch_note_reorder (void *obj, void *note_ptr_cookie,
                     gt_handle_reorder reorder_fn)
{
  struct ptr_data *data;

  if (obj == NULL || obj == (void *) 1)
    return;

  data = (struct ptr_data *)
    htab_find_with_hash (saving_htab, obj, POINTER_HASH (obj));
  gcc_assert (data && data->note_ptr_cookie == note_ptr_cookie);

  data->reorder_fn = reorder_fn;
}
/* Hash and equality functions for saving_htab, callbacks for htab_create.  */

static hashval_t
saving_htab_hash (const void *p)
{
  return POINTER_HASH (((const struct ptr_data *)p)->obj);
}

static int
saving_htab_eq (const void *p1, const void *p2)
{
  return ((const struct ptr_data *)p1)->obj == p2;
}
/* Handy state for the traversal functions.  */

struct traversal_state
{
  FILE *f;
  struct ggc_pch_data *d;
  size_t count;
  struct ptr_data **ptrs;
  size_t ptrs_i;
};
/* Callbacks for htab_traverse.  */

static int
call_count (void **slot, void *state_p)
{
  struct ptr_data *d = (struct ptr_data *)*slot;
  struct traversal_state *state = (struct traversal_state *)state_p;

  ggc_pch_count_object (state->d, d->obj, d->size,
                        d->note_ptr_fn == gt_pch_p_S);
  state->count++;
  return 1;
}

static int
call_alloc (void **slot, void *state_p)
{
  struct ptr_data *d = (struct ptr_data *)*slot;
  struct traversal_state *state = (struct traversal_state *)state_p;

  d->new_addr = ggc_pch_alloc_object (state->d, d->obj, d->size,
                                      d->note_ptr_fn == gt_pch_p_S);
  state->ptrs[state->ptrs_i++] = d;
  return 1;
}
/* Callback for qsort.  */

static int
compare_ptr_data (const void *p1_p, const void *p2_p)
{
  const struct ptr_data *const p1 = *(const struct ptr_data *const *)p1_p;
  const struct ptr_data *const p2 = *(const struct ptr_data *const *)p2_p;
  return (((size_t)p1->new_addr > (size_t)p2->new_addr)
          - ((size_t)p1->new_addr < (size_t)p2->new_addr));
}
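/* The (a > b) - (a < b) idiom above yields 1, 0 or -1 directly, avoiding
   the overflow that a plain subtraction of the two addresses cast to a
   signed type could produce, so the comparison is safe for any pair of
   pointers.  */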
/* Callbacks for note_ptr_fn.  */

static void
relocate_ptrs (void *ptr_p, void *state_p)
{
  void **ptr = (void **)ptr_p;
  struct traversal_state *state ATTRIBUTE_UNUSED
    = (struct traversal_state *)state_p;
  struct ptr_data *result;

  if (*ptr == NULL || *ptr == (void *)1)
    return;

  result = (struct ptr_data *)
    htab_find_with_hash (saving_htab, *ptr, POINTER_HASH (*ptr));
  gcc_assert (result);
  *ptr = result->new_addr;
}
/* Write out, after relocation, the pointers in TAB.  */
static void
write_pch_globals (const struct ggc_root_tab * const *tab,
                   struct traversal_state *state)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;

  for (rt = tab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        {
          void *ptr = *(void **)((char *)rti->base + rti->stride * i);
          struct ptr_data *new_ptr;
          if (ptr == NULL || ptr == (void *)1)
            {
              if (fwrite (&ptr, sizeof (void *), 1, state->f)
                  != 1)
                fatal_error ("can%'t write PCH file: %m");
            }
          else
            {
              new_ptr = (struct ptr_data *)
                htab_find_with_hash (saving_htab, ptr, POINTER_HASH (ptr));
              if (fwrite (&new_ptr->new_addr, sizeof (void *), 1, state->f)
                  != 1)
                fatal_error ("can%'t write PCH file: %m");
            }
        }
}
/* Hold the information we need to mmap the file back in.  */

struct mmap_info
{
  size_t offset;
  size_t size;
  void *preferred_base;
};
/* Write out the state of the compiler to F.  */

void
gt_pch_save (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct traversal_state state;
  char *this_object = NULL;
  size_t this_object_size = 0;
  struct mmap_info mmi;
  const size_t mmap_offset_alignment = host_hooks.gt_pch_alloc_granularity();

  gt_pch_save_stringpool ();

  timevar_push (TV_PCH_PTR_REALLOC);
  saving_htab = htab_create (50000, saving_htab_hash, saving_htab_eq, free);

  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        (*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  for (rt = gt_pch_cache_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        (*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  /* Prepare the objects for writing, determine addresses and such.  */
  state.f = f;
  state.d = init_ggc_pch ();
  state.count = 0;
  htab_traverse (saving_htab, call_count, &state);

  mmi.size = ggc_pch_total_size (state.d);

  /* Try to arrange things so that no relocation is necessary, but
     don't try very hard.  On most platforms, this will always work,
     and on the rest it's a lot of work to do better.
     (The extra work goes in HOST_HOOKS_GT_PCH_GET_ADDRESS and
     HOST_HOOKS_GT_PCH_USE_ADDRESS.)  */
  mmi.preferred_base = host_hooks.gt_pch_get_address (mmi.size, fileno (f));

  ggc_pch_this_base (state.d, mmi.preferred_base);

  state.ptrs = XNEWVEC (struct ptr_data *, state.count);
  state.ptrs_i = 0;

  htab_traverse (saving_htab, call_alloc, &state);
  timevar_pop (TV_PCH_PTR_REALLOC);

  timevar_push (TV_PCH_PTR_SORT);
  qsort (state.ptrs, state.count, sizeof (*state.ptrs), compare_ptr_data);
  timevar_pop (TV_PCH_PTR_SORT);

  /* Write out all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fwrite (rti->base, rti->stride, 1, f) != 1)
        fatal_error ("can%'t write PCH file: %m");

  /* Write out all the global pointers, after translation.  */
  write_pch_globals (gt_ggc_rtab, &state);
  write_pch_globals (gt_pch_cache_rtab, &state);

  /* Pad the PCH file so that the mmapped area starts on an allocation
     granularity (usually page) boundary.  */
  {
    long o;
    o = ftell (state.f) + sizeof (mmi);
    if (o == -1)
      fatal_error ("can%'t get position in PCH file: %m");
    mmi.offset = mmap_offset_alignment - o % mmap_offset_alignment;
    if (mmi.offset == mmap_offset_alignment)
      mmi.offset = 0;
    mmi.offset += o;
  }
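  /* A worked example of the padding arithmetic, with illustrative numbers:
     if ftell plus sizeof (mmi) gives o == 16024 and the allocation
     granularity is 4096, then 16024 % 4096 == 3736, so mmi.offset starts
     as 4096 - 3736 == 360 and becomes 360 + 16024 == 16384, which is
     exactly 4 * 4096, i.e. the next granularity boundary at or after o.  */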
  if (fwrite (&mmi, sizeof (mmi), 1, state.f) != 1)
    fatal_error ("can%'t write PCH file: %m");
  if (mmi.offset != 0
      && fseek (state.f, mmi.offset, SEEK_SET) != 0)
    fatal_error ("can%'t write padding to PCH file: %m");

  ggc_pch_prepare_write (state.d, state.f);

#if defined ENABLE_VALGRIND_CHECKING && defined VALGRIND_GET_VBITS
  vec<char> vbits = vNULL;
#endif

  /* Actually write out the objects.  */
  for (i = 0; i < state.count; i++)
    {
      if (this_object_size < state.ptrs[i]->size)
        {
          this_object_size = state.ptrs[i]->size;
          this_object = XRESIZEVAR (char, this_object, this_object_size);
        }
#if defined ENABLE_VALGRIND_CHECKING && defined VALGRIND_GET_VBITS
      /* obj might contain uninitialized bytes, e.g. in the trailing
         padding of the object.  Avoid warnings by making the memory
         temporarily defined and then restoring previous state.  */
      int get_vbits = 0;
      size_t valid_size = state.ptrs[i]->size;
      if (__builtin_expect (RUNNING_ON_VALGRIND, 0))
        {
          if (vbits.length () < valid_size)
            vbits.safe_grow (valid_size);
          get_vbits = VALGRIND_GET_VBITS (state.ptrs[i]->obj,
                                          vbits.address (), valid_size);
          if (get_vbits == 3)
            {
              /* We assume that first part of obj is addressable, and
                 the rest is unaddressable.  Find out where the boundary is
                 using binary search.  */
              size_t lo = 0, hi = valid_size;
              while (hi > lo)
                {
                  size_t mid = (lo + hi) / 2;
                  get_vbits = VALGRIND_GET_VBITS ((char *) state.ptrs[i]->obj
                                                  + mid, vbits.address (),
                                                  1);
                  if (get_vbits == 3)
                    hi = mid;
                  else if (get_vbits == 1)
                    lo = mid + 1;
                  else
                    break;
                }
              if (get_vbits == 1 || get_vbits == 3)
                {
                  valid_size = lo;
                  get_vbits = VALGRIND_GET_VBITS (state.ptrs[i]->obj,
                                                  vbits.address (),
                                                  valid_size);
                }
            }
          if (get_vbits == 1)
            VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (state.ptrs[i]->obj,
                                                         state.ptrs[i]->size));
        }
#endif
      memcpy (this_object, state.ptrs[i]->obj, state.ptrs[i]->size);
      if (state.ptrs[i]->reorder_fn != NULL)
        state.ptrs[i]->reorder_fn (state.ptrs[i]->obj,
                                   state.ptrs[i]->note_ptr_cookie,
                                   relocate_ptrs, &state);
      state.ptrs[i]->note_ptr_fn (state.ptrs[i]->obj,
                                  state.ptrs[i]->note_ptr_cookie,
                                  relocate_ptrs, &state);
      ggc_pch_write_object (state.d, state.f, state.ptrs[i]->obj,
                            state.ptrs[i]->new_addr, state.ptrs[i]->size,
                            state.ptrs[i]->note_ptr_fn == gt_pch_p_S);
      if (state.ptrs[i]->note_ptr_fn != gt_pch_p_S)
        memcpy (state.ptrs[i]->obj, this_object, state.ptrs[i]->size);
#if defined ENABLE_VALGRIND_CHECKING && defined VALGRIND_GET_VBITS
      if (__builtin_expect (get_vbits == 1, 0))
        {
          (void) VALGRIND_SET_VBITS (state.ptrs[i]->obj, vbits.address (),
                                     valid_size);
          if (valid_size != state.ptrs[i]->size)
            VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *)
                                                          state.ptrs[i]->obj
                                                          + valid_size,
                                                          state.ptrs[i]->size
                                                          - valid_size));
        }
#endif
    }
#if defined ENABLE_VALGRIND_CHECKING && defined VALGRIND_GET_VBITS
  vbits.release ();
#endif

  ggc_pch_finish (state.d, state.f);
  gt_pch_fixup_stringpool ();

  XDELETE (state.ptrs);
  XDELETE (this_object);
  htab_delete (saving_htab);
}
/* Read the state of the compiler back in from F.  */

void
gt_pch_restore (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct mmap_info mmi;
  int result;

  /* Delete any deletable objects.  This makes ggc_pch_read much
     faster, as it can be sure that no GCable objects remain other
     than the ones just read in.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Read in all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fread (rti->base, rti->stride, 1, f) != 1)
        fatal_error ("can%'t read PCH file: %m");

  /* Read in all the global pointers, in 6 easy loops.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        if (fread ((char *)rti->base + rti->stride * i,
                   sizeof (void *), 1, f) != 1)
          fatal_error ("can%'t read PCH file: %m");

  for (rt = gt_pch_cache_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        if (fread ((char *)rti->base + rti->stride * i,
                   sizeof (void *), 1, f) != 1)
          fatal_error ("can%'t read PCH file: %m");

  if (fread (&mmi, sizeof (mmi), 1, f) != 1)
    fatal_error ("can%'t read PCH file: %m");

  result = host_hooks.gt_pch_use_address (mmi.preferred_base, mmi.size,
                                          fileno (f), mmi.offset);
  if (result < 0)
    fatal_error ("had to relocate PCH");
  if (result == 0)
    {
      if (fseek (f, mmi.offset, SEEK_SET) != 0
          || fread (mmi.preferred_base, mmi.size, 1, f) != 1)
        fatal_error ("can%'t read PCH file: %m");
    }
  else if (fseek (f, mmi.offset + mmi.size, SEEK_SET) != 0)
    fatal_error ("can%'t read PCH file: %m");

  ggc_pch_read (f, mmi.preferred_base);

  gt_pch_restore_stringpool ();
}
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is not present.
   Select no address whatsoever, and let gt_pch_save choose what it will with
   malloc, presumably.  */

void *
default_gt_pch_get_address (size_t size ATTRIBUTE_UNUSED,
                            int fd ATTRIBUTE_UNUSED)
{
  return NULL;
}
/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is not present.
   Allocate SIZE bytes with malloc.  Return 0 if the address we got is the
   same as BASE, indicating that the memory has been allocated but needs to
   be read in from the file.  Return -1 if the address differs, since
   relocation of the PCH file would then be required.  */

int
default_gt_pch_use_address (void *base, size_t size, int fd ATTRIBUTE_UNUSED,
                            size_t offset ATTRIBUTE_UNUSED)
{
  void *addr = xmalloc (size);
  return (addr == base) - 1;
}
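/* Note that (addr == base) - 1 is just a branch-free encoding of the two
   possible results: the comparison is 1 when the addresses match, giving
   0, and 0 when they differ, giving -1.  */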
/* Default version of HOST_HOOKS_GT_PCH_ALLOC_GRANULARITY.  Return the
   alignment required for allocating virtual memory.  Usually this is the
   same as pagesize.  */

size_t
default_gt_pch_alloc_granularity (void)
{
  return getpagesize();
}
#if HAVE_MMAP_FILE
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is present.
   We temporarily allocate SIZE bytes, and let the kernel place the data
   wherever it will.  If it worked, that's our spot, if not we're likely
   to be in trouble.  */

void *
mmap_gt_pch_get_address (size_t size, int fd)
{
  void *ret;

  ret = mmap (NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
  if (ret == (void *) MAP_FAILED)
    ret = NULL;
  else
    munmap ((caddr_t) ret, size);

  return ret;
}
/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is present.
   Map SIZE bytes of FD+OFFSET at BASE.  Return 1 if we succeeded at
   mapping the data at BASE, -1 if we couldn't.

   This version assumes that the kernel honors the START operand of mmap
   even without MAP_FIXED if START through START+SIZE are not currently
   mapped with something.  */

int
mmap_gt_pch_use_address (void *base, size_t size, int fd, size_t offset)
{
  void *addr;

  /* We're called with size == 0 if we're not planning to load a PCH
     file at all.  This allows the hook to free any static space that
     we might have allocated at link time.  */
  if (size == 0)
    return -1;

  addr = mmap ((caddr_t) base, size, PROT_READ | PROT_WRITE, MAP_PRIVATE,
               fd, offset);

  return addr == base ? 1 : -1;
}
#endif /* HAVE_MMAP_FILE */
#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT

/* Modify the bound based on rlimits.  */
static double
ggc_rlimit_bound (double limit)
{
#if defined(HAVE_GETRLIMIT)
  struct rlimit rlim;
# if defined (RLIMIT_AS)
  /* RLIMIT_AS is what POSIX says is the limit on mmap.  Presumably
     any OS which has RLIMIT_AS also has a working mmap that GCC will use.  */
  if (getrlimit (RLIMIT_AS, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# elif defined (RLIMIT_DATA)
  /* ... but some older OSs bound mmap based on RLIMIT_DATA, or we
     might be on an OS that has a broken mmap.  (Others don't bound
     mmap at all, apparently.)  */
  if (getrlimit (RLIMIT_DATA, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit
      /* Darwin has this horribly bogus default setting of
         RLIMIT_DATA, to 6144Kb.  No-one notices because RLIMIT_DATA
         appears to be ignored.  Ignore such silliness.  If a limit
         this small was actually effective for mmap, GCC wouldn't even
         start up.  */
      && rlim.rlim_cur >= 8 * 1024 * 1024)
    limit = rlim.rlim_cur;
# endif /* RLIMIT_AS or RLIMIT_DATA */
#endif /* HAVE_GETRLIMIT */

  return limit;
}
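/* For instance (illustrative numbers only): on a machine with 8GB of
   physical memory but a 2GB RLIMIT_AS soft limit, ggc_rlimit_bound
   called with 8GB returns 2GB, so the heuristics below size the GC
   parameters against the memory that is actually usable.  */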
/* Heuristic to set a default for GGC_MIN_EXPAND.  */
static int
ggc_min_expand_heuristic (void)
{
  double min_expand = physmem_total();

  /* Adjust for rlimits.  */
  min_expand = ggc_rlimit_bound (min_expand);

  /* The heuristic is a percentage equal to 30% + 70%*(RAM/1GB), yielding
     a lower bound of 30% and an upper bound of 100% (when RAM >= 1GB).  */
  min_expand /= 1024*1024*1024;
  min_expand *= 70;
  min_expand = MIN (min_expand, 70);
  min_expand += 30;

  return min_expand;
}
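/* A quick check of the formula above with an assumed 512MB of usable
   RAM: 512MB / 1GB == 0.5, times 70 gives 35, which survives the MIN
   against 70; plus 30 yields a GGC_MIN_EXPAND of 65%.  With 1GB or more
   the MIN clamps the term to 70 and the result saturates at 100%.  */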
/* Heuristic to set a default for GGC_MIN_HEAPSIZE.  */
static int
ggc_min_heapsize_heuristic (void)
{
  double phys_kbytes = physmem_total();
  double limit_kbytes = ggc_rlimit_bound (phys_kbytes * 2);

  phys_kbytes /= 1024; /* Convert to Kbytes.  */
  limit_kbytes /= 1024;

  /* The heuristic is RAM/8, with a lower bound of 4M and an upper
     bound of 128M (when RAM >= 1GB).  */
  phys_kbytes /= 8;

#if defined(HAVE_GETRLIMIT) && defined (RLIMIT_RSS)
  /* Try not to overrun the RSS limit while doing garbage collection.
     The RSS limit is only advisory, so no margin is subtracted.  */
  {
    struct rlimit rlim;
    if (getrlimit (RLIMIT_RSS, &rlim) == 0
        && rlim.rlim_cur != (rlim_t) RLIM_INFINITY)
      phys_kbytes = MIN (phys_kbytes, rlim.rlim_cur / 1024);
  }
#endif

  /* Don't blindly run over our data limit; do GC at least when the
     *next* GC would be within 20Mb of the limit or within a quarter of
     the limit, whichever is larger.  If GCC does hit the data limit,
     compilation will fail, so this tries to be conservative.  */
  limit_kbytes = MAX (0, limit_kbytes - MAX (limit_kbytes / 4, 20 * 1024));
  limit_kbytes = (limit_kbytes * 100) / (110 + ggc_min_expand_heuristic ());
  phys_kbytes = MIN (phys_kbytes, limit_kbytes);

  phys_kbytes = MAX (phys_kbytes, 4 * 1024);
  phys_kbytes = MIN (phys_kbytes, 128 * 1024);

  return phys_kbytes;
}
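/* Worked example with an assumed 2GB of RAM and no rlimits in effect:
   phys_kbytes starts at 2097152K; divided by 8 that is 262144K (256MB);
   the final clamps keep the result between 4096K and 131072K, so
   GGC_MIN_HEAPSIZE comes out at the 128MB ceiling.  */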
#endif

void
init_ggc_heuristics (void)
{
#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
  set_default_param_value (GGC_MIN_EXPAND, ggc_min_expand_heuristic ());
  set_default_param_value (GGC_MIN_HEAPSIZE, ggc_min_heapsize_heuristic ());
#endif
}
/* Data structure used to store per-call-site statistics.  */
struct loc_descriptor
{
  const char *file;
  int line;
  const char *function;
  int times;
  size_t allocated;
  size_t overhead;
  size_t freed;
  size_t collected;
};

/* Hash table used for statistics.  */
static htab_t loc_hash;
/* Hash table helper functions.  */
static hashval_t
hash_descriptor (const void *p)
{
  const struct loc_descriptor *const d = (const struct loc_descriptor *) p;

  return htab_hash_pointer (d->function) | d->line;
}

static int
eq_descriptor (const void *p1, const void *p2)
{
  const struct loc_descriptor *const d = (const struct loc_descriptor *) p1;
  const struct loc_descriptor *const d2 = (const struct loc_descriptor *) p2;

  return (d->file == d2->file && d->line == d2->line
          && d->function == d2->function);
}
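/* Note that hash_descriptor mixes only the function-name pointer and the
   line number, so descriptors for the same line of different files can
   land in the same bucket; that is harmless, because eq_descriptor
   compares file, line and function and keeps them distinct.  */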
/* Hash table mapping the address of an allocated object to its loc
   descriptor.  */
static htab_t ptr_hash;
struct ptr_hash_entry
{
  void *ptr;
  struct loc_descriptor *loc;
  size_t size;
};

/* Hash table helper functions.  */
static hashval_t
hash_ptr (const void *p)
{
  const struct ptr_hash_entry *const d = (const struct ptr_hash_entry *) p;

  return htab_hash_pointer (d->ptr);
}

static int
eq_ptr (const void *p1, const void *p2)
{
  const struct ptr_hash_entry *const p = (const struct ptr_hash_entry *) p1;

  return (p->ptr == p2);
}
/* Return descriptor for given call site, create new one if needed.  */
static struct loc_descriptor *
loc_descriptor (const char *name, int line, const char *function)
{
  struct loc_descriptor loc;
  struct loc_descriptor **slot;

  loc.file = name;
  loc.line = line;
  loc.function = function;
  if (!loc_hash)
    loc_hash = htab_create (10, hash_descriptor, eq_descriptor, NULL);

  slot = (struct loc_descriptor **) htab_find_slot (loc_hash, &loc, INSERT);
  if (*slot)
    return *slot;
  *slot = XCNEW (struct loc_descriptor);
  (*slot)->file = name;
  (*slot)->line = line;
  (*slot)->function = function;
  return *slot;
}
/* Record ALLOCATED and OVERHEAD bytes to descriptor NAME:LINE (FUNCTION).  */
void
ggc_record_overhead (size_t allocated, size_t overhead, void *ptr,
                     const char *name, int line, const char *function)
{
  struct loc_descriptor *loc = loc_descriptor (name, line, function);
  struct ptr_hash_entry *p = XNEW (struct ptr_hash_entry);
  PTR *slot;

  p->ptr = ptr;
  p->loc = loc;
  p->size = allocated + overhead;
  if (!ptr_hash)
    ptr_hash = htab_create (10, hash_ptr, eq_ptr, NULL);
  slot = htab_find_slot_with_hash (ptr_hash, ptr, htab_hash_pointer (ptr), INSERT);
  gcc_assert (!*slot);
  *slot = p;

  loc->times++;
  loc->allocated += allocated;
  loc->overhead += overhead;
}
/* Helper function for prune_overhead_list.  See if SLOT is still marked and
   remove it from the hash table if it is not.  */
static int
ggc_prune_ptr (void **slot, void *b ATTRIBUTE_UNUSED)
{
  struct ptr_hash_entry *p = (struct ptr_hash_entry *) *slot;
  if (!ggc_marked_p (p->ptr))
    {
      p->loc->collected += p->size;
      htab_clear_slot (ptr_hash, slot);
      free (p);
    }
  return 1;
}
/* After live values have been marked, walk all recorded pointers and see if
   they are still live.  */
void
ggc_prune_overhead_list (void)
{
  htab_traverse (ptr_hash, ggc_prune_ptr, NULL);
}
/* Notice that the pointer has been freed.  */
void
ggc_free_overhead (void *ptr)
{
  PTR *slot = htab_find_slot_with_hash (ptr_hash, ptr, htab_hash_pointer (ptr),
                                        NO_INSERT);
  struct ptr_hash_entry *p;
  /* The pointer might not be found if a PCH read happened between allocation
     and the ggc_free () call.  FIXME: account memory properly in the
     presence of PCH.  */
  if (!slot)
    return;
  p = (struct ptr_hash_entry *) *slot;
  p->loc->freed += p->size;
  htab_clear_slot (ptr_hash, slot);
  free (p);
}
/* Helper for qsort; sort descriptors by amount of memory consumed.  */
static int
final_cmp_statistic (const void *loc1, const void *loc2)
{
  const struct loc_descriptor *const l1 =
    *(const struct loc_descriptor *const *) loc1;
  const struct loc_descriptor *const l2 =
    *(const struct loc_descriptor *const *) loc2;
  long diff;
  diff = ((long)(l1->allocated + l1->overhead - l1->freed) -
          (l2->allocated + l2->overhead - l2->freed));
  return diff > 0 ? 1 : diff < 0 ? -1 : 0;
}

/* Helper for qsort; sort descriptors by amount of memory consumed.  */
static int
cmp_statistic (const void *loc1, const void *loc2)
{
  const struct loc_descriptor *const l1 =
    *(const struct loc_descriptor *const *) loc1;
  const struct loc_descriptor *const l2 =
    *(const struct loc_descriptor *const *) loc2;
  long diff;

  diff = ((long)(l1->allocated + l1->overhead - l1->freed - l1->collected) -
          (l2->allocated + l2->overhead - l2->freed - l2->collected));
  if (diff)
    return diff > 0 ? 1 : diff < 0 ? -1 : 0;
  diff = ((long)(l1->allocated + l1->overhead - l1->freed) -
          (l2->allocated + l2->overhead - l2->freed));
  return diff > 0 ? 1 : diff < 0 ? -1 : 0;
}
/* Collect an array of the descriptors from the hash table.  */
static struct loc_descriptor **loc_array;
static int
add_statistics (void **slot, void *b)
{
  int *n = (int *)b;
  loc_array[*n] = (struct loc_descriptor *) *slot;
  (*n)++;
  return 1;
}
/* Dump per-site memory statistics.  */

void
dump_ggc_loc_statistics (bool final)
{
  int nentries = 0;
  char s[4096];
  size_t collected = 0, freed = 0, allocated = 0, overhead = 0, times = 0;
  int i;

  if (! GATHER_STATISTICS)
    return;

  ggc_force_collect = true;
  ggc_collect ();

  loc_array = XCNEWVEC (struct loc_descriptor *, loc_hash->n_elements);
  fprintf (stderr, "-------------------------------------------------------\n");
  fprintf (stderr, "\n%-48s %10s %10s %10s %10s %10s\n",
           "source location", "Garbage", "Freed", "Leak", "Overhead", "Times");
  fprintf (stderr, "-------------------------------------------------------\n");
  htab_traverse (loc_hash, add_statistics, &nentries);
  qsort (loc_array, nentries, sizeof (*loc_array),
         final ? final_cmp_statistic : cmp_statistic);
  for (i = 0; i < nentries; i++)
    {
      struct loc_descriptor *d = loc_array[i];
      allocated += d->allocated;
      times += d->times;
      freed += d->freed;
      collected += d->collected;
      overhead += d->overhead;
    }
  for (i = 0; i < nentries; i++)
    {
      struct loc_descriptor *d = loc_array[i];
      if (d->allocated)
        {
          const char *s1 = d->file;
          const char *s2;
          while ((s2 = strstr (s1, "gcc/")))
            s1 = s2 + 4;
          sprintf (s, "%s:%i (%s)", s1, d->line, d->function);
          s[48] = 0;
          fprintf (stderr, "%-48s %10li:%4.1f%% %10li:%4.1f%% %10li:%4.1f%% %10li:%4.1f%% %10li\n", s,
                   (long)d->collected,
                   (d->collected) * 100.0 / collected,
                   (long)d->freed,
                   (d->freed) * 100.0 / freed,
                   (long)(d->allocated + d->overhead - d->freed - d->collected),
                   (d->allocated + d->overhead - d->freed - d->collected) * 100.0
                   / (allocated + overhead - freed - collected),
                   (long)d->overhead,
                   d->overhead * 100.0 / overhead,
                   (long)d->times);
        }
    }
  fprintf (stderr, "%-48s %10ld %10ld %10ld %10ld %10ld\n",
           "Total", (long)collected, (long)freed,
           (long)(allocated + overhead - freed - collected), (long)overhead,
           (long)times);
  fprintf (stderr, "%-48s %10s %10s %10s %10s %10s\n",
           "source location", "Garbage", "Freed", "Leak", "Overhead", "Times");
  fprintf (stderr, "-------------------------------------------------------\n");
  ggc_force_collect = false;
}