/* Simple garbage collection for the GNU compiler.
   Copyright (C) 1999-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Generic garbage collection (GC) functions and data, not specific to
   any particular GC implementation.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-table.h"
#include "ggc.h"
#include "ggc-internal.h"
#include "diagnostic-core.h"
#include "params.h"
#include "hosthooks.h"
#include "hosthooks-def.h"
#include "plugin.h"
#include "vec.h"
#include "timevar.h"
#include "mem-stats.h"
/* When set, ggc_collect will do collection.  */
bool ggc_force_collect;

/* When true, protect the contents of the identifier hash table.  */
bool ggc_protect_identifiers = true;

/* Statistics about the allocation.  */
static ggc_statistics *ggc_stats;

struct traversal_state;

static int compare_ptr_data (const void *, const void *);
static void relocate_ptrs (void *, void *);
static void write_pch_globals (const struct ggc_root_tab * const *tab,
                               struct traversal_state *state);
/* Maintain global roots that are preserved during GC.  */

/* This extra vector of dynamically registered root_tab-s is used by
   ggc_mark_roots and gives the ability to dynamically add new GGC root
   tables, for instance from some plugins; this vector is on the heap
   since it is used by GGC internally.  */
typedef const struct ggc_root_tab *const_ggc_root_tab_t;
static vec<const_ggc_root_tab_t> extra_root_vec;
/* Dynamically register a new GGC root table RT.  This is useful for
   plugins.  */

void
ggc_register_root_tab (const struct ggc_root_tab* rt)
{
  if (rt)
    extra_root_vec.safe_push (rt);
}
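/* A minimal sketch of how a plugin might use this.  The names
   my_plugin_data and my_mark_ptr below are hypothetical; a real plugin
   would normally use a marker generated by gengtype for its type as the
   callback, and would need more than a bare ggc_set_mark for objects
   that themselves contain GC pointers:

     static void *my_plugin_data;

     static void
     my_mark_ptr (void *p)
     {
       if (p)
         ggc_set_mark (p);
     }

     static const struct ggc_root_tab my_root_tab[] = {
       { &my_plugin_data, 1, sizeof (my_plugin_data), my_mark_ptr, NULL },
       LAST_GGC_ROOT_TAB
     };

     ggc_register_root_tab (my_root_tab);  */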
/* Mark all the roots in the table RT.  */

static void
ggc_mark_root_tab (const_ggc_root_tab_t rt)
{
  size_t i;

  for ( ; rt->base != NULL; rt++)
    for (i = 0; i < rt->nelt; i++)
      (*rt->cb) (*(void **) ((char *)rt->base + rt->stride * i));
}
/* Iterate through all registered roots and mark each element.  */

void
ggc_mark_roots (void)
{
  const struct ggc_root_tab *const *rt;
  const_ggc_root_tab_t rtp, rti;
  size_t i;

  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  for (rt = gt_ggc_rtab; *rt; rt++)
    ggc_mark_root_tab (*rt);

  FOR_EACH_VEC_ELT (extra_root_vec, i, rtp)
    ggc_mark_root_tab (rtp);

  if (ggc_protect_identifiers)
    ggc_mark_stringpool ();

  gt_clear_caches ();

  if (! ggc_protect_identifiers)
    ggc_purge_stringpool ();

  /* Some plugins may call ggc_set_mark from here.  */
  invoke_plugin_callbacks (PLUGIN_GGC_MARKING, NULL);
}
/* Allocate a block of memory, then clear it.  */
void *
ggc_internal_cleared_alloc (size_t size, void (*f)(void *), size_t s, size_t n
                            MEM_STAT_DECL)
{
  void *buf = ggc_internal_alloc (size, f, s, n PASS_MEM_STAT);
  memset (buf, 0, size);
  return buf;
}
/* Resize a block of memory, possibly re-allocating it.  */
void *
ggc_realloc (void *x, size_t size MEM_STAT_DECL)
{
  void *r;
  size_t old_size;

  if (x == NULL)
    return ggc_internal_alloc (size PASS_MEM_STAT);

  old_size = ggc_get_size (x);

  if (size <= old_size)
    {
      /* Mark the unwanted memory as inaccessible.  We also need to make
         the "new" size accessible, since ggc_get_size returns the size of
         the pool, not the size of the individually allocated object, the
         size which was previously made accessible.  Unfortunately, we
         don't know that previously allocated size.  Without that
         knowledge we have to lose some initialization-tracking for the
         old parts of the object.  An alternative is to mark the whole
         old_size as reachable, but that would lose tracking of writes
         after the end of the object (by small offsets).  Discard the
         handle to avoid handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) x + size,
                                                    old_size - size));
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, size));
      return x;
    }

  r = ggc_internal_alloc (size PASS_MEM_STAT);

  /* Since ggc_get_size returns the size of the pool, not the size of the
     individually allocated object, we'd access parts of the old object
     that were marked invalid with the memcpy below.  We lose a bit of the
     initialization-tracking since some of it may be uninitialized.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, old_size));

  memcpy (r, x, old_size);

  /* The old object is not supposed to be used anymore.  */
  ggc_free (x);

  return r;
}
void *
ggc_cleared_alloc_htab_ignore_args (size_t c ATTRIBUTE_UNUSED,
                                    size_t n ATTRIBUTE_UNUSED)
{
  gcc_assert (c * n == sizeof (struct htab));
  return ggc_cleared_alloc<htab> ();
}

/* TODO: once we actually use type information in GGC, create a new tag
   gt_gcc_ptr_array and use it for pointer arrays.  */
void *
ggc_cleared_alloc_ptr_array_two_args (size_t c, size_t n)
{
  gcc_assert (sizeof (PTR *) == n);
  return ggc_cleared_vec_alloc<PTR *> (c);
}
/* These are for splay_tree_new_ggc.  */
void *
ggc_splay_alloc (int sz, void *nl)
{
  gcc_assert (!nl);
  return ggc_internal_alloc (sz);
}

void
ggc_splay_dont_free (void * x ATTRIBUTE_UNUSED, void *nl)
{
  gcc_assert (!nl);
}
/* Print statistics that are independent of the collector in use.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
                  ? (x) \
                  : ((x) < 1024*1024*10 \
                     ? (x) / 1024 \
                     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
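/* Usage sketch: the two macros are meant to be used together, printing a
   byte count NBYTES as a scaled number plus a unit letter, so 2048 comes
   out as "2048 ", 20480 as "20k", and 20971520 as "20M":

     fprintf (stream, "%lu%c", SCALE (nbytes), LABEL (nbytes));  */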
void
ggc_print_common_statistics (FILE *stream ATTRIBUTE_UNUSED,
                             ggc_statistics *stats)
{
  /* Set the pointer so that during collection we will actually gather
     the statistics.  */
  ggc_stats = stats;

  /* Then do one collection to fill in the statistics.  */
  ggc_collect ();

  /* At present, we don't really gather any interesting statistics.  */

  /* Don't gather statistics any more.  */
  ggc_stats = NULL;
}
/* Functions for saving and restoring GCable memory to disk.  */

struct ptr_data
{
  void *obj;
  void *note_ptr_cookie;
  gt_note_pointers note_ptr_fn;
  gt_handle_reorder reorder_fn;
  size_t size;
  void *new_addr;
};

#define POINTER_HASH(x) (hashval_t)((intptr_t)x >> 3)
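/* GC-allocated objects are typically aligned to at least 8 bytes, so the
   low three bits of their addresses carry no information; POINTER_HASH
   shifts them away to get a better-distributed hash value.  */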
/* Helper for hashing saving_htab.  */

struct saving_hasher : typed_free_remove <ptr_data>
{
  typedef ptr_data *value_type;
  typedef void *compare_type;
  static inline hashval_t hash (const ptr_data *);
  static inline bool equal (const ptr_data *, const void *);
};

inline hashval_t
saving_hasher::hash (const ptr_data *p)
{
  return POINTER_HASH (p->obj);
}

inline bool
saving_hasher::equal (const ptr_data *p1, const void *p2)
{
  return p1->obj == p2;
}

static hash_table<saving_hasher> *saving_htab;
/* Register an object in the hash table.  */

int
gt_pch_note_object (void *obj, void *note_ptr_cookie,
                    gt_note_pointers note_ptr_fn)
{
  struct ptr_data **slot;

  if (obj == NULL || obj == (void *) 1)
    return 0;

  slot = (struct ptr_data **)
    saving_htab->find_slot_with_hash (obj, POINTER_HASH (obj), INSERT);
  if (*slot != NULL)
    {
      gcc_assert ((*slot)->note_ptr_fn == note_ptr_fn
                  && (*slot)->note_ptr_cookie == note_ptr_cookie);
      return 0;
    }

  *slot = XCNEW (struct ptr_data);
  (*slot)->obj = obj;
  (*slot)->note_ptr_fn = note_ptr_fn;
  (*slot)->note_ptr_cookie = note_ptr_cookie;
  if (note_ptr_fn == gt_pch_p_S)
    (*slot)->size = strlen ((const char *)obj) + 1;
  else
    (*slot)->size = ggc_get_size (obj);
  return 1;
}
/* Register the reorder function REORDER_FN for an object already
   registered in the hash table.  */

void
gt_pch_note_reorder (void *obj, void *note_ptr_cookie,
                     gt_handle_reorder reorder_fn)
{
  struct ptr_data *data;

  if (obj == NULL || obj == (void *) 1)
    return;

  data = (struct ptr_data *)
    saving_htab->find_with_hash (obj, POINTER_HASH (obj));
  gcc_assert (data && data->note_ptr_cookie == note_ptr_cookie);

  data->reorder_fn = reorder_fn;
}
/* Handy state for the traversal functions.  */

struct traversal_state
{
  FILE *f;
  struct ggc_pch_data *d;
  size_t count;
  struct ptr_data **ptrs;
  size_t ptrs_i;
};
/* Callbacks for htab_traverse.  */

int
ggc_call_count (ptr_data **slot, traversal_state *state)
{
  struct ptr_data *d = *slot;

  ggc_pch_count_object (state->d, d->obj, d->size,
                        d->note_ptr_fn == gt_pch_p_S);
  state->count++;
  return 1;
}

int
ggc_call_alloc (ptr_data **slot, traversal_state *state)
{
  struct ptr_data *d = *slot;

  d->new_addr = ggc_pch_alloc_object (state->d, d->obj, d->size,
                                      d->note_ptr_fn == gt_pch_p_S);
  state->ptrs[state->ptrs_i++] = d;
  return 1;
}
/* Callback for qsort.  */

static int
compare_ptr_data (const void *p1_p, const void *p2_p)
{
  const struct ptr_data *const p1 = *(const struct ptr_data *const *)p1_p;
  const struct ptr_data *const p2 = *(const struct ptr_data *const *)p2_p;
  return (((size_t)p1->new_addr > (size_t)p2->new_addr)
          - ((size_t)p1->new_addr < (size_t)p2->new_addr));
}
/* Callbacks for note_ptr_fn.  */

static void
relocate_ptrs (void *ptr_p, void *state_p)
{
  void **ptr = (void **)ptr_p;
  struct traversal_state *state ATTRIBUTE_UNUSED
    = (struct traversal_state *)state_p;
  struct ptr_data *result;

  if (*ptr == NULL || *ptr == (void *)1)
    return;

  result = (struct ptr_data *)
    saving_htab->find_with_hash (*ptr, POINTER_HASH (*ptr));
  gcc_assert (result);
  *ptr = result->new_addr;
}
/* Write out, after relocation, the pointers in TAB.  */
static void
write_pch_globals (const struct ggc_root_tab * const *tab,
                   struct traversal_state *state)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;

  for (rt = tab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        {
          void *ptr = *(void **)((char *)rti->base + rti->stride * i);
          struct ptr_data *new_ptr;
          if (ptr == NULL || ptr == (void *)1)
            {
              if (fwrite (&ptr, sizeof (void *), 1, state->f)
                  != 1)
                fatal_error (input_location, "can%'t write PCH file: %m");
            }
          else
            {
              new_ptr = (struct ptr_data *)
                saving_htab->find_with_hash (ptr, POINTER_HASH (ptr));
              if (fwrite (&new_ptr->new_addr, sizeof (void *), 1, state->f)
                  != 1)
                fatal_error (input_location, "can%'t write PCH file: %m");
            }
        }
}
/* Hold the information we need to mmap the file back in.  */

struct mmap_info
{
  size_t offset;
  size_t size;
  void *preferred_base;
};
/* Write out the state of the compiler to F.  */

void
gt_pch_save (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct traversal_state state;
  char *this_object = NULL;
  size_t this_object_size = 0;
  struct mmap_info mmi;
  const size_t mmap_offset_alignment = host_hooks.gt_pch_alloc_granularity ();

  gt_pch_save_stringpool ();

  timevar_push (TV_PCH_PTR_REALLOC);
  saving_htab = new hash_table<saving_hasher> (50000);

  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        (*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  /* Prepare the objects for writing, determine addresses and such.  */
  state.f = f;
  state.d = init_ggc_pch ();
  state.count = 0;
  saving_htab->traverse <traversal_state *, ggc_call_count> (&state);

  mmi.size = ggc_pch_total_size (state.d);

  /* Try to arrange things so that no relocation is necessary, but
     don't try very hard.  On most platforms, this will always work,
     and on the rest it's a lot of work to do better.
     (The extra work goes in HOST_HOOKS_GT_PCH_GET_ADDRESS and
     HOST_HOOKS_GT_PCH_USE_ADDRESS.)  */
  mmi.preferred_base = host_hooks.gt_pch_get_address (mmi.size, fileno (f));

  ggc_pch_this_base (state.d, mmi.preferred_base);

  state.ptrs = XNEWVEC (struct ptr_data *, state.count);
  state.ptrs_i = 0;

  saving_htab->traverse <traversal_state *, ggc_call_alloc> (&state);
  timevar_pop (TV_PCH_PTR_REALLOC);

  timevar_push (TV_PCH_PTR_SORT);
  qsort (state.ptrs, state.count, sizeof (*state.ptrs), compare_ptr_data);
  timevar_pop (TV_PCH_PTR_SORT);

  /* Write out all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fwrite (rti->base, rti->stride, 1, f) != 1)
        fatal_error (input_location, "can%'t write PCH file: %m");

  /* Write out all the global pointers, after translation.  */
  write_pch_globals (gt_ggc_rtab, &state);

  /* Pad the PCH file so that the mmapped area starts on an allocation
     granularity (usually page) boundary.  */
  {
    long o;
    o = ftell (state.f) + sizeof (mmi);
    if (o == -1)
      fatal_error (input_location, "can%'t get position in PCH file: %m");
    mmi.offset = mmap_offset_alignment - o % mmap_offset_alignment;
    if (mmi.offset == mmap_offset_alignment)
      mmi.offset = 0;
    mmi.offset += o;
  }
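  /* A worked example of the computation above: with a 4096-byte
     granularity and o == 10000, o % 4096 is 1808, so mmi.offset becomes
     10000 + (4096 - 1808) = 12288, which is exactly 3 * 4096.  */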
  if (fwrite (&mmi, sizeof (mmi), 1, state.f) != 1)
    fatal_error (input_location, "can%'t write PCH file: %m");
  if (mmi.offset != 0
      && fseek (state.f, mmi.offset, SEEK_SET) != 0)
    fatal_error (input_location, "can%'t write padding to PCH file: %m");

  ggc_pch_prepare_write (state.d, state.f);

#if defined ENABLE_VALGRIND_CHECKING && defined VALGRIND_GET_VBITS
  vec<char> vbits = vNULL;
#endif
  /* Actually write out the objects.  */
  for (i = 0; i < state.count; i++)
    {
      if (this_object_size < state.ptrs[i]->size)
        {
          this_object_size = state.ptrs[i]->size;
          this_object = XRESIZEVAR (char, this_object, this_object_size);
        }
#if defined ENABLE_VALGRIND_CHECKING && defined VALGRIND_GET_VBITS
      /* obj might contain uninitialized bytes, e.g. in the trailing
         padding of the object.  Avoid warnings by making the memory
         temporarily defined and then restoring previous state.  */
      int get_vbits = 0;
      size_t valid_size = state.ptrs[i]->size;
      if (__builtin_expect (RUNNING_ON_VALGRIND, 0))
        {
          if (vbits.length () < valid_size)
            vbits.safe_grow (valid_size);
          get_vbits = VALGRIND_GET_VBITS (state.ptrs[i]->obj,
                                          vbits.address (), valid_size);
          if (get_vbits == 3)
            {
              /* We assume that first part of obj is addressable, and
                 the rest is unaddressable.  Find out where the boundary is
                 using binary search.  */
              size_t lo = 0, hi = valid_size;
              while (hi > lo)
                {
                  size_t mid = (lo + hi) / 2;
                  get_vbits = VALGRIND_GET_VBITS ((char *) state.ptrs[i]->obj
                                                  + mid, vbits.address (),
                                                  1);
                  if (get_vbits == 3)
                    hi = mid;
                  else if (get_vbits == 1)
                    lo = mid + 1;
                  else
                    break;
                }
              if (get_vbits == 1 || get_vbits == 3)
                {
                  valid_size = lo;
                  get_vbits = VALGRIND_GET_VBITS (state.ptrs[i]->obj,
                                                  vbits.address (),
                                                  valid_size);
                }
            }
          if (get_vbits == 1)
            VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (state.ptrs[i]->obj,
                                                         state.ptrs[i]->size));
        }
#endif
      memcpy (this_object, state.ptrs[i]->obj, state.ptrs[i]->size);
      if (state.ptrs[i]->reorder_fn != NULL)
        state.ptrs[i]->reorder_fn (state.ptrs[i]->obj,
                                   state.ptrs[i]->note_ptr_cookie,
                                   relocate_ptrs, &state);
      state.ptrs[i]->note_ptr_fn (state.ptrs[i]->obj,
                                  state.ptrs[i]->note_ptr_cookie,
                                  relocate_ptrs, &state);
      ggc_pch_write_object (state.d, state.f, state.ptrs[i]->obj,
                            state.ptrs[i]->new_addr, state.ptrs[i]->size,
                            state.ptrs[i]->note_ptr_fn == gt_pch_p_S);
      if (state.ptrs[i]->note_ptr_fn != gt_pch_p_S)
        memcpy (state.ptrs[i]->obj, this_object, state.ptrs[i]->size);
#if defined ENABLE_VALGRIND_CHECKING && defined VALGRIND_GET_VBITS
      if (__builtin_expect (get_vbits == 1, 0))
        {
          (void) VALGRIND_SET_VBITS (state.ptrs[i]->obj, vbits.address (),
                                     valid_size);
          if (valid_size != state.ptrs[i]->size)
            VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *)
                                                          state.ptrs[i]->obj
                                                          + valid_size,
                                                          state.ptrs[i]->size
                                                          - valid_size));
        }
#endif
    }
#if defined ENABLE_VALGRIND_CHECKING && defined VALGRIND_GET_VBITS
  vbits.release ();
#endif
  ggc_pch_finish (state.d, state.f);
  gt_pch_fixup_stringpool ();

  XDELETE (state.ptrs);
  XDELETE (this_object);
  delete saving_htab;
  saving_htab = NULL;
}
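/* To summarize the layout produced above: a PCH file contains the scalar
   variables, then the translated global pointers, then the mmap_info
   record, then padding up to an allocation-granularity boundary, and
   finally the object data itself, which gt_pch_restore below maps (or
   reads) back in at mmi.preferred_base.  */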
/* Read the state of the compiler back in from F.  */

void
gt_pch_restore (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct mmap_info mmi;
  int result;

  /* Delete any deletable objects.  This makes ggc_pch_read much
     faster, as it can be sure that no GCable objects remain other
     than the ones just read in.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Read in all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fread (rti->base, rti->stride, 1, f) != 1)
        fatal_error (input_location, "can%'t read PCH file: %m");

  /* Read in all the global pointers, in 6 easy loops.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        if (fread ((char *)rti->base + rti->stride * i,
                   sizeof (void *), 1, f) != 1)
          fatal_error (input_location, "can%'t read PCH file: %m");

  if (fread (&mmi, sizeof (mmi), 1, f) != 1)
    fatal_error (input_location, "can%'t read PCH file: %m");

  result = host_hooks.gt_pch_use_address (mmi.preferred_base, mmi.size,
                                          fileno (f), mmi.offset);
  if (result < 0)
    fatal_error (input_location, "had to relocate PCH");
  if (result == 0)
    {
      if (fseek (f, mmi.offset, SEEK_SET) != 0
          || fread (mmi.preferred_base, mmi.size, 1, f) != 1)
        fatal_error (input_location, "can%'t read PCH file: %m");
    }
  else if (fseek (f, mmi.offset + mmi.size, SEEK_SET) != 0)
    fatal_error (input_location, "can%'t read PCH file: %m");

  ggc_pch_read (f, mmi.preferred_base);

  gt_pch_restore_stringpool ();
}
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is not present.
   Select no address whatsoever, and let gt_pch_save choose what it will with
   malloc, presumably.  */

void *
default_gt_pch_get_address (size_t size ATTRIBUTE_UNUSED,
                            int fd ATTRIBUTE_UNUSED)
{
  return NULL;
}
/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is not present.
   Allocate SIZE bytes with malloc.  Return 0 if the address we got is the
   same as BASE, indicating that the memory has been allocated but needs to
   be read in from the file.  Return -1 if the address differs, so relocation
   of the PCH file would be required.  */

int
default_gt_pch_use_address (void *base, size_t size, int fd ATTRIBUTE_UNUSED,
                            size_t offset ATTRIBUTE_UNUSED)
{
  void *addr = xmalloc (size);
  return (addr == base) - 1;
}
/* Default version of HOST_HOOKS_GT_PCH_ALLOC_GRANULARITY.  Return the
   alignment required for allocating virtual memory.  Usually this is the
   same as pagesize.  */

size_t
default_gt_pch_alloc_granularity (void)
{
  return getpagesize ();
}
#if HAVE_MMAP_FILE
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is present.
   We temporarily allocate SIZE bytes, and let the kernel place the data
   wherever it will.  If it worked, that's our spot, if not we're likely
   to be in trouble.  */

void *
mmap_gt_pch_get_address (size_t size, int fd)
{
  void *ret;

  ret = mmap (NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
  if (ret == (void *) MAP_FAILED)
    ret = NULL;
  else
    munmap ((caddr_t) ret, size);

  return ret;
}
/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is present.
   Map SIZE bytes of FD+OFFSET at BASE.  Return 1 if we succeeded at
   mapping the data at BASE, -1 if we couldn't.

   This version assumes that the kernel honors the START operand of mmap
   even without MAP_FIXED if START through START+SIZE are not currently
   mapped with something.  */

int
mmap_gt_pch_use_address (void *base, size_t size, int fd, size_t offset)
{
  void *addr;

  /* We're called with size == 0 if we're not planning to load a PCH
     file at all.  This allows the hook to free any static space that
     we might have allocated at link time.  */
  if (size == 0)
    return -1;

  addr = mmap ((caddr_t) base, size, PROT_READ | PROT_WRITE, MAP_PRIVATE,
               fd, offset);

  return addr == base ? 1 : -1;
}
#endif /* HAVE_MMAP_FILE */
#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT

/* Modify the bound based on rlimits.  */
static double
ggc_rlimit_bound (double limit)
{
#if defined(HAVE_GETRLIMIT)
  struct rlimit rlim;
# if defined (RLIMIT_AS)
  /* RLIMIT_AS is what POSIX says is the limit on mmap.  Presumably
     any OS which has RLIMIT_AS also has a working mmap that GCC will use.  */
  if (getrlimit (RLIMIT_AS, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# elif defined (RLIMIT_DATA)
  /* ... but some older OSs bound mmap based on RLIMIT_DATA, or we
     might be on an OS that has a broken mmap.  (Others don't bound
     mmap at all, apparently.)  */
  if (getrlimit (RLIMIT_DATA, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit
      /* Darwin has this horribly bogus default setting of
         RLIMIT_DATA, to 6144Kb.  No-one notices because RLIMIT_DATA
         appears to be ignored.  Ignore such silliness.  If a limit
         this small was actually effective for mmap, GCC wouldn't even
         start up.  */
      && rlim.rlim_cur >= 8 * 1024 * 1024)
    limit = rlim.rlim_cur;
# endif /* RLIMIT_AS or RLIMIT_DATA */
#endif /* HAVE_GETRLIMIT */

  return limit;
}
/* Heuristic to set a default for GGC_MIN_EXPAND.  */
static int
ggc_min_expand_heuristic (void)
{
  double min_expand = physmem_total ();

  /* Adjust for rlimits.  */
  min_expand = ggc_rlimit_bound (min_expand);

  /* The heuristic is a percentage equal to 30% + 70%*(RAM/1GB), yielding
     a lower bound of 30% and an upper bound of 100% (when RAM >= 1GB).  */
  min_expand /= 1024*1024*1024;
  min_expand *= 70;
  min_expand = MIN (min_expand, 70);
  min_expand += 30;

  return min_expand;
}
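/* For example, on a machine with 512MB of usable RAM the percentage is
   30 + 70 * 0.5 = 65, and anything from 1GB up saturates at
   30 + 70 = 100.  */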
/* Heuristic to set a default for GGC_MIN_HEAPSIZE.  */
static int
ggc_min_heapsize_heuristic (void)
{
  double phys_kbytes = physmem_total ();
  double limit_kbytes = ggc_rlimit_bound (phys_kbytes * 2);

  phys_kbytes /= 1024; /* Convert to Kbytes.  */
  limit_kbytes /= 1024;

  /* The heuristic is RAM/8, with a lower bound of 4M and an upper
     bound of 128M (when RAM >= 1GB).  */
  phys_kbytes /= 8;

#if defined(HAVE_GETRLIMIT) && defined (RLIMIT_RSS)
  /* Try not to overrun the RSS limit while doing garbage collection.
     The RSS limit is only advisory, so no margin is subtracted.  */
  {
    struct rlimit rlim;
    if (getrlimit (RLIMIT_RSS, &rlim) == 0
        && rlim.rlim_cur != (rlim_t) RLIM_INFINITY)
      phys_kbytes = MIN (phys_kbytes, rlim.rlim_cur / 1024);
  }
#endif

  /* Don't blindly run over our data limit; do GC at least when the
     *next* GC would be within 20Mb of the limit or within a quarter of
     the limit, whichever is larger.  If GCC does hit the data limit,
     compilation will fail, so this tries to be conservative.  */
  limit_kbytes = MAX (0, limit_kbytes - MAX (limit_kbytes / 4, 20 * 1024));
  limit_kbytes = (limit_kbytes * 100) / (110 + ggc_min_expand_heuristic ());
  phys_kbytes = MIN (phys_kbytes, limit_kbytes);

  phys_kbytes = MAX (phys_kbytes, 4 * 1024);
  phys_kbytes = MIN (phys_kbytes, 128 * 1024);

  return phys_kbytes;
}
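/* For example, with 1GB of RAM and no restrictive rlimits this yields
   1048576 kB / 8 = 131072 kB, i.e. exactly the 128M ceiling; a 16MB
   machine (16384 kB / 8 = 2048 kB) would be raised to the 4M floor.  */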
#endif
/* Set the default values of the GGC_MIN_EXPAND and GGC_MIN_HEAPSIZE
   parameters from the heuristics above.  */
void
init_ggc_heuristics (void)
{
#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
  set_default_param_value (GGC_MIN_EXPAND, ggc_min_expand_heuristic ());
  set_default_param_value (GGC_MIN_HEAPSIZE, ggc_min_heapsize_heuristic ());
#endif
}
/* GGC memory usage.  */
struct ggc_usage: public mem_usage
{
  /* Default constructor.  */
  ggc_usage (): m_freed (0), m_collected (0), m_overhead (0) {}
  /* Constructor.  */
  ggc_usage (size_t allocated, size_t times, size_t peak,
             size_t freed, size_t collected, size_t overhead)
    : mem_usage (allocated, times, peak),
      m_freed (freed), m_collected (collected), m_overhead (overhead) {}

  /* Comparison operator.  */
  inline bool operator< (const ggc_usage &second) const
  {
    return (get_balance () == second.get_balance () ?
            (m_peak == second.m_peak ? m_times < second.m_times
             : m_peak < second.m_peak)
            : get_balance () < second.get_balance ());
  }

  /* Register overhead of ALLOCATED and OVERHEAD bytes.  */
  inline void register_overhead (size_t allocated, size_t overhead)
  {
    m_allocated += allocated;
    m_overhead += overhead;
    m_times++;
  }

  /* Release overhead of SIZE bytes.  */
  inline void release_overhead (size_t size)
  {
    m_freed += size;
  }

  /* Sum the usage with SECOND usage.  */
  ggc_usage operator+ (const ggc_usage &second)
  {
    return ggc_usage (m_allocated + second.m_allocated,
                      m_times + second.m_times,
                      m_peak + second.m_peak,
                      m_freed + second.m_freed,
                      m_collected + second.m_collected,
                      m_overhead + second.m_overhead);
  }

  /* Dump usage with PREFIX, where TOTAL is sum of all rows.  */
  inline void dump (const char *prefix, ggc_usage &total) const
  {
    long balance = get_balance ();
    fprintf (stderr,
             "%-48s %10li:%5.1f%%%10li:%5.1f%%"
             "%10li:%5.1f%%%10li:%5.1f%%%10li\n",
             prefix, (long)m_collected,
             get_percent (m_collected, total.m_collected),
             (long)m_freed, get_percent (m_freed, total.m_freed),
             (long)balance, get_percent (balance, total.get_balance ()),
             (long)m_overhead, get_percent (m_overhead, total.m_overhead),
             (long)m_times);
  }

  /* Dump usage coupled to LOC location, where TOTAL is sum of all rows.  */
  inline void dump (mem_location *loc, ggc_usage &total) const
  {
    char s[4096];
    sprintf (s, "%s:%i (%s)", loc->get_trimmed_filename (),
             loc->m_line, loc->m_function);
    s[48] = '\0';

    dump (s, total);
  }

  /* Dump footer.  */
  inline void dump_footer ()
  {
    print_dash_line ();
    dump ("Total", *this);
    print_dash_line ();
  }

  /* Get balance which is GGC allocation leak.  */
  inline long get_balance () const
  {
    return m_allocated + m_overhead - m_collected - m_freed;
  }

  typedef std::pair<mem_location *, ggc_usage *> mem_pair_t;

  /* Compare wrapper used by qsort method.  */
  static int compare (const void *first, const void *second)
  {
    const mem_pair_t f = *(const mem_pair_t *)first;
    const mem_pair_t s = *(const mem_pair_t *)second;

    return (*f.second) < (*s.second);
  }

  /* Compare rows in final GGC summary dump.  */
  static int compare_final (const void *first, const void *second)
  {
    typedef std::pair<mem_location *, ggc_usage *> mem_pair_t;

    const ggc_usage *f = ((const mem_pair_t *)first)->second;
    const ggc_usage *s = ((const mem_pair_t *)second)->second;

    size_t a = f->m_allocated + f->m_overhead - f->m_freed;
    size_t b = s->m_allocated + s->m_overhead - s->m_freed;

    return a == b ? 0 : (a < b ? 1 : -1);
  }

  /* Dump header with NAME.  */
  static inline void dump_header (const char *name)
  {
    fprintf (stderr, "%-48s %11s%17s%17s%16s%17s\n", name, "Garbage", "Freed",
             "Leak", "Overhead", "Times");
    print_dash_line ();
  }

  /* Freed memory in bytes.  */
  size_t m_freed;
  /* Collected memory in bytes.  */
  size_t m_collected;
  /* Overhead memory in bytes.  */
  size_t m_overhead;
};
/* GGC memory description.  */
static mem_alloc_description<ggc_usage> ggc_mem_desc;
/* Dump per-site memory statistics.  */

void
dump_ggc_loc_statistics (bool final)
{
  if (! GATHER_STATISTICS)
    return;

  ggc_force_collect = true;
  ggc_collect ();

  ggc_mem_desc.dump (GGC, final ? ggc_usage::compare_final : NULL);

  ggc_force_collect = false;
}
/* Record ALLOCATED and OVERHEAD bytes to descriptor NAME:LINE (FUNCTION).  */
void
ggc_record_overhead (size_t allocated, size_t overhead, void *ptr MEM_STAT_DECL)
{
  ggc_usage *usage = ggc_mem_desc.register_descriptor (ptr, GGC, false
                                                       FINAL_PASS_MEM_STAT);

  ggc_mem_desc.register_object_overhead (usage, allocated + overhead, ptr);
  usage->register_overhead (allocated, overhead);
}
/* Notice that the pointer has been freed.  */
void
ggc_free_overhead (void *ptr)
{
  ggc_mem_desc.release_object_overhead (ptr);
}
/* After live values have been marked, walk all recorded pointers and see if
   they are still live.  */
void
ggc_prune_overhead_list (void)
{
  typedef hash_map<const void *, std::pair<ggc_usage *, size_t > > map_t;

  map_t::iterator it = ggc_mem_desc.m_reverse_object_map->begin ();

  for (; it != ggc_mem_desc.m_reverse_object_map->end (); ++it)
    if (!ggc_marked_p ((*it).first))
      (*it).second.first->m_collected += (*it).second.second;

  delete ggc_mem_desc.m_reverse_object_map;
  ggc_mem_desc.m_reverse_object_map = new map_t (13, false, false);
}