/* Simple garbage collection for the GNU compiler.
   Copyright (C) 1999-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Generic garbage collection (GC) functions and data, not specific to
   any particular GC implementation.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-table.h"
#include "ggc.h"
#include "ggc-internal.h"
#include "diagnostic-core.h"
#include "params.h"
#include "hosthooks.h"
#include "hosthooks-def.h"
#include "plugin.h"
#include "vec.h"
#include "timevar.h"
#include "mem-stats.h"
#include "hash-map.h"
/* When set, ggc_collect will do collection.  */
bool ggc_force_collect;

/* When true, protect the contents of the identifier hash table.  */
bool ggc_protect_identifiers = true;

/* Statistics about the allocation.  */
static ggc_statistics *ggc_stats;

struct traversal_state;
static int compare_ptr_data (const void *, const void *);
static void relocate_ptrs (void *, void *);
static void write_pch_globals (const struct ggc_root_tab * const *tab,
			       struct traversal_state *state);
/* Maintain global roots that are preserved during GC.  */

/* This extra vector of dynamically registered root_tab-s is used by
   ggc_mark_roots and gives the ability to dynamically add new GGC root
   tables, for instance from some plugins; this vector is on the heap
   since it is used by GGC internally.  */
typedef const struct ggc_root_tab *const_ggc_root_tab_t;
static vec<const_ggc_root_tab_t> extra_root_vec;
/* Dynamically register a new GGC root table RT.  This is useful for
   plugins.  */

void
ggc_register_root_tab (const struct ggc_root_tab *rt)
{
  if (rt)
    extra_root_vec.safe_push (rt);
}
/* Mark all the roots in the table RT.  */

static void
ggc_mark_root_tab (const_ggc_root_tab_t rt)
{
  size_t i;

  for ( ; rt->base != NULL; rt++)
    for (i = 0; i < rt->nelt; i++)
      (*rt->cb) (*(void **) ((char *)rt->base + rt->stride * i));
}
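/* For illustration, a hypothetical entry (not from this file): for a
   root declared as

       static GTY(()) tree global_trees[TI_MAX];

   gengtype emits a table entry roughly of the form

       { &global_trees[0], TI_MAX, sizeof (global_trees[0]),
         &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node },

   so the loop above visits each of the NELT slots, spaced STRIDE bytes
   apart starting at BASE, and passes the pointer stored in each slot to
   the marking callback CB.  */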
/* Iterate through all registered roots and mark each element.  */

void
ggc_mark_roots (void)
{
  const struct ggc_root_tab *const *rt;
  const_ggc_root_tab_t rtp, rti;
  size_t i;

  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  for (rt = gt_ggc_rtab; *rt; rt++)
    ggc_mark_root_tab (*rt);

  FOR_EACH_VEC_ELT (extra_root_vec, i, rtp)
    ggc_mark_root_tab (rtp);

  if (ggc_protect_identifiers)
    ggc_mark_stringpool ();

  gt_clear_caches ();

  if (! ggc_protect_identifiers)
    ggc_purge_stringpool ();

  /* Some plugins may call ggc_set_mark from here.  */
  invoke_plugin_callbacks (PLUGIN_GGC_MARKING, NULL);
}
/* Allocate a block of memory, then clear it.  */

void *
ggc_internal_cleared_alloc (size_t size, void (*f)(void *), size_t s, size_t n
			    MEM_STAT_DECL)
{
  void *buf = ggc_internal_alloc (size, f, s, n PASS_MEM_STAT);
  memset (buf, 0, size);
  return buf;
}
/* Resize a block of memory, possibly re-allocating it.  */

void *
ggc_realloc (void *x, size_t size MEM_STAT_DECL)
{
  void *r;
  size_t old_size;

  if (x == NULL)
    return ggc_internal_alloc (size PASS_MEM_STAT);

  old_size = ggc_get_size (x);

  if (size <= old_size)
    {
      /* Mark the unwanted memory as unaccessible.  We also need to make
	 the "new" size accessible, since ggc_get_size returns the size of
	 the pool, not the size of the individually allocated object, the
	 size which was previously made accessible.  Unfortunately, we
	 don't know that previously allocated size.  Without that
	 knowledge we have to lose some initialization-tracking for the
	 old parts of the object.  An alternative is to mark the whole
	 old_size as reachable, but that would lose tracking of writes
	 after the end of the object (by small offsets).  Discard the
	 handle to avoid handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) x + size,
						    old_size - size));
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, size));
      return x;
    }

  r = ggc_internal_alloc (size PASS_MEM_STAT);

  /* Since ggc_get_size returns the size of the pool, not the size of the
     individually allocated object, we'd access parts of the old object
     that were marked invalid with the memcpy below.  We lose a bit of the
     initialization-tracking since some of it may be uninitialized.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, old_size));

  memcpy (r, x, old_size);

  /* The old object is not supposed to be used anymore.  */
  ggc_free (x);

  return r;
}
void *
ggc_cleared_alloc_htab_ignore_args (size_t c ATTRIBUTE_UNUSED,
				    size_t n ATTRIBUTE_UNUSED)
{
  gcc_assert (c * n == sizeof (struct htab));
  return ggc_cleared_alloc<htab> ();
}

/* TODO: once we actually use type information in GGC, create a new tag
   gt_gcc_ptr_array and use it for pointer arrays.  */

void *
ggc_cleared_alloc_ptr_array_two_args (size_t c, size_t n)
{
  gcc_assert (sizeof (PTR *) == n);
  return ggc_cleared_vec_alloc<PTR *> (c);
}
/* These are for splay_tree_new_ggc.  */
void *
ggc_splay_alloc (int sz, void *nl)
{
  gcc_assert (!nl);
  return ggc_internal_alloc (sz);
}

void
ggc_splay_dont_free (void * x ATTRIBUTE_UNUSED, void *nl)
{
  gcc_assert (!nl);
}
/* Print statistics that are independent of the collector in use.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
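/* Worked examples of the two macros above: SCALE (4096) == 4096 with
   LABEL ' ' (plain bytes), SCALE (4*1024*1024) == 4096 with LABEL 'k',
   and SCALE (20*1024*1024) == 20 with LABEL 'M'.  A statistics printer
   would emit a byte count N as, e.g.,

       fprintf (stream, "%lu%c", SCALE (N), LABEL (N));

   (a usage sketch; the actual printers live in the collector
   implementations).  */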
void
ggc_print_common_statistics (FILE *stream ATTRIBUTE_UNUSED,
			     ggc_statistics *stats)
{
  /* Set the pointer so that during collection we will actually gather
     the statistics.  */
  ggc_stats = stats;

  /* Then do one collection to fill in the statistics.  */
  ggc_collect ();

  /* At present, we don't really gather any interesting statistics.  */

  /* Don't gather statistics any more.  */
  ggc_stats = NULL;
}
/* Functions for saving and restoring GCable memory to disk.  */

struct ptr_data
{
  void *obj;
  void *note_ptr_cookie;
  gt_note_pointers note_ptr_fn;
  gt_handle_reorder reorder_fn;
  size_t size;
  void *new_addr;
};

#define POINTER_HASH(x) (hashval_t)((intptr_t)x >> 3)
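/* Objects handed to the PCH machinery are at least 8-byte aligned on
   typical hosts, so the low three bits of a pointer carry no
   information; shifting them away avoids hash values that are all
   multiples of the alignment.  */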
/* Helper for hashing saving_htab.  */

struct saving_hasher : typed_free_remove <ptr_data>
{
  typedef ptr_data *value_type;
  typedef void *compare_type;
  static inline hashval_t hash (const ptr_data *);
  static inline bool equal (const ptr_data *, const void *);
};

inline hashval_t
saving_hasher::hash (const ptr_data *p)
{
  return POINTER_HASH (p->obj);
}

inline bool
saving_hasher::equal (const ptr_data *p1, const void *p2)
{
  return p1->obj == p2;
}

static hash_table<saving_hasher> *saving_htab;
/* Register an object in the hash table.  */

int
gt_pch_note_object (void *obj, void *note_ptr_cookie,
		    gt_note_pointers note_ptr_fn)
{
  struct ptr_data **slot;

  if (obj == NULL || obj == (void *) 1)
    return 0;

  slot = (struct ptr_data **)
    saving_htab->find_slot_with_hash (obj, POINTER_HASH (obj), INSERT);
  if (*slot != NULL)
    {
      gcc_assert ((*slot)->note_ptr_fn == note_ptr_fn
		  && (*slot)->note_ptr_cookie == note_ptr_cookie);
      return 0;
    }

  *slot = XCNEW (struct ptr_data);
  (*slot)->obj = obj;
  (*slot)->note_ptr_fn = note_ptr_fn;
  (*slot)->note_ptr_cookie = note_ptr_cookie;
  if (note_ptr_fn == gt_pch_p_S)
    (*slot)->size = strlen ((const char *)obj) + 1;
  else
    (*slot)->size = ggc_get_size (obj);

  return 1;
}
/* Register a reorder function for an object already in the hash table.  */

void
gt_pch_note_reorder (void *obj, void *note_ptr_cookie,
		     gt_handle_reorder reorder_fn)
{
  struct ptr_data *data;

  if (obj == NULL || obj == (void *) 1)
    return;

  data = (struct ptr_data *)
    saving_htab->find_with_hash (obj, POINTER_HASH (obj));
  gcc_assert (data && data->note_ptr_cookie == note_ptr_cookie);

  data->reorder_fn = reorder_fn;
}
/* Handy state for the traversal functions.  */

struct traversal_state
{
  FILE *f;
  struct ggc_pch_data *d;
  size_t count;
  struct ptr_data **ptrs;
  size_t ptrs_i;
};
/* Callbacks for htab_traverse.  */

int
ggc_call_count (ptr_data **slot, traversal_state *state)
{
  struct ptr_data *d = *slot;

  ggc_pch_count_object (state->d, d->obj, d->size,
			d->note_ptr_fn == gt_pch_p_S);
  state->count++;
  return 1;
}

int
ggc_call_alloc (ptr_data **slot, traversal_state *state)
{
  struct ptr_data *d = *slot;

  d->new_addr = ggc_pch_alloc_object (state->d, d->obj, d->size,
				      d->note_ptr_fn == gt_pch_p_S);
  state->ptrs[state->ptrs_i++] = d;
  return 1;
}
/* Callback for qsort.  */

static int
compare_ptr_data (const void *p1_p, const void *p2_p)
{
  const struct ptr_data *const p1 = *(const struct ptr_data *const *)p1_p;
  const struct ptr_data *const p2 = *(const struct ptr_data *const *)p2_p;
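  /* Subtracting the two comparisons yields -1, 0 or 1 without the
     overflow a plain subtraction of the addresses could cause.  */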
  return (((size_t)p1->new_addr > (size_t)p2->new_addr)
	  - ((size_t)p1->new_addr < (size_t)p2->new_addr));
}
/* Callbacks for note_ptr_fn.  */

static void
relocate_ptrs (void *ptr_p, void *state_p)
{
  void **ptr = (void **)ptr_p;
  struct traversal_state *state ATTRIBUTE_UNUSED
    = (struct traversal_state *)state_p;
  struct ptr_data *result;

  if (*ptr == NULL || *ptr == (void *)1)
    return;

  result = (struct ptr_data *)
    saving_htab->find_with_hash (*ptr, POINTER_HASH (*ptr));
  gcc_assert (result);
  *ptr = result->new_addr;
}
/* Write out, after relocation, the pointers in TAB.  */
static void
write_pch_globals (const struct ggc_root_tab * const *tab,
		   struct traversal_state *state)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;

  for (rt = tab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	{
	  void *ptr = *(void **)((char *)rti->base + rti->stride * i);
	  struct ptr_data *new_ptr;
	  if (ptr == NULL || ptr == (void *)1)
	    {
	      if (fwrite (&ptr, sizeof (void *), 1, state->f)
		  != 1)
		fatal_error (input_location, "can%'t write PCH file: %m");
	    }
	  else
	    {
	      new_ptr = (struct ptr_data *)
		saving_htab->find_with_hash (ptr, POINTER_HASH (ptr));
	      if (fwrite (&new_ptr->new_addr, sizeof (void *), 1, state->f)
		  != 1)
		fatal_error (input_location, "can%'t write PCH file: %m");
	    }
	}
}
/* Hold the information we need to mmap the file back in.  */

struct mmap_info
{
  size_t offset;
  size_t size;
  void *preferred_base;
};
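/* A sketch of the stream gt_pch_save produces below: the scalar roots,
   then the relocated global pointers, then this mmap_info record, then
   padding up to the next allocation-granularity boundary, and finally
   the object data that is mapped back in at PREFERRED_BASE.  */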
/* Write out the state of the compiler to F.  */

void
gt_pch_save (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct traversal_state state;
  char *this_object = NULL;
  size_t this_object_size = 0;
  struct mmap_info mmi;
  const size_t mmap_offset_alignment = host_hooks.gt_pch_alloc_granularity ();

  gt_pch_save_stringpool ();

  timevar_push (TV_PCH_PTR_REALLOC);
  saving_htab = new hash_table<saving_hasher> (50000);

  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	(*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));
  /* Prepare the objects for writing, determine addresses and such.  */
  state.f = f;
  state.d = init_ggc_pch ();
  state.count = 0;
  saving_htab->traverse <traversal_state *, ggc_call_count> (&state);

  mmi.size = ggc_pch_total_size (state.d);

  /* Try to arrange things so that no relocation is necessary, but
     don't try very hard.  On most platforms, this will always work,
     and on the rest it's a lot of work to do better.
     (The extra work goes in HOST_HOOKS_GT_PCH_GET_ADDRESS and
     HOST_HOOKS_GT_PCH_USE_ADDRESS.)  */
  mmi.preferred_base = host_hooks.gt_pch_get_address (mmi.size, fileno (f));

  ggc_pch_this_base (state.d, mmi.preferred_base);

  state.ptrs = XNEWVEC (struct ptr_data *, state.count);
  state.ptrs_i = 0;

  saving_htab->traverse <traversal_state *, ggc_call_alloc> (&state);
  timevar_pop (TV_PCH_PTR_REALLOC);

  timevar_push (TV_PCH_PTR_SORT);
  qsort (state.ptrs, state.count, sizeof (*state.ptrs), compare_ptr_data);
  timevar_pop (TV_PCH_PTR_SORT);
  /* Write out all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fwrite (rti->base, rti->stride, 1, f) != 1)
	fatal_error (input_location, "can%'t write PCH file: %m");

  /* Write out all the global pointers, after translation.  */
  write_pch_globals (gt_ggc_rtab, &state);

  /* Pad the PCH file so that the mmapped area starts on an allocation
     granularity (usually page) boundary.  */
  {
    long o;
    o = ftell (state.f) + sizeof (mmi);
    if (o == -1)
      fatal_error (input_location, "can%'t get position in PCH file: %m");
    mmi.offset = mmap_offset_alignment - o % mmap_offset_alignment;
    if (mmi.offset == mmap_offset_alignment)
      mmi.offset = 0;
    mmi.offset += o;
  }
  if (fwrite (&mmi, sizeof (mmi), 1, state.f) != 1)
    fatal_error (input_location, "can%'t write PCH file: %m");
  if (mmi.offset != 0
      && fseek (state.f, mmi.offset, SEEK_SET) != 0)
    fatal_error (input_location, "can%'t write padding to PCH file: %m");
  ggc_pch_prepare_write (state.d, state.f);
#if defined ENABLE_VALGRIND_CHECKING && defined VALGRIND_GET_VBITS
  vec<char> vbits = vNULL;
#endif
  /* Actually write out the objects.  */
  for (i = 0; i < state.count; i++)
    {
      if (this_object_size < state.ptrs[i]->size)
	{
	  this_object_size = state.ptrs[i]->size;
	  this_object = XRESIZEVAR (char, this_object, this_object_size);
	}
#if defined ENABLE_VALGRIND_CHECKING && defined VALGRIND_GET_VBITS
      /* obj might contain uninitialized bytes, e.g. in the trailing
	 padding of the object.  Avoid warnings by making the memory
	 temporarily defined and then restoring previous state.  */
      int get_vbits = 0;
      size_t valid_size = state.ptrs[i]->size;
      if (__builtin_expect (RUNNING_ON_VALGRIND, 0))
	{
	  if (vbits.length () < valid_size)
	    vbits.safe_grow (valid_size);
	  get_vbits = VALGRIND_GET_VBITS (state.ptrs[i]->obj,
					  vbits.address (), valid_size);
	  if (get_vbits == 3)
	    {
	      /* We assume that first part of obj is addressable, and
		 the rest is unaddressable.  Find out where the boundary is
		 using binary search.  */
	      size_t lo = 0, hi = valid_size;
	      while (hi > lo)
		{
		  size_t mid = (lo + hi) / 2;
		  get_vbits = VALGRIND_GET_VBITS ((char *) state.ptrs[i]->obj
						  + mid, vbits.address (),
						  1);
		  if (get_vbits == 3)
		    hi = mid;
		  else if (get_vbits == 1)
		    lo = mid + 1;
		  else
		    break;
		}
	      if (get_vbits == 1 || get_vbits == 3)
		{
		  valid_size = lo;
		  get_vbits = VALGRIND_GET_VBITS (state.ptrs[i]->obj,
						  vbits.address (),
						  valid_size);
		}
	    }
	  if (get_vbits == 1)
	    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (state.ptrs[i]->obj,
							 state.ptrs[i]->size));
	}
#endif
      memcpy (this_object, state.ptrs[i]->obj, state.ptrs[i]->size);
      if (state.ptrs[i]->reorder_fn != NULL)
	state.ptrs[i]->reorder_fn (state.ptrs[i]->obj,
				   state.ptrs[i]->note_ptr_cookie,
				   relocate_ptrs, &state);
      state.ptrs[i]->note_ptr_fn (state.ptrs[i]->obj,
				  state.ptrs[i]->note_ptr_cookie,
				  relocate_ptrs, &state);
      ggc_pch_write_object (state.d, state.f, state.ptrs[i]->obj,
			    state.ptrs[i]->new_addr, state.ptrs[i]->size,
			    state.ptrs[i]->note_ptr_fn == gt_pch_p_S);
      if (state.ptrs[i]->note_ptr_fn != gt_pch_p_S)
	memcpy (state.ptrs[i]->obj, this_object, state.ptrs[i]->size);
#if defined ENABLE_VALGRIND_CHECKING && defined VALGRIND_GET_VBITS
      if (__builtin_expect (get_vbits == 1, 0))
	{
	  (void) VALGRIND_SET_VBITS (state.ptrs[i]->obj, vbits.address (),
				     valid_size);
	  if (valid_size != state.ptrs[i]->size)
	    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *)
							  state.ptrs[i]->obj
							  + valid_size,
							  state.ptrs[i]->size
							  - valid_size));
	}
#endif
    }
#if defined ENABLE_VALGRIND_CHECKING && defined VALGRIND_GET_VBITS
  vbits.release ();
#endif

  ggc_pch_finish (state.d, state.f);
  gt_pch_fixup_stringpool ();

  XDELETE (state.ptrs);
  XDELETE (this_object);
  delete saving_htab;
  saving_htab = NULL;
}
/* Read the state of the compiler back in from F.  */

void
gt_pch_restore (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct mmap_info mmi;
  int result;

  /* Delete any deletable objects.  This makes ggc_pch_read much
     faster, as it can be sure that no GCable objects remain other
     than the ones just read in.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Read in all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fread (rti->base, rti->stride, 1, f) != 1)
	fatal_error (input_location, "can%'t read PCH file: %m");

  /* Read in all the global pointers, in 6 easy loops.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	if (fread ((char *)rti->base + rti->stride * i,
		   sizeof (void *), 1, f) != 1)
	  fatal_error (input_location, "can%'t read PCH file: %m");

  if (fread (&mmi, sizeof (mmi), 1, f) != 1)
    fatal_error (input_location, "can%'t read PCH file: %m");

  result = host_hooks.gt_pch_use_address (mmi.preferred_base, mmi.size,
					  fileno (f), mmi.offset);
  if (result < 0)
    fatal_error (input_location, "had to relocate PCH");
  if (result == 0)
    {
      if (fseek (f, mmi.offset, SEEK_SET) != 0
	  || fread (mmi.preferred_base, mmi.size, 1, f) != 1)
	fatal_error (input_location, "can%'t read PCH file: %m");
    }
  else if (fseek (f, mmi.offset + mmi.size, SEEK_SET) != 0)
    fatal_error (input_location, "can%'t read PCH file: %m");

  ggc_pch_read (f, mmi.preferred_base);

  gt_pch_restore_stringpool ();
}
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is not present.
   Select no address whatsoever, and let gt_pch_save choose what it will with
   malloc, presumably.  */

void *
default_gt_pch_get_address (size_t size ATTRIBUTE_UNUSED,
			    int fd ATTRIBUTE_UNUSED)
{
  return NULL;
}
/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is not present.
   Allocate SIZE bytes with malloc.  Return 0 if the address we got is the
   same as base, indicating that the memory has been allocated but needs to
   be read in from the file.  Return -1 if the address differs, since
   relocation of the PCH file would be required.  */

int
default_gt_pch_use_address (void *base, size_t size, int fd ATTRIBUTE_UNUSED,
			    size_t offset ATTRIBUTE_UNUSED)
{
  void *addr = xmalloc (size);
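  /* The comparison is 1 exactly when malloc happened to return BASE, so
     the function yields 0 (memory obtained in place; the caller reads
     the data from the file) or -1 (relocation would be required).  */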
  return (addr == base) - 1;
}
/* Default version of HOST_HOOKS_GT_PCH_ALLOC_GRANULARITY.  Return the
   alignment required for allocating virtual memory.  Usually this is the
   same as pagesize.  */

size_t
default_gt_pch_alloc_granularity (void)
{
  return getpagesize ();
}
#if HAVE_MMAP_FILE
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is present.
   We temporarily allocate SIZE bytes, and let the kernel place the data
   wherever it will.  If it worked, that's our spot, if not we're likely
   to be in trouble.  */

void *
mmap_gt_pch_get_address (size_t size, int fd)
{
  void *ret;

  ret = mmap (NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
  if (ret == (void *) MAP_FAILED)
    ret = NULL;
  else
    munmap ((caddr_t) ret, size);

  return ret;
}
/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is present.
   Map SIZE bytes of FD+OFFSET at BASE.  Return 1 if we succeeded at
   mapping the data at BASE, -1 if we couldn't.

   This version assumes that the kernel honors the START operand of mmap
   even without MAP_FIXED if START through START+SIZE are not currently
   mapped with something.  */

int
mmap_gt_pch_use_address (void *base, size_t size, int fd, size_t offset)
{
  void *addr;

  /* We're called with size == 0 if we're not planning to load a PCH
     file at all.  This allows the hook to free any static space that
     we might have allocated at link time.  */
  if (size == 0)
    return -1;

  addr = mmap ((caddr_t) base, size, PROT_READ | PROT_WRITE, MAP_PRIVATE,
	       fd, offset);

  return addr == base ? 1 : -1;
}
#endif /* HAVE_MMAP_FILE */
#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT

/* Modify the bound based on rlimits.  */
static double
ggc_rlimit_bound (double limit)
{
#if defined(HAVE_GETRLIMIT)
  struct rlimit rlim;
# if defined (RLIMIT_AS)
  /* RLIMIT_AS is what POSIX says is the limit on mmap.  Presumably
     any OS which has RLIMIT_AS also has a working mmap that GCC will use.  */
  if (getrlimit (RLIMIT_AS, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# elif defined (RLIMIT_DATA)
  /* ... but some older OSs bound mmap based on RLIMIT_DATA, or we
     might be on an OS that has a broken mmap.  (Others don't bound
     mmap at all, apparently.)  */
  if (getrlimit (RLIMIT_DATA, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit
      /* Darwin has this horribly bogus default setting of
	 RLIMIT_DATA, to 6144Kb.  No-one notices because RLIMIT_DATA
	 appears to be ignored.  Ignore such silliness.  If a limit
	 this small was actually effective for mmap, GCC wouldn't even
	 start up.  */
      && rlim.rlim_cur >= 8 * 1024 * 1024)
    limit = rlim.rlim_cur;
# endif /* RLIMIT_AS or RLIMIT_DATA */
#endif /* HAVE_GETRLIMIT */

  return limit;
}
/* Heuristic to set a default for GGC_MIN_EXPAND.  */
static int
ggc_min_expand_heuristic (void)
{
  double min_expand = physmem_total ();

  /* Adjust for rlimits.  */
  min_expand = ggc_rlimit_bound (min_expand);

  /* The heuristic is a percentage equal to 30% + 70%*(RAM/1GB), yielding
     a lower bound of 30% and an upper bound of 100% (when RAM >= 1GB).  */
  min_expand /= 1024*1024*1024;
  min_expand *= 70;
  min_expand = MIN (min_expand, 70);
  min_expand += 30;

  return min_expand;
}
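/* A worked example of the formula above: with 512MB of usable RAM,
   min_expand == 0.5 * 70 + 30 == 65, i.e. by default the heap may grow
   by 65% between collections; at 1GB or more the value saturates at
   70 + 30 == 100.  */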
/* Heuristic to set a default for GGC_MIN_HEAPSIZE.  */
static int
ggc_min_heapsize_heuristic (void)
{
  double phys_kbytes = physmem_total ();
  double limit_kbytes = ggc_rlimit_bound (phys_kbytes * 2);

  phys_kbytes /= 1024; /* Convert to Kbytes.  */
  limit_kbytes /= 1024;

  /* The heuristic is RAM/8, with a lower bound of 4M and an upper
     bound of 128M (when RAM >= 1GB).  */
  phys_kbytes /= 8;

#if defined(HAVE_GETRLIMIT) && defined (RLIMIT_RSS)
  /* Try not to overrun the RSS limit while doing garbage collection.
     The RSS limit is only advisory, so no margin is subtracted.  */
  {
    struct rlimit rlim;
    if (getrlimit (RLIMIT_RSS, &rlim) == 0
	&& rlim.rlim_cur != (rlim_t) RLIM_INFINITY)
      phys_kbytes = MIN (phys_kbytes, rlim.rlim_cur / 1024);
  }
#endif

  /* Don't blindly run over our data limit; do GC at least when the
     *next* GC would be within 20Mb of the limit or within a quarter of
     the limit, whichever is larger.  If GCC does hit the data limit,
     compilation will fail, so this tries to be conservative.  */
  limit_kbytes = MAX (0, limit_kbytes - MAX (limit_kbytes / 4, 20 * 1024));
  limit_kbytes = (limit_kbytes * 100) / (110 + ggc_min_expand_heuristic ());
  phys_kbytes = MIN (phys_kbytes, limit_kbytes);

  phys_kbytes = MAX (phys_kbytes, 4 * 1024);
  phys_kbytes = MIN (phys_kbytes, 128 * 1024);

  return phys_kbytes;
}
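/* A worked example, ignoring the rlimit adjustments above: 256MB of RAM
   gives phys_kbytes == 262144 / 8 == 32768, i.e. a 32MB minimum heap,
   comfortably between the 4 * 1024 KB lower bound and the
   128 * 1024 KB upper bound.  */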
#endif

/* Set the default values of GGC_MIN_EXPAND and GGC_MIN_HEAPSIZE.  */
void
init_ggc_heuristics (void)
{
#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
  set_default_param_value (GGC_MIN_EXPAND, ggc_min_expand_heuristic ());
  set_default_param_value (GGC_MIN_HEAPSIZE, ggc_min_heapsize_heuristic ());
#endif
}
/* GGC memory usage.  */
struct ggc_usage: public mem_usage
{
  /* Default constructor.  */
  ggc_usage (): m_freed (0), m_collected (0), m_overhead (0) {}
  /* Constructor.  */
  ggc_usage (size_t allocated, size_t times, size_t peak,
	     size_t freed, size_t collected, size_t overhead)
    : mem_usage (allocated, times, peak),
      m_freed (freed), m_collected (collected), m_overhead (overhead) {}
  /* Comparison operator.  */
  inline bool operator< (const ggc_usage &second) const
  {
    return (get_balance () == second.get_balance () ?
	    (m_peak == second.m_peak ? m_times < second.m_times
	     : m_peak < second.m_peak)
	    : get_balance () < second.get_balance ());
  }
  /* Register overhead of ALLOCATED and OVERHEAD bytes.  */
  inline void register_overhead (size_t allocated, size_t overhead)
  {
    m_allocated += allocated;
    m_overhead += overhead;
    m_times++;
  }

  /* Release overhead of SIZE bytes.  */
  inline void release_overhead (size_t size)
  {
    m_freed += size;
  }
  /* Sum the usage with SECOND usage.  */
  ggc_usage operator+ (const ggc_usage &second)
  {
    return ggc_usage (m_allocated + second.m_allocated,
		      m_times + second.m_times,
		      m_peak + second.m_peak,
		      m_freed + second.m_freed,
		      m_collected + second.m_collected,
		      m_overhead + second.m_overhead);
  }
  /* Dump usage with PREFIX, where TOTAL is sum of all rows.  */
  inline void dump (const char *prefix, ggc_usage &total) const
  {
    long balance = get_balance ();
    fprintf (stderr,
	     "%-48s %10li:%5.1f%%%10li:%5.1f%%"
	     "%10li:%5.1f%%%10li:%5.1f%%%10li\n",
	     prefix, (long)m_collected,
	     get_percent (m_collected, total.m_collected),
	     (long)m_freed, get_percent (m_freed, total.m_freed),
	     (long)balance, get_percent (balance, total.get_balance ()),
	     (long)m_overhead, get_percent (m_overhead, total.m_overhead),
	     (long)m_times);
  }
  /* Dump usage coupled to LOC location, where TOTAL is sum of all rows.  */
  inline void dump (mem_location *loc, ggc_usage &total) const
  {
    char s[4096];
    sprintf (s, "%s:%i (%s)", loc->get_trimmed_filename (),
	     loc->m_line, loc->m_function);
    s[48] = '\0';

    dump (s, total);
  }
  /* Dump footer.  */
  inline void dump_footer ()
  {
    print_dash_line ();
    dump ("Total", *this);
    print_dash_line ();
  }

  /* Get balance which is GGC allocation leak.  */
  inline long get_balance () const
  {
    return m_allocated + m_overhead - m_collected - m_freed;
  }
  typedef std::pair<mem_location *, ggc_usage *> mem_pair_t;

  /* Compare wrapper used by qsort method.  */
  static int compare (const void *first, const void *second)
  {
    const mem_pair_t f = *(const mem_pair_t *)first;
    const mem_pair_t s = *(const mem_pair_t *)second;

    return (*f.second) < (*s.second);
  }
  /* Compare rows in final GGC summary dump.  */
  static int compare_final (const void *first, const void *second)
  {
    typedef std::pair<mem_location *, ggc_usage *> mem_pair_t;

    const ggc_usage *f = ((const mem_pair_t *)first)->second;
    const ggc_usage *s = ((const mem_pair_t *)second)->second;

    size_t a = f->m_allocated + f->m_overhead - f->m_freed;
    size_t b = s->m_allocated + s->m_overhead - s->m_freed;

    return a == b ? 0 : (a < b ? 1 : -1);
  }
  /* Dump header with NAME.  */
  static inline void dump_header (const char *name)
  {
    fprintf (stderr, "%-48s %11s%17s%17s%16s%17s\n", name, "Garbage", "Freed",
	     "Leak", "Overhead", "Times");
    print_dash_line ();
  }

  /* Freed memory in bytes.  */
  size_t m_freed;
  /* Collected memory in bytes.  */
  size_t m_collected;
  /* Overhead memory in bytes.  */
  size_t m_overhead;
};
/* GGC memory description.  */
static mem_alloc_description<ggc_usage> ggc_mem_desc;
/* Dump per-site memory statistics.  */

void
dump_ggc_loc_statistics (bool final)
{
  if (! GATHER_STATISTICS)
    return;

  ggc_force_collect = true;
  ggc_collect ();

  ggc_mem_desc.dump (GGC, final ? ggc_usage::compare_final : NULL);

  ggc_force_collect = false;
}
/* Record ALLOCATED and OVERHEAD bytes to descriptor NAME:LINE (FUNCTION).  */
void
ggc_record_overhead (size_t allocated, size_t overhead, void *ptr MEM_STAT_DECL)
{
  ggc_usage *usage = ggc_mem_desc.register_descriptor (ptr, GGC, false
						       FINAL_PASS_MEM_STAT);

  ggc_mem_desc.register_object_overhead (usage, allocated + overhead, ptr);
  usage->register_overhead (allocated, overhead);
}
/* Notice that the pointer has been freed.  */
void
ggc_free_overhead (void *ptr)
{
  ggc_mem_desc.release_object_overhead (ptr);
}
/* After live values have been marked, walk all recorded pointers and see if
   they are still live.  */
void
ggc_prune_overhead_list (void)
{
  typedef hash_map<const void *, std::pair<ggc_usage *, size_t > > map_t;

  map_t::iterator it = ggc_mem_desc.m_reverse_object_map->begin ();

  for (; it != ggc_mem_desc.m_reverse_object_map->end (); ++it)
    if (!ggc_marked_p ((*it).first))
      (*it).second.first->m_collected += (*it).second.second;

  delete ggc_mem_desc.m_reverse_object_map;
  ggc_mem_desc.m_reverse_object_map = new map_t (13, false, false);
}