/* Simple garbage collection for the GNU compiler.
   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Generic garbage collection (GC) functions and data, not specific to
   any particular GC implementation.  */
26 #include "coretypes.h"
31 #include "hosthooks.h"
32 #include "hosthooks-def.h"
36 #ifdef HAVE_SYS_RESOURCE_H
37 # include <sys/resource.h>
41 # include <sys/mman.h>
43 /* This is on Solaris. */
44 # include <sys/types.h>
49 # define MAP_FAILED ((void *)-1)
/* When set, ggc_collect will do collection.  */
bool ggc_force_collect;

/* When true, protect the contents of the identifier hash table.  */
bool ggc_protect_identifiers = true;

/* Statistics about the allocation.  */
static ggc_statistics *ggc_stats;

struct traversal_state;

static int ggc_htab_delete (void **, void *);
static hashval_t saving_htab_hash (const void *);
static int saving_htab_eq (const void *, const void *);
static int call_count (void **, void *);
static int call_alloc (void **, void *);
static int compare_ptr_data (const void *, const void *);
static void relocate_ptrs (void *, void *);
static void write_pch_globals (const struct ggc_root_tab * const *tab,
                               struct traversal_state *state);
static double ggc_rlimit_bound (double);
/* Maintain global roots that are preserved during GC.  */

/* Process a slot of an htab by deleting it if it has not been marked.  */

static int
ggc_htab_delete (void **slot, void *info)
{
  const struct ggc_cache_tab *r = (const struct ggc_cache_tab *) info;

  if (! (*r->marked_p) (*slot))
    htab_clear_slot (*r->base, slot);
  else
    (*r->cb) (*slot);

  return 1;
}
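/* Returning nonzero here keeps htab_traverse going: a zero return from the
   callback ends the walk early, and every slot of a cache must be visited
   during collection.  */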
/* This extra vector of dynamically registered root_tab-s is used by
   ggc_mark_roots and gives the ability to dynamically add new GGC root
   tables, for instance from some plugins; this vector is on the heap
   since it is used by GGC internally.  */
typedef const struct ggc_root_tab *const_ggc_root_tab_t;
DEF_VEC_P(const_ggc_root_tab_t);
DEF_VEC_ALLOC_P(const_ggc_root_tab_t, heap);
static VEC(const_ggc_root_tab_t, heap) *extra_root_vec;

/* Dynamically register a new GGC root table RT.  This is useful for
   plugins.  */

void
ggc_register_root_tab (const struct ggc_root_tab* rt)
{
  if (rt)
    VEC_safe_push (const_ggc_root_tab_t, heap, extra_root_vec, rt);
}
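/* As an illustrative sketch: a plugin with a GC-protected global such as

       static GTY(()) tree my_plugin_decl;

   would typically build a one-entry table terminated by LAST_GGC_ROOT_TAB
   and pass it to ggc_register_root_tab from its plugin_init:

       static const struct ggc_root_tab my_roots[] = {
         { &my_plugin_decl, 1, sizeof (my_plugin_decl),
           &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node },
         LAST_GGC_ROOT_TAB
       };
       ggc_register_root_tab (my_roots);

   my_plugin_decl and my_roots are hypothetical names; the marker routines
   shown are the GTY-generated walkers for `tree', and other rooted types
   would use their own generated routines.  */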
/* This extra vector of dynamically registered cache_tab-s is used by
   ggc_mark_roots and gives the ability to dynamically add new GGC cache
   tables, for instance from some plugins; this vector is on the heap
   since it is used by GGC internally.  */

typedef const struct ggc_cache_tab *const_ggc_cache_tab_t;
DEF_VEC_P(const_ggc_cache_tab_t);
DEF_VEC_ALLOC_P(const_ggc_cache_tab_t, heap);
static VEC(const_ggc_cache_tab_t, heap) *extra_cache_vec;

/* Dynamically register a new GGC cache table CT.  This is useful for
   plugins.  */

void
ggc_register_cache_tab (const struct ggc_cache_tab* ct)
{
  if (ct)
    VEC_safe_push (const_ggc_cache_tab_t, heap, extra_cache_vec, ct);
}
/* Scan a hash table that has objects which are to be deleted if they are not
   already marked.  */

static void
ggc_scan_cache_tab (const_ggc_cache_tab_t ctp)
{
  const struct ggc_cache_tab *cti;

  for (cti = ctp; cti->base != NULL; cti++)
    if (*cti->base)
      {
        ggc_set_mark (*cti->base);
        htab_traverse_noresize (*cti->base, ggc_htab_delete,
                                CONST_CAST (void *, (const void *)cti));
        ggc_set_mark ((*cti->base)->entries);
      }
}
/* Iterate through all registered roots and mark each element.  */

void
ggc_mark_roots (void)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  const_ggc_root_tab_t rtp;
  const struct ggc_cache_tab *const *ct;
  const_ggc_cache_tab_t ctp;
  size_t i;

  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        (*rti->cb) (*(void **)((char *)rti->base + rti->stride * i));

  for (i = 0; VEC_iterate (const_ggc_root_tab_t, extra_root_vec, i, rtp); i++)
    for (rti = rtp; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        (*rti->cb) (*(void **) ((char *)rti->base + rti->stride * i));

  if (ggc_protect_identifiers)
    ggc_mark_stringpool ();

  /* Now scan all hash tables that have objects which are to be deleted if
     they are not already marked.  */
  for (ct = gt_ggc_cache_rtab; *ct; ct++)
    ggc_scan_cache_tab (*ct);

  for (i = 0; VEC_iterate (const_ggc_cache_tab_t, extra_cache_vec, i, ctp); i++)
    ggc_scan_cache_tab (ctp);

  if (! ggc_protect_identifiers)
    ggc_purge_stringpool ();

  /* Some plugins may call ggc_set_mark from here.  */
  invoke_plugin_callbacks (PLUGIN_GGC_MARKING, NULL);
}
/* Allocate a block of memory, then clear it.  */

void *
ggc_alloc_cleared_stat (size_t size MEM_STAT_DECL)
{
  void *buf = ggc_alloc_stat (size PASS_MEM_STAT);
  memset (buf, 0, size);
  return buf;
}
/* Resize a block of memory, possibly re-allocating it.  */

void *
ggc_realloc_stat (void *x, size_t size MEM_STAT_DECL)
{
  void *r;
  size_t old_size;

  if (x == NULL)
    return ggc_alloc_stat (size PASS_MEM_STAT);

  old_size = ggc_get_size (x);

  if (size <= old_size)
    {
      /* Mark the unwanted memory as unaccessible.  We also need to make
         the "new" size accessible, since ggc_get_size returns the size of
         the pool, not the size of the individually allocated object, the
         size which was previously made accessible.  Unfortunately, we
         don't know that previously allocated size.  Without that
         knowledge we have to lose some initialization-tracking for the
         old parts of the object.  An alternative is to mark the whole
         old_size as reachable, but that would lose tracking of writes
         after the end of the object (by small offsets).  Discard the
         handle to avoid handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) x + size,
                                                    old_size - size));
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, size));
      return x;
    }

  r = ggc_alloc_stat (size PASS_MEM_STAT);

  /* Since ggc_get_size returns the size of the pool, not the size of the
     individually allocated object, we'd access parts of the old object
     that were marked invalid with the memcpy below.  We lose a bit of the
     initialization-tracking since some of it may be uninitialized.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, old_size));

  memcpy (r, x, old_size);

  /* The old object is not supposed to be used anymore.  */
  ggc_free (x);

  return r;
}
/* Like ggc_alloc_cleared, but performs a multiplication.  */

void *
ggc_calloc (size_t s1, size_t s2)
{
  return ggc_alloc_cleared (s1 * s2);
}

/* These are for splay_tree_new_ggc.  */

void *
ggc_splay_alloc (int sz, void *nl)
{
  gcc_assert (!nl);
  return ggc_alloc (sz);
}

void
ggc_splay_dont_free (void * x ATTRIBUTE_UNUSED, void *nl)
{
  gcc_assert (!nl);
}
/* Print statistics that are independent of the collector in use.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
                  ? (x) \
                  : ((x) < 1024*1024*10 \
                     ? (x) / 1024 \
                     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
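/* A typical (hypothetical) use by a collector's statistics code would be
   something like

       fprintf (stream, "Total memory: %10lu%c\n", SCALE (bytes), LABEL (bytes));

   printing the value in bytes, kilobytes, or megabytes with a matching
   suffix character.  */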
void
ggc_print_common_statistics (FILE *stream ATTRIBUTE_UNUSED,
                             ggc_statistics *stats)
{
  /* Set the pointer so that during collection we will actually gather
     the statistics.  */
  ggc_stats = stats;

  /* Then do one collection to fill in the statistics.  */
  ggc_collect ();

  /* At present, we don't really gather any interesting statistics.  */

  /* Don't gather statistics any more.  */
  ggc_stats = NULL;
}
/* Functions for saving and restoring GCable memory to disk.  */

static htab_t saving_htab;

struct ptr_data
{
  void *obj;
  void *note_ptr_cookie;
  gt_note_pointers note_ptr_fn;
  gt_handle_reorder reorder_fn;
  size_t size;
  void *new_addr;
  enum gt_types_enum type;
};

#define POINTER_HASH(x) (hashval_t)((long)x >> 3)
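/* The low-order bits of a GC pointer carry little information (objects are
   aligned to at least an 8-byte boundary on the usual hosts), so they are
   shifted out before the value is used as a hash.  */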
/* Register an object in the hash table.  */

int
gt_pch_note_object (void *obj, void *note_ptr_cookie,
                    gt_note_pointers note_ptr_fn,
                    enum gt_types_enum type)
{
  struct ptr_data **slot;

  if (obj == NULL || obj == (void *) 1)
    return 0;

  slot = (struct ptr_data **)
    htab_find_slot_with_hash (saving_htab, obj, POINTER_HASH (obj),
                              INSERT);
  if (*slot != NULL)
    {
      gcc_assert ((*slot)->note_ptr_fn == note_ptr_fn
                  && (*slot)->note_ptr_cookie == note_ptr_cookie);
      return 0;
    }

  *slot = XCNEW (struct ptr_data);
  (*slot)->obj = obj;
  (*slot)->note_ptr_fn = note_ptr_fn;
  (*slot)->note_ptr_cookie = note_ptr_cookie;
  if (note_ptr_fn == gt_pch_p_S)
    (*slot)->size = strlen ((const char *)obj) + 1;
  else
    (*slot)->size = ggc_get_size (obj);
  (*slot)->type = type;
  return 1;
}
/* Register a reorder function for an object already in the hash table.  */

void
gt_pch_note_reorder (void *obj, void *note_ptr_cookie,
                     gt_handle_reorder reorder_fn)
{
  struct ptr_data *data;

  if (obj == NULL || obj == (void *) 1)
    return;

  data = (struct ptr_data *)
    htab_find_with_hash (saving_htab, obj, POINTER_HASH (obj));
  gcc_assert (data && data->note_ptr_cookie == note_ptr_cookie);

  data->reorder_fn = reorder_fn;
}
/* Hash and equality functions for saving_htab, callbacks for htab_create.  */

static hashval_t
saving_htab_hash (const void *p)
{
  return POINTER_HASH (((const struct ptr_data *)p)->obj);
}

static int
saving_htab_eq (const void *p1, const void *p2)
{
  return ((const struct ptr_data *)p1)->obj == p2;
}
/* Handy state for the traversal functions.  */

struct traversal_state
{
  FILE *f;
  struct ggc_pch_data *d;
  size_t count;
  struct ptr_data **ptrs;
  size_t ptrs_i;
};

/* Callbacks for htab_traverse.  */

static int
call_count (void **slot, void *state_p)
{
  struct ptr_data *d = (struct ptr_data *)*slot;
  struct traversal_state *state = (struct traversal_state *)state_p;

  ggc_pch_count_object (state->d, d->obj, d->size,
                        d->note_ptr_fn == gt_pch_p_S,
                        d->type);
  state->count++;
  return 1;
}

static int
call_alloc (void **slot, void *state_p)
{
  struct ptr_data *d = (struct ptr_data *)*slot;
  struct traversal_state *state = (struct traversal_state *)state_p;

  d->new_addr = ggc_pch_alloc_object (state->d, d->obj, d->size,
                                      d->note_ptr_fn == gt_pch_p_S,
                                      d->type);
  state->ptrs[state->ptrs_i++] = d;
  return 1;
}
/* Callback for qsort.  */

static int
compare_ptr_data (const void *p1_p, const void *p2_p)
{
  const struct ptr_data *const p1 = *(const struct ptr_data *const *)p1_p;
  const struct ptr_data *const p2 = *(const struct ptr_data *const *)p2_p;
  return (((size_t)p1->new_addr > (size_t)p2->new_addr)
          - ((size_t)p1->new_addr < (size_t)p2->new_addr));
}
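/* Comparing with two relational tests and subtracting the results, rather
   than subtracting the addresses themselves, keeps the value within the
   range of int and yields a clean -1/0/1 ordering for qsort.  */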
/* Callbacks for note_ptr_fn.  */

static void
relocate_ptrs (void *ptr_p, void *state_p)
{
  void **ptr = (void **)ptr_p;
  struct traversal_state *state ATTRIBUTE_UNUSED
    = (struct traversal_state *)state_p;
  struct ptr_data *result;

  if (*ptr == NULL || *ptr == (void *)1)
    return;

  result = (struct ptr_data *)
    htab_find_with_hash (saving_htab, *ptr, POINTER_HASH (*ptr));
  gcc_assert (result);
  *ptr = result->new_addr;
}
/* Write out, after relocation, the pointers in TAB.  */
static void
write_pch_globals (const struct ggc_root_tab * const *tab,
                   struct traversal_state *state)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;

  for (rt = tab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        {
          void *ptr = *(void **)((char *)rti->base + rti->stride * i);
          struct ptr_data *new_ptr;
          if (ptr == NULL || ptr == (void *)1)
            {
              if (fwrite (&ptr, sizeof (void *), 1, state->f)
                  != 1)
                fatal_error ("can't write PCH file: %m");
            }
          else
            {
              new_ptr = (struct ptr_data *)
                htab_find_with_hash (saving_htab, ptr, POINTER_HASH (ptr));
              if (fwrite (&new_ptr->new_addr, sizeof (void *), 1, state->f)
                  != 1)
                fatal_error ("can't write PCH file: %m");
            }
        }
}
/* Hold the information we need to mmap the file back in.  */

struct mmap_info
{
  size_t offset;
  size_t size;
  void *preferred_base;
};
/* Write out the state of the compiler to F.  */

void
gt_pch_save (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct traversal_state state;
  char *this_object = NULL;
  size_t this_object_size = 0;
  struct mmap_info mmi;
  const size_t mmap_offset_alignment = host_hooks.gt_pch_alloc_granularity();

  gt_pch_save_stringpool ();

  saving_htab = htab_create (50000, saving_htab_hash, saving_htab_eq, free);

  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        (*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  for (rt = gt_pch_cache_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        (*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  /* Prepare the objects for writing, determine addresses and such.  */
  state.f = f;
  state.d = init_ggc_pch();
  state.count = 0;
  htab_traverse (saving_htab, call_count, &state);

  mmi.size = ggc_pch_total_size (state.d);

  /* Try to arrange things so that no relocation is necessary, but
     don't try very hard.  On most platforms, this will always work,
     and on the rest it's a lot of work to do better.
     (The extra work goes in HOST_HOOKS_GT_PCH_GET_ADDRESS and
     HOST_HOOKS_GT_PCH_USE_ADDRESS.)  */
  mmi.preferred_base = host_hooks.gt_pch_get_address (mmi.size, fileno (f));

  ggc_pch_this_base (state.d, mmi.preferred_base);

  state.ptrs = XNEWVEC (struct ptr_data *, state.count);
  state.ptrs_i = 0;
  htab_traverse (saving_htab, call_alloc, &state);
  qsort (state.ptrs, state.count, sizeof (*state.ptrs), compare_ptr_data);

  /* Write out all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fwrite (rti->base, rti->stride, 1, f) != 1)
        fatal_error ("can't write PCH file: %m");

  /* Write out all the global pointers, after translation.  */
  write_pch_globals (gt_ggc_rtab, &state);
  write_pch_globals (gt_pch_cache_rtab, &state);

  /* Pad the PCH file so that the mmapped area starts on an allocation
     granularity (usually page) boundary.  */
  {
    long o;

    o = ftell (state.f) + sizeof (mmi);
    if (o == -1)
      fatal_error ("can't get position in PCH file: %m");
    mmi.offset = mmap_offset_alignment - o % mmap_offset_alignment;
    if (mmi.offset == mmap_offset_alignment)
      mmi.offset = 0;
    mmi.offset += o;
  }
  if (fwrite (&mmi, sizeof (mmi), 1, state.f) != 1)
    fatal_error ("can't write PCH file: %m");
  if (mmi.offset != 0
      && fseek (state.f, mmi.offset, SEEK_SET) != 0)
    fatal_error ("can't write padding to PCH file: %m");

  ggc_pch_prepare_write (state.d, state.f);

  /* Actually write out the objects.  */
  for (i = 0; i < state.count; i++)
    {
      if (this_object_size < state.ptrs[i]->size)
        {
          this_object_size = state.ptrs[i]->size;
          this_object = XRESIZEVAR (char, this_object, this_object_size);
        }
      memcpy (this_object, state.ptrs[i]->obj, state.ptrs[i]->size);
      if (state.ptrs[i]->reorder_fn != NULL)
        state.ptrs[i]->reorder_fn (state.ptrs[i]->obj,
                                   state.ptrs[i]->note_ptr_cookie,
                                   relocate_ptrs, &state);
      state.ptrs[i]->note_ptr_fn (state.ptrs[i]->obj,
                                  state.ptrs[i]->note_ptr_cookie,
                                  relocate_ptrs, &state);
      ggc_pch_write_object (state.d, state.f, state.ptrs[i]->obj,
                            state.ptrs[i]->new_addr, state.ptrs[i]->size,
                            state.ptrs[i]->note_ptr_fn == gt_pch_p_S);
      if (state.ptrs[i]->note_ptr_fn != gt_pch_p_S)
        memcpy (state.ptrs[i]->obj, this_object, state.ptrs[i]->size);
    }
  ggc_pch_finish (state.d, state.f);
  gt_pch_fixup_stringpool ();

  free (state.ptrs);
  htab_delete (saving_htab);
}
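/* To summarize the layout produced above, the PCH data written to F is, in
   order: the scalar roots, the translated global pointers, the mmap_info
   record, padding up to the next allocation-granularity boundary, and
   finally the GC objects themselves at mmi.offset, ready to be mapped back
   to mmi.preferred_base by gt_pch_restore.  */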
/* Read the state of the compiler back in from F.  */

void
gt_pch_restore (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct mmap_info mmi;
  int result;

  /* Delete any deletable objects.  This makes ggc_pch_read much
     faster, as it can be sure that no GCable objects remain other
     than the ones just read in.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Read in all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fread (rti->base, rti->stride, 1, f) != 1)
        fatal_error ("can't read PCH file: %m");

  /* Read in all the global pointers, in 6 easy loops.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        if (fread ((char *)rti->base + rti->stride * i,
                   sizeof (void *), 1, f) != 1)
          fatal_error ("can't read PCH file: %m");

  for (rt = gt_pch_cache_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        if (fread ((char *)rti->base + rti->stride * i,
                   sizeof (void *), 1, f) != 1)
          fatal_error ("can't read PCH file: %m");

  if (fread (&mmi, sizeof (mmi), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");

  result = host_hooks.gt_pch_use_address (mmi.preferred_base, mmi.size,
                                          fileno (f), mmi.offset);
  if (result < 0)
    fatal_error ("had to relocate PCH");
  if (result == 0)
    {
      if (fseek (f, mmi.offset, SEEK_SET) != 0
          || fread (mmi.preferred_base, mmi.size, 1, f) != 1)
        fatal_error ("can't read PCH file: %m");
    }
  else if (fseek (f, mmi.offset + mmi.size, SEEK_SET) != 0)
    fatal_error ("can't read PCH file: %m");

  ggc_pch_read (f, mmi.preferred_base);

  gt_pch_restore_stringpool ();
}
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is not present.
   Select no address whatsoever, and let gt_pch_save choose what it will with
   malloc, presumably.  */

void *
default_gt_pch_get_address (size_t size ATTRIBUTE_UNUSED,
                            int fd ATTRIBUTE_UNUSED)
{
  return NULL;
}

/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is not present.
   Allocate SIZE bytes with malloc.  Return 0 if the address we got is the
   same as BASE, indicating that the memory has been allocated but needs to
   be read in from the file.  Return -1 if the address differs, so relocation
   of the PCH file would be required.  */

int
default_gt_pch_use_address (void *base, size_t size, int fd ATTRIBUTE_UNUSED,
                            size_t offset ATTRIBUTE_UNUSED)
{
  void *addr = xmalloc (size);
  return (addr == base) - 1;
}
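/* The expression (addr == base) - 1 evaluates to 0 when malloc happened to
   return the requested base address and to -1 otherwise, matching the
   contract described above.  */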
/* Default version of HOST_HOOKS_GT_PCH_ALLOC_GRANULARITY.  Return the
   alignment required for allocating virtual memory.  Usually this is the
   same as pagesize.  */

size_t
default_gt_pch_alloc_granularity (void)
{
  return getpagesize();
}
#if HAVE_MMAP_FILE
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is present.
   We temporarily allocate SIZE bytes, and let the kernel place the data
   wherever it will.  If it worked, that's our spot, if not we're likely
   to be in trouble.  */

void *
mmap_gt_pch_get_address (size_t size, int fd)
{
  void *ret;

  ret = mmap (NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
  if (ret == (void *) MAP_FAILED)
    ret = NULL;
  else
    munmap ((caddr_t) ret, size);

  return ret;
}

/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is present.
   Map SIZE bytes of FD+OFFSET at BASE.  Return 1 if we succeeded at
   mapping the data at BASE, -1 if we couldn't.

   This version assumes that the kernel honors the START operand of mmap
   even without MAP_FIXED if START through START+SIZE are not currently
   mapped with something.  */

int
mmap_gt_pch_use_address (void *base, size_t size, int fd, size_t offset)
{
  void *addr;

  /* We're called with size == 0 if we're not planning to load a PCH
     file at all.  This allows the hook to free any static space that
     we might have allocated at link time.  */
  if (size == 0)
    return -1;

  addr = mmap ((caddr_t) base, size, PROT_READ | PROT_WRITE, MAP_PRIVATE,
               fd, offset);

  return addr == base ? 1 : -1;
}
#endif /* HAVE_MMAP_FILE */
/* Modify the bound based on rlimits.  */
static double
ggc_rlimit_bound (double limit)
{
#if defined(HAVE_GETRLIMIT)
  struct rlimit rlim;
# if defined (RLIMIT_AS)
  /* RLIMIT_AS is what POSIX says is the limit on mmap.  Presumably
     any OS which has RLIMIT_AS also has a working mmap that GCC will use.  */
  if (getrlimit (RLIMIT_AS, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# elif defined (RLIMIT_DATA)
  /* ... but some older OSs bound mmap based on RLIMIT_DATA, or we
     might be on an OS that has a broken mmap.  (Others don't bound
     mmap at all, apparently.)  */
  if (getrlimit (RLIMIT_DATA, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit
      /* Darwin has this horribly bogus default setting of
         RLIMIT_DATA, to 6144Kb.  No-one notices because RLIMIT_DATA
         appears to be ignored.  Ignore such silliness.  If a limit
         this small was actually effective for mmap, GCC wouldn't even
         start up.  */
      && rlim.rlim_cur >= 8 * 1024 * 1024)
    limit = rlim.rlim_cur;
# endif /* RLIMIT_AS or RLIMIT_DATA */
#endif /* HAVE_GETRLIMIT */

  return limit;
}
/* Heuristic to set a default for GGC_MIN_EXPAND.  */
int
ggc_min_expand_heuristic (void)
{
  double min_expand = physmem_total();

  /* Adjust for rlimits.  */
  min_expand = ggc_rlimit_bound (min_expand);

  /* The heuristic is a percentage equal to 30% + 70%*(RAM/1GB), yielding
     a lower bound of 30% and an upper bound of 100% (when RAM >= 1GB).  */
  min_expand /= 1024*1024*1024;
  min_expand *= 70;
  min_expand = MIN (min_expand, 70);
  min_expand += 30;

  return min_expand;
}
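/* For example, on a host with 512MB of RAM and no tighter rlimit this
   yields 30 + 70 * 0.5 = 65, i.e. the heap may grow by about 65% of its
   size between collections; with 1GB or more the value is capped at 100.  */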
/* Heuristic to set a default for GGC_MIN_HEAPSIZE.  */
int
ggc_min_heapsize_heuristic (void)
{
  double phys_kbytes = physmem_total();
  double limit_kbytes = ggc_rlimit_bound (phys_kbytes * 2);

  phys_kbytes /= 1024; /* Convert to Kbytes.  */
  limit_kbytes /= 1024;

  /* The heuristic is RAM/8, with a lower bound of 4M and an upper
     bound of 128M (when RAM >= 1GB).  */
  phys_kbytes /= 8;

#if defined(HAVE_GETRLIMIT) && defined (RLIMIT_RSS)
  /* Try not to overrun the RSS limit while doing garbage collection.
     The RSS limit is only advisory, so no margin is subtracted.  */
  {
    struct rlimit rlim;
    if (getrlimit (RLIMIT_RSS, &rlim) == 0
        && rlim.rlim_cur != (rlim_t) RLIM_INFINITY)
      phys_kbytes = MIN (phys_kbytes, rlim.rlim_cur / 1024);
  }
#endif

  /* Don't blindly run over our data limit; do GC at least when the
     *next* GC would be within 20Mb of the limit or within a quarter of
     the limit, whichever is larger.  If GCC does hit the data limit,
     compilation will fail, so this tries to be conservative.  */
  limit_kbytes = MAX (0, limit_kbytes - MAX (limit_kbytes / 4, 20 * 1024));
  limit_kbytes = (limit_kbytes * 100) / (110 + ggc_min_expand_heuristic());
  phys_kbytes = MIN (phys_kbytes, limit_kbytes);

  phys_kbytes = MAX (phys_kbytes, 4 * 1024);
  phys_kbytes = MIN (phys_kbytes, 128 * 1024);

  return phys_kbytes;
}
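/* For example, with 256MB of RAM the RAM/8 term gives a 32MB minimum heap
   size; with 1GB or more it hits the 128MB cap, and a tight data rlimit
   can pull the value back down through limit_kbytes.  */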
/* Install the above heuristics as the default GGC parameters, unless GC
   checking or always-collect mode is enabled.  */
void
init_ggc_heuristics (void)
{
#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
  set_param_value ("ggc-min-expand", ggc_min_expand_heuristic());
  set_param_value ("ggc-min-heapsize", ggc_min_heapsize_heuristic());
#endif
}
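/* The two parameters set above correspond to the --param ggc-min-expand and
   --param ggc-min-heapsize options defined in params.def.  */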
#ifdef GATHER_STATISTICS

/* Datastructure used to store per-call-site statistics.  */
struct loc_descriptor
{
  const char *file;
  int line;
  const char *function;
  int times;
  size_t allocated;
  size_t overhead;
  size_t freed;
  size_t collected;
};

/* Hashtable used for statistics.  */
static htab_t loc_hash;
/* Hash table helper functions.  */
static hashval_t
hash_descriptor (const void *p)
{
  const struct loc_descriptor *const d = (const struct loc_descriptor *) p;

  return htab_hash_pointer (d->function) | d->line;
}
static int
eq_descriptor (const void *p1, const void *p2)
{
  const struct loc_descriptor *const d = (const struct loc_descriptor *) p1;
  const struct loc_descriptor *const d2 = (const struct loc_descriptor *) p2;

  return (d->file == d2->file && d->line == d2->line
          && d->function == d2->function);
}
/* Hashtable converting address of allocated field to loc descriptor.  */
static htab_t ptr_hash;
struct ptr_hash_entry
{
  void *ptr;
  struct loc_descriptor *loc;
  size_t size;
};
/* Hash table helper functions.  */
static hashval_t
hash_ptr (const void *p)
{
  const struct ptr_hash_entry *const d = (const struct ptr_hash_entry *) p;

  return htab_hash_pointer (d->ptr);
}

static int
eq_ptr (const void *p1, const void *p2)
{
  const struct ptr_hash_entry *const p = (const struct ptr_hash_entry *) p1;

  return (p->ptr == p2);
}
/* Return descriptor for given call site, create new one if needed.  */
static struct loc_descriptor *
loc_descriptor (const char *name, int line, const char *function)
{
  struct loc_descriptor loc;
  struct loc_descriptor **slot;

  loc.file = name;
  loc.line = line;
  loc.function = function;
  if (!loc_hash)
    loc_hash = htab_create (10, hash_descriptor, eq_descriptor, NULL);

  slot = (struct loc_descriptor **) htab_find_slot (loc_hash, &loc, INSERT);
  if (*slot)
    return *slot;
  *slot = XCNEW (struct loc_descriptor);
  (*slot)->file = name;
  (*slot)->line = line;
  (*slot)->function = function;
  return *slot;
}
/* Record ALLOCATED and OVERHEAD bytes to descriptor NAME:LINE (FUNCTION).  */
void
ggc_record_overhead (size_t allocated, size_t overhead, void *ptr,
                     const char *name, int line, const char *function)
{
  struct loc_descriptor *loc = loc_descriptor (name, line, function);
  struct ptr_hash_entry *p = XNEW (struct ptr_hash_entry);
  PTR *slot;

  p->ptr = ptr;
  p->loc = loc;
  p->size = allocated + overhead;
  if (!ptr_hash)
    ptr_hash = htab_create (10, hash_ptr, eq_ptr, NULL);
  slot = htab_find_slot_with_hash (ptr_hash, ptr, htab_hash_pointer (ptr), INSERT);
  gcc_assert (!*slot);
  *slot = p;

  loc->times++;
  loc->allocated += allocated;
  loc->overhead += overhead;
}
/* Helper function for prune_overhead_list.  See if SLOT is still marked and
   remove it from hashtable if it is not.  */
static int
ggc_prune_ptr (void **slot, void *b ATTRIBUTE_UNUSED)
{
  struct ptr_hash_entry *p = (struct ptr_hash_entry *) *slot;
  if (!ggc_marked_p (p->ptr))
    {
      p->loc->collected += p->size;
      htab_clear_slot (ptr_hash, slot);
      free (p);
    }
  return 1;
}
/* After live values have been marked, walk all recorded pointers and see if
   they are still live.  */
void
ggc_prune_overhead_list (void)
{
  htab_traverse (ptr_hash, ggc_prune_ptr, NULL);
}
/* Notice that the pointer has been freed.  */
void
ggc_free_overhead (void *ptr)
{
  PTR *slot = htab_find_slot_with_hash (ptr_hash, ptr, htab_hash_pointer (ptr),
                                        NO_INSERT);
  struct ptr_hash_entry *p = (struct ptr_hash_entry *) *slot;
  p->loc->freed += p->size;
  htab_clear_slot (ptr_hash, slot);
  free (p);
}
/* Helper for qsort; sort descriptors by amount of memory consumed.  */
static int
final_cmp_statistic (const void *loc1, const void *loc2)
{
  const struct loc_descriptor *const l1 =
    *(const struct loc_descriptor *const *) loc1;
  const struct loc_descriptor *const l2 =
    *(const struct loc_descriptor *const *) loc2;
  long diff;
  diff = ((long)(l1->allocated + l1->overhead - l1->freed) -
          (l2->allocated + l2->overhead - l2->freed));
  return diff > 0 ? 1 : diff < 0 ? -1 : 0;
}
/* Helper for qsort; sort descriptors by amount of memory consumed.  */
static int
cmp_statistic (const void *loc1, const void *loc2)
{
  const struct loc_descriptor *const l1 =
    *(const struct loc_descriptor *const *) loc1;
  const struct loc_descriptor *const l2 =
    *(const struct loc_descriptor *const *) loc2;
  long diff;

  diff = ((long)(l1->allocated + l1->overhead - l1->freed - l1->collected) -
          (l2->allocated + l2->overhead - l2->freed - l2->collected));
  if (diff)
    return diff > 0 ? 1 : diff < 0 ? -1 : 0;
  diff = ((long)(l1->allocated + l1->overhead - l1->freed) -
          (l2->allocated + l2->overhead - l2->freed));
  return diff > 0 ? 1 : diff < 0 ? -1 : 0;
}
/* Collect array of the descriptors from hashtable.  */
static struct loc_descriptor **loc_array;
static int
add_statistics (void **slot, void *b)
{
  int *n = (int *)b;
  loc_array[*n] = (struct loc_descriptor *) *slot;
  (*n)++;
  return 1;
}
#endif /* GATHER_STATISTICS */
/* Dump per-site memory statistics.  */

void
dump_ggc_loc_statistics (bool final ATTRIBUTE_UNUSED)
{
#ifdef GATHER_STATISTICS
  int nentries = 0;
  char s[4096];
  size_t collected = 0, freed = 0, allocated = 0, overhead = 0, times = 0;
  int i;

  ggc_force_collect = true;
  ggc_collect ();

  loc_array = XCNEWVEC (struct loc_descriptor *, loc_hash->n_elements);
  fprintf (stderr, "-------------------------------------------------------\n");
  fprintf (stderr, "\n%-48s %10s %10s %10s %10s %10s\n",
           "source location", "Garbage", "Freed", "Leak", "Overhead", "Times");
  fprintf (stderr, "-------------------------------------------------------\n");
  htab_traverse (loc_hash, add_statistics, &nentries);
  qsort (loc_array, nentries, sizeof (*loc_array),
         final ? final_cmp_statistic : cmp_statistic);
  for (i = 0; i < nentries; i++)
    {
      struct loc_descriptor *d = loc_array[i];
      allocated += d->allocated;
      times += d->times;
      freed += d->freed;
      collected += d->collected;
      overhead += d->overhead;
    }
  for (i = 0; i < nentries; i++)
    {
      struct loc_descriptor *d = loc_array[i];
      if (d->allocated)
        {
          const char *s1 = d->file;
          const char *s2;
          while ((s2 = strstr (s1, "gcc/")))
            s1 = s2 + 4;
          sprintf (s, "%s:%i (%s)", s1, d->line, d->function);
          s[48] = 0;
          fprintf (stderr, "%-48s %10li:%4.1f%% %10li:%4.1f%% %10li:%4.1f%% %10li:%4.1f%% %10li\n", s,
                   (long)d->collected,
                   (d->collected) * 100.0 / collected,
                   (long)d->freed,
                   (d->freed) * 100.0 / freed,
                   (long)(d->allocated + d->overhead - d->freed - d->collected),
                   (d->allocated + d->overhead - d->freed - d->collected) * 100.0
                   / (allocated + overhead - freed - collected),
                   (long)d->overhead,
                   d->overhead * 100.0 / overhead,
                   (long)d->times);
        }
    }
  fprintf (stderr, "%-48s %10ld %10ld %10ld %10ld %10ld\n",
           "Total", (long)collected, (long)freed,
           (long)(allocated + overhead - freed - collected), (long)overhead,
           (long)times);
  fprintf (stderr, "%-48s %10s %10s %10s %10s %10s\n",
           "source location", "Garbage", "Freed", "Leak", "Overhead", "Times");
  fprintf (stderr, "-------------------------------------------------------\n");
  ggc_force_collect = false;
#endif
}