/* Simple garbage collection for the GNU compiler.
   Copyright (C) 1999, 2000, 2001, 2002, 2003
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

/* Generic garbage collection (GC) functions and data, not specific to
   any particular GC implementation.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hashtab.h"
#include "ggc.h"
#include "toplev.h"
#include "params.h"
#include "hosthooks.h"

#ifdef HAVE_SYS_RESOURCE_H
# include <sys/resource.h>
#endif

#ifdef HAVE_MMAP_FILE
# include <sys/mman.h>
# ifdef HAVE_MINCORE
/* This is on Solaris.  */
#  include <sys/types.h>
# endif
#endif

#ifndef MAP_FAILED
# define MAP_FAILED ((void *)-1)
#endif

#ifdef ENABLE_VALGRIND_CHECKING
# ifdef HAVE_MEMCHECK_H
#  include <memcheck.h>
# else
#  include <valgrind.h>
# endif
#else
/* Avoid #ifdef:s when we can help it.  */
#define VALGRIND_DISCARD(x)
#endif

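/* Note: when valgrind checking is disabled, VALGRIND_DISCARD expands to
   nothing, so the VALGRIND_* annotations scattered through this file
   (for instance in ggc_realloc below) compile away entirely.  */
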
/* Statistics about the allocation.  */
static ggc_statistics *ggc_stats;

struct traversal_state;

static int ggc_htab_delete (void **, void *);
static hashval_t saving_htab_hash (const void *);
static int saving_htab_eq (const void *, const void *);
static int call_count (void **, void *);
static int call_alloc (void **, void *);
static int compare_ptr_data (const void *, const void *);
static void relocate_ptrs (void *, void *);
static void write_pch_globals (const struct ggc_root_tab * const *tab,
                               struct traversal_state *state);
static double ggc_rlimit_bound (double);

/* Maintain global roots that are preserved during GC.  */

/* Process a slot of an htab by deleting it if it has not been marked.  */

static int
ggc_htab_delete (void **slot, void *info)
{
  const struct ggc_cache_tab *r = (const struct ggc_cache_tab *) info;

  if (! (*r->marked_p) (*slot))
    htab_clear_slot (*r->base, slot);
  else
    (*r->cb) (*slot);

  return 1;
}

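/* Caching hash tables (those registered in gt_ggc_cache_rtab) hold
   entries that are allowed to disappear at collection time: in
   ggc_mark_roots below, any slot whose object was not marked through
   some other reference is simply cleared rather than kept alive.  */
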
/* Iterate through all registered roots and mark each element.  */

void
ggc_mark_roots (void)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  const struct ggc_cache_tab *const *ct;
  const struct ggc_cache_tab *cti;
  size_t i;

  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        (*rti->cb) (*(void **) ((char *) rti->base + rti->stride * i));

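/* Each ggc_root_tab entry describes NELT pointers starting at BASE and
   spaced STRIDE bytes apart, with CB the routine that marks one of them;
   roughly speaking, a single GTY-marked global pointer corresponds to an
   entry with nelt == 1 and stride == sizeof (void *).  */
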
  ggc_mark_stringpool ();

  /* Now scan all hash tables that have objects which are to be deleted if
     they are not already marked.  */
  for (ct = gt_ggc_cache_rtab; *ct; ct++)
    for (cti = *ct; cti->base != NULL; cti++)
      if (*cti->base)
        {
          ggc_set_mark (*cti->base);
          htab_traverse_noresize (*cti->base, ggc_htab_delete, (void *) cti);
          ggc_set_mark ((*cti->base)->entries);
        }
}

/* Allocate a block of memory, then clear it.  */

void *
ggc_alloc_cleared (size_t size)
{
  void *buf = ggc_alloc (size);
  memset (buf, 0, size);
  return buf;
}

/* Resize a block of memory, possibly re-allocating it.  */

void *
ggc_realloc (void *x, size_t size)
{
  void *r;
  size_t old_size;

  if (x == NULL)
    return ggc_alloc (size);

  old_size = ggc_get_size (x);
  if (size <= old_size)
    {
      /* Mark the unwanted memory as unaccessible.  We also need to make
         the "new" size accessible, since ggc_get_size returns the size of
         the pool, not the size of the individually allocated object, the
         size which was previously made accessible.  Unfortunately, we
         don't know that previously allocated size.  Without that
         knowledge we have to lose some initialization-tracking for the
         old parts of the object.  An alternative is to mark the whole
         old_size as reachable, but that would lose tracking of writes
         after the end of the object (by small offsets).  Discard the
         handle to avoid handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) x + size,
                                                old_size - size));
      VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (x, size));
      return x;
    }

  r = ggc_alloc (size);

  /* Since ggc_get_size returns the size of the pool, not the size of the
     individually allocated object, we'd access parts of the old object
     that were marked invalid with the memcpy below.  We lose a bit of the
     initialization-tracking since some of it may be uninitialized.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (x, old_size));

  memcpy (r, x, old_size);

  /* The old object is not supposed to be used anymore.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (x, old_size));

  return r;
}

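/* For instance, with valgrind checking enabled, shrinking an object that
   occupies a 64-byte pool down to 16 bytes marks bytes 16..63 no-access
   and bytes 0..15 readable again, while growing it copies the whole old
   pool into the new allocation and then poisons the old object.  */
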
/* Like ggc_alloc_cleared, but performs a multiplication.  */

void *
ggc_calloc (size_t s1, size_t s2)
{
  return ggc_alloc_cleared (s1 * s2);
}

/* These are for splay_tree_new_ggc.  */

void *
ggc_splay_alloc (int sz, void *nl)
{
  if (nl != NULL)
    abort ();
  return ggc_alloc (sz);
}

void
ggc_splay_dont_free (void * x ATTRIBUTE_UNUSED, void *nl)
{
  if (nl != NULL)
    abort ();
}

/* Print statistics that are independent of the collector in use.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
                  ? (x) \
                  : ((x) < 1024*1024*10 \
                     ? (x) / 1024 \
                     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))

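/* For example, 5000 bytes prints unscaled with a blank label, 2000000
   bytes prints as 1953 with label 'k', and 200000000 bytes as 190 with
   label 'M': values below 10K stay in bytes, values below 10M are scaled
   to kilobytes, and anything larger to megabytes.  */
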
void
ggc_print_common_statistics (FILE *stream ATTRIBUTE_UNUSED,
                             ggc_statistics *stats)
{
  /* Set the pointer so that during collection we will actually gather
     the statistics.  */
  ggc_stats = stats;

  /* Then do one collection to fill in the statistics.  */
  ggc_collect ();

  /* At present, we don't really gather any interesting statistics.  */

  /* Don't gather statistics any more.  */
  ggc_stats = NULL;
}

/* Functions for saving and restoring GCable memory to disk.  */

static htab_t saving_htab;

struct ptr_data
{
  void *obj;
  void *note_ptr_cookie;
  gt_note_pointers note_ptr_fn;
  gt_handle_reorder reorder_fn;
  size_t size;
  void *new_addr;
};

#define POINTER_HASH(x) (hashval_t)((long)x >> 3)

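/* Shifting the pointer right by three bits discards the low-order bits,
   which are almost always zero for GC-allocated objects because of
   alignment, so the remaining bits spread entries more evenly over the
   hash table.  */
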
/* Register an object in the hash table.  */

int
gt_pch_note_object (void *obj, void *note_ptr_cookie,
                    gt_note_pointers note_ptr_fn)
{
  struct ptr_data **slot;

  if (obj == NULL || obj == (void *) 1)
    return 0;

  slot = (struct ptr_data **)
    htab_find_slot_with_hash (saving_htab, obj, POINTER_HASH (obj),
                              INSERT);
  if (*slot != NULL)
    {
      if ((*slot)->note_ptr_fn != note_ptr_fn
          || (*slot)->note_ptr_cookie != note_ptr_cookie)
        abort ();
      return 0;
    }

  *slot = xcalloc (sizeof (struct ptr_data), 1);
  (*slot)->obj = obj;
  (*slot)->note_ptr_fn = note_ptr_fn;
  (*slot)->note_ptr_cookie = note_ptr_cookie;
  if (note_ptr_fn == gt_pch_p_S)
    (*slot)->size = strlen (obj) + 1;
  else
    (*slot)->size = ggc_get_size (obj);
  return 1;
}

/* Register an object in the hash table.  */

void
gt_pch_note_reorder (void *obj, void *note_ptr_cookie,
                     gt_handle_reorder reorder_fn)
{
  struct ptr_data *data;

  if (obj == NULL || obj == (void *) 1)
    return;

  data = htab_find_with_hash (saving_htab, obj, POINTER_HASH (obj));
  if (data == NULL
      || data->note_ptr_cookie != note_ptr_cookie)
    abort ();

  data->reorder_fn = reorder_fn;
}

/* Hash and equality functions for saving_htab, callbacks for htab_create.  */

static hashval_t
saving_htab_hash (const void *p)
{
  return POINTER_HASH (((struct ptr_data *)p)->obj);
}

static int
saving_htab_eq (const void *p1, const void *p2)
{
  return ((struct ptr_data *)p1)->obj == p2;
}

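/* Note the asymmetry in saving_htab_eq: the first argument is a ptr_data
   entry already stored in the table, while the second is the bare
   pointer being looked up, which matches how htab_find_with_hash and
   htab_find_slot_with_hash are called in this file.  */
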
/* Handy state for the traversal functions.  */

struct traversal_state
{
  FILE *f;
  struct ggc_pch_data *d;
  size_t count;
  struct ptr_data **ptrs;
  size_t ptrs_i;
};

/* Callbacks for htab_traverse.  */

static int
call_count (void **slot, void *state_p)
{
  struct ptr_data *d = (struct ptr_data *)*slot;
  struct traversal_state *state = (struct traversal_state *)state_p;

  ggc_pch_count_object (state->d, d->obj, d->size);
  state->count++;
  return 1;
}

static int
call_alloc (void **slot, void *state_p)
{
  struct ptr_data *d = (struct ptr_data *)*slot;
  struct traversal_state *state = (struct traversal_state *)state_p;

  d->new_addr = ggc_pch_alloc_object (state->d, d->obj, d->size);
  state->ptrs[state->ptrs_i++] = d;
  return 1;
}

/* Callback for qsort.  */

static int
compare_ptr_data (const void *p1_p, const void *p2_p)
{
  struct ptr_data *p1 = *(struct ptr_data *const *)p1_p;
  struct ptr_data *p2 = *(struct ptr_data *const *)p2_p;
  return (((size_t)p1->new_addr > (size_t)p2->new_addr)
          - ((size_t)p1->new_addr < (size_t)p2->new_addr));
}

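/* The difference of the two comparisons above yields -1, 0 or 1
   directly, avoiding the overflow that subtracting the two addresses and
   narrowing the result to int could produce.  */
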
/* Callbacks for note_ptr_fn.  */

static void
relocate_ptrs (void *ptr_p, void *state_p)
{
  void **ptr = (void **)ptr_p;
  struct traversal_state *state ATTRIBUTE_UNUSED
    = (struct traversal_state *)state_p;
  struct ptr_data *result;

  if (*ptr == NULL || *ptr == (void *)1)
    return;

  result = htab_find_with_hash (saving_htab, *ptr, POINTER_HASH (*ptr));
  if (result == NULL)
    abort ();
  *ptr = result->new_addr;
}

/* Write out, after relocation, the pointers in TAB.  */
static void
write_pch_globals (const struct ggc_root_tab * const *tab,
                   struct traversal_state *state)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;

  for (rt = tab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        {
          void *ptr = *(void **)((char *)rti->base + rti->stride * i);
          struct ptr_data *new_ptr;
          if (ptr == NULL || ptr == (void *)1)
            {
              if (fwrite (&ptr, sizeof (void *), 1, state->f)
                  != 1)
                fatal_error ("can't write PCH file: %m");
            }
          else
            {
              new_ptr = htab_find_with_hash (saving_htab, ptr,
                                             POINTER_HASH (ptr));
              if (fwrite (&new_ptr->new_addr, sizeof (void *), 1, state->f)
                  != 1)
                fatal_error ("can't write PCH file: %m");
            }
        }
}

/* Hold the information we need to mmap the file back in.  */

struct mmap_info
{
  size_t offset;
  size_t size;
  void *preferred_base;
};

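/* OFFSET is the (page-aligned) position in the PCH file where the object
   data begins, SIZE is the number of bytes of object data, and
   PREFERRED_BASE is the address the data was laid out to run at; if the
   reader cannot map the file back at that address, every saved pointer
   needs relocation.  */
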
/* Write out the state of the compiler to F.  */

void
gt_pch_save (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct traversal_state state;
  char *this_object = NULL;
  size_t this_object_size = 0;
  struct mmap_info mmi;
  size_t page_size = getpagesize();

  gt_pch_save_stringpool ();

  saving_htab = htab_create (50000, saving_htab_hash, saving_htab_eq, free);

  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        (*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  for (rt = gt_pch_cache_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        (*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  /* Prepare the objects for writing, determine addresses and such.  */
  state.f = f;
  state.d = init_ggc_pch();
  state.count = 0;
  htab_traverse (saving_htab, call_count, &state);

  mmi.size = ggc_pch_total_size (state.d);

  /* Try to arrange things so that no relocation is necessary, but
     don't try very hard.  On most platforms, this will always work,
     and on the rest it's a lot of work to do better.
     (The extra work goes in HOST_HOOKS_GT_PCH_GET_ADDRESS and
     HOST_HOOKS_GT_PCH_USE_ADDRESS.)  */
  mmi.preferred_base = host_hooks.gt_pch_get_address (mmi.size);

#if HAVE_MMAP_FILE
  if (mmi.preferred_base == NULL)
    {
      mmi.preferred_base = mmap (NULL, mmi.size,
                                 PROT_READ | PROT_WRITE, MAP_PRIVATE,
                                 fileno (state.f), 0);
      if (mmi.preferred_base == (void *) MAP_FAILED)
        mmi.preferred_base = NULL;
      else
        munmap (mmi.preferred_base, mmi.size);
    }
#endif /* HAVE_MMAP_FILE */

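/* The mmap/munmap pair above is only a probe: it asks the kernel for any
   free address range big enough for the PCH data and releases it again
   immediately, on the assumption that the same range will usually still
   be free when the PCH file is read back in.  */
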
  ggc_pch_this_base (state.d, mmi.preferred_base);

  state.ptrs = xmalloc (state.count * sizeof (*state.ptrs));
  state.ptrs_i = 0;
  htab_traverse (saving_htab, call_alloc, &state);
  qsort (state.ptrs, state.count, sizeof (*state.ptrs), compare_ptr_data);

  /* Write out all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fwrite (rti->base, rti->stride, 1, f) != 1)
        fatal_error ("can't write PCH file: %m");

  /* Write out all the global pointers, after translation.  */
  write_pch_globals (gt_ggc_rtab, &state);
  write_pch_globals (gt_pch_cache_rtab, &state);

  ggc_pch_prepare_write (state.d, state.f);

  /* Pad the PCH file so that the mmapped area starts on a page boundary.  */
  {
    long o;
    o = ftell (state.f) + sizeof (mmi);
    if (o == -1)
      fatal_error ("can't get position in PCH file: %m");
    mmi.offset = page_size - o % page_size;
    if (mmi.offset == page_size)
      mmi.offset = 0;
    mmi.offset += o;
  }
  if (fwrite (&mmi, sizeof (mmi), 1, state.f) != 1)
    fatal_error ("can't write PCH file: %m");
  if (mmi.offset != 0
      && fseek (state.f, mmi.offset, SEEK_SET) != 0)
    fatal_error ("can't write padding to PCH file: %m");

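/* For example, with 4096-byte pages, if everything written so far plus
   the mmap_info record ends at file position 10000, the padding is
   4096 - (10000 % 4096) = 2288 bytes and the object data starts at file
   offset 12288, exactly three pages into the file.  */
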
  /* Actually write out the objects.  */
  for (i = 0; i < state.count; i++)
    {
      if (this_object_size < state.ptrs[i]->size)
        {
          this_object_size = state.ptrs[i]->size;
          this_object = xrealloc (this_object, this_object_size);
        }
      memcpy (this_object, state.ptrs[i]->obj, state.ptrs[i]->size);
      if (state.ptrs[i]->reorder_fn != NULL)
        state.ptrs[i]->reorder_fn (state.ptrs[i]->obj,
                                   state.ptrs[i]->note_ptr_cookie,
                                   relocate_ptrs, &state);
      state.ptrs[i]->note_ptr_fn (state.ptrs[i]->obj,
                                  state.ptrs[i]->note_ptr_cookie,
                                  relocate_ptrs, &state);
      ggc_pch_write_object (state.d, state.f, state.ptrs[i]->obj,
                            state.ptrs[i]->new_addr, state.ptrs[i]->size);
      if (state.ptrs[i]->note_ptr_fn != gt_pch_p_S)
        memcpy (state.ptrs[i]->obj, this_object, state.ptrs[i]->size);
    }
  ggc_pch_finish (state.d, state.f);
  gt_pch_fixup_stringpool ();

  free (state.ptrs);
  htab_delete (saving_htab);
}

/* Read the state of the compiler back in from F.  */

void
gt_pch_restore (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct mmap_info mmi;
  void *addr;
  bool needs_read;

  /* Delete any deletable objects.  This makes ggc_pch_read much
     faster, as it can be sure that no GCable objects remain other
     than the ones just read in.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Read in all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fread (rti->base, rti->stride, 1, f) != 1)
        fatal_error ("can't read PCH file: %m");

  /* Read in all the global pointers, in 6 easy loops.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        if (fread ((char *)rti->base + rti->stride * i,
                   sizeof (void *), 1, f) != 1)
          fatal_error ("can't read PCH file: %m");

  for (rt = gt_pch_cache_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        if (fread ((char *)rti->base + rti->stride * i,
                   sizeof (void *), 1, f) != 1)
          fatal_error ("can't read PCH file: %m");

  if (fread (&mmi, sizeof (mmi), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");

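/* What follows tries three ways of getting the saved data back at
   mmi.preferred_base: asking the host via gt_pch_use_address, a plain
   mmap of the file (retried with MAP_FIXED when mincore shows the target
   pages are unused), and finally reading the data into freshly allocated
   memory.  needs_read records whether that last, slow path is taken.  */
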
  if (host_hooks.gt_pch_use_address (mmi.preferred_base, mmi.size))
    {
#if HAVE_MMAP_FILE
      void *mmap_result;

      mmap_result = mmap (mmi.preferred_base, mmi.size,
                          PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED,
                          fileno (f), mmi.offset);

      /* The file might not be mmap-able.  */
      needs_read = mmap_result == (void *) MAP_FAILED;

      /* Sanity check for broken MAP_FIXED.  */
      if (! needs_read && mmap_result != mmi.preferred_base)
        abort ();
#else
      needs_read = true;
#endif
      addr = mmi.preferred_base;
    }
  else
    {
#if HAVE_MMAP_FILE
      addr = mmap (mmi.preferred_base, mmi.size,
                   PROT_READ | PROT_WRITE, MAP_PRIVATE,
                   fileno (f), mmi.offset);

#if HAVE_MINCORE
      if (addr != mmi.preferred_base)
        {
          size_t page_size = getpagesize();
          char one_byte;

          if (addr != (void *) MAP_FAILED)
            munmap (addr, mmi.size);

          /* We really want to be mapped at mmi.preferred_base
             so we're going to resort to MAP_FIXED.  But before,
             make sure that we can do so without destroying a
             previously mapped area, by looping over all pages
             that would be affected by the fixed mapping.  */
          errno = 0;

          for (i = 0; i < mmi.size; i += page_size)
            if (mincore ((char *)mmi.preferred_base + i, page_size,
                         (void *)&one_byte) == -1
                && errno == ENOMEM)
              continue; /* The page is not mapped.  */
            else
              break;

          if (i >= mmi.size)
            addr = mmap (mmi.preferred_base, mmi.size,
                         PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED,
                         fileno (f), mmi.offset);
        }
#endif /* HAVE_MINCORE */

      needs_read = addr == (void *) MAP_FAILED;

#else /* HAVE_MMAP_FILE */
      needs_read = true;
#endif /* HAVE_MMAP_FILE */

      if (needs_read)
        addr = xmalloc (mmi.size);
    }

  if (needs_read)
    {
      if (fseek (f, mmi.offset, SEEK_SET) != 0
          || fread (addr, mmi.size, 1, f) != 1)
        fatal_error ("can't read PCH file: %m");
    }
  else if (fseek (f, mmi.offset + mmi.size, SEEK_SET) != 0)
    fatal_error ("can't read PCH file: %m");

  ggc_pch_read (f, addr);

  if (addr != mmi.preferred_base)
    {
      for (rt = gt_ggc_rtab; *rt; rt++)
        for (rti = *rt; rti->base != NULL; rti++)
          for (i = 0; i < rti->nelt; i++)
            {
              char **ptr = (char **)((char *)rti->base + rti->stride * i);
              if (*ptr != NULL)
                *ptr += (size_t)addr - (size_t)mmi.preferred_base;
            }

      for (rt = gt_pch_cache_rtab; *rt; rt++)
        for (rti = *rt; rti->base != NULL; rti++)
          for (i = 0; i < rti->nelt; i++)
            {
              char **ptr = (char **)((char *)rti->base + rti->stride * i);
              if (*ptr != NULL)
                *ptr += (size_t)addr - (size_t)mmi.preferred_base;
            }

      sorry ("had to relocate PCH");
    }

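/* The loops above only rebase the top-level root pointers; pointers
   stored inside the dumped objects themselves still refer to the
   preferred base, which is why a relocated PCH is reported with sorry ()
   rather than silently patched up.  */
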
  gt_pch_restore_stringpool ();
}

/* Modify the bound based on rlimits.  Keep the smallest number found.  */

static double
ggc_rlimit_bound (double limit)
{
#if defined(HAVE_GETRLIMIT)
  struct rlimit rlim;
# ifdef RLIMIT_RSS
  if (getrlimit (RLIMIT_RSS, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# endif
# ifdef RLIMIT_DATA
  if (getrlimit (RLIMIT_DATA, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# endif
# ifdef RLIMIT_AS
  if (getrlimit (RLIMIT_AS, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# endif
#endif /* HAVE_GETRLIMIT */

  return limit;
}

/* Heuristic to set a default for GGC_MIN_EXPAND.  */

int
ggc_min_expand_heuristic (void)
{
  double min_expand = physmem_total();

  /* Adjust for rlimits.  */
  min_expand = ggc_rlimit_bound (min_expand);

  /* The heuristic is a percentage equal to 30% + 70%*(RAM/1GB), yielding
     a lower bound of 30% and an upper bound of 100% (when RAM >= 1GB).  */
  min_expand /= 1024*1024*1024;
  min_expand *= 70;
  min_expand = MIN (min_expand, 70);
  min_expand += 30;

  return min_expand;
}

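/* For example, on a machine with 512MB of RAM and no tighter rlimit this
   yields 30 + 70 * 0.5 = 65, i.e. a 65% minimum expansion; with 1GB or
   more it saturates at 100.  */
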
/* Heuristic to set a default for GGC_MIN_HEAPSIZE.  */

int
ggc_min_heapsize_heuristic (void)
{
  double min_heap_kbytes = physmem_total();

  /* Adjust for rlimits.  */
  min_heap_kbytes = ggc_rlimit_bound (min_heap_kbytes);

  min_heap_kbytes /= 1024; /* Convert to Kbytes.  */

  /* The heuristic is RAM/8, with a lower bound of 4M and an upper
     bound of 128M (when RAM >= 1GB).  */
  min_heap_kbytes /= 8;
  min_heap_kbytes = MAX (min_heap_kbytes, 4 * 1024);
  min_heap_kbytes = MIN (min_heap_kbytes, 128 * 1024);

  return min_heap_kbytes;
}

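/* For example, 256MB of RAM gives 262144 / 8 = 32768 kB (32MB), which
   falls inside the [4MB, 128MB] clamp; machines with 1GB or more end up
   at the 131072 kB (128MB) maximum.  */
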
void
init_ggc_heuristics (void)
{
#ifndef ENABLE_GC_ALWAYS_COLLECT
  set_param_value ("ggc-min-expand", ggc_min_expand_heuristic());
  set_param_value ("ggc-min-heapsize", ggc_min_heapsize_heuristic());
#endif
}