/* Utilities for ipa analysis.
   Copyright (C) 2005-2020 Free Software Foundation, Inc.
   Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "alloc-pool.h"
#include "cgraph.h"
#include "lto-streamer.h"
#include "dumpfile.h"
#include "splay-tree.h"
#include "ipa-utils.h"
#include "symbol-summary.h"
#include "tree-vrp.h"
#include "ipa-prop.h"
#include "ipa-fnsummary.h"

/* Debugging function for postorder and inorder code.  NOTE is a string
   that is printed before the nodes are printed.  ORDER is an array of
   cgraph_nodes that has COUNT useful nodes in it.  */

void
ipa_print_order (FILE* out,
                 const char * note,
                 struct cgraph_node** order,
                 int count)
{
  int i;
  fprintf (out, "\n\n ordered call graph: %s\n", note);

  for (i = count - 1; i >= 0; i--)
    order[i]->dump (out);
  fprintf (out, "\n");
  fflush (out);
}

struct searchc_env {
  struct cgraph_node **stack;
  struct cgraph_node **result;
  int stack_size;
  int order_pos;
  splay_tree nodes_marked_new;
  bool reduce;
  int count;
};

/* This is an implementation of Tarjan's strongly connected region
   finder as reprinted in Aho, Hopcroft and Ullman's The Design and
   Analysis of Computer Algorithms (1974), pages 192-193.  This version
   has been customized for cgraph_nodes.  The ENV parameter carries the
   traversal state explicitly, because the function is recursive and C
   has no nested functions.  It should only be called from itself or
   from ipa_reduced_postorder.  V is the node to start searching from.  */

static void
searchc (struct searchc_env* env, struct cgraph_node *v,
         bool (*ignore_edge) (struct cgraph_edge *))
{
  struct cgraph_edge *edge;
  struct ipa_dfs_info *v_info = (struct ipa_dfs_info *) v->aux;

  /* Mark node as old.  */
  v_info->new_node = false;
  splay_tree_remove (env->nodes_marked_new, v->get_uid ());

  v_info->dfn_number = env->count;
  v_info->low_link = env->count;
  env->count++;
  env->stack[(env->stack_size)++] = v;
  v_info->on_stack = true;

  for (edge = v->callees; edge; edge = edge->next_callee)
    {
      struct ipa_dfs_info * w_info;
      enum availability avail;
      struct cgraph_node *w = edge->callee->ultimate_alias_target (&avail);

      if (!w || (ignore_edge && ignore_edge (edge)))
        continue;

      if (w->aux
          && (avail >= AVAIL_INTERPOSABLE))
        {
          w_info = (struct ipa_dfs_info *) w->aux;
          if (w_info->new_node)
            {
              searchc (env, w, ignore_edge);
              v_info->low_link =
                (v_info->low_link < w_info->low_link) ?
                v_info->low_link : w_info->low_link;
            }
          else
            if ((w_info->dfn_number < v_info->dfn_number)
                && (w_info->on_stack))
              v_info->low_link =
                (w_info->dfn_number < v_info->low_link) ?
                w_info->dfn_number : v_info->low_link;
        }
    }

  if (v_info->low_link == v_info->dfn_number)
    {
      struct cgraph_node *last = NULL;
      struct cgraph_node *x;
      struct ipa_dfs_info *x_info;
      do {
        x = env->stack[--(env->stack_size)];
        x_info = (struct ipa_dfs_info *) x->aux;
        x_info->on_stack = false;
        x_info->scc_no = v_info->dfn_number;

        if (env->reduce)
          {
            x_info->next_cycle = last;
            last = x;
          }
        else
          env->result[env->order_pos++] = x;
      }
      while (v != x);
      if (env->reduce)
        env->result[env->order_pos++] = v;
    }
}

/* Topsort the call graph by caller relation.  Put the result in ORDER.

   The REDUCE flag is true if you want the cycles reduced to single nodes.
   You can use ipa_get_nodes_in_cycle to obtain a vector containing all real
   call graph nodes in a reduced node.

   Nodes with at least AVAIL_INTERPOSABLE availability are included.
   IGNORE_EDGE, if non-NULL, is a hook that may make some edges insignificant
   for the topological sort.  The number of nodes placed in ORDER is
   returned.  */

int
ipa_reduced_postorder (struct cgraph_node **order,
                       bool reduce,
                       bool (*ignore_edge) (struct cgraph_edge *))
{
  struct cgraph_node *node;
  struct searchc_env env;
  splay_tree_node result;
  env.stack = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
  env.stack_size = 0;
  env.result = order;
  env.order_pos = 0;
  env.nodes_marked_new = splay_tree_new (splay_tree_compare_ints, 0, 0);
  env.count = 1;
  env.reduce = reduce;

  FOR_EACH_DEFINED_FUNCTION (node)
    {
      enum availability avail = node->get_availability ();

      if (avail >= AVAIL_INTERPOSABLE)
        {
          /* Reuse the info if it is already there.  */
          struct ipa_dfs_info *info = (struct ipa_dfs_info *) node->aux;
          if (!info)
            info = XCNEW (struct ipa_dfs_info);
          info->new_node = true;
          info->on_stack = false;
          info->next_cycle = NULL;
          node->aux = info;

          splay_tree_insert (env.nodes_marked_new,
                             (splay_tree_key)node->get_uid (),
                             (splay_tree_value)node);
        }
      else
        node->aux = NULL;
    }
  result = splay_tree_min (env.nodes_marked_new);
  while (result)
    {
      node = (struct cgraph_node *)result->value;
      searchc (&env, node, ignore_edge);
      result = splay_tree_min (env.nodes_marked_new);
    }
  splay_tree_delete (env.nodes_marked_new);
  free (env.stack);

  return env.order_pos;
}
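
/* Illustrative sketch, not part of the original file: a typical consumer of
   the reduced postorder allocates ORDER, walks it, and releases the aux data
   afterwards.  The wrapper name and loop body are hypothetical; only the
   ipa-utils entry points are real.  Guarded out so it cannot affect the
   build.  */
#if 0
static void
example_walk_reduced_postorder (bool (*skip_edge) (struct cgraph_edge *))
{
  /* ORDER must be large enough for every call graph node.  */
  struct cgraph_node **order
    = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
  /* REDUCE == true collapses each strongly connected component into one
     representative node.  */
  int n = ipa_reduced_postorder (order, true, skip_edge);

  for (int i = 0; i < n; i++)
    {
      /* Expand the representative back into the real nodes of its cycle.  */
      vec<cgraph_node *> cycle = ipa_get_nodes_in_cycle (order[i]);
      /* ... process the nodes in CYCLE ...  */
      cycle.release ();
    }

  /* Free the ipa_dfs_info hanging off node->aux, then the array itself.  */
  ipa_free_postorder_info ();
  free (order);
}
#endif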

/* Deallocate all ipa_dfs_info structures pointed to by the aux pointer of call
   graph nodes.  */

void
ipa_free_postorder_info (void)
{
  struct cgraph_node *node;
  FOR_EACH_DEFINED_FUNCTION (node)
    {
      /* Get rid of the aux information.  */
      if (node->aux)
        {
          free (node->aux);
          node->aux = NULL;
        }
    }
}

/* Get the set of nodes for the cycle in the reduced call graph starting
   from NODE.  */

vec<cgraph_node *>
ipa_get_nodes_in_cycle (struct cgraph_node *node)
{
  vec<cgraph_node *> v = vNULL;
  struct ipa_dfs_info *node_dfs_info;
  while (node)
    {
      v.safe_push (node);
      node_dfs_info = (struct ipa_dfs_info *) node->aux;
      node = node_dfs_info->next_cycle;
    }
  return v;
}

/* Return true iff the CS is an edge within a strongly connected component as
   computed by ipa_reduced_postorder.  */

bool
ipa_edge_within_scc (struct cgraph_edge *cs)
{
  struct ipa_dfs_info *caller_dfs = (struct ipa_dfs_info *) cs->caller->aux;
  struct ipa_dfs_info *callee_dfs;
  struct cgraph_node *callee = cs->callee->function_symbol ();

  callee_dfs = (struct ipa_dfs_info *) callee->aux;
  return (caller_dfs
          && callee_dfs
          && caller_dfs->scc_no == callee_dfs->scc_no);
}

struct postorder_stack
{
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  int ref;
};

/* Fill array ORDER with all nodes with output flag set in the reverse
   topological order.  Return the number of elements in the array.
   FIXME: While walking, consider aliases, too.  */

int
ipa_reverse_postorder (struct cgraph_node **order)
{
  struct cgraph_node *node, *node2;
  int stack_size = 0;
  int order_pos = 0;
  struct cgraph_edge *edge;
  int pass;
  struct ipa_ref *ref = NULL;

  struct postorder_stack *stack =
    XCNEWVEC (struct postorder_stack, symtab->cgraph_count);

  /* We have to deal with cycles nicely, so use a depth first traversal
     output algorithm.  Ignore the fact that some functions won't need
     to be output and put them into order as well, so we get dependencies
     right through inline functions.  */
  FOR_EACH_FUNCTION (node)
    node->aux = NULL;
  for (pass = 0; pass < 2; pass++)
    FOR_EACH_FUNCTION (node)
      if (!node->aux
          && (pass
              || (!node->address_taken
                  && !node->inlined_to
                  && !node->alias && !node->thunk.thunk_p
                  && !node->only_called_directly_p ())))
        {
          stack_size = 0;
          stack[stack_size].node = node;
          stack[stack_size].edge = node->callers;
          stack[stack_size].ref = 0;
          node->aux = (void *)(size_t)1;
          while (stack_size >= 0)
            {
              while (true)
                {
                  node2 = NULL;
                  while (stack[stack_size].edge && !node2)
                    {
                      edge = stack[stack_size].edge;
                      node2 = edge->caller;
                      stack[stack_size].edge = edge->next_caller;
                      /* Break possible cycles involving always-inline
                         functions by ignoring edges from always-inline
                         functions to non-always-inline functions.  */
                      if (DECL_DISREGARD_INLINE_LIMITS (edge->caller->decl)
                          && !DECL_DISREGARD_INLINE_LIMITS
                                (edge->callee->function_symbol ()->decl))
                        node2 = NULL;
                    }
                  for (; stack[stack_size].node->iterate_referring (
                                                   stack[stack_size].ref,
                                                   ref) && !node2;
                       stack[stack_size].ref++)
                    {
                      if (ref->use == IPA_REF_ALIAS)
                        node2 = dyn_cast <cgraph_node *> (ref->referring);
                    }
                  if (!node2)
                    break;
                  if (!node2->aux)
                    {
                      stack[++stack_size].node = node2;
                      stack[stack_size].edge = node2->callers;
                      stack[stack_size].ref = 0;
                      node2->aux = (void *)(size_t)1;
                    }
                }
              order[order_pos++] = stack[stack_size--].node;
            }
        }
  free (stack);
  FOR_EACH_FUNCTION (node)
    node->aux = NULL;
  return order_pos;
}
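
/* Illustrative sketch, not part of the original file: callers typically use
   ipa_reverse_postorder to visit every function in an order that respects
   the caller relation, e.g. when choosing an output order.  The wrapper name
   and loop body are hypothetical.  */
#if 0
static void
example_walk_reverse_postorder (void)
{
  struct cgraph_node **order
    = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
  int n = ipa_reverse_postorder (order);
  for (int i = 0; i < n; i++)
    {
      /* ... process order[i] ...  */
    }
  free (order);
}
#endif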

/* Given a memory reference T, will return the variable at the bottom
   of the access.  Unlike get_base_address, this will recurse through
   INDIRECT_REFS.  */

tree
get_base_var (tree t)
{
  while (!SSA_VAR_P (t)
         && (!CONSTANT_CLASS_P (t))
         && TREE_CODE (t) != LABEL_DECL
         && TREE_CODE (t) != FUNCTION_DECL
         && TREE_CODE (t) != CONST_DECL
         && TREE_CODE (t) != CONSTRUCTOR)
    {
      t = TREE_OPERAND (t, 0);
    }
  return t;
}
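
/* Illustrative note, not from the original sources: for a reference such as
   a.b[i].c the loop above strips the COMPONENT_REFs and ARRAY_REF and
   returns the VAR_DECL of `a'; for a dereference such as *p_1 it steps into
   the reference and returns the pointer SSA name p_1 rather than stopping at
   the memory reference itself, which is the sense in which it recurses
   through indirections unlike get_base_address.  */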

/* Scale the profile of calls in NODE by the ratio NODE->count / ORIG_COUNT.  */

void
scale_ipa_profile_for_fn (struct cgraph_node *node, profile_count orig_count)
{
  profile_count to = node->count;
  profile_count::adjust_for_ipa_scaling (&to, &orig_count);
  struct cgraph_edge *e;

  for (e = node->callees; e; e = e->next_callee)
    e->count = e->count.apply_scale (to, orig_count);
  for (e = node->indirect_calls; e; e = e->next_callee)
    e->count = e->count.apply_scale (to, orig_count);
}

/* SRC and DST are going to be merged.  Take SRC's profile and merge it into
   DST so it is not going to be lost.  Possibly destroy SRC's body on the way
   unless PRESERVE_BODY is set.  */

void
ipa_merge_profiles (struct cgraph_node *dst,
                    struct cgraph_node *src,
                    bool preserve_body)
{
  tree oldsrcdecl = src->decl;
  struct function *srccfun, *dstcfun;
  bool match = true;
  bool copy_counts = false;

  if (!src->definition
      || !dst->definition)
    return;

  if (src->frequency < dst->frequency)
    src->frequency = dst->frequency;

  /* Time profiles are merged.  */
  if (dst->tp_first_run > src->tp_first_run && src->tp_first_run)
    dst->tp_first_run = src->tp_first_run;

  if (src->profile_id && !dst->profile_id)
    dst->profile_id = src->profile_id;

  /* Merging a zero profile into DST is a no-op.  */
  if (src->count.ipa () == profile_count::zero ())
    return;

  /* FIXME: when we merge in an unknown profile, we ought to mark the counts
     as unsafe.  */
  if (!src->count.initialized_p ()
      || !(src->count.ipa () == src->count))
    return;
  profile_count orig_count = dst->count;

  /* Either sum the profiles if both are IPA and not global0, or pick the
     more informative one (that is, a nonzero IPA count if the other is
     uninitialized, guessed or global0).  */

  if ((dst->count.ipa ().nonzero_p ()
       || src->count.ipa ().nonzero_p ())
      && dst->count.ipa ().initialized_p ()
      && src->count.ipa ().initialized_p ())
    dst->count = dst->count.ipa () + src->count.ipa ();
  else if (dst->count.ipa ().initialized_p ())
    ;
  else if (src->count.ipa ().initialized_p ())
    {
      copy_counts = true;
      dst->count = src->count.ipa ();
    }

  /* If no updating is needed, return early.  */
  if (dst->count == orig_count)
    return;

  if (symtab->dump_file)
    {
      fprintf (symtab->dump_file, "Merging profiles of %s count:",
               src->dump_name ());
      src->count.dump (symtab->dump_file);
      fprintf (symtab->dump_file, " to %s count:",
               dst->dump_name ());
      orig_count.dump (symtab->dump_file);
      fprintf (symtab->dump_file, " resulting count:");
      dst->count.dump (symtab->dump_file);
      fprintf (symtab->dump_file, "\n");
    }

  /* First handle functions with no gimple body.  */
  if (dst->thunk.thunk_p || dst->alias
      || src->thunk.thunk_p || src->alias)
    {
      scale_ipa_profile_for_fn (dst, orig_count);
      return;
    }

  /* This is ugly.  We need to get both function bodies into memory.
     If the declaration is merged, we need to duplicate it to be able
     to load the body that is being replaced.  This makes the symbol table
     temporarily inconsistent.  */
  if (src->decl == dst->decl)
    {
      struct lto_in_decl_state temp;
      struct lto_in_decl_state *state;

      /* We are going to move the decl; remove its file decl data
         and link it with the new decl.  */
      temp.fn_decl = src->decl;
      lto_in_decl_state **slot
        = src->lto_file_data->function_decl_states->find_slot (&temp,
                                                               NO_INSERT);
      state = *slot;
      src->lto_file_data->function_decl_states->clear_slot (slot);
      gcc_assert (state);

      /* Duplicate the decl and be sure it does not link into body of DST.  */
      src->decl = copy_node (src->decl);
      DECL_STRUCT_FUNCTION (src->decl) = NULL;
      DECL_ARGUMENTS (src->decl) = NULL;
      DECL_INITIAL (src->decl) = NULL;
      DECL_RESULT (src->decl) = NULL;

      /* Associate the decl state with the new declaration, so the LTO
         streamer can look it up.  */
      state->fn_decl = src->decl;
      slot
        = src->lto_file_data->function_decl_states->find_slot (state, INSERT);
      gcc_assert (!*slot);
      *slot = state;
    }
  src->get_untransformed_body ();
  dst->get_untransformed_body ();
  srccfun = DECL_STRUCT_FUNCTION (src->decl);
  dstcfun = DECL_STRUCT_FUNCTION (dst->decl);
  if (n_basic_blocks_for_fn (srccfun)
      != n_basic_blocks_for_fn (dstcfun))
    {
      if (symtab->dump_file)
        fprintf (symtab->dump_file,
                 "Giving up; number of basic block mismatch.\n");
      match = false;
    }
  else if (last_basic_block_for_fn (srccfun)
           != last_basic_block_for_fn (dstcfun))
    {
      if (symtab->dump_file)
        fprintf (symtab->dump_file,
                 "Giving up; last block mismatch.\n");
      match = false;
    }
  else
    {
      basic_block srcbb, dstbb;
      struct cgraph_edge *e, *e2;

      for (e = dst->callees, e2 = src->callees; e && e2 && match;
           e2 = e2->next_callee, e = e->next_callee)
        {
          if (gimple_bb (e->call_stmt)->index
              != gimple_bb (e2->call_stmt)->index)
            {
              if (symtab->dump_file)
                fprintf (symtab->dump_file,
                         "Giving up; call stmt mismatch.\n");
              match = false;
            }
        }
      if (e || e2)
        {
          if (symtab->dump_file)
            fprintf (symtab->dump_file,
                     "Giving up; number of calls differs.\n");
          match = false;
        }
      for (e = dst->indirect_calls, e2 = src->indirect_calls; e && e2 && match;
           e2 = e2->next_callee, e = e->next_callee)
        {
          if (gimple_bb (e->call_stmt)->index
              != gimple_bb (e2->call_stmt)->index)
            {
              if (symtab->dump_file)
                fprintf (symtab->dump_file,
                         "Giving up; indirect call stmt mismatch.\n");
              match = false;
            }
        }
      if (e || e2)
        {
          if (symtab->dump_file)
            fprintf (symtab->dump_file,
                     "Giving up; number of indirect calls differs.\n");
          match = false;
        }

      if (match)
        FOR_ALL_BB_FN (srcbb, srccfun)
          {
            unsigned int i;

            dstbb = BASIC_BLOCK_FOR_FN (dstcfun, srcbb->index);
            if (dstbb == NULL)
              {
                if (symtab->dump_file)
                  fprintf (symtab->dump_file,
                           "No matching block for bb %i.\n",
                           srcbb->index);
                match = false;
                break;
              }
            if (EDGE_COUNT (srcbb->succs) != EDGE_COUNT (dstbb->succs))
              {
                if (symtab->dump_file)
                  fprintf (symtab->dump_file,
                           "Edge count mismatch for bb %i.\n",
                           srcbb->index);
                match = false;
                break;
              }
            for (i = 0; i < EDGE_COUNT (srcbb->succs); i++)
              {
                edge srce = EDGE_SUCC (srcbb, i);
                edge dste = EDGE_SUCC (dstbb, i);
                if (srce->dest->index != dste->dest->index)
                  {
                    if (symtab->dump_file)
                      fprintf (symtab->dump_file,
                               "Succ edge mismatch for bb %i.\n",
                               srce->dest->index);
                    match = false;
                    break;
                  }
              }
          }
    }
  if (match)
    {
      struct cgraph_edge *e, *e2;
      basic_block srcbb, dstbb;

      /* The function and global profiles may be out of sync.  First scale
         them the same way fixup_cfg would.  */
      profile_count srcnum = src->count;
      profile_count srcden = ENTRY_BLOCK_PTR_FOR_FN (srccfun)->count;
      bool srcscale = srcnum.initialized_p () && !(srcnum == srcden);
      profile_count dstnum = orig_count;
      profile_count dstden = ENTRY_BLOCK_PTR_FOR_FN (dstcfun)->count;
      bool dstscale = !copy_counts
                      && dstnum.initialized_p () && !(dstnum == dstden);

      /* TODO: merge also statement histograms.  */
      FOR_ALL_BB_FN (srcbb, srccfun)
        {
          unsigned int i;

          dstbb = BASIC_BLOCK_FOR_FN (dstcfun, srcbb->index);

          profile_count srccount = srcbb->count;
          if (srcscale)
            srccount = srccount.apply_scale (srcnum, srcden);
          if (dstscale)
            dstbb->count = dstbb->count.apply_scale (dstnum, dstden);

          if (copy_counts)
            {
              dstbb->count = srccount;
              for (i = 0; i < EDGE_COUNT (srcbb->succs); i++)
                {
                  edge srce = EDGE_SUCC (srcbb, i);
                  edge dste = EDGE_SUCC (dstbb, i);
                  if (srce->probability.initialized_p ())
                    dste->probability = srce->probability;
                }
            }
          else
            {
              for (i = 0; i < EDGE_COUNT (srcbb->succs); i++)
                {
                  edge srce = EDGE_SUCC (srcbb, i);
                  edge dste = EDGE_SUCC (dstbb, i);
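                  /* Merge the outgoing probabilities as (roughly) a weighted
                     average of the two edges' probabilities, weighted by the
                     respective block counts relative to the combined count
                     dstbb->count + srccount.  */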
                  dste->probability =
                    dste->probability * dstbb->count.ipa ().probability_in
                                                 (dstbb->count.ipa ()
                                                  + srccount.ipa ())
                    + srce->probability * srcbb->count.ipa ().probability_in
                                                 (dstbb->count.ipa ()
                                                  + srccount.ipa ());
                }
              dstbb->count = dstbb->count.ipa () + srccount.ipa ();
            }
        }
      push_cfun (dstcfun);
      update_max_bb_count ();
      compute_function_frequency ();
      pop_cfun ();
      for (e = dst->callees; e; e = e->next_callee)
        {
          if (e->speculative)
            continue;
          e->count = gimple_bb (e->call_stmt)->count;
        }
      for (e = dst->indirect_calls, e2 = src->indirect_calls; e;
           e2 = (e2 ? e2->next_callee : NULL), e = e->next_callee)
        {
          profile_count count = gimple_bb (e->call_stmt)->count;
          if (copy_counts)
            {
              e->indirect_info->common_target_id
                = e2->indirect_info->common_target_id;
              e->indirect_info->common_target_probability
                = e2->indirect_info->common_target_probability;
            }
          else if (e->indirect_info->common_target_id
                   || e2->indirect_info->common_target_id)
            {
              sreal scale1
                = e->count.ipa ().to_sreal_scale (count);
              sreal scale2
                = e2->count.ipa ().to_sreal_scale (count);

              if (scale1 == 0 && scale2 == 0)
                scale1 = scale2 = 1;
              sreal sum = scale1 + scale2;
              int scaled_probability1
                = ((sreal)e->indirect_info->common_target_probability
                   * scale1 / sum).to_int ();
              int scaled_probability2
                = ((sreal)e2->indirect_info->common_target_probability
                   * scale2 / sum).to_int ();
              if (symtab->dump_file)
                {
                  fprintf (symtab->dump_file,
                           "Merging common targets %i prob %i"
                           " and %i prob %i with scales %f %f\n",
                           e->indirect_info->common_target_id,
                           e->indirect_info->common_target_probability,
                           e2->indirect_info->common_target_id,
                           e2->indirect_info->common_target_probability,
                           scale1.to_double (),
                           scale2.to_double ());
                  fprintf (symtab->dump_file, "Combined BB count ");
                  count.dump (symtab->dump_file);
                  fprintf (symtab->dump_file, " dst edge count ");
                  e->count.dump (symtab->dump_file);
                  fprintf (symtab->dump_file, " src edge count ");
                  e2->count.dump (symtab->dump_file);
                  fprintf (symtab->dump_file, "\n");
                }
              if (e->indirect_info->common_target_id
                  == e2->indirect_info->common_target_id)
                e->indirect_info->common_target_probability
                  = scaled_probability1 + scaled_probability2;
              else if (!e2->indirect_info->common_target_id
                       || scaled_probability1 > scaled_probability2)
                e->indirect_info->common_target_probability
                  = scaled_probability1;
              else
                {
                  e->indirect_info->common_target_id
                    = e2->indirect_info->common_target_id;
                  e->indirect_info->common_target_probability
                    = scaled_probability2;
                }
              if (symtab->dump_file)
                fprintf (symtab->dump_file, "Merged as %i prob %i\n",
                         e->indirect_info->common_target_id,
                         e->indirect_info->common_target_probability);
            }

          /* When the call is speculative, we need to re-distribute the
             probabilities the same way as they were.  This is not really
             correct because in the other copy the speculation may differ;
             but it is probably not worth the effort.  */
          if (e->speculative)
            {
              cgraph_edge *direct, *indirect;
              cgraph_edge *direct2 = NULL, *indirect2 = NULL;
              ipa_ref *ref;

              e->speculative_call_info (direct, indirect, ref);
              gcc_assert (e == indirect);
              if (e2 && e2->speculative)
                e2->speculative_call_info (direct2, indirect2, ref);
              if (indirect->count > profile_count::zero ()
                  || direct->count > profile_count::zero ())
                {
                  /* We should have mismatched earlier if there is no
                     matching indirect edge.  */
                  if (!e2)
                    {
                      if (symtab->dump_file)
                        fprintf (symtab->dump_file,
                                 "Mismatch in merging indirect edges\n");
                    }
                  else if (!e2->speculative)
                    indirect->count += e2->count;
                  else if (e2->speculative)
                    {
                      if (DECL_ASSEMBLER_NAME (direct2->callee->decl)
                          != DECL_ASSEMBLER_NAME (direct->callee->decl))
                        {
                          if (direct2->count >= direct->count)
                            {
                              direct->redirect_callee (direct2->callee);
                              indirect->count += indirect2->count
                                                 + direct->count;
                              direct->count = direct2->count;
                            }
                          else
                            indirect->count += indirect2->count + direct2->count;
                        }
                      else
                        {
                          direct->count += direct2->count;
                          indirect->count += indirect2->count;
                        }
                    }
                }
              else
                /* At the moment we should have only profile feedback based
                   speculations when merging.  */
                gcc_unreachable ();
            }
          else if (e2 && e2->speculative)
            {
              cgraph_edge *direct, *indirect;
              ipa_ref *ref;

              e2->speculative_call_info (direct, indirect, ref);
              e->count = count;
              e->make_speculative (direct->callee, direct->count);
            }
          else
            e->count = count;
        }
      if (!preserve_body)
        src->release_body ();
      /* Update summary.  */
      compute_fn_summary (dst, 0);
    }
  /* We can't update the CFG profile, but we can scale the IPA profile.  The
     CFG will be scaled according to dst->count after IPA passes.  */
  else
    scale_ipa_profile_for_fn (dst, orig_count);
  src->decl = oldsrcdecl;
}

/* Return true if the call to DEST is known to be a self-recursive call
   within FUNC.  */

bool
recursive_call_p (tree func, tree dest)
{
  struct cgraph_node *dest_node = cgraph_node::get_create (dest);
  struct cgraph_node *cnode = cgraph_node::get_create (func);
  ipa_ref *alias;
  enum availability avail;

  gcc_assert (!cnode->alias);
  if (cnode != dest_node->ultimate_alias_target (&avail))
    return false;
  if (avail >= AVAIL_AVAILABLE)
    return true;
  if (!dest_node->semantically_equivalent_p (cnode))
    return false;
  /* If there is only one way to call the function or we know all of them
     are semantically equivalent, we can still consider the call recursive.  */
  FOR_EACH_ALIAS (cnode, alias)
    if (!dest_node->semantically_equivalent_p (alias->referring))
      return false;
  return true;
}
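
/* Illustrative sketch, not part of the original file: a pass looking at a
   call statement STMT in the current function might use recursive_call_p
   as below.  The wrapper name is hypothetical.  */
#if 0
static bool
example_is_self_recursive_call (gimple *stmt)
{
  tree callee = gimple_call_fndecl (stmt);
  return callee != NULL_TREE
         && recursive_call_p (current_function_decl, callee);
}
#endif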