gcc/ipa-utils.c
/* Utilities for ipa analysis.
   Copyright (C) 2005-2019 Free Software Foundation, Inc.
   Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "alloc-pool.h"
#include "cgraph.h"
#include "lto-streamer.h"
#include "dumpfile.h"
#include "splay-tree.h"
#include "ipa-utils.h"
#include "symbol-summary.h"
#include "tree-vrp.h"
#include "ipa-prop.h"
#include "ipa-fnsummary.h"
/* Debugging function for postorder and inorder code.  NOTE is a string
   that is printed before the nodes are printed.  ORDER is an array of
   cgraph_nodes that has COUNT useful nodes in it.  */

void
ipa_print_order (FILE* out,
                 const char * note,
                 struct cgraph_node** order,
                 int count)
{
  int i;
  fprintf (out, "\n\n ordered call graph: %s\n", note);

  for (i = count - 1; i >= 0; i--)
    order[i]->dump (out);
  fprintf (out, "\n");
  fflush (out);
}
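/* Shared state for the recursive searchc walk below: the explicit node
   stack, the output array, and a splay tree (keyed by node uid) of the
   nodes that have not been visited yet.  */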
struct searchc_env {
  struct cgraph_node **stack;
  struct cgraph_node **result;
  int stack_size;
  int order_pos;
  splay_tree nodes_marked_new;
  bool reduce;
  int count;
};
/* This is an implementation of Tarjan's strongly connected region
   finder as reprinted in Aho, Hopcroft and Ullman's The Design and
   Analysis of Computer Algorithms (1974), pages 192-193.  This version
   has been customized for cgraph_nodes.  ENV holds the traversal state
   and would be unnecessary if C had nested functions; it exists only
   because this function is recursive.  This function should only be
   called from itself or from ipa_reduced_postorder.  V is the node to
   start searching from.  */
static void
searchc (struct searchc_env* env, struct cgraph_node *v,
         bool (*ignore_edge) (struct cgraph_edge *))
{
  struct cgraph_edge *edge;
  struct ipa_dfs_info *v_info = (struct ipa_dfs_info *) v->aux;

  /* Mark node as old.  */
  v_info->new_node = false;
  splay_tree_remove (env->nodes_marked_new, v->get_uid ());

  v_info->dfn_number = env->count;
  v_info->low_link = env->count;
  env->count++;
  env->stack[(env->stack_size)++] = v;
  v_info->on_stack = true;

  for (edge = v->callees; edge; edge = edge->next_callee)
    {
      struct ipa_dfs_info * w_info;
      enum availability avail;
      struct cgraph_node *w = edge->callee->ultimate_alias_target (&avail);

      if (!w || (ignore_edge && ignore_edge (edge)))
        continue;

      if (w->aux
          && (avail > AVAIL_INTERPOSABLE
              || avail == AVAIL_INTERPOSABLE))
        {
          w_info = (struct ipa_dfs_info *) w->aux;
          if (w_info->new_node)
            {
              searchc (env, w, ignore_edge);
              v_info->low_link =
                (v_info->low_link < w_info->low_link) ?
                v_info->low_link : w_info->low_link;
            }
          else
            if ((w_info->dfn_number < v_info->dfn_number)
                && (w_info->on_stack))
              v_info->low_link =
                (w_info->dfn_number < v_info->low_link) ?
                w_info->dfn_number : v_info->low_link;
        }
    }
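  /* If V is the root of a strongly connected component, pop its members
     off the stack.  When reducing, chain them through next_cycle behind
     V; otherwise emit each member into the result array directly.  */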
  if (v_info->low_link == v_info->dfn_number)
    {
      struct cgraph_node *last = NULL;
      struct cgraph_node *x;
      struct ipa_dfs_info *x_info;
      do
        {
          x = env->stack[--(env->stack_size)];
          x_info = (struct ipa_dfs_info *) x->aux;
          x_info->on_stack = false;
          x_info->scc_no = v_info->dfn_number;

          if (env->reduce)
            {
              x_info->next_cycle = last;
              last = x;
            }
          else
            env->result[env->order_pos++] = x;
        }
      while (v != x);
      if (env->reduce)
        env->result[env->order_pos++] = v;
    }
}
/* Topsort the call graph by caller relation.  Put the result in ORDER.

   The REDUCE flag is true if you want the cycles reduced to single nodes.
   You can use ipa_get_nodes_in_cycle to obtain a vector containing all real
   call graph nodes in a reduced node.

   Nodes with AVAIL_INTERPOSABLE or better availability are included.
   IGNORE_EDGE, if non-NULL, is a hook that may make some edges insignificant
   for the topological sort.  A usage sketch follows the function body.  */
int
ipa_reduced_postorder (struct cgraph_node **order,
                       bool reduce,
                       bool (*ignore_edge) (struct cgraph_edge *))
{
  struct cgraph_node *node;
  struct searchc_env env;
  splay_tree_node result;
  env.stack = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
  env.stack_size = 0;
  env.result = order;
  env.order_pos = 0;
  env.nodes_marked_new = splay_tree_new (splay_tree_compare_ints, 0, 0);
  env.count = 1;
  env.reduce = reduce;

  FOR_EACH_DEFINED_FUNCTION (node)
    {
      enum availability avail = node->get_availability ();

      if (avail > AVAIL_INTERPOSABLE
          || avail == AVAIL_INTERPOSABLE)
        {
          /* Reuse the info if it is already there.  */
          struct ipa_dfs_info *info = (struct ipa_dfs_info *) node->aux;
          if (!info)
            info = XCNEW (struct ipa_dfs_info);
          info->new_node = true;
          info->on_stack = false;
          info->next_cycle = NULL;
          node->aux = info;

          splay_tree_insert (env.nodes_marked_new,
                             (splay_tree_key)node->get_uid (),
                             (splay_tree_value)node);
        }
      else
        node->aux = NULL;
    }
  result = splay_tree_min (env.nodes_marked_new);
  while (result)
    {
      node = (struct cgraph_node *)result->value;
      searchc (&env, node, ignore_edge);
      result = splay_tree_min (env.nodes_marked_new);
    }
  splay_tree_delete (env.nodes_marked_new);
  free (env.stack);

  return env.order_pos;
}
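/* A minimal usage sketch for the postorder API above (a hypothetical
   caller, not part of this file; IPA passes such as ipa-pure-const
   follow this pattern):

     struct cgraph_node **order
       = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
     int n = ipa_reduced_postorder (order, true, NULL);
     for (int i = 0; i < n; i++)
       {
         vec<cgraph_node *> cycle = ipa_get_nodes_in_cycle (order[i]);
         ... process the members of this SCC ...
         cycle.release ();
       }
     ipa_free_postorder_info ();
     free (order);  */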
/* Deallocate all ipa_dfs_info structures pointed to by the aux pointer of call
   graph nodes.  */

void
ipa_free_postorder_info (void)
{
  struct cgraph_node *node;
  FOR_EACH_DEFINED_FUNCTION (node)
    {
      /* Get rid of the aux information.  */
      if (node->aux)
        {
          free (node->aux);
          node->aux = NULL;
        }
    }
}
/* Get the set of nodes for the cycle in the reduced call graph starting
   from NODE.  */

vec<cgraph_node *>
ipa_get_nodes_in_cycle (struct cgraph_node *node)
{
  vec<cgraph_node *> v = vNULL;
  struct ipa_dfs_info *node_dfs_info;
  while (node)
    {
      v.safe_push (node);
      node_dfs_info = (struct ipa_dfs_info *) node->aux;
      node = node_dfs_info->next_cycle;
    }
  return v;
}
/* Return true iff the CS is an edge within a strongly connected component as
   computed by ipa_reduced_postorder.  */

bool
ipa_edge_within_scc (struct cgraph_edge *cs)
{
  struct ipa_dfs_info *caller_dfs = (struct ipa_dfs_info *) cs->caller->aux;
  struct ipa_dfs_info *callee_dfs;
  struct cgraph_node *callee = cs->callee->function_symbol ();

  callee_dfs = (struct ipa_dfs_info *) callee->aux;
  return (caller_dfs
          && callee_dfs
          && caller_dfs->scc_no == callee_dfs->scc_no);
}
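/* One entry of the explicit DFS stack used by ipa_reverse_postorder:
   the node being visited together with the iteration state over its
   callers and its referring aliases.  */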
struct postorder_stack
{
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  int ref;
};
/* Fill array order with all nodes with output flag set in the reverse
   topological order.  Return the number of elements in the array.
   FIXME: While walking, consider aliases, too.  */

int
ipa_reverse_postorder (struct cgraph_node **order)
{
  struct cgraph_node *node, *node2;
  int stack_size = 0;
  int order_pos = 0;
  struct cgraph_edge *edge;
  int pass;
  struct ipa_ref *ref = NULL;

  struct postorder_stack *stack =
    XCNEWVEC (struct postorder_stack, symtab->cgraph_count);

  /* We have to deal with cycles nicely, so use a depth first traversal
     output algorithm.  Ignore the fact that some functions won't need
     to be output and put them into order as well, so we get dependencies
     right through inline functions.  */
  FOR_EACH_FUNCTION (node)
    node->aux = NULL;
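  /* Two passes: the first seeds the DFS only at nodes that satisfy the
     condition below (not address-taken, not inline clones, aliases or
     thunks, and not only called directly); the second pass picks up
     every node not yet visited, e.g. those reachable only from cycles.  */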
  for (pass = 0; pass < 2; pass++)
    FOR_EACH_FUNCTION (node)
      if (!node->aux
          && (pass
              || (!node->address_taken
                  && !node->global.inlined_to
                  && !node->alias && !node->thunk.thunk_p
                  && !node->only_called_directly_p ())))
        {
          stack_size = 0;
          stack[stack_size].node = node;
          stack[stack_size].edge = node->callers;
          stack[stack_size].ref = 0;
          node->aux = (void *)(size_t)1;
          while (stack_size >= 0)
            {
              while (true)
                {
                  node2 = NULL;
                  while (stack[stack_size].edge && !node2)
                    {
                      edge = stack[stack_size].edge;
                      node2 = edge->caller;
                      stack[stack_size].edge = edge->next_caller;
                      /* Break possible cycles involving always-inline
                         functions by ignoring edges from always-inline
                         functions to non-always-inline functions.  */
                      if (DECL_DISREGARD_INLINE_LIMITS (edge->caller->decl)
                          && !DECL_DISREGARD_INLINE_LIMITS
                                (edge->callee->function_symbol ()->decl))
                        node2 = NULL;
                    }
                  for (; stack[stack_size].node->iterate_referring (
                                                   stack[stack_size].ref,
                                                   ref) && !node2;
                       stack[stack_size].ref++)
                    {
                      if (ref->use == IPA_REF_ALIAS)
                        node2 = dyn_cast <cgraph_node *> (ref->referring);
                    }
                  if (!node2)
                    break;
                  if (!node2->aux)
                    {
                      stack[++stack_size].node = node2;
                      stack[stack_size].edge = node2->callers;
                      stack[stack_size].ref = 0;
                      node2->aux = (void *)(size_t)1;
                    }
                }
              order[order_pos++] = stack[stack_size--].node;
            }
        }
  free (stack);
  FOR_EACH_FUNCTION (node)
    node->aux = NULL;
  return order_pos;
}
/* Given a memory reference T, will return the variable at the bottom
   of the access.  Unlike get_base_address, this will recurse through
   INDIRECT_REFS.  */

tree
get_base_var (tree t)
{
  while (!SSA_VAR_P (t)
         && (!CONSTANT_CLASS_P (t))
         && TREE_CODE (t) != LABEL_DECL
         && TREE_CODE (t) != FUNCTION_DECL
         && TREE_CODE (t) != CONST_DECL
         && TREE_CODE (t) != CONSTRUCTOR)
    {
      t = TREE_OPERAND (t, 0);
    }
  return t;
}
/* Scale the counts of the calls in NODE by the ratio NODE->count/ORIG_COUNT,
   bringing the edge counts in line with the node's updated count.  */

void
scale_ipa_profile_for_fn (struct cgraph_node *node, profile_count orig_count)
{
  profile_count to = node->count;
  profile_count::adjust_for_ipa_scaling (&to, &orig_count);
  struct cgraph_edge *e;

  for (e = node->callees; e; e = e->next_callee)
    e->count = e->count.apply_scale (to, orig_count);
  for (e = node->indirect_calls; e; e = e->next_callee)
    e->count = e->count.apply_scale (to, orig_count);
}
/* SRC and DST are going to be merged.  Take SRC's profile and merge it into
   DST so it is not going to be lost.  Possibly destroy SRC's body on the way
   unless PRESERVE_BODY is set.  */

void
ipa_merge_profiles (struct cgraph_node *dst,
                    struct cgraph_node *src,
                    bool preserve_body)
{
  tree oldsrcdecl = src->decl;
  struct function *srccfun, *dstcfun;
  bool match = true;

  if (!src->definition
      || !dst->definition)
    return;

  if (src->frequency < dst->frequency)
    src->frequency = dst->frequency;

  /* Time profiles are merged.  */
  if (dst->tp_first_run > src->tp_first_run && src->tp_first_run)
    dst->tp_first_run = src->tp_first_run;

  if (src->profile_id && !dst->profile_id)
    dst->profile_id = src->profile_id;

  /* Merging zero profile to dst is no-op.  */
  if (src->count.ipa () == profile_count::zero ())
    return;

  /* FIXME when we merge in unknown profile, we ought to set counts as
     unsafe.  */
  if (!src->count.initialized_p ()
      || !(src->count.ipa () == src->count))
    return;
  if (symtab->dump_file)
    {
      fprintf (symtab->dump_file, "Merging profiles of %s to %s\n",
               src->dump_name (), dst->dump_name ());
    }
  profile_count orig_count = dst->count;

  if (dst->count.initialized_p () && dst->count.ipa () == dst->count)
    dst->count += src->count.ipa ();
  else
    dst->count = src->count.ipa ();

  /* First handle functions with no gimple body.  */
  if (dst->thunk.thunk_p || dst->alias
      || src->thunk.thunk_p || src->alias)
    {
      scale_ipa_profile_for_fn (dst, orig_count);
      return;
    }
  /* This is ugly.  We need to get both function bodies into memory.
     If declaration is merged, we need to duplicate it to be able
     to load body that is being replaced.  This makes symbol table
     temporarily inconsistent.  */
  if (src->decl == dst->decl)
    {
      struct lto_in_decl_state temp;
      struct lto_in_decl_state *state;

      /* We are going to move the decl, so we want to remove its file decl
         data and link it with the new decl.  */
      temp.fn_decl = src->decl;
      lto_in_decl_state **slot
        = src->lto_file_data->function_decl_states->find_slot (&temp,
                                                               NO_INSERT);
      state = *slot;
      src->lto_file_data->function_decl_states->clear_slot (slot);
      gcc_assert (state);

      /* Duplicate the decl and be sure it does not link into body of DST.  */
      src->decl = copy_node (src->decl);
      DECL_STRUCT_FUNCTION (src->decl) = NULL;
      DECL_ARGUMENTS (src->decl) = NULL;
      DECL_INITIAL (src->decl) = NULL;
      DECL_RESULT (src->decl) = NULL;

      /* Associate the decl state with new declaration, so LTO streamer
         can look it up.  */
      state->fn_decl = src->decl;
      slot
        = src->lto_file_data->function_decl_states->find_slot (state, INSERT);
      gcc_assert (!*slot);
      *slot = state;
    }
  src->get_untransformed_body ();
  dst->get_untransformed_body ();
  srccfun = DECL_STRUCT_FUNCTION (src->decl);
  dstcfun = DECL_STRUCT_FUNCTION (dst->decl);
  if (n_basic_blocks_for_fn (srccfun)
      != n_basic_blocks_for_fn (dstcfun))
    {
      if (symtab->dump_file)
        fprintf (symtab->dump_file,
                 "Giving up; number of basic blocks mismatch.\n");
      match = false;
    }
  else if (last_basic_block_for_fn (srccfun)
           != last_basic_block_for_fn (dstcfun))
    {
      if (symtab->dump_file)
        fprintf (symtab->dump_file,
                 "Giving up; last block mismatch.\n");
      match = false;
    }
  else
    {
      basic_block srcbb, dstbb;

      FOR_ALL_BB_FN (srcbb, srccfun)
        {
          unsigned int i;

          dstbb = BASIC_BLOCK_FOR_FN (dstcfun, srcbb->index);
          if (dstbb == NULL)
            {
              if (symtab->dump_file)
                fprintf (symtab->dump_file,
                         "No matching block for bb %i.\n",
                         srcbb->index);
              match = false;
              break;
            }
          if (EDGE_COUNT (srcbb->succs) != EDGE_COUNT (dstbb->succs))
            {
              if (symtab->dump_file)
                fprintf (symtab->dump_file,
                         "Edge count mismatch for bb %i.\n",
                         srcbb->index);
              match = false;
              break;
            }
          for (i = 0; i < EDGE_COUNT (srcbb->succs); i++)
            {
              edge srce = EDGE_SUCC (srcbb, i);
              edge dste = EDGE_SUCC (dstbb, i);
              if (srce->dest->index != dste->dest->index)
                {
                  if (symtab->dump_file)
                    fprintf (symtab->dump_file,
                             "Succ edge mismatch for bb %i.\n",
                             srce->dest->index);
                  match = false;
                  break;
                }
            }
        }
    }
  if (match)
    {
      struct cgraph_edge *e, *e2;
      basic_block srcbb, dstbb;

      /* TODO: merge also statement histograms.  */
      FOR_ALL_BB_FN (srcbb, srccfun)
        {
          unsigned int i;

          dstbb = BASIC_BLOCK_FOR_FN (dstcfun, srcbb->index);

          /* Either sum the profiles if both are IPA and not global0, or
             pick the more informative one (that is nonzero IPA if the other
             is uninitialized, guessed or global0).  */
          if (!dstbb->count.ipa ().initialized_p ()
              || (dstbb->count.ipa () == profile_count::zero ()
                  && (srcbb->count.ipa ().initialized_p ()
                      && !(srcbb->count.ipa () == profile_count::zero ()))))
            {
              dstbb->count = srcbb->count;
              for (i = 0; i < EDGE_COUNT (srcbb->succs); i++)
                {
                  edge srce = EDGE_SUCC (srcbb, i);
                  edge dste = EDGE_SUCC (dstbb, i);
                  if (srce->probability.initialized_p ())
                    dste->probability = srce->probability;
                }
            }
          else if (srcbb->count.ipa ().initialized_p ()
                   && !(srcbb->count.ipa () == profile_count::zero ()))
            {
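              /* Both blocks carry nonzero IPA counts, so combine the
                 outgoing edge probabilities as an average weighted by the
                 respective block counts.  For example (hypothetical
                 numbers): if DSTBB has count 300 with branch probability
                 50% and SRCBB has count 100 with probability 90%, the
                 merged probability is 0.5 * 300/400 + 0.9 * 100/400 = 60%.  */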
              for (i = 0; i < EDGE_COUNT (srcbb->succs); i++)
                {
                  edge srce = EDGE_SUCC (srcbb, i);
                  edge dste = EDGE_SUCC (dstbb, i);
                  dste->probability =
                    dste->probability
                      * dstbb->count.probability_in (dstbb->count
                                                     + srcbb->count)
                    + srce->probability
                      * srcbb->count.probability_in (dstbb->count
                                                     + srcbb->count);
                }
              dstbb->count += srcbb->count;
            }
        }
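      /* Recompute the cached maximum BB count and the function frequency
         classification from the merged counts.  */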
      push_cfun (dstcfun);
      update_max_bb_count ();
      compute_function_frequency ();
      pop_cfun ();
      for (e = dst->callees; e; e = e->next_callee)
        {
          if (e->speculative)
            continue;
          e->count = gimple_bb (e->call_stmt)->count;
        }
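      /* Indirect edges are walked in parallel with SRC's indirect-call list
         so that the counts of matching (possibly speculative) edges can be
         combined edge by edge.  */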
      for (e = dst->indirect_calls, e2 = src->indirect_calls; e;
           e2 = (e2 ? e2->next_callee : NULL), e = e->next_callee)
        {
          profile_count count = gimple_bb (e->call_stmt)->count;
          /* When the call is speculative, we need to re-distribute
             probabilities the same way as they were.  This is not really
             correct because in the other copy the speculation may differ;
             but probably it is not really worth the effort.  */
          if (e->speculative)
            {
              cgraph_edge *direct, *indirect;
              cgraph_edge *direct2 = NULL, *indirect2 = NULL;
              ipa_ref *ref;

              e->speculative_call_info (direct, indirect, ref);
              gcc_assert (e == indirect);
              if (e2 && e2->speculative)
                e2->speculative_call_info (direct2, indirect2, ref);
              if (indirect->count > profile_count::zero ()
                  || direct->count > profile_count::zero ())
                {
                  /* We should mismatch earlier if there is no matching
                     indirect edge.  */
                  if (!e2)
                    {
                      if (dump_file)
                        fprintf (dump_file,
                                 "Mismatch in merging indirect edges\n");
                    }
                  else if (!e2->speculative)
                    indirect->count += e2->count;
                  else if (e2->speculative)
                    {
                      if (DECL_ASSEMBLER_NAME (direct2->callee->decl)
                          != DECL_ASSEMBLER_NAME (direct->callee->decl))
                        {
                          if (direct2->count >= direct->count)
                            {
                              direct->redirect_callee (direct2->callee);
                              indirect->count += indirect2->count
                                                 + direct->count;
                              direct->count = direct2->count;
                            }
                          else
                            indirect->count += indirect2->count
                                               + direct2->count;
                        }
                      else
                        {
                          direct->count += direct2->count;
                          indirect->count += indirect2->count;
                        }
                    }
                }
              else
                /* At the moment we should have only profile feedback based
                   speculations when merging.  */
                gcc_unreachable ();
            }
          else if (e2 && e2->speculative)
            {
              cgraph_edge *direct, *indirect;
              ipa_ref *ref;

              e2->speculative_call_info (direct, indirect, ref);
              e->count = count;
              e->make_speculative (direct->callee, direct->count);
            }
          else
            e->count = count;
        }
      if (!preserve_body)
        src->release_body ();
      /* Update summary.  */
      compute_fn_summary (dst, 0);
    }
  /* We can't update CFG profile, but we can scale IPA profile.  CFG
     will be scaled according to dst->count after IPA passes.  */
  else
    scale_ipa_profile_for_fn (dst, orig_count);
  src->decl = oldsrcdecl;
}
/* Return true if the call to DEST is known to be a self-recursive call
   within FUNC.  */

bool
recursive_call_p (tree func, tree dest)
{
  struct cgraph_node *dest_node = cgraph_node::get_create (dest);
  struct cgraph_node *cnode = cgraph_node::get_create (func);
  ipa_ref *alias;
  enum availability avail;

  gcc_assert (!cnode->alias);
  if (cnode != dest_node->ultimate_alias_target (&avail))
    return false;
  if (avail >= AVAIL_AVAILABLE)
    return true;
  if (!dest_node->semantically_equivalent_p (cnode))
    return false;
  /* If there is only one way to call the function or we know all of them
     are semantically equivalent, we still can consider the call recursive.  */
  FOR_EACH_ALIAS (cnode, alias)
    if (!dest_node->semantically_equivalent_p (alias->referring))
      return false;
  return true;
}