gcc/ipa-inline-analysis.c
1 /* Inlining decision heuristics.
2 Copyright (C) 2003-2016 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* Analysis used by the inliner and other passes limiting code size growth.
23 We estimate for each function
24 - function body size
25 - average function execution time
26 - inlining size benefit (that is how much of function body size
27 and its call sequence is expected to disappear by inlining)
28 - inlining time benefit
29 - function frame size
30 For each call
31 - call statement size and time
33 inline_summary data structures store the above information locally (i.e.
34 parameters of the function itself) and globally (i.e. parameters of
35 the function created by applying all the inline decisions already
36 present in the callgraph).
38 We provide an accessor to the inline_summary data structure and
39 basic logic updating the parameters when inlining is performed.
41 The summaries are context sensitive. Context means
42 1) partial assignment of known constant values of operands
43 2) whether function is inlined into the call or not.
44 It is easy to add more variants. To represent function size and time
45 that depends on context (i.e. it is known to be optimized away when
46 context is known either by inlining or from IP-CP and cloning),
47 we use predicates. Predicates are logical formulas in
48 conjunctive-disjunctive form consisting of clauses. Clauses are bitmaps
49 specifying what conditions must be true. Conditions are simple tests
50 of the form described above.
52 In order to make a predicate (possibly) true, all of its clauses must
53 be (possibly) true. To make a clause (possibly) true, one of the conditions
54 it mentions must be (possibly) true. There are fixed bounds on the
55 number of clauses and conditions, and all the manipulation functions
56 are conservative in the positive direction, i.e. we may lose precision
57 by thinking that a predicate may be true even when it is not.
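   As an illustration (a sketch added for exposition, not a quote from the
   code below), the predicate

     (op0 changed || not inlined) && (op1 == 4)

   would be stored as two clauses, roughly

     p.clause[0] = (1 << cond_op0_changed)
                   | (1 << predicate_not_inlined_condition);
     p.clause[1] = (1 << cond_op1_eq_4);
     p.clause[2] = 0;

   with a zero clause terminating the list (a predicate whose clause[0] is 0
   is the tautology), and where cond_op0_changed and cond_op1_eq_4 stand for
   hypothetical condition indices >= predicate_first_dynamic_condition that
   add_condition would hand out.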
59 estimate_edge_size and estimate_edge_growth can be used to query
60 function size/time in the given context. inline_merge_summary merges
61 properties of caller and callee after inlining.
63 Finally pass_inline_parameters is exported. This is used to drive
64 computation of function parameters used by the early inliner. IPA
65 inliner performs analysis via its analyze_function method. */
67 #include "config.h"
68 #include "system.h"
69 #include "coretypes.h"
70 #include "backend.h"
71 #include "tree.h"
72 #include "gimple.h"
73 #include "alloc-pool.h"
74 #include "tree-pass.h"
75 #include "ssa.h"
76 #include "tree-streamer.h"
77 #include "cgraph.h"
78 #include "diagnostic.h"
79 #include "fold-const.h"
80 #include "print-tree.h"
81 #include "tree-inline.h"
82 #include "gimple-pretty-print.h"
83 #include "params.h"
84 #include "cfganal.h"
85 #include "gimple-iterator.h"
86 #include "tree-cfg.h"
87 #include "tree-ssa-loop-niter.h"
88 #include "tree-ssa-loop.h"
89 #include "symbol-summary.h"
90 #include "ipa-prop.h"
91 #include "ipa-inline.h"
92 #include "cfgloop.h"
93 #include "tree-scalar-evolution.h"
94 #include "ipa-utils.h"
95 #include "cilk.h"
96 #include "cfgexpand.h"
97 #include "gimplify.h"
99 /* The estimated runtime of a function can easily run into huge numbers with many
100 nested loops. Be sure we can compute time * INLINE_SIZE_SCALE * 2 in an
101 integer. For anything larger we use gcov_type. */
102 #define MAX_TIME 500000
104 /* Number of bits in an integer, but we really want this to be stable across different
105 hosts. */
106 #define NUM_CONDITIONS 32
108 enum predicate_conditions
110 predicate_false_condition = 0,
111 predicate_not_inlined_condition = 1,
112 predicate_first_dynamic_condition = 2
115 /* Special condition code we use to represent the test that an operand is a compile time
116 constant. */
117 #define IS_NOT_CONSTANT ERROR_MARK
118 /* Special condition code we use to represent the test that an operand is not changed
119 across invocations of the function. When an operand is IS_NOT_CONSTANT it is always
120 CHANGED; however, e.g. loop invariants can be NOT_CHANGED for a given percentage
121 of executions even when they are not compile time constants. */
122 #define CHANGED IDENTIFIER_NODE
124 /* Holders of ipa cgraph hooks: */
125 static struct cgraph_2edge_hook_list *edge_duplication_hook_holder;
126 static struct cgraph_edge_hook_list *edge_removal_hook_holder;
127 static void inline_edge_removal_hook (struct cgraph_edge *, void *);
128 static void inline_edge_duplication_hook (struct cgraph_edge *,
129 struct cgraph_edge *, void *);
131 /* VECtor holding inline summaries.
132 In GGC memory because conditions might point to constant trees. */
133 function_summary <inline_summary *> *inline_summaries;
134 vec<inline_edge_summary_t> inline_edge_summary_vec;
136 /* Cached node/edge growths. */
137 vec<edge_growth_cache_entry> edge_growth_cache;
139 /* Edge predicates go here. */
140 static object_allocator<predicate> edge_predicate_pool ("edge predicates");
142 /* Return the true predicate (tautology).
143 We represent it by an empty list of clauses. */
145 static inline struct predicate
146 true_predicate (void)
148 struct predicate p;
149 p.clause[0] = 0;
150 return p;
154 /* Return predicate testing single condition number COND. */
156 static inline struct predicate
157 single_cond_predicate (int cond)
159 struct predicate p;
160 p.clause[0] = 1 << cond;
161 p.clause[1] = 0;
162 return p;
166 /* Return the false predicate. The first clause requires the false condition. */
168 static inline struct predicate
169 false_predicate (void)
171 return single_cond_predicate (predicate_false_condition);
175 /* Return true if P is (true). */
177 static inline bool
178 true_predicate_p (struct predicate *p)
180 return !p->clause[0];
184 /* Return true if P is (false). */
186 static inline bool
187 false_predicate_p (struct predicate *p)
189 if (p->clause[0] == (1 << predicate_false_condition))
191 gcc_checking_assert (!p->clause[1]
192 && p->clause[0] == 1 << predicate_false_condition);
193 return true;
195 return false;
199 /* Return predicate that is set true when function is not inlined. */
201 static inline struct predicate
202 not_inlined_predicate (void)
204 return single_cond_predicate (predicate_not_inlined_condition);
207 /* Simple description of whether a memory load or a condition refers to a load
208 from an aggregate and if so, how and where from in the aggregate.
209 Individual fields have the same meaning as fields with the same name in
210 struct condition. */
212 struct agg_position_info
214 HOST_WIDE_INT offset;
215 bool agg_contents;
216 bool by_ref;
219 /* Add condition to condition list SUMMARY. OPERAND_NUM, SIZE, CODE and VAL
220 correspond to fields of condition structure. AGGPOS describes whether the
221 used operand is loaded from an aggregate and where in the aggregate it is.
222 It can be NULL, which means this is not a load from an aggregate. */
224 static struct predicate
225 add_condition (struct inline_summary *summary, int operand_num,
226 HOST_WIDE_INT size, struct agg_position_info *aggpos,
227 enum tree_code code, tree val)
229 int i;
230 struct condition *c;
231 struct condition new_cond;
232 HOST_WIDE_INT offset;
233 bool agg_contents, by_ref;
235 if (aggpos)
237 offset = aggpos->offset;
238 agg_contents = aggpos->agg_contents;
239 by_ref = aggpos->by_ref;
241 else
243 offset = 0;
244 agg_contents = false;
245 by_ref = false;
248 gcc_checking_assert (operand_num >= 0);
249 for (i = 0; vec_safe_iterate (summary->conds, i, &c); i++)
251 if (c->operand_num == operand_num
252 && c->size == size
253 && c->code == code
254 && c->val == val
255 && c->agg_contents == agg_contents
256 && (!agg_contents || (c->offset == offset && c->by_ref == by_ref)))
257 return single_cond_predicate (i + predicate_first_dynamic_condition);
259 /* Too many conditions. Give up and return constant true. */
260 if (i == NUM_CONDITIONS - predicate_first_dynamic_condition)
261 return true_predicate ();
263 new_cond.operand_num = operand_num;
264 new_cond.code = code;
265 new_cond.val = val;
266 new_cond.agg_contents = agg_contents;
267 new_cond.by_ref = by_ref;
268 new_cond.offset = offset;
269 new_cond.size = size;
270 vec_safe_push (summary->conds, new_cond);
271 return single_cond_predicate (i + predicate_first_dynamic_condition);
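/* Usage sketch (hypothetical, added for illustration; not part of the
   original sources): building a predicate that parameter 0, a 32-bit
   value, equals 4 could look like

     tree val = build_int_cst (integer_type_node, 4);
     struct predicate p = add_condition (summary, 0, 32, NULL, EQ_EXPR, val);

   where SUMMARY is the function's inline_summary.  An identical condition
   already present in summary->conds is reused; once NUM_CONDITIONS is
   exhausted the function conservatively returns the true predicate.  */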
275 /* Add clause CLAUSE into the predicate P. */
277 static inline void
278 add_clause (conditions conditions, struct predicate *p, clause_t clause)
280 int i;
281 int i2;
282 int insert_here = -1;
283 int c1, c2;
285 /* True clause. */
286 if (!clause)
287 return;
289 /* False clause makes the whole predicate false. Kill the other variants. */
290 if (clause == (1 << predicate_false_condition))
292 p->clause[0] = (1 << predicate_false_condition);
293 p->clause[1] = 0;
294 return;
296 if (false_predicate_p (p))
297 return;
299 /* No one should be silly enough to add false into nontrivial clauses. */
300 gcc_checking_assert (!(clause & (1 << predicate_false_condition)));
302 /* Look where to insert the clause. At the same time prune out
303 clauses of P that are implied by the new clause and thus
304 redundant. */
305 for (i = 0, i2 = 0; i <= MAX_CLAUSES; i++)
307 p->clause[i2] = p->clause[i];
309 if (!p->clause[i])
310 break;
312 /* If p->clause[i] implies clause, there is nothing to add. */
313 if ((p->clause[i] & clause) == p->clause[i])
315 /* We had nothing to add, none of clauses should've become
316 redundant. */
317 gcc_checking_assert (i == i2);
318 return;
321 if (p->clause[i] < clause && insert_here < 0)
322 insert_here = i2;
324 /* If clause implies p->clause[i], then p->clause[i] becomes redundant.
325 Otherwise the p->clause[i] has to stay. */
326 if ((p->clause[i] & clause) != clause)
327 i2++;
330 /* Look for clauses that are obviously true. I.e.
331 op0 == 5 || op0 != 5. */
332 for (c1 = predicate_first_dynamic_condition; c1 < NUM_CONDITIONS; c1++)
334 condition *cc1;
335 if (!(clause & (1 << c1)))
336 continue;
337 cc1 = &(*conditions)[c1 - predicate_first_dynamic_condition];
338 /* We have no way to represent !CHANGED and !IS_NOT_CONSTANT
339 and thus there is no point in looking for them. */
340 if (cc1->code == CHANGED || cc1->code == IS_NOT_CONSTANT)
341 continue;
342 for (c2 = c1 + 1; c2 < NUM_CONDITIONS; c2++)
343 if (clause & (1 << c2))
345 condition *cc1 =
346 &(*conditions)[c1 - predicate_first_dynamic_condition];
347 condition *cc2 =
348 &(*conditions)[c2 - predicate_first_dynamic_condition];
349 if (cc1->operand_num == cc2->operand_num
350 && cc1->val == cc2->val
351 && cc2->code != IS_NOT_CONSTANT
352 && cc2->code != CHANGED
353 && cc1->code == invert_tree_comparison (cc2->code,
354 HONOR_NANS (cc1->val)))
355 return;
360 /* We ran out of variants. Be conservative in the positive direction. */
361 if (i2 == MAX_CLAUSES)
362 return;
363 /* Keep clauses in decreasing order. This makes equivalence testing easy. */
364 p->clause[i2 + 1] = 0;
365 if (insert_here >= 0)
366 for (; i2 > insert_here; i2--)
367 p->clause[i2] = p->clause[i2 - 1];
368 else
369 insert_here = i2;
370 p->clause[insert_here] = clause;
374 /* Return P & P2. */
376 static struct predicate
377 and_predicates (conditions conditions,
378 struct predicate *p, struct predicate *p2)
380 struct predicate out = *p;
381 int i;
383 /* Avoid busy work. */
384 if (false_predicate_p (p2) || true_predicate_p (p))
385 return *p2;
386 if (false_predicate_p (p) || true_predicate_p (p2))
387 return *p;
389 /* See how far predicates match. */
390 for (i = 0; p->clause[i] && p->clause[i] == p2->clause[i]; i++)
392 gcc_checking_assert (i < MAX_CLAUSES);
395 /* Combine the rest of the predicates. */
396 for (; p2->clause[i]; i++)
398 gcc_checking_assert (i < MAX_CLAUSES);
399 add_clause (conditions, &out, p2->clause[i]);
401 return out;
405 /* Return true if predicates are obviously equal. */
407 static inline bool
408 predicates_equal_p (struct predicate *p, struct predicate *p2)
410 int i;
411 for (i = 0; p->clause[i]; i++)
413 gcc_checking_assert (i < MAX_CLAUSES);
414 gcc_checking_assert (p->clause[i] > p->clause[i + 1]);
415 gcc_checking_assert (!p2->clause[i]
416 || p2->clause[i] > p2->clause[i + 1]);
417 if (p->clause[i] != p2->clause[i])
418 return false;
420 return !p2->clause[i];
424 /* Return P | P2. */
426 static struct predicate
427 or_predicates (conditions conditions,
428 struct predicate *p, struct predicate *p2)
430 struct predicate out = true_predicate ();
431 int i, j;
433 /* Avoid busy work. */
434 if (false_predicate_p (p2) || true_predicate_p (p))
435 return *p;
436 if (false_predicate_p (p) || true_predicate_p (p2))
437 return *p2;
438 if (predicates_equal_p (p, p2))
439 return *p;
441 /* OK, combine the predicates. */
442 for (i = 0; p->clause[i]; i++)
443 for (j = 0; p2->clause[j]; j++)
445 gcc_checking_assert (i < MAX_CLAUSES && j < MAX_CLAUSES);
446 add_clause (conditions, &out, p->clause[i] | p2->clause[j]);
448 return out;
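/* Illustration (not part of the original sources): predicate combination
   distributes over the clause lists.  With P = (c1) && (c2) and P2 = (c3),
   and_predicates simply appends the clauses, giving (c1) && (c2) && (c3),
   while or_predicates forms the pairwise unions, giving
   (c1 || c3) && (c2 || c3); add_clause prunes clauses that become redundant
   and drops clauses that are obviously true along the way.  */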
452 /* Given the partial truth assignment in POSSIBLE_TRUTHS, return false
453 if predicate P is known to be false. */
455 static bool
456 evaluate_predicate (struct predicate *p, clause_t possible_truths)
458 int i;
460 /* True remains true. */
461 if (true_predicate_p (p))
462 return true;
464 gcc_assert (!(possible_truths & (1 << predicate_false_condition)));
466 /* See if we can find a clause we can disprove. */
467 for (i = 0; p->clause[i]; i++)
469 gcc_checking_assert (i < MAX_CLAUSES);
470 if (!(p->clause[i] & possible_truths))
471 return false;
473 return true;
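/* Usage sketch (hypothetical, for illustration): with a clause of possible
   truths such as

     clause_t possible = (1 << predicate_not_inlined_condition)
                         | (1 << predicate_first_dynamic_condition);

   evaluate_predicate (&p, possible) returns false exactly when some clause
   of P shares no bit with POSSIBLE, i.e. none of the conditions that clause
   mentions can be true in this context.  */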
476 /* Return the probability in range 0...REG_BR_PROB_BASE that the predicated
477 instruction will be recomputed per invocation of the inlined call. */
479 static int
480 predicate_probability (conditions conds,
481 struct predicate *p, clause_t possible_truths,
482 vec<inline_param_summary> inline_param_summary)
484 int i;
485 int combined_prob = REG_BR_PROB_BASE;
487 /* True remains true. */
488 if (true_predicate_p (p))
489 return REG_BR_PROB_BASE;
491 if (false_predicate_p (p))
492 return 0;
494 gcc_assert (!(possible_truths & (1 << predicate_false_condition)));
496 /* See if we can find a clause we can disprove. */
497 for (i = 0; p->clause[i]; i++)
499 gcc_checking_assert (i < MAX_CLAUSES);
500 if (!(p->clause[i] & possible_truths))
501 return 0;
502 else
504 int this_prob = 0;
505 int i2;
506 if (!inline_param_summary.exists ())
507 return REG_BR_PROB_BASE;
508 for (i2 = 0; i2 < NUM_CONDITIONS; i2++)
509 if ((p->clause[i] & possible_truths) & (1 << i2))
511 if (i2 >= predicate_first_dynamic_condition)
513 condition *c =
514 &(*conds)[i2 - predicate_first_dynamic_condition];
515 if (c->code == CHANGED
516 && (c->operand_num <
517 (int) inline_param_summary.length ()))
519 int iprob =
520 inline_param_summary[c->operand_num].change_prob;
521 this_prob = MAX (this_prob, iprob);
523 else
524 this_prob = REG_BR_PROB_BASE;
526 else
527 this_prob = REG_BR_PROB_BASE;
529 combined_prob = MIN (this_prob, combined_prob);
530 if (!combined_prob)
531 return 0;
534 return combined_prob;
538 /* Dump condition COND. */
540 static void
541 dump_condition (FILE *f, conditions conditions, int cond)
543 condition *c;
544 if (cond == predicate_false_condition)
545 fprintf (f, "false");
546 else if (cond == predicate_not_inlined_condition)
547 fprintf (f, "not inlined");
548 else
550 c = &(*conditions)[cond - predicate_first_dynamic_condition];
551 fprintf (f, "op%i", c->operand_num);
552 if (c->agg_contents)
553 fprintf (f, "[%soffset: " HOST_WIDE_INT_PRINT_DEC "]",
554 c->by_ref ? "ref " : "", c->offset);
555 if (c->code == IS_NOT_CONSTANT)
557 fprintf (f, " not constant");
558 return;
560 if (c->code == CHANGED)
562 fprintf (f, " changed");
563 return;
565 fprintf (f, " %s ", op_symbol_code (c->code));
566 print_generic_expr (f, c->val, 1);
571 /* Dump clause CLAUSE. */
573 static void
574 dump_clause (FILE *f, conditions conds, clause_t clause)
576 int i;
577 bool found = false;
578 fprintf (f, "(");
579 if (!clause)
580 fprintf (f, "true");
581 for (i = 0; i < NUM_CONDITIONS; i++)
582 if (clause & (1 << i))
584 if (found)
585 fprintf (f, " || ");
586 found = true;
587 dump_condition (f, conds, i);
589 fprintf (f, ")");
593 /* Dump predicate PREDICATE. */
595 static void
596 dump_predicate (FILE *f, conditions conds, struct predicate *pred)
598 int i;
599 if (true_predicate_p (pred))
600 dump_clause (f, conds, 0);
601 else
602 for (i = 0; pred->clause[i]; i++)
604 if (i)
605 fprintf (f, " && ");
606 dump_clause (f, conds, pred->clause[i]);
608 fprintf (f, "\n");
612 /* Dump inline hints. */
613 void
614 dump_inline_hints (FILE *f, inline_hints hints)
616 if (!hints)
617 return;
618 fprintf (f, "inline hints:");
619 if (hints & INLINE_HINT_indirect_call)
621 hints &= ~INLINE_HINT_indirect_call;
622 fprintf (f, " indirect_call");
624 if (hints & INLINE_HINT_loop_iterations)
626 hints &= ~INLINE_HINT_loop_iterations;
627 fprintf (f, " loop_iterations");
629 if (hints & INLINE_HINT_loop_stride)
631 hints &= ~INLINE_HINT_loop_stride;
632 fprintf (f, " loop_stride");
634 if (hints & INLINE_HINT_same_scc)
636 hints &= ~INLINE_HINT_same_scc;
637 fprintf (f, " same_scc");
639 if (hints & INLINE_HINT_in_scc)
641 hints &= ~INLINE_HINT_in_scc;
642 fprintf (f, " in_scc");
644 if (hints & INLINE_HINT_cross_module)
646 hints &= ~INLINE_HINT_cross_module;
647 fprintf (f, " cross_module");
649 if (hints & INLINE_HINT_declared_inline)
651 hints &= ~INLINE_HINT_declared_inline;
652 fprintf (f, " declared_inline");
654 if (hints & INLINE_HINT_array_index)
656 hints &= ~INLINE_HINT_array_index;
657 fprintf (f, " array_index");
659 if (hints & INLINE_HINT_known_hot)
661 hints &= ~INLINE_HINT_known_hot;
662 fprintf (f, " known_hot");
664 gcc_assert (!hints);
668 /* Record SIZE and TIME under predicate PRED into the inline summary. */
670 static void
671 account_size_time (struct inline_summary *summary, int size, int time,
672 struct predicate *pred)
674 size_time_entry *e;
675 bool found = false;
676 int i;
678 if (false_predicate_p (pred))
679 return;
681 /* We need to create the initial empty unconditional clause, but otherwise
682 we don't need to account for empty times and sizes. */
683 if (!size && !time && summary->entry)
684 return;
686 /* Watch overflow that might result from insane profiles. */
687 if (time > MAX_TIME * INLINE_TIME_SCALE)
688 time = MAX_TIME * INLINE_TIME_SCALE;
689 gcc_assert (time >= 0);
691 for (i = 0; vec_safe_iterate (summary->entry, i, &e); i++)
692 if (predicates_equal_p (&e->predicate, pred))
694 found = true;
695 break;
697 if (i == 256)
699 i = 0;
700 found = true;
701 e = &(*summary->entry)[0];
702 gcc_assert (!e->predicate.clause[0]);
703 if (dump_file && (dump_flags & TDF_DETAILS))
704 fprintf (dump_file,
705 "\t\tReached limit on number of entries, "
706 "ignoring the predicate.");
708 if (dump_file && (dump_flags & TDF_DETAILS) && (time || size))
710 fprintf (dump_file,
711 "\t\tAccounting size:%3.2f, time:%3.2f on %spredicate:",
712 ((double) size) / INLINE_SIZE_SCALE,
713 ((double) time) / INLINE_TIME_SCALE, found ? "" : "new ");
714 dump_predicate (dump_file, summary->conds, pred);
716 if (!found)
718 struct size_time_entry new_entry;
719 new_entry.size = size;
720 new_entry.time = time;
721 new_entry.predicate = *pred;
722 vec_safe_push (summary->entry, new_entry);
724 else
726 e->size += size;
727 e->time += time;
728 if (e->time > MAX_TIME * INLINE_TIME_SCALE)
729 e->time = MAX_TIME * INLINE_TIME_SCALE;
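/* Usage sketch (hypothetical, for illustration): recording that two size
   units and three time units are spent only when the function is not
   inlined could look like

     struct predicate p = not_inlined_predicate ();
     account_size_time (summary, 2 * INLINE_SIZE_SCALE,
                        3 * INLINE_TIME_SCALE, &p);

   Entries with equal predicates are merged; once 256 distinct entries
   exist, further costs are attributed to the unconditional entry.  */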
733 /* We proved E to be unreachable, redirect it to __builtin_unreachable. */
735 static struct cgraph_edge *
736 redirect_to_unreachable (struct cgraph_edge *e)
738 struct cgraph_node *callee = !e->inline_failed ? e->callee : NULL;
739 struct cgraph_node *target = cgraph_node::get_create
740 (builtin_decl_implicit (BUILT_IN_UNREACHABLE));
742 if (e->speculative)
743 e = e->resolve_speculation (target->decl);
744 else if (!e->callee)
745 e->make_direct (target);
746 else
747 e->redirect_callee (target);
748 struct inline_edge_summary *es = inline_edge_summary (e);
749 e->inline_failed = CIF_UNREACHABLE;
750 e->frequency = 0;
751 e->count = 0;
752 es->call_stmt_size = 0;
753 es->call_stmt_time = 0;
754 if (callee)
755 callee->remove_symbol_and_inline_clones ();
756 return e;
759 /* Set predicate for edge E. */
761 static void
762 edge_set_predicate (struct cgraph_edge *e, struct predicate *predicate)
764 /* If the edge is determined to be never executed, redirect it
765 to BUILTIN_UNREACHABLE to save the inliner from inlining into it. */
766 if (predicate && false_predicate_p (predicate)
767 /* When handling speculative edges, we need to do the redirection
768 just once. Do it always on the direct edge, so we do not
769 attempt to resolve speculation while duplicating the edge. */
770 && (!e->speculative || e->callee))
771 e = redirect_to_unreachable (e);
773 struct inline_edge_summary *es = inline_edge_summary (e);
774 if (predicate && !true_predicate_p (predicate))
776 if (!es->predicate)
777 es->predicate = edge_predicate_pool.allocate ();
778 *es->predicate = *predicate;
780 else
782 if (es->predicate)
783 edge_predicate_pool.remove (es->predicate);
784 es->predicate = NULL;
788 /* Set predicate for hint *P. */
790 static void
791 set_hint_predicate (struct predicate **p, struct predicate new_predicate)
793 if (false_predicate_p (&new_predicate) || true_predicate_p (&new_predicate))
795 if (*p)
796 edge_predicate_pool.remove (*p);
797 *p = NULL;
799 else
801 if (!*p)
802 *p = edge_predicate_pool.allocate ();
803 **p = new_predicate;
808 /* KNOWN_VALS is partial mapping of parameters of NODE to constant values.
809 KNOWN_AGGS is a vector of aggregate jump functions for each parameter.
810 Return clause of possible truths. When INLINE_P is true, assume that we are
811 inlining.
813 ERROR_MARK means compile time invariant. */
815 static clause_t
816 evaluate_conditions_for_known_args (struct cgraph_node *node,
817 bool inline_p,
818 vec<tree> known_vals,
819 vec<ipa_agg_jump_function_p>
820 known_aggs)
822 clause_t clause = inline_p ? 0 : 1 << predicate_not_inlined_condition;
823 struct inline_summary *info = inline_summaries->get (node);
824 int i;
825 struct condition *c;
827 for (i = 0; vec_safe_iterate (info->conds, i, &c); i++)
829 tree val;
830 tree res;
832 /* We allow call stmt to have fewer arguments than the callee function
833 (especially for K&R style programs). So bound check here (we assume
834 known_aggs vector, if non-NULL, has the same length as
835 known_vals). */
836 gcc_checking_assert (!known_aggs.exists ()
837 || (known_vals.length () == known_aggs.length ()));
838 if (c->operand_num >= (int) known_vals.length ())
840 clause |= 1 << (i + predicate_first_dynamic_condition);
841 continue;
844 if (c->agg_contents)
846 struct ipa_agg_jump_function *agg;
848 if (c->code == CHANGED
849 && !c->by_ref
850 && (known_vals[c->operand_num] == error_mark_node))
851 continue;
853 if (known_aggs.exists ())
855 agg = known_aggs[c->operand_num];
856 val = ipa_find_agg_cst_for_param (agg, known_vals[c->operand_num],
857 c->offset, c->by_ref);
859 else
860 val = NULL_TREE;
862 else
864 val = known_vals[c->operand_num];
865 if (val == error_mark_node && c->code != CHANGED)
866 val = NULL_TREE;
869 if (!val)
871 clause |= 1 << (i + predicate_first_dynamic_condition);
872 continue;
874 if (c->code == CHANGED)
875 continue;
877 if (tree_to_shwi (TYPE_SIZE (TREE_TYPE (val))) != c->size)
879 clause |= 1 << (i + predicate_first_dynamic_condition);
880 continue;
882 if (c->code == IS_NOT_CONSTANT)
883 continue;
885 val = fold_unary (VIEW_CONVERT_EXPR, TREE_TYPE (c->val), val);
886 res = val
887 ? fold_binary_to_constant (c->code, boolean_type_node, val, c->val)
888 : NULL;
890 if (res && integer_zerop (res))
891 continue;
893 clause |= 1 << (i + predicate_first_dynamic_condition);
895 return clause;
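/* Illustration (not part of the original sources): for a callee whose
   summary contains the condition "op0 == 4", calling this function with
   known_vals[0] set to the constant 5 folds the condition to false, so its
   bit is left out of the returned clause; an unknown or size-mismatching
   operand always sets its bit, keeping the result conservatively
   "possibly true".  */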
899 /* Work out what conditions might be true at invocation of E. */
901 static void
902 evaluate_properties_for_edge (struct cgraph_edge *e, bool inline_p,
903 clause_t *clause_ptr,
904 vec<tree> *known_vals_ptr,
905 vec<ipa_polymorphic_call_context>
906 *known_contexts_ptr,
907 vec<ipa_agg_jump_function_p> *known_aggs_ptr)
909 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
910 struct inline_summary *info = inline_summaries->get (callee);
911 vec<tree> known_vals = vNULL;
912 vec<ipa_agg_jump_function_p> known_aggs = vNULL;
914 if (clause_ptr)
915 *clause_ptr = inline_p ? 0 : 1 << predicate_not_inlined_condition;
916 if (known_vals_ptr)
917 known_vals_ptr->create (0);
918 if (known_contexts_ptr)
919 known_contexts_ptr->create (0);
921 if (ipa_node_params_sum
922 && !e->call_stmt_cannot_inline_p
923 && ((clause_ptr && info->conds) || known_vals_ptr || known_contexts_ptr))
925 struct ipa_node_params *parms_info;
926 struct ipa_edge_args *args = IPA_EDGE_REF (e);
927 struct inline_edge_summary *es = inline_edge_summary (e);
928 int i, count = ipa_get_cs_argument_count (args);
930 if (e->caller->global.inlined_to)
931 parms_info = IPA_NODE_REF (e->caller->global.inlined_to);
932 else
933 parms_info = IPA_NODE_REF (e->caller);
935 if (count && (info->conds || known_vals_ptr))
936 known_vals.safe_grow_cleared (count);
937 if (count && (info->conds || known_aggs_ptr))
938 known_aggs.safe_grow_cleared (count);
939 if (count && known_contexts_ptr)
940 known_contexts_ptr->safe_grow_cleared (count);
942 for (i = 0; i < count; i++)
944 struct ipa_jump_func *jf = ipa_get_ith_jump_func (args, i);
945 tree cst = ipa_value_from_jfunc (parms_info, jf);
947 if (!cst && e->call_stmt
948 && i < (int)gimple_call_num_args (e->call_stmt))
950 cst = gimple_call_arg (e->call_stmt, i);
951 if (!is_gimple_min_invariant (cst))
952 cst = NULL;
954 if (cst)
956 gcc_checking_assert (TREE_CODE (cst) != TREE_BINFO);
957 if (known_vals.exists ())
958 known_vals[i] = cst;
960 else if (inline_p && !es->param[i].change_prob)
961 known_vals[i] = error_mark_node;
963 if (known_contexts_ptr)
964 (*known_contexts_ptr)[i] = ipa_context_from_jfunc (parms_info, e,
965 i, jf);
966 /* TODO: When IPA-CP starts propagating and merging aggregate jump
967 functions, use its knowledge of the caller too, just like the
968 scalar case above. */
969 known_aggs[i] = &jf->agg;
972 else if (e->call_stmt && !e->call_stmt_cannot_inline_p
973 && ((clause_ptr && info->conds) || known_vals_ptr))
975 int i, count = (int)gimple_call_num_args (e->call_stmt);
977 if (count && (info->conds || known_vals_ptr))
978 known_vals.safe_grow_cleared (count);
979 for (i = 0; i < count; i++)
981 tree cst = gimple_call_arg (e->call_stmt, i);
982 if (!is_gimple_min_invariant (cst))
983 cst = NULL;
984 if (cst)
985 known_vals[i] = cst;
989 if (clause_ptr)
990 *clause_ptr = evaluate_conditions_for_known_args (callee, inline_p,
991 known_vals, known_aggs);
993 if (known_vals_ptr)
994 *known_vals_ptr = known_vals;
995 else
996 known_vals.release ();
998 if (known_aggs_ptr)
999 *known_aggs_ptr = known_aggs;
1000 else
1001 known_aggs.release ();
1005 /* Allocate the inline summary vector or resize it to cover all cgraph nodes. */
1007 static void
1008 inline_summary_alloc (void)
1010 if (!edge_removal_hook_holder)
1011 edge_removal_hook_holder =
1012 symtab->add_edge_removal_hook (&inline_edge_removal_hook, NULL);
1013 if (!edge_duplication_hook_holder)
1014 edge_duplication_hook_holder =
1015 symtab->add_edge_duplication_hook (&inline_edge_duplication_hook, NULL);
1017 if (!inline_summaries)
1018 inline_summaries = (inline_summary_t*) inline_summary_t::create_ggc (symtab);
1020 if (inline_edge_summary_vec.length () <= (unsigned) symtab->edges_max_uid)
1021 inline_edge_summary_vec.safe_grow_cleared (symtab->edges_max_uid + 1);
1024 /* We are called multiple times for a given function; clear
1025 data from previous runs so they are not accumulated. */
1027 static void
1028 reset_inline_edge_summary (struct cgraph_edge *e)
1030 if (e->uid < (int) inline_edge_summary_vec.length ())
1032 struct inline_edge_summary *es = inline_edge_summary (e);
1034 es->call_stmt_size = es->call_stmt_time = 0;
1035 if (es->predicate)
1036 edge_predicate_pool.remove (es->predicate);
1037 es->predicate = NULL;
1038 es->param.release ();
1042 /* We are called multiple times for a given function; clear
1043 data from previous runs so they are not accumulated. */
1045 static void
1046 reset_inline_summary (struct cgraph_node *node,
1047 inline_summary *info)
1049 struct cgraph_edge *e;
1051 info->self_size = info->self_time = 0;
1052 info->estimated_stack_size = 0;
1053 info->estimated_self_stack_size = 0;
1054 info->stack_frame_offset = 0;
1055 info->size = 0;
1056 info->time = 0;
1057 info->growth = 0;
1058 info->scc_no = 0;
1059 if (info->loop_iterations)
1061 edge_predicate_pool.remove (info->loop_iterations);
1062 info->loop_iterations = NULL;
1064 if (info->loop_stride)
1066 edge_predicate_pool.remove (info->loop_stride);
1067 info->loop_stride = NULL;
1069 if (info->array_index)
1071 edge_predicate_pool.remove (info->array_index);
1072 info->array_index = NULL;
1074 vec_free (info->conds);
1075 vec_free (info->entry);
1076 for (e = node->callees; e; e = e->next_callee)
1077 reset_inline_edge_summary (e);
1078 for (e = node->indirect_calls; e; e = e->next_callee)
1079 reset_inline_edge_summary (e);
1080 info->fp_expressions = false;
1083 /* Hook that is called by cgraph.c when a node is removed. */
1085 void
1086 inline_summary_t::remove (cgraph_node *node, inline_summary *info)
1088 reset_inline_summary (node, info);
1091 /* Remap predicate P of the former function to be a predicate of the duplicated function.
1092 POSSIBLE_TRUTHS is the clause of possible truths in the duplicated node,
1093 INFO is the inline summary of the duplicated node. */
1095 static struct predicate
1096 remap_predicate_after_duplication (struct predicate *p,
1097 clause_t possible_truths,
1098 struct inline_summary *info)
1100 struct predicate new_predicate = true_predicate ();
1101 int j;
1102 for (j = 0; p->clause[j]; j++)
1103 if (!(possible_truths & p->clause[j]))
1105 new_predicate = false_predicate ();
1106 break;
1108 else
1109 add_clause (info->conds, &new_predicate,
1110 possible_truths & p->clause[j]);
1111 return new_predicate;
1114 /* Same as remap_predicate_after_duplication but handle hint predicate *P.
1115 Additionally take care of allocating a new memory slot for the updated predicate
1116 and set it to NULL when it becomes true or false (and thus uninteresting).
1119 static void
1120 remap_hint_predicate_after_duplication (struct predicate **p,
1121 clause_t possible_truths,
1122 struct inline_summary *info)
1124 struct predicate new_predicate;
1126 if (!*p)
1127 return;
1129 new_predicate = remap_predicate_after_duplication (*p,
1130 possible_truths, info);
1131 /* We do not want to free the previous predicate; it is used by the node's origin. */
1132 *p = NULL;
1133 set_hint_predicate (p, new_predicate);
1137 /* Hook that is called by cgraph.c when a node is duplicated. */
1138 void
1139 inline_summary_t::duplicate (cgraph_node *src,
1140 cgraph_node *dst,
1141 inline_summary *,
1142 inline_summary *info)
1144 inline_summary_alloc ();
1145 memcpy (info, inline_summaries->get (src), sizeof (inline_summary));
1146 /* TODO: as an optimization, we may avoid copying conditions
1147 that are known to be false or true. */
1148 info->conds = vec_safe_copy (info->conds);
1150 /* When there are any replacements in the function body, see if we can figure
1151 out that something was optimized out. */
1152 if (ipa_node_params_sum && dst->clone.tree_map)
1154 vec<size_time_entry, va_gc> *entry = info->entry;
1155 /* Use SRC parm info since it may not be copied yet. */
1156 struct ipa_node_params *parms_info = IPA_NODE_REF (src);
1157 vec<tree> known_vals = vNULL;
1158 int count = ipa_get_param_count (parms_info);
1159 int i, j;
1160 clause_t possible_truths;
1161 struct predicate true_pred = true_predicate ();
1162 size_time_entry *e;
1163 int optimized_out_size = 0;
1164 bool inlined_to_p = false;
1165 struct cgraph_edge *edge, *next;
1167 info->entry = 0;
1168 known_vals.safe_grow_cleared (count);
1169 for (i = 0; i < count; i++)
1171 struct ipa_replace_map *r;
1173 for (j = 0; vec_safe_iterate (dst->clone.tree_map, j, &r); j++)
1175 if (((!r->old_tree && r->parm_num == i)
1176 || (r->old_tree && r->old_tree == ipa_get_param (parms_info, i)))
1177 && r->replace_p && !r->ref_p)
1179 known_vals[i] = r->new_tree;
1180 break;
1184 possible_truths = evaluate_conditions_for_known_args (dst, false,
1185 known_vals,
1186 vNULL);
1187 known_vals.release ();
1189 account_size_time (info, 0, 0, &true_pred);
1191 /* Remap size_time vectors.
1192 Simplify the predicate by pruning out alternatives that are known
1193 to be false.
1194 TODO: as an optimization, we can also eliminate conditions known
1195 to be true. */
1196 for (i = 0; vec_safe_iterate (entry, i, &e); i++)
1198 struct predicate new_predicate;
1199 new_predicate = remap_predicate_after_duplication (&e->predicate,
1200 possible_truths,
1201 info);
1202 if (false_predicate_p (&new_predicate))
1203 optimized_out_size += e->size;
1204 else
1205 account_size_time (info, e->size, e->time, &new_predicate);
1208 /* Remap edge predicates with the same simplification as above.
1209 Also copy constantness arrays. */
1210 for (edge = dst->callees; edge; edge = next)
1212 struct predicate new_predicate;
1213 struct inline_edge_summary *es = inline_edge_summary (edge);
1214 next = edge->next_callee;
1216 if (!edge->inline_failed)
1217 inlined_to_p = true;
1218 if (!es->predicate)
1219 continue;
1220 new_predicate = remap_predicate_after_duplication (es->predicate,
1221 possible_truths,
1222 info);
1223 if (false_predicate_p (&new_predicate)
1224 && !false_predicate_p (es->predicate))
1225 optimized_out_size += es->call_stmt_size * INLINE_SIZE_SCALE;
1226 edge_set_predicate (edge, &new_predicate);
1229 /* Remap indirect edge predicates with the same simplification as above.
1230 Also copy constantness arrays. */
1231 for (edge = dst->indirect_calls; edge; edge = next)
1233 struct predicate new_predicate;
1234 struct inline_edge_summary *es = inline_edge_summary (edge);
1235 next = edge->next_callee;
1237 gcc_checking_assert (edge->inline_failed);
1238 if (!es->predicate)
1239 continue;
1240 new_predicate = remap_predicate_after_duplication (es->predicate,
1241 possible_truths,
1242 info);
1243 if (false_predicate_p (&new_predicate)
1244 && !false_predicate_p (es->predicate))
1245 optimized_out_size += es->call_stmt_size * INLINE_SIZE_SCALE;
1246 edge_set_predicate (edge, &new_predicate);
1248 remap_hint_predicate_after_duplication (&info->loop_iterations,
1249 possible_truths, info);
1250 remap_hint_predicate_after_duplication (&info->loop_stride,
1251 possible_truths, info);
1252 remap_hint_predicate_after_duplication (&info->array_index,
1253 possible_truths, info);
1255 /* If the inliner or someone after the inliner ever starts producing
1256 non-trivial clones, we will get in trouble with the lack of information
1257 about updating self sizes, because the size vectors already contain
1258 sizes of the callees. */
1259 gcc_assert (!inlined_to_p || !optimized_out_size);
1261 else
1263 info->entry = vec_safe_copy (info->entry);
1264 if (info->loop_iterations)
1266 predicate p = *info->loop_iterations;
1267 info->loop_iterations = NULL;
1268 set_hint_predicate (&info->loop_iterations, p);
1270 if (info->loop_stride)
1272 predicate p = *info->loop_stride;
1273 info->loop_stride = NULL;
1274 set_hint_predicate (&info->loop_stride, p);
1276 if (info->array_index)
1278 predicate p = *info->array_index;
1279 info->array_index = NULL;
1280 set_hint_predicate (&info->array_index, p);
1283 if (!dst->global.inlined_to)
1284 inline_update_overall_summary (dst);
1288 /* Hook that is called by cgraph.c when a node is duplicated. */
1290 static void
1291 inline_edge_duplication_hook (struct cgraph_edge *src,
1292 struct cgraph_edge *dst,
1293 ATTRIBUTE_UNUSED void *data)
1295 struct inline_edge_summary *info;
1296 struct inline_edge_summary *srcinfo;
1297 inline_summary_alloc ();
1298 info = inline_edge_summary (dst);
1299 srcinfo = inline_edge_summary (src);
1300 memcpy (info, srcinfo, sizeof (struct inline_edge_summary));
1301 info->predicate = NULL;
1302 edge_set_predicate (dst, srcinfo->predicate);
1303 info->param = srcinfo->param.copy ();
1304 if (!dst->indirect_unknown_callee && src->indirect_unknown_callee)
1306 info->call_stmt_size -= (eni_size_weights.indirect_call_cost
1307 - eni_size_weights.call_cost);
1308 info->call_stmt_time -= (eni_time_weights.indirect_call_cost
1309 - eni_time_weights.call_cost);
1314 /* Keep edge cache consistent across edge removal. */
1316 static void
1317 inline_edge_removal_hook (struct cgraph_edge *edge,
1318 void *data ATTRIBUTE_UNUSED)
1320 if (edge_growth_cache.exists ())
1321 reset_edge_growth_cache (edge);
1322 reset_inline_edge_summary (edge);
1326 /* Initialize growth caches. */
1328 void
1329 initialize_growth_caches (void)
1331 if (symtab->edges_max_uid)
1332 edge_growth_cache.safe_grow_cleared (symtab->edges_max_uid);
1336 /* Free growth caches. */
1338 void
1339 free_growth_caches (void)
1341 edge_growth_cache.release ();
1345 /* Dump edge summaries associated with NODE and recursively with all clones.
1346 Indent by INDENT. */
1348 static void
1349 dump_inline_edge_summary (FILE *f, int indent, struct cgraph_node *node,
1350 struct inline_summary *info)
1352 struct cgraph_edge *edge;
1353 for (edge = node->callees; edge; edge = edge->next_callee)
1355 struct inline_edge_summary *es = inline_edge_summary (edge);
1356 struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
1357 int i;
1359 fprintf (f,
1360 "%*s%s/%i %s\n%*s loop depth:%2i freq:%4i size:%2i"
1361 " time: %2i callee size:%2i stack:%2i",
1362 indent, "", callee->name (), callee->order,
1363 !edge->inline_failed
1364 ? "inlined" : cgraph_inline_failed_string (edge-> inline_failed),
1365 indent, "", es->loop_depth, edge->frequency,
1366 es->call_stmt_size, es->call_stmt_time,
1367 (int) inline_summaries->get (callee)->size / INLINE_SIZE_SCALE,
1368 (int) inline_summaries->get (callee)->estimated_stack_size);
1370 if (es->predicate)
1372 fprintf (f, " predicate: ");
1373 dump_predicate (f, info->conds, es->predicate);
1375 else
1376 fprintf (f, "\n");
1377 if (es->param.exists ())
1378 for (i = 0; i < (int) es->param.length (); i++)
1380 int prob = es->param[i].change_prob;
1382 if (!prob)
1383 fprintf (f, "%*s op%i is compile time invariant\n",
1384 indent + 2, "", i);
1385 else if (prob != REG_BR_PROB_BASE)
1386 fprintf (f, "%*s op%i change %f%% of time\n", indent + 2, "", i,
1387 prob * 100.0 / REG_BR_PROB_BASE);
1389 if (!edge->inline_failed)
1391 fprintf (f, "%*sStack frame offset %i, callee self size %i,"
1392 " callee size %i\n",
1393 indent + 2, "",
1394 (int) inline_summaries->get (callee)->stack_frame_offset,
1395 (int) inline_summaries->get (callee)->estimated_self_stack_size,
1396 (int) inline_summaries->get (callee)->estimated_stack_size);
1397 dump_inline_edge_summary (f, indent + 2, callee, info);
1400 for (edge = node->indirect_calls; edge; edge = edge->next_callee)
1402 struct inline_edge_summary *es = inline_edge_summary (edge);
1403 fprintf (f, "%*sindirect call loop depth:%2i freq:%4i size:%2i"
1404 " time: %2i",
1405 indent, "",
1406 es->loop_depth,
1407 edge->frequency, es->call_stmt_size, es->call_stmt_time);
1408 if (es->predicate)
1410 fprintf (f, "predicate: ");
1411 dump_predicate (f, info->conds, es->predicate);
1413 else
1414 fprintf (f, "\n");
1419 void
1420 dump_inline_summary (FILE *f, struct cgraph_node *node)
1422 if (node->definition)
1424 struct inline_summary *s = inline_summaries->get (node);
1425 size_time_entry *e;
1426 int i;
1427 fprintf (f, "Inline summary for %s/%i", node->name (),
1428 node->order);
1429 if (DECL_DISREGARD_INLINE_LIMITS (node->decl))
1430 fprintf (f, " always_inline");
1431 if (s->inlinable)
1432 fprintf (f, " inlinable");
1433 if (s->contains_cilk_spawn)
1434 fprintf (f, " contains_cilk_spawn");
1435 if (s->fp_expressions)
1436 fprintf (f, " fp_expression");
1437 fprintf (f, "\n self time: %i\n", s->self_time);
1438 fprintf (f, " global time: %i\n", s->time);
1439 fprintf (f, " self size: %i\n", s->self_size);
1440 fprintf (f, " global size: %i\n", s->size);
1441 fprintf (f, " min size: %i\n", s->min_size);
1442 fprintf (f, " self stack: %i\n",
1443 (int) s->estimated_self_stack_size);
1444 fprintf (f, " global stack: %i\n", (int) s->estimated_stack_size);
1445 if (s->growth)
1446 fprintf (f, " estimated growth:%i\n", (int) s->growth);
1447 if (s->scc_no)
1448 fprintf (f, " In SCC: %i\n", (int) s->scc_no);
1449 for (i = 0; vec_safe_iterate (s->entry, i, &e); i++)
1451 fprintf (f, " size:%f, time:%f, predicate:",
1452 (double) e->size / INLINE_SIZE_SCALE,
1453 (double) e->time / INLINE_TIME_SCALE);
1454 dump_predicate (f, s->conds, &e->predicate);
1456 if (s->loop_iterations)
1458 fprintf (f, " loop iterations:");
1459 dump_predicate (f, s->conds, s->loop_iterations);
1461 if (s->loop_stride)
1463 fprintf (f, " loop stride:");
1464 dump_predicate (f, s->conds, s->loop_stride);
1466 if (s->array_index)
1468 fprintf (f, " array index:");
1469 dump_predicate (f, s->conds, s->array_index);
1471 fprintf (f, " calls:\n");
1472 dump_inline_edge_summary (f, 4, node, s);
1473 fprintf (f, "\n");
1477 DEBUG_FUNCTION void
1478 debug_inline_summary (struct cgraph_node *node)
1480 dump_inline_summary (stderr, node);
1483 void
1484 dump_inline_summaries (FILE *f)
1486 struct cgraph_node *node;
1488 FOR_EACH_DEFINED_FUNCTION (node)
1489 if (!node->global.inlined_to)
1490 dump_inline_summary (f, node);
1493 /* Give initial reasons why inlining would fail on EDGE. These get either
1494 nullified or, usually, overwritten by more precise reasons later. */
1496 void
1497 initialize_inline_failed (struct cgraph_edge *e)
1499 struct cgraph_node *callee = e->callee;
1501 if (e->inline_failed && e->inline_failed != CIF_BODY_NOT_AVAILABLE
1502 && cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
1504 else if (e->indirect_unknown_callee)
1505 e->inline_failed = CIF_INDIRECT_UNKNOWN_CALL;
1506 else if (!callee->definition)
1507 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
1508 else if (callee->local.redefined_extern_inline)
1509 e->inline_failed = CIF_REDEFINED_EXTERN_INLINE;
1510 else if (cfun && fn_contains_cilk_spawn_p (cfun))
1511 /* We can't inline if the function is spawning a function. */
1512 e->inline_failed = CIF_CILK_SPAWN;
1513 else
1514 e->inline_failed = CIF_FUNCTION_NOT_CONSIDERED;
1515 gcc_checking_assert (!e->call_stmt_cannot_inline_p
1516 || cgraph_inline_failed_type (e->inline_failed)
1517 == CIF_FINAL_ERROR);
1520 /* Callback of walk_aliased_vdefs. Flags that it has been invoked to the
1521 boolean variable pointed to by DATA. */
1523 static bool
1524 mark_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef ATTRIBUTE_UNUSED,
1525 void *data)
1527 bool *b = (bool *) data;
1528 *b = true;
1529 return true;
1532 /* If OP refers to value of function parameter, return the corresponding
1533 parameter. If non-NULL, the size of the memory load (or the SSA_NAME of the
1534 PARM_DECL) will be stored to *SIZE_P in that case too. */
1536 static tree
1537 unmodified_parm_1 (gimple *stmt, tree op, HOST_WIDE_INT *size_p)
1539 /* SSA_NAME referring to parm default def? */
1540 if (TREE_CODE (op) == SSA_NAME
1541 && SSA_NAME_IS_DEFAULT_DEF (op)
1542 && TREE_CODE (SSA_NAME_VAR (op)) == PARM_DECL)
1544 if (size_p)
1545 *size_p = tree_to_shwi (TYPE_SIZE (TREE_TYPE (op)));
1546 return SSA_NAME_VAR (op);
1548 /* Non-SSA parm reference? */
1549 if (TREE_CODE (op) == PARM_DECL)
1551 bool modified = false;
1553 ao_ref refd;
1554 ao_ref_init (&refd, op);
1555 walk_aliased_vdefs (&refd, gimple_vuse (stmt), mark_modified, &modified,
1556 NULL);
1557 if (!modified)
1559 if (size_p)
1560 *size_p = tree_to_shwi (TYPE_SIZE (TREE_TYPE (op)));
1561 return op;
1564 return NULL_TREE;
1567 /* If OP refers to value of function parameter, return the corresponding
1568 parameter. Also traverse chains of SSA register assignments. If non-NULL,
1569 the size of the memory load (or the SSA_NAME of the PARM_DECL) will be
1570 stored to *SIZE_P in that case too. */
1572 static tree
1573 unmodified_parm (gimple *stmt, tree op, HOST_WIDE_INT *size_p)
1575 tree res = unmodified_parm_1 (stmt, op, size_p);
1576 if (res)
1577 return res;
1579 if (TREE_CODE (op) == SSA_NAME
1580 && !SSA_NAME_IS_DEFAULT_DEF (op)
1581 && gimple_assign_single_p (SSA_NAME_DEF_STMT (op)))
1582 return unmodified_parm (SSA_NAME_DEF_STMT (op),
1583 gimple_assign_rhs1 (SSA_NAME_DEF_STMT (op)),
1584 size_p);
1585 return NULL_TREE;
1588 /* If OP refers to a value of a function parameter or value loaded from an
1589 aggregate passed to a parameter (either by value or reference), return TRUE
1590 and store the number of the parameter to *INDEX_P, the access size into
1591 *SIZE_P, and information whether and how it has been loaded from an
1592 aggregate into *AGGPOS. INFO describes the function parameters, STMT is the
1593 statement in which OP is used or loaded. */
1595 static bool
1596 unmodified_parm_or_parm_agg_item (struct ipa_func_body_info *fbi,
1597 gimple *stmt, tree op, int *index_p,
1598 HOST_WIDE_INT *size_p,
1599 struct agg_position_info *aggpos)
1601 tree res = unmodified_parm_1 (stmt, op, size_p);
1603 gcc_checking_assert (aggpos);
1604 if (res)
1606 *index_p = ipa_get_param_decl_index (fbi->info, res);
1607 if (*index_p < 0)
1608 return false;
1609 aggpos->agg_contents = false;
1610 aggpos->by_ref = false;
1611 return true;
1614 if (TREE_CODE (op) == SSA_NAME)
1616 if (SSA_NAME_IS_DEFAULT_DEF (op)
1617 || !gimple_assign_single_p (SSA_NAME_DEF_STMT (op)))
1618 return false;
1619 stmt = SSA_NAME_DEF_STMT (op);
1620 op = gimple_assign_rhs1 (stmt);
1621 if (!REFERENCE_CLASS_P (op))
1622 return unmodified_parm_or_parm_agg_item (fbi, stmt, op, index_p, size_p,
1623 aggpos);
1626 aggpos->agg_contents = true;
1627 return ipa_load_from_parm_agg (fbi, fbi->info->descriptors,
1628 stmt, op, index_p, &aggpos->offset,
1629 size_p, &aggpos->by_ref);
1632 /* See if statement might disappear after inlining.
1633 0 - means not eliminated
1634 1 - half of the statements go away
1635 2 - for sure it is eliminated.
1636 We are not terribly sophisticated, basically looking for simple abstraction
1637 penalty wrappers. */
1639 static int
1640 eliminated_by_inlining_prob (gimple *stmt)
1642 enum gimple_code code = gimple_code (stmt);
1643 enum tree_code rhs_code;
1645 if (!optimize)
1646 return 0;
1648 switch (code)
1650 case GIMPLE_RETURN:
1651 return 2;
1652 case GIMPLE_ASSIGN:
1653 if (gimple_num_ops (stmt) != 2)
1654 return 0;
1656 rhs_code = gimple_assign_rhs_code (stmt);
1658 /* Casts of parameters, loads from parameters passed by reference
1659 and stores to return value or parameters are often free after
1660 inlining due to SRA and further combining.
1661 Assume that half of the statements go away. */
1662 if (CONVERT_EXPR_CODE_P (rhs_code)
1663 || rhs_code == VIEW_CONVERT_EXPR
1664 || rhs_code == ADDR_EXPR
1665 || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
1667 tree rhs = gimple_assign_rhs1 (stmt);
1668 tree lhs = gimple_assign_lhs (stmt);
1669 tree inner_rhs = get_base_address (rhs);
1670 tree inner_lhs = get_base_address (lhs);
1671 bool rhs_free = false;
1672 bool lhs_free = false;
1674 if (!inner_rhs)
1675 inner_rhs = rhs;
1676 if (!inner_lhs)
1677 inner_lhs = lhs;
1679 /* Reads of parameters are expected to be free. */
1680 if (unmodified_parm (stmt, inner_rhs, NULL))
1681 rhs_free = true;
1682 /* Match expressions of form &this->field. Those will most likely
1683 combine with something upstream after inlining. */
1684 else if (TREE_CODE (inner_rhs) == ADDR_EXPR)
1686 tree op = get_base_address (TREE_OPERAND (inner_rhs, 0));
1687 if (TREE_CODE (op) == PARM_DECL)
1688 rhs_free = true;
1689 else if (TREE_CODE (op) == MEM_REF
1690 && unmodified_parm (stmt, TREE_OPERAND (op, 0), NULL))
1691 rhs_free = true;
1694 /* When a parameter is not an SSA register because its address is taken
1695 and it is just copied into one, the statement will be completely
1696 free after inlining (we will copy propagate backward). */
1697 if (rhs_free && is_gimple_reg (lhs))
1698 return 2;
1700 /* Reads of parameters passed by reference
1701 are expected to be free (i.e. optimized out after inlining). */
1702 if (TREE_CODE (inner_rhs) == MEM_REF
1703 && unmodified_parm (stmt, TREE_OPERAND (inner_rhs, 0), NULL))
1704 rhs_free = true;
1706 /* Copying a parameter passed by reference into a gimple register is
1707 probably also going to copy propagate, but we can't be quite
1708 sure. */
1709 if (rhs_free && is_gimple_reg (lhs))
1710 lhs_free = true;
1712 /* Writes to parameters, parameters passed by value and return value
1713 (either directly or passed via invisible reference) are free.
1715 TODO: We ought to handle testcase like
1716 struct a {int a,b;};
1717 struct a
1718 retrurnsturct (void)
1720 struct a a ={1,2};
1721 return a;
1724 This translates into:
1726 retrurnsturct ()
1728 int a$b;
1729 int a$a;
1730 struct a a;
1731 struct a D.2739;
1733 <bb 2>:
1734 D.2739.a = 1;
1735 D.2739.b = 2;
1736 return D.2739;
1739 For that we either need to copy ipa-split logic detecting writes
1740 to return value. */
1741 if (TREE_CODE (inner_lhs) == PARM_DECL
1742 || TREE_CODE (inner_lhs) == RESULT_DECL
1743 || (TREE_CODE (inner_lhs) == MEM_REF
1744 && (unmodified_parm (stmt, TREE_OPERAND (inner_lhs, 0), NULL)
1745 || (TREE_CODE (TREE_OPERAND (inner_lhs, 0)) == SSA_NAME
1746 && SSA_NAME_VAR (TREE_OPERAND (inner_lhs, 0))
1747 && TREE_CODE (SSA_NAME_VAR (TREE_OPERAND
1748 (inner_lhs,
1749 0))) == RESULT_DECL))))
1750 lhs_free = true;
1751 if (lhs_free
1752 && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
1753 rhs_free = true;
1754 if (lhs_free && rhs_free)
1755 return 1;
1757 return 0;
1758 default:
1759 return 0;
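/* Illustration (not part of the original sources): for an abstraction
   penalty wrapper such as

     int get_field (struct s *p) { return p->field; }

   the GIMPLE_RETURN is scored 2 (surely eliminated after inlining), while
   the load through the unmodified pointer parameter is scored 1 (expected
   to disappear about half of the time), so the wrapper is estimated to be
   nearly free once inlined.  */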
1764 /* If BB ends with a conditional that we can turn into predicates, attach the
1765 corresponding predicates to the CFG edges. */
1767 static void
1768 set_cond_stmt_execution_predicate (struct ipa_func_body_info *fbi,
1769 struct inline_summary *summary,
1770 basic_block bb)
1772 gimple *last;
1773 tree op;
1774 int index;
1775 HOST_WIDE_INT size;
1776 struct agg_position_info aggpos;
1777 enum tree_code code, inverted_code;
1778 edge e;
1779 edge_iterator ei;
1780 gimple *set_stmt;
1781 tree op2;
1783 last = last_stmt (bb);
1784 if (!last || gimple_code (last) != GIMPLE_COND)
1785 return;
1786 if (!is_gimple_ip_invariant (gimple_cond_rhs (last)))
1787 return;
1788 op = gimple_cond_lhs (last);
1789 /* TODO: handle conditionals like
1790 var = op0 < 4;
1791 if (var != 0). */
1792 if (unmodified_parm_or_parm_agg_item (fbi, last, op, &index, &size, &aggpos))
1794 code = gimple_cond_code (last);
1795 inverted_code = invert_tree_comparison (code, HONOR_NANS (op));
1797 FOR_EACH_EDGE (e, ei, bb->succs)
1799 enum tree_code this_code = (e->flags & EDGE_TRUE_VALUE
1800 ? code : inverted_code);
1801 /* invert_tree_comparison will return ERROR_MARK on FP
1802 comparisons that are not EQ/NE instead of returning the proper
1803 unordered one. Be sure it is not confused with NON_CONSTANT. */
1804 if (this_code != ERROR_MARK)
1806 struct predicate p
1807 = add_condition (summary, index, size, &aggpos, this_code,
1808 unshare_expr_without_location
1809 (gimple_cond_rhs (last)));
1810 e->aux = edge_predicate_pool.allocate ();
1811 *(struct predicate *) e->aux = p;
1816 if (TREE_CODE (op) != SSA_NAME)
1817 return;
1818 /* Special case
1819 if (builtin_constant_p (op))
1820 constant_code
1821 else
1822 nonconstant_code.
1823 Here we can predicate nonconstant_code. We can't
1824 really handle constant_code since we have no predicate
1825 for this and also the constant code is not known to be
1826 optimized away when the inliner doesn't see that the operand is constant.
1827 Other optimizers might think otherwise. */
1828 if (gimple_cond_code (last) != NE_EXPR
1829 || !integer_zerop (gimple_cond_rhs (last)))
1830 return;
1831 set_stmt = SSA_NAME_DEF_STMT (op);
1832 if (!gimple_call_builtin_p (set_stmt, BUILT_IN_CONSTANT_P)
1833 || gimple_call_num_args (set_stmt) != 1)
1834 return;
1835 op2 = gimple_call_arg (set_stmt, 0);
1836 if (!unmodified_parm_or_parm_agg_item (fbi, set_stmt, op2, &index, &size,
1837 &aggpos))
1838 return;
1839 FOR_EACH_EDGE (e, ei, bb->succs) if (e->flags & EDGE_FALSE_VALUE)
1841 struct predicate p = add_condition (summary, index, size, &aggpos,
1842 IS_NOT_CONSTANT, NULL_TREE);
1843 e->aux = edge_predicate_pool.allocate ();
1844 *(struct predicate *) e->aux = p;
1849 /* If BB ends with a switch that we can turn into predicates, attach the
1850 corresponding predicates to the CFG edges. */
1852 static void
1853 set_switch_stmt_execution_predicate (struct ipa_func_body_info *fbi,
1854 struct inline_summary *summary,
1855 basic_block bb)
1857 gimple *lastg;
1858 tree op;
1859 int index;
1860 HOST_WIDE_INT size;
1861 struct agg_position_info aggpos;
1862 edge e;
1863 edge_iterator ei;
1864 size_t n;
1865 size_t case_idx;
1867 lastg = last_stmt (bb);
1868 if (!lastg || gimple_code (lastg) != GIMPLE_SWITCH)
1869 return;
1870 gswitch *last = as_a <gswitch *> (lastg);
1871 op = gimple_switch_index (last);
1872 if (!unmodified_parm_or_parm_agg_item (fbi, last, op, &index, &size, &aggpos))
1873 return;
1875 FOR_EACH_EDGE (e, ei, bb->succs)
1877 e->aux = edge_predicate_pool.allocate ();
1878 *(struct predicate *) e->aux = false_predicate ();
1880 n = gimple_switch_num_labels (last);
1881 for (case_idx = 0; case_idx < n; ++case_idx)
1883 tree cl = gimple_switch_label (last, case_idx);
1884 tree min, max;
1885 struct predicate p;
1887 e = find_edge (bb, label_to_block (CASE_LABEL (cl)));
1888 min = CASE_LOW (cl);
1889 max = CASE_HIGH (cl);
1891 /* For the default case we might want to construct a predicate that none
1892 of the cases is met, but it is a bit hard to do without having negations
1893 of the conditionals handy. */
1894 if (!min && !max)
1895 p = true_predicate ();
1896 else if (!max)
1897 p = add_condition (summary, index, size, &aggpos, EQ_EXPR,
1898 unshare_expr_without_location (min));
1899 else
1901 struct predicate p1, p2;
1902 p1 = add_condition (summary, index, size, &aggpos, GE_EXPR,
1903 unshare_expr_without_location (min));
1904 p2 = add_condition (summary, index, size, &aggpos, LE_EXPR,
1905 unshare_expr_without_location (max));
1906 p = and_predicates (summary->conds, &p1, &p2);
1908 *(struct predicate *) e->aux
1909 = or_predicates (summary->conds, &p, (struct predicate *) e->aux);
1914 /* For each BB in NODE attach to its AUX pointer the predicate under
1915 which it is executable. */
1917 static void
1918 compute_bb_predicates (struct ipa_func_body_info *fbi,
1919 struct cgraph_node *node,
1920 struct inline_summary *summary)
1922 struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
1923 bool done = false;
1924 basic_block bb;
1926 FOR_EACH_BB_FN (bb, my_function)
1928 set_cond_stmt_execution_predicate (fbi, summary, bb);
1929 set_switch_stmt_execution_predicate (fbi, summary, bb);
1932 /* Entry block is always executable. */
1933 ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
1934 = edge_predicate_pool.allocate ();
1935 *(struct predicate *) ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
1936 = true_predicate ();
1938 /* A simple dataflow propagation of predicates forward in the CFG.
1939 TODO: work in reverse postorder. */
1940 while (!done)
1942 done = true;
1943 FOR_EACH_BB_FN (bb, my_function)
1945 struct predicate p = false_predicate ();
1946 edge e;
1947 edge_iterator ei;
1948 FOR_EACH_EDGE (e, ei, bb->preds)
1950 if (e->src->aux)
1952 struct predicate this_bb_predicate
1953 = *(struct predicate *) e->src->aux;
1954 if (e->aux)
1955 this_bb_predicate
1956 = and_predicates (summary->conds, &this_bb_predicate,
1957 (struct predicate *) e->aux);
1958 p = or_predicates (summary->conds, &p, &this_bb_predicate);
1959 if (true_predicate_p (&p))
1960 break;
1963 if (false_predicate_p (&p))
1964 gcc_assert (!bb->aux);
1965 else
1967 if (!bb->aux)
1969 done = false;
1970 bb->aux = edge_predicate_pool.allocate ();
1971 *((struct predicate *) bb->aux) = p;
1973 else if (!predicates_equal_p (&p, (struct predicate *) bb->aux))
1975 /* This OR operation is needed to ensure monotonic data flow
1976 in case we hit the limit on the number of clauses and the
1977 and/or operations above give approximate answers. */
1978 p = or_predicates (summary->conds, &p, (struct predicate *)bb->aux);
1979 if (!predicates_equal_p (&p, (struct predicate *) bb->aux))
1981 done = false;
1982 *((struct predicate *) bb->aux) = p;
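/* Added illustration (a sketch, not upstream code).  For

     if (parm_1 > 0)    <- bb A
       ...              <- bb B
     ...                <- bb C (join)

   the entry block and A start out true, B gets the predicate that
   "parm_1 > 0" may hold, and C ORs the predicates coming in over both
   edges, which folds back to true.  The loop above simply repeats this
   propagation until no block's predicate changes.  */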
1991 /* We keep info about constantness of SSA names. */
1993 typedef struct predicate predicate_t;
1994 /* Return a predicate specifying when EXPR might have a result that is not
1995 a compile time constant. */
1997 static struct predicate
1998 will_be_nonconstant_expr_predicate (struct ipa_node_params *info,
1999 struct inline_summary *summary,
2000 tree expr,
2001 vec<predicate_t> nonconstant_names)
2003 tree parm;
2004 int index;
2005 HOST_WIDE_INT size;
2007 while (UNARY_CLASS_P (expr))
2008 expr = TREE_OPERAND (expr, 0);
2010 parm = unmodified_parm (NULL, expr, &size);
2011 if (parm && (index = ipa_get_param_decl_index (info, parm)) >= 0)
2012 return add_condition (summary, index, size, NULL, CHANGED, NULL_TREE);
2013 if (is_gimple_min_invariant (expr))
2014 return false_predicate ();
2015 if (TREE_CODE (expr) == SSA_NAME)
2016 return nonconstant_names[SSA_NAME_VERSION (expr)];
2017 if (BINARY_CLASS_P (expr) || COMPARISON_CLASS_P (expr))
2019 struct predicate p1 = will_be_nonconstant_expr_predicate
2020 (info, summary, TREE_OPERAND (expr, 0),
2021 nonconstant_names);
2022 struct predicate p2;
2023 if (true_predicate_p (&p1))
2024 return p1;
2025 p2 = will_be_nonconstant_expr_predicate (info, summary,
2026 TREE_OPERAND (expr, 1),
2027 nonconstant_names);
2028 return or_predicates (summary->conds, &p1, &p2);
2030 else if (TREE_CODE (expr) == COND_EXPR)
2032 struct predicate p1 = will_be_nonconstant_expr_predicate
2033 (info, summary, TREE_OPERAND (expr, 0),
2034 nonconstant_names);
2035 struct predicate p2;
2036 if (true_predicate_p (&p1))
2037 return p1;
2038 p2 = will_be_nonconstant_expr_predicate (info, summary,
2039 TREE_OPERAND (expr, 1),
2040 nonconstant_names);
2041 if (true_predicate_p (&p2))
2042 return p2;
2043 p1 = or_predicates (summary->conds, &p1, &p2);
2044 p2 = will_be_nonconstant_expr_predicate (info, summary,
2045 TREE_OPERAND (expr, 2),
2046 nonconstant_names);
2047 return or_predicates (summary->conds, &p1, &p2);
2049 else
2051 debug_tree (expr);
2052 gcc_unreachable ();
2054 return false_predicate ();
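/* Added illustration (a sketch, not upstream code; the names are made up).
   For EXPR equal to parm_1 + x_5 the result is "value of parm_1 changed"
   OR'ed with the previously recorded nonconstant predicate of x_5; for an
   invariant such as 42 the result is the false predicate, i.e. always a
   compile time constant.  */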
2058 /* Return a predicate specifying when STMT might have a result that is not
2059 a compile time constant. */
2061 static struct predicate
2062 will_be_nonconstant_predicate (struct ipa_func_body_info *fbi,
2063 struct inline_summary *summary,
2064 gimple *stmt,
2065 vec<predicate_t> nonconstant_names)
2067 struct predicate p = true_predicate ();
2068 ssa_op_iter iter;
2069 tree use;
2070 struct predicate op_non_const;
2071 bool is_load;
2072 int base_index;
2073 HOST_WIDE_INT size;
2074 struct agg_position_info aggpos;
2076 /* Determine which statements might be optimized away
2077 when their arguments are constant. */
2078 if (gimple_code (stmt) != GIMPLE_ASSIGN
2079 && gimple_code (stmt) != GIMPLE_COND
2080 && gimple_code (stmt) != GIMPLE_SWITCH
2081 && (gimple_code (stmt) != GIMPLE_CALL
2082 || !(gimple_call_flags (stmt) & ECF_CONST)))
2083 return p;
2085 /* Stores will stay anyway. */
2086 if (gimple_store_p (stmt))
2087 return p;
2089 is_load = gimple_assign_load_p (stmt);
2091 /* Loads can be optimized when the value is known. */
2092 if (is_load)
2094 tree op;
2095 gcc_assert (gimple_assign_single_p (stmt));
2096 op = gimple_assign_rhs1 (stmt);
2097 if (!unmodified_parm_or_parm_agg_item (fbi, stmt, op, &base_index, &size,
2098 &aggpos))
2099 return p;
2101 else
2102 base_index = -1;
2104 /* See if we understand all operands before we start
2105 adding conditionals. */
2106 FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
2108 tree parm = unmodified_parm (stmt, use, NULL);
2109 /* For arguments we can build a condition. */
2110 if (parm && ipa_get_param_decl_index (fbi->info, parm) >= 0)
2111 continue;
2112 if (TREE_CODE (use) != SSA_NAME)
2113 return p;
2114 /* If we know when the operand is constant,
2115 we can still say something useful. */
2116 if (!true_predicate_p (&nonconstant_names[SSA_NAME_VERSION (use)]))
2117 continue;
2118 return p;
2121 if (is_load)
2122 op_non_const =
2123 add_condition (summary, base_index, size, &aggpos, CHANGED, NULL);
2124 else
2125 op_non_const = false_predicate ();
2126 FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
2128 HOST_WIDE_INT size;
2129 tree parm = unmodified_parm (stmt, use, &size);
2130 int index;
2132 if (parm && (index = ipa_get_param_decl_index (fbi->info, parm)) >= 0)
2134 if (index != base_index)
2135 p = add_condition (summary, index, size, NULL, CHANGED, NULL_TREE);
2136 else
2137 continue;
2139 else
2140 p = nonconstant_names[SSA_NAME_VERSION (use)];
2141 op_non_const = or_predicates (summary->conds, &p, &op_non_const);
2143 if ((gimple_code (stmt) == GIMPLE_ASSIGN || gimple_code (stmt) == GIMPLE_CALL)
2144 && gimple_op (stmt, 0)
2145 && TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
2146 nonconstant_names[SSA_NAME_VERSION (gimple_op (stmt, 0))]
2147 = op_non_const;
2148 return op_non_const;
2151 struct record_modified_bb_info
2153 bitmap bb_set;
2154 gimple *stmt;
2157 /* Callback of walk_aliased_vdefs. Record the basic blocks where the value
2158 may be set, except for info->stmt. */
2160 static bool
2161 record_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef, void *data)
2163 struct record_modified_bb_info *info =
2164 (struct record_modified_bb_info *) data;
2165 if (SSA_NAME_DEF_STMT (vdef) == info->stmt)
2166 return false;
2167 bitmap_set_bit (info->bb_set,
2168 SSA_NAME_IS_DEFAULT_DEF (vdef)
2169 ? ENTRY_BLOCK_PTR_FOR_FN (cfun)->index
2170 : gimple_bb (SSA_NAME_DEF_STMT (vdef))->index);
2171 return false;
2174 /* Return the probability (based on REG_BR_PROB_BASE) that the I-th parameter
2175 of STMT has changed since the last invocation of STMT.
2177 Value 0 is reserved for compile time invariants.
2178 For common parameters it is REG_BR_PROB_BASE. For loop invariants it
2179 ought to be REG_BR_PROB_BASE / estimated_iters. */
2181 static int
2182 param_change_prob (gimple *stmt, int i)
2184 tree op = gimple_call_arg (stmt, i);
2185 basic_block bb = gimple_bb (stmt);
2186 tree base;
2188 /* Global invariants never change. */
2189 if (is_gimple_min_invariant (op))
2190 return 0;
2191 /* We would have to do non-trivial analysis to really work out the
2192 probability of the value changing (i.e. when the init statement
2193 is in a sibling loop of the call).
2195 We make a conservative estimate: when the call is executed N times more often
2196 than the statement defining the value, we take the frequency 1/N. */
2197 if (TREE_CODE (op) == SSA_NAME)
2199 int init_freq;
2201 if (!bb->frequency)
2202 return REG_BR_PROB_BASE;
2204 if (SSA_NAME_IS_DEFAULT_DEF (op))
2205 init_freq = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
2206 else
2207 init_freq = gimple_bb (SSA_NAME_DEF_STMT (op))->frequency;
2209 if (!init_freq)
2210 init_freq = 1;
2211 if (init_freq < bb->frequency)
2212 return MAX (GCOV_COMPUTE_SCALE (init_freq, bb->frequency), 1);
2213 else
2214 return REG_BR_PROB_BASE;
2217 base = get_base_address (op);
2218 if (base)
2220 ao_ref refd;
2221 int max;
2222 struct record_modified_bb_info info;
2223 bitmap_iterator bi;
2224 unsigned index;
2225 tree init = ctor_for_folding (base);
2227 if (init != error_mark_node)
2228 return 0;
2229 if (!bb->frequency)
2230 return REG_BR_PROB_BASE;
2231 ao_ref_init (&refd, op);
2232 info.stmt = stmt;
2233 info.bb_set = BITMAP_ALLOC (NULL);
2234 walk_aliased_vdefs (&refd, gimple_vuse (stmt), record_modified, &info,
2235 NULL);
2236 if (bitmap_bit_p (info.bb_set, bb->index))
2238 BITMAP_FREE (info.bb_set);
2239 return REG_BR_PROB_BASE;
2242 /* Assume that all memory is initialized at entry.
2243 TODO: Can we easily determine whether the value is always defined
2244 and thus skip the entry block? */
2245 if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency)
2246 max = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
2247 else
2248 max = 1;
2250 EXECUTE_IF_SET_IN_BITMAP (info.bb_set, 0, index, bi)
2251 max = MIN (max, BASIC_BLOCK_FOR_FN (cfun, index)->frequency);
2253 BITMAP_FREE (info.bb_set);
2254 if (max < bb->frequency)
2255 return MAX (GCOV_COMPUTE_SCALE (max, bb->frequency), 1);
2256 else
2257 return REG_BR_PROB_BASE;
2259 return REG_BR_PROB_BASE;
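/* Added illustration (a sketch, not upstream code; exact rounding depends on
   GCOV_COMPUTE_SCALE).  If the argument is defined in a block with frequency
   100 and the call executes with frequency 1000 (e.g. the value is computed
   once before a loop that makes the call ten times per invocation), the
   returned probability is roughly REG_BR_PROB_BASE / 10.  */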
2262 /* Find whether a basic block BB is the final block of a (half) diamond CFG
2263 sub-graph and whether the predicate the condition depends on is known. If so,
2264 return true and store the predicate in *P. */
2266 static bool
2267 phi_result_unknown_predicate (struct ipa_node_params *info,
2268 inline_summary *summary, basic_block bb,
2269 struct predicate *p,
2270 vec<predicate_t> nonconstant_names)
2272 edge e;
2273 edge_iterator ei;
2274 basic_block first_bb = NULL;
2275 gimple *stmt;
2277 if (single_pred_p (bb))
2279 *p = false_predicate ();
2280 return true;
2283 FOR_EACH_EDGE (e, ei, bb->preds)
2285 if (single_succ_p (e->src))
2287 if (!single_pred_p (e->src))
2288 return false;
2289 if (!first_bb)
2290 first_bb = single_pred (e->src);
2291 else if (single_pred (e->src) != first_bb)
2292 return false;
2294 else
2296 if (!first_bb)
2297 first_bb = e->src;
2298 else if (e->src != first_bb)
2299 return false;
2303 if (!first_bb)
2304 return false;
2306 stmt = last_stmt (first_bb);
2307 if (!stmt
2308 || gimple_code (stmt) != GIMPLE_COND
2309 || !is_gimple_ip_invariant (gimple_cond_rhs (stmt)))
2310 return false;
2312 *p = will_be_nonconstant_expr_predicate (info, summary,
2313 gimple_cond_lhs (stmt),
2314 nonconstant_names);
2315 if (true_predicate_p (p))
2316 return false;
2317 else
2318 return true;
2321 /* Given a PHI statement in a function described by inline properties SUMMARY
2322 and *P being the predicate describing whether the selected PHI argument is
2323 known, store a predicate for the result of the PHI statement into
2324 NONCONSTANT_NAMES, if possible. */
2326 static void
2327 predicate_for_phi_result (struct inline_summary *summary, gphi *phi,
2328 struct predicate *p,
2329 vec<predicate_t> nonconstant_names)
2331 unsigned i;
2333 for (i = 0; i < gimple_phi_num_args (phi); i++)
2335 tree arg = gimple_phi_arg (phi, i)->def;
2336 if (!is_gimple_min_invariant (arg))
2338 gcc_assert (TREE_CODE (arg) == SSA_NAME);
2339 *p = or_predicates (summary->conds, p,
2340 &nonconstant_names[SSA_NAME_VERSION (arg)]);
2341 if (true_predicate_p (p))
2342 return;
2346 if (dump_file && (dump_flags & TDF_DETAILS))
2348 fprintf (dump_file, "\t\tphi predicate: ");
2349 dump_predicate (dump_file, summary->conds, p);
2351 nonconstant_names[SSA_NAME_VERSION (gimple_phi_result (phi))] = *p;
2354 /* Return a predicate specifying when an array index in access OP becomes non-constant. */
2356 static struct predicate
2357 array_index_predicate (inline_summary *info,
2358 vec< predicate_t> nonconstant_names, tree op)
2360 struct predicate p = false_predicate ();
2361 while (handled_component_p (op))
2363 if (TREE_CODE (op) == ARRAY_REF || TREE_CODE (op) == ARRAY_RANGE_REF)
2365 if (TREE_CODE (TREE_OPERAND (op, 1)) == SSA_NAME)
2366 p = or_predicates (info->conds, &p,
2367 &nonconstant_names[SSA_NAME_VERSION
2368 (TREE_OPERAND (op, 1))]);
2370 op = TREE_OPERAND (op, 0);
2372 return p;
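/* Added illustration (a sketch, not upstream code; the names are made up).
   For an access such as a[i_2][j_5] the returned predicate is
   nonconstant(i_2) OR nonconstant(j_5); constant indices contribute
   nothing, so the predicate stays false for fully constant accesses.  */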
2375 /* For a typical usage of __builtin_expect (a <= b, 1), we
2376 may introduce an extra relation stmt:
2377 With the builtin, we have
2378 t1 = a <= b;
2379 t2 = (long int) t1;
2380 t3 = __builtin_expect (t2, 1);
2381 if (t3 != 0)
2382 goto ...
2383 Without the builtin, we have
2384 if (a <= b)
2385 goto ...
2386 This affects the size/time estimation and may have
2387 an impact on the early inlining.
2388 Here we find this pattern and fix it up later. */
2390 static gimple *
2391 find_foldable_builtin_expect (basic_block bb)
2393 gimple_stmt_iterator bsi;
2395 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
2397 gimple *stmt = gsi_stmt (bsi);
2398 if (gimple_call_builtin_p (stmt, BUILT_IN_EXPECT)
2399 || (is_gimple_call (stmt)
2400 && gimple_call_internal_p (stmt)
2401 && gimple_call_internal_fn (stmt) == IFN_BUILTIN_EXPECT))
2403 tree var = gimple_call_lhs (stmt);
2404 tree arg = gimple_call_arg (stmt, 0);
2405 use_operand_p use_p;
2406 gimple *use_stmt;
2407 bool match = false;
2408 bool done = false;
2410 if (!var || !arg)
2411 continue;
2412 gcc_assert (TREE_CODE (var) == SSA_NAME);
2414 while (TREE_CODE (arg) == SSA_NAME)
2416 gimple *stmt_tmp = SSA_NAME_DEF_STMT (arg);
2417 if (!is_gimple_assign (stmt_tmp))
2418 break;
2419 switch (gimple_assign_rhs_code (stmt_tmp))
2421 case LT_EXPR:
2422 case LE_EXPR:
2423 case GT_EXPR:
2424 case GE_EXPR:
2425 case EQ_EXPR:
2426 case NE_EXPR:
2427 match = true;
2428 done = true;
2429 break;
2430 CASE_CONVERT:
2431 break;
2432 default:
2433 done = true;
2434 break;
2436 if (done)
2437 break;
2438 arg = gimple_assign_rhs1 (stmt_tmp);
2441 if (match && single_imm_use (var, &use_p, &use_stmt)
2442 && gimple_code (use_stmt) == GIMPLE_COND)
2443 return use_stmt;
2446 return NULL;
2449 /* Return true when the basic block contains only clobbers followed by RESX.
2450 Such BBs are kept around to make removal of dead stores possible in the
2451 presence of EH and will be optimized out by optimize_clobbers later in the
2452 game.
2454 NEED_EH is used to recurse in case the clobber has non-EH predecessors
2455 that can be clobber-only, too. When it is false, the RESX is not necessary
2456 at the end of the basic block. */
2458 static bool
2459 clobber_only_eh_bb_p (basic_block bb, bool need_eh = true)
2461 gimple_stmt_iterator gsi = gsi_last_bb (bb);
2462 edge_iterator ei;
2463 edge e;
2465 if (need_eh)
2467 if (gsi_end_p (gsi))
2468 return false;
2469 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_RESX)
2470 return false;
2471 gsi_prev (&gsi);
2473 else if (!single_succ_p (bb))
2474 return false;
2476 for (; !gsi_end_p (gsi); gsi_prev (&gsi))
2478 gimple *stmt = gsi_stmt (gsi);
2479 if (is_gimple_debug (stmt))
2480 continue;
2481 if (gimple_clobber_p (stmt))
2482 continue;
2483 if (gimple_code (stmt) == GIMPLE_LABEL)
2484 break;
2485 return false;
2488 /* See if all predecessors are either throws or clobber-only BBs. */
2489 FOR_EACH_EDGE (e, ei, bb->preds)
2490 if (!(e->flags & EDGE_EH)
2491 && !clobber_only_eh_bb_p (e->src, false))
2492 return false;
2494 return true;
2497 /* Return true if STMT computes a floating point expression that may be
2498 affected by -ffast-math and similar flags. */
2500 static bool
2501 fp_expression_p (gimple *stmt)
2503 ssa_op_iter i;
2504 tree op;
2506 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF|SSA_OP_USE)
2507 if (FLOAT_TYPE_P (TREE_TYPE (op)))
2508 return true;
2509 return false;
2512 /* Compute function body size parameters for NODE.
2513 When EARLY is true, we compute only simple summaries without
2514 non-trivial predicates to drive the early inliner. */
2516 static void
2517 estimate_function_body_sizes (struct cgraph_node *node, bool early)
2519 gcov_type time = 0;
2520 /* Estimate static overhead for function prologue/epilogue and alignment. */
2521 int size = 2;
2522 /* Benefits are scaled by probability of elimination that is in range
2523 <0,2>. */
2524 basic_block bb;
2525 struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
2526 int freq;
2527 struct inline_summary *info = inline_summaries->get (node);
2528 struct predicate bb_predicate;
2529 struct ipa_func_body_info fbi;
2530 vec<predicate_t> nonconstant_names = vNULL;
2531 int nblocks, n;
2532 int *order;
2533 predicate array_index = true_predicate ();
2534 gimple *fix_builtin_expect_stmt;
2536 gcc_assert (my_function && my_function->cfg);
2537 gcc_assert (cfun == my_function);
2539 memset(&fbi, 0, sizeof(fbi));
2540 info->conds = NULL;
2541 info->entry = NULL;
2543 /* When optimizing and analyzing for IPA inliner, initialize loop optimizer
2544 so we can produce proper inline hints.
2546 When optimizing and analyzing for early inliner, initialize node params
2547 so we can produce correct BB predicates. */
2549 if (opt_for_fn (node->decl, optimize))
2551 calculate_dominance_info (CDI_DOMINATORS);
2552 if (!early)
2553 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
2554 else
2556 ipa_check_create_node_params ();
2557 ipa_initialize_node_params (node);
2560 if (ipa_node_params_sum)
2562 fbi.node = node;
2563 fbi.info = IPA_NODE_REF (node);
2564 fbi.bb_infos = vNULL;
2565 fbi.bb_infos.safe_grow_cleared (last_basic_block_for_fn (cfun));
2566 fbi.param_count = count_formal_params(node->decl);
2567 nonconstant_names.safe_grow_cleared
2568 (SSANAMES (my_function)->length ());
2572 if (dump_file)
2573 fprintf (dump_file, "\nAnalyzing function body size: %s\n",
2574 node->name ());
2576 /* When we run into the maximal number of entries, we assign everything to the
2577 constant truth case. Be sure to have it in the list. */
2578 bb_predicate = true_predicate ();
2579 account_size_time (info, 0, 0, &bb_predicate);
2581 bb_predicate = not_inlined_predicate ();
2582 account_size_time (info, 2 * INLINE_SIZE_SCALE, 0, &bb_predicate);
2584 if (fbi.info)
2585 compute_bb_predicates (&fbi, node, info);
2586 order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
2587 nblocks = pre_and_rev_post_order_compute (NULL, order, false);
2588 for (n = 0; n < nblocks; n++)
2590 bb = BASIC_BLOCK_FOR_FN (cfun, order[n]);
2591 freq = compute_call_stmt_bb_frequency (node->decl, bb);
2592 if (clobber_only_eh_bb_p (bb))
2594 if (dump_file && (dump_flags & TDF_DETAILS))
2595 fprintf (dump_file, "\n Ignoring BB %i;"
2596 " it will be optimized away by cleanup_clobbers\n",
2597 bb->index);
2598 continue;
2601 /* TODO: Obviously predicates can be propagated down across CFG. */
2602 if (fbi.info)
2604 if (bb->aux)
2605 bb_predicate = *(struct predicate *) bb->aux;
2606 else
2607 bb_predicate = false_predicate ();
2609 else
2610 bb_predicate = true_predicate ();
2612 if (dump_file && (dump_flags & TDF_DETAILS))
2614 fprintf (dump_file, "\n BB %i predicate:", bb->index);
2615 dump_predicate (dump_file, info->conds, &bb_predicate);
2618 if (fbi.info && nonconstant_names.exists ())
2620 struct predicate phi_predicate;
2621 bool first_phi = true;
2623 for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi);
2624 gsi_next (&bsi))
2626 if (first_phi
2627 && !phi_result_unknown_predicate (fbi.info, info, bb,
2628 &phi_predicate,
2629 nonconstant_names))
2630 break;
2631 first_phi = false;
2632 if (dump_file && (dump_flags & TDF_DETAILS))
2634 fprintf (dump_file, " ");
2635 print_gimple_stmt (dump_file, gsi_stmt (bsi), 0, 0);
2637 predicate_for_phi_result (info, bsi.phi (), &phi_predicate,
2638 nonconstant_names);
2642 fix_builtin_expect_stmt = find_foldable_builtin_expect (bb);
2644 for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi);
2645 gsi_next (&bsi))
2647 gimple *stmt = gsi_stmt (bsi);
2648 int this_size = estimate_num_insns (stmt, &eni_size_weights);
2649 int this_time = estimate_num_insns (stmt, &eni_time_weights);
2650 int prob;
2651 struct predicate will_be_nonconstant;
2653 /* This relation stmt should be folded after we remove
2654 the builtin_expect call. Adjust the cost here. */
2655 if (stmt == fix_builtin_expect_stmt)
2657 this_size--;
2658 this_time--;
2661 if (dump_file && (dump_flags & TDF_DETAILS))
2663 fprintf (dump_file, " ");
2664 print_gimple_stmt (dump_file, stmt, 0, 0);
2665 fprintf (dump_file, "\t\tfreq:%3.2f size:%3i time:%3i\n",
2666 ((double) freq) / CGRAPH_FREQ_BASE, this_size,
2667 this_time);
2670 if (gimple_assign_load_p (stmt) && nonconstant_names.exists ())
2672 struct predicate this_array_index;
2673 this_array_index =
2674 array_index_predicate (info, nonconstant_names,
2675 gimple_assign_rhs1 (stmt));
2676 if (!false_predicate_p (&this_array_index))
2677 array_index =
2678 and_predicates (info->conds, &array_index,
2679 &this_array_index);
2681 if (gimple_store_p (stmt) && nonconstant_names.exists ())
2683 struct predicate this_array_index;
2684 this_array_index =
2685 array_index_predicate (info, nonconstant_names,
2686 gimple_get_lhs (stmt));
2687 if (!false_predicate_p (&this_array_index))
2688 array_index =
2689 and_predicates (info->conds, &array_index,
2690 &this_array_index);
2694 if (is_gimple_call (stmt)
2695 && !gimple_call_internal_p (stmt))
2697 struct cgraph_edge *edge = node->get_edge (stmt);
2698 struct inline_edge_summary *es = inline_edge_summary (edge);
2700 /* Special case: results of BUILT_IN_CONSTANT_P will always be
2701 resolved as constant. We, however, don't want to optimize
2702 out the cgraph edges. */
2703 if (nonconstant_names.exists ()
2704 && gimple_call_builtin_p (stmt, BUILT_IN_CONSTANT_P)
2705 && gimple_call_lhs (stmt)
2706 && TREE_CODE (gimple_call_lhs (stmt)) == SSA_NAME)
2708 struct predicate false_p = false_predicate ();
2709 nonconstant_names[SSA_NAME_VERSION (gimple_call_lhs (stmt))]
2710 = false_p;
2712 if (ipa_node_params_sum)
2714 int count = gimple_call_num_args (stmt);
2715 int i;
2717 if (count)
2718 es->param.safe_grow_cleared (count);
2719 for (i = 0; i < count; i++)
2721 int prob = param_change_prob (stmt, i);
2722 gcc_assert (prob >= 0 && prob <= REG_BR_PROB_BASE);
2723 es->param[i].change_prob = prob;
2727 es->call_stmt_size = this_size;
2728 es->call_stmt_time = this_time;
2729 es->loop_depth = bb_loop_depth (bb);
2730 edge_set_predicate (edge, &bb_predicate);
2733 /* TODO: When a conditional jump or switch is known to be constant, but
2734 we did not translate it into the predicates, we really could account
2735 for just the maximum of the possible paths. */
2736 if (fbi.info)
2737 will_be_nonconstant
2738 = will_be_nonconstant_predicate (&fbi, info,
2739 stmt, nonconstant_names);
2740 if (this_time || this_size)
2742 struct predicate p;
2744 this_time *= freq;
2746 prob = eliminated_by_inlining_prob (stmt);
2747 if (prob == 1 && dump_file && (dump_flags & TDF_DETAILS))
2748 fprintf (dump_file,
2749 "\t\t50%% will be eliminated by inlining\n");
2750 if (prob == 2 && dump_file && (dump_flags & TDF_DETAILS))
2751 fprintf (dump_file, "\t\tWill be eliminated by inlining\n");
2753 if (fbi.info)
2754 p = and_predicates (info->conds, &bb_predicate,
2755 &will_be_nonconstant);
2756 else
2757 p = true_predicate ();
2759 if (!false_predicate_p (&p)
2760 || (is_gimple_call (stmt)
2761 && !false_predicate_p (&bb_predicate)))
2763 time += this_time;
2764 size += this_size;
2765 if (time > MAX_TIME * INLINE_TIME_SCALE)
2766 time = MAX_TIME * INLINE_TIME_SCALE;
2769 /* We account for everything but the calls. Calls have their own
2770 size/time info attached to cgraph edges. This is necessary
2771 in order to make the cost disappear after inlining. */
2772 if (!is_gimple_call (stmt))
2774 if (prob)
2776 struct predicate ip = not_inlined_predicate ();
2777 ip = and_predicates (info->conds, &ip, &p);
2778 account_size_time (info, this_size * prob,
2779 this_time * prob, &ip);
2781 if (prob != 2)
2782 account_size_time (info, this_size * (2 - prob),
2783 this_time * (2 - prob), &p);
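/* Added illustration (a sketch, not upstream code).  The <0,2> scale above
   means: with prob == 1 half of the statement's size/time is accounted
   under the "function is not inlined" predicate and half unconditionally;
   with prob == 2 all of it is guarded by "not inlined" and therefore
   disappears from the estimate once the function is inlined.  */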
2786 if (!info->fp_expressions && fp_expression_p (stmt))
2788 info->fp_expressions = true;
2789 if (dump_file)
2790 fprintf (dump_file, " fp_expression set\n");
2793 gcc_assert (time >= 0);
2794 gcc_assert (size >= 0);
2798 set_hint_predicate (&inline_summaries->get (node)->array_index, array_index);
2799 time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
2800 if (time > MAX_TIME)
2801 time = MAX_TIME;
2802 free (order);
2804 if (nonconstant_names.exists () && !early)
2806 struct loop *loop;
2807 predicate loop_iterations = true_predicate ();
2808 predicate loop_stride = true_predicate ();
2810 if (dump_file && (dump_flags & TDF_DETAILS))
2811 flow_loops_dump (dump_file, NULL, 0);
2812 scev_initialize ();
2813 FOR_EACH_LOOP (loop, 0)
2815 vec<edge> exits;
2816 edge ex;
2817 unsigned int j;
2818 struct tree_niter_desc niter_desc;
2819 bb_predicate = *(struct predicate *) loop->header->aux;
2821 exits = get_loop_exit_edges (loop);
2822 FOR_EACH_VEC_ELT (exits, j, ex)
2823 if (number_of_iterations_exit (loop, ex, &niter_desc, false)
2824 && !is_gimple_min_invariant (niter_desc.niter))
2826 predicate will_be_nonconstant
2827 = will_be_nonconstant_expr_predicate (fbi.info, info,
2828 niter_desc.niter,
2829 nonconstant_names);
2830 if (!true_predicate_p (&will_be_nonconstant))
2831 will_be_nonconstant = and_predicates (info->conds,
2832 &bb_predicate,
2833 &will_be_nonconstant);
2834 if (!true_predicate_p (&will_be_nonconstant)
2835 && !false_predicate_p (&will_be_nonconstant))
2836 /* This is slightly imprecise. We may want to represent each
2837 loop with an independent predicate. */
2838 loop_iterations =
2839 and_predicates (info->conds, &loop_iterations,
2840 &will_be_nonconstant);
2842 exits.release ();
2845 /* To avoid quadratic behavior we analyze stride predicates only
2846 with respect to the containing loop. Thus we simply iterate
2847 over all defs in the outermost loop body. */
2848 for (loop = loops_for_fn (cfun)->tree_root->inner;
2849 loop != NULL; loop = loop->next)
2851 basic_block *body = get_loop_body (loop);
2852 for (unsigned i = 0; i < loop->num_nodes; i++)
2854 gimple_stmt_iterator gsi;
2855 bb_predicate = *(struct predicate *) body[i]->aux;
2856 for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi);
2857 gsi_next (&gsi))
2859 gimple *stmt = gsi_stmt (gsi);
2861 if (!is_gimple_assign (stmt))
2862 continue;
2864 tree def = gimple_assign_lhs (stmt);
2865 if (TREE_CODE (def) != SSA_NAME)
2866 continue;
2868 affine_iv iv;
2869 if (!simple_iv (loop_containing_stmt (stmt),
2870 loop_containing_stmt (stmt),
2871 def, &iv, true)
2872 || is_gimple_min_invariant (iv.step))
2873 continue;
2875 predicate will_be_nonconstant
2876 = will_be_nonconstant_expr_predicate (fbi.info, info,
2877 iv.step,
2878 nonconstant_names);
2879 if (!true_predicate_p (&will_be_nonconstant))
2880 will_be_nonconstant
2881 = and_predicates (info->conds, &bb_predicate,
2882 &will_be_nonconstant);
2883 if (!true_predicate_p (&will_be_nonconstant)
2884 && !false_predicate_p (&will_be_nonconstant))
2885 /* This is slightly imprecise. We may want to represent
2886 each loop with an independent predicate. */
2887 loop_stride = and_predicates (info->conds, &loop_stride,
2888 &will_be_nonconstant);
2891 free (body);
2893 set_hint_predicate (&inline_summaries->get (node)->loop_iterations,
2894 loop_iterations);
2895 set_hint_predicate (&inline_summaries->get (node)->loop_stride,
2896 loop_stride);
2897 scev_finalize ();
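/* Added illustration (a sketch, not upstream code; the names are made up).
   For a loop such as "for (i = 0; i < parm_1; i++)" the niter expression
   depends on parm_1, so loop_iterations records "parm_1 not known"; when
   IP-CP or inlining later makes parm_1 a constant, the predicate evaluates
   to false and estimate_node_size_and_time emits the
   INLINE_HINT_loop_iterations hint.  */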
2899 FOR_ALL_BB_FN (bb, my_function)
2901 edge e;
2902 edge_iterator ei;
2904 if (bb->aux)
2905 edge_predicate_pool.remove ((predicate *)bb->aux);
2906 bb->aux = NULL;
2907 FOR_EACH_EDGE (e, ei, bb->succs)
2909 if (e->aux)
2910 edge_predicate_pool.remove ((predicate *) e->aux);
2911 e->aux = NULL;
2914 inline_summaries->get (node)->self_time = time;
2915 inline_summaries->get (node)->self_size = size;
2916 nonconstant_names.release ();
2917 ipa_release_body_info (&fbi);
2918 if (opt_for_fn (node->decl, optimize))
2920 if (!early)
2921 loop_optimizer_finalize ();
2922 else if (!ipa_edge_args_vector)
2923 ipa_free_all_node_params ();
2924 free_dominance_info (CDI_DOMINATORS);
2926 if (dump_file)
2928 fprintf (dump_file, "\n");
2929 dump_inline_summary (dump_file, node);
2934 /* Compute parameters of functions used by the inliner.
2935 EARLY is true when we compute parameters for the early inliner. */
2937 void
2938 compute_inline_parameters (struct cgraph_node *node, bool early)
2940 HOST_WIDE_INT self_stack_size;
2941 struct cgraph_edge *e;
2942 struct inline_summary *info;
2944 gcc_assert (!node->global.inlined_to);
2946 inline_summary_alloc ();
2948 info = inline_summaries->get (node);
2949 reset_inline_summary (node, info);
2951 /* Estimate the stack size for the function if we're optimizing. */
2952 self_stack_size = optimize && !node->thunk.thunk_p
2953 ? estimated_stack_frame_size (node) : 0;
2954 info->estimated_self_stack_size = self_stack_size;
2955 info->estimated_stack_size = self_stack_size;
2956 info->stack_frame_offset = 0;
2958 if (node->thunk.thunk_p)
2960 struct inline_edge_summary *es = inline_edge_summary (node->callees);
2961 struct predicate t = true_predicate ();
2963 node->local.can_change_signature = false;
2964 es->call_stmt_size = eni_size_weights.call_cost;
2965 es->call_stmt_time = eni_time_weights.call_cost;
2966 account_size_time (info, INLINE_SIZE_SCALE * 2,
2967 INLINE_TIME_SCALE * 2, &t);
2968 t = not_inlined_predicate ();
2969 account_size_time (info, 2 * INLINE_SIZE_SCALE, 0, &t);
2970 inline_update_overall_summary (node);
2971 info->self_size = info->size;
2972 info->self_time = info->time;
2973 /* We cannot inline instrumentation clones. */
2974 if (node->thunk.add_pointer_bounds_args)
2976 info->inlinable = false;
2977 node->callees->inline_failed = CIF_CHKP;
2979 else
2980 info->inlinable = true;
2982 else
2984 /* Even is_gimple_min_invariant relies on current_function_decl. */
2985 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
2987 /* Can this function be inlined at all? */
2988 if (!opt_for_fn (node->decl, optimize)
2989 && !lookup_attribute ("always_inline",
2990 DECL_ATTRIBUTES (node->decl)))
2991 info->inlinable = false;
2992 else
2993 info->inlinable = tree_inlinable_function_p (node->decl);
2995 info->contains_cilk_spawn = fn_contains_cilk_spawn_p (cfun);
2997 /* Type attributes can use parameter indices to describe them. */
2998 if (TYPE_ATTRIBUTES (TREE_TYPE (node->decl)))
2999 node->local.can_change_signature = false;
3000 else
3002 /* Otherwise, inlinable functions can always change their signature. */
3003 if (info->inlinable)
3004 node->local.can_change_signature = true;
3005 else
3007 /* Functions calling builtin_apply cannot change their signature. */
3008 for (e = node->callees; e; e = e->next_callee)
3010 tree cdecl = e->callee->decl;
3011 if (DECL_BUILT_IN (cdecl)
3012 && DECL_BUILT_IN_CLASS (cdecl) == BUILT_IN_NORMAL
3013 && (DECL_FUNCTION_CODE (cdecl) == BUILT_IN_APPLY_ARGS
3014 || DECL_FUNCTION_CODE (cdecl) == BUILT_IN_VA_START))
3015 break;
3017 node->local.can_change_signature = !e;
3020 estimate_function_body_sizes (node, early);
3021 pop_cfun ();
3023 for (e = node->callees; e; e = e->next_callee)
3024 if (e->callee->comdat_local_p ())
3025 break;
3026 node->calls_comdat_local = (e != NULL);
3028 /* Inlining characteristics are maintained by cgraph_mark_inline. */
3029 info->time = info->self_time;
3030 info->size = info->self_size;
3031 info->stack_frame_offset = 0;
3032 info->estimated_stack_size = info->estimated_self_stack_size;
3033 if (flag_checking)
3035 inline_update_overall_summary (node);
3036 gcc_assert (info->time == info->self_time
3037 && info->size == info->self_size);
3042 /* Compute parameters of functions used by inliner using
3043 current_function_decl. */
3045 static unsigned int
3046 compute_inline_parameters_for_current (void)
3048 compute_inline_parameters (cgraph_node::get (current_function_decl), true);
3049 return 0;
3052 namespace {
3054 const pass_data pass_data_inline_parameters =
3056 GIMPLE_PASS, /* type */
3057 "inline_param", /* name */
3058 OPTGROUP_INLINE, /* optinfo_flags */
3059 TV_INLINE_PARAMETERS, /* tv_id */
3060 0, /* properties_required */
3061 0, /* properties_provided */
3062 0, /* properties_destroyed */
3063 0, /* todo_flags_start */
3064 0, /* todo_flags_finish */
3067 class pass_inline_parameters : public gimple_opt_pass
3069 public:
3070 pass_inline_parameters (gcc::context *ctxt)
3071 : gimple_opt_pass (pass_data_inline_parameters, ctxt)
3074 /* opt_pass methods: */
3075 opt_pass * clone () { return new pass_inline_parameters (m_ctxt); }
3076 virtual unsigned int execute (function *)
3078 return compute_inline_parameters_for_current ();
3081 }; // class pass_inline_parameters
3083 } // anon namespace
3085 gimple_opt_pass *
3086 make_pass_inline_parameters (gcc::context *ctxt)
3088 return new pass_inline_parameters (ctxt);
3092 /* Estimate the benefit of devirtualizing indirect edge IE, given KNOWN_VALS,
3093 KNOWN_CONTEXTS and KNOWN_AGGS. */
3095 static bool
3096 estimate_edge_devirt_benefit (struct cgraph_edge *ie,
3097 int *size, int *time,
3098 vec<tree> known_vals,
3099 vec<ipa_polymorphic_call_context> known_contexts,
3100 vec<ipa_agg_jump_function_p> known_aggs)
3102 tree target;
3103 struct cgraph_node *callee;
3104 struct inline_summary *isummary;
3105 enum availability avail;
3106 bool speculative;
3108 if (!known_vals.exists () && !known_contexts.exists ())
3109 return false;
3110 if (!opt_for_fn (ie->caller->decl, flag_indirect_inlining))
3111 return false;
3113 target = ipa_get_indirect_edge_target (ie, known_vals, known_contexts,
3114 known_aggs, &speculative);
3115 if (!target || speculative)
3116 return false;
3118 /* Account for difference in cost between indirect and direct calls. */
3119 *size -= (eni_size_weights.indirect_call_cost - eni_size_weights.call_cost);
3120 *time -= (eni_time_weights.indirect_call_cost - eni_time_weights.call_cost);
3121 gcc_checking_assert (*time >= 0);
3122 gcc_checking_assert (*size >= 0);
3124 callee = cgraph_node::get (target);
3125 if (!callee || !callee->definition)
3126 return false;
3127 callee = callee->function_symbol (&avail);
3128 if (avail < AVAIL_AVAILABLE)
3129 return false;
3130 isummary = inline_summaries->get (callee);
3131 return isummary->inlinable;
3134 /* Increase SIZE, MIN_SIZE (if non-NULL) and TIME for size and time needed to
3135 handle edge E with probability PROB.
3136 Set HINTS if edge may be devirtualized.
3137 KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS describe context of the call
3138 site. */
3140 static inline void
3141 estimate_edge_size_and_time (struct cgraph_edge *e, int *size, int *min_size,
3142 int *time,
3143 int prob,
3144 vec<tree> known_vals,
3145 vec<ipa_polymorphic_call_context> known_contexts,
3146 vec<ipa_agg_jump_function_p> known_aggs,
3147 inline_hints *hints)
3149 struct inline_edge_summary *es = inline_edge_summary (e);
3150 int call_size = es->call_stmt_size;
3151 int call_time = es->call_stmt_time;
3152 int cur_size;
3153 if (!e->callee
3154 && estimate_edge_devirt_benefit (e, &call_size, &call_time,
3155 known_vals, known_contexts, known_aggs)
3156 && hints && e->maybe_hot_p ())
3157 *hints |= INLINE_HINT_indirect_call;
3158 cur_size = call_size * INLINE_SIZE_SCALE;
3159 *size += cur_size;
3160 if (min_size)
3161 *min_size += cur_size;
3162 *time += apply_probability ((gcov_type) call_time, prob)
3163 * e->frequency * (INLINE_TIME_SCALE / CGRAPH_FREQ_BASE);
3164 if (*time > MAX_TIME * INLINE_TIME_SCALE)
3165 *time = MAX_TIME * INLINE_TIME_SCALE;
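/* Added illustration (a sketch, not upstream code).  An indirect call whose
   target becomes known in this context is charged the cheaper direct-call
   size/time (estimate_edge_devirt_benefit subtracts the indirect-call
   overhead above) and, when the edge may be hot, contributes the
   INLINE_HINT_indirect_call hint.  */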
3170 /* Increase SIZE, MIN_SIZE and TIME for size and time needed to handle all
3171 calls in NODE. POSSIBLE_TRUTHS, KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
3172 describe context of the call site. */
3174 static void
3175 estimate_calls_size_and_time (struct cgraph_node *node, int *size,
3176 int *min_size, int *time,
3177 inline_hints *hints,
3178 clause_t possible_truths,
3179 vec<tree> known_vals,
3180 vec<ipa_polymorphic_call_context> known_contexts,
3181 vec<ipa_agg_jump_function_p> known_aggs)
3183 struct cgraph_edge *e;
3184 for (e = node->callees; e; e = e->next_callee)
3186 if (inline_edge_summary_vec.length () <= (unsigned) e->uid)
3187 continue;
3189 struct inline_edge_summary *es = inline_edge_summary (e);
3191 /* Do not care about zero sized builtins. */
3192 if (e->inline_failed && !es->call_stmt_size)
3194 gcc_checking_assert (!es->call_stmt_time);
3195 continue;
3197 if (!es->predicate
3198 || evaluate_predicate (es->predicate, possible_truths))
3200 if (e->inline_failed)
3202 /* Predicates of calls shall not use NOT_CHANGED codes,
3203 so we do not need to compute probabilities. */
3204 estimate_edge_size_and_time (e, size,
3205 es->predicate ? NULL : min_size,
3206 time, REG_BR_PROB_BASE,
3207 known_vals, known_contexts,
3208 known_aggs, hints);
3210 else
3211 estimate_calls_size_and_time (e->callee, size, min_size, time,
3212 hints,
3213 possible_truths,
3214 known_vals, known_contexts,
3215 known_aggs);
3218 for (e = node->indirect_calls; e; e = e->next_callee)
3220 if (inline_edge_summary_vec.length () <= (unsigned) e->uid)
3221 continue;
3223 struct inline_edge_summary *es = inline_edge_summary (e);
3224 if (!es->predicate
3225 || evaluate_predicate (es->predicate, possible_truths))
3226 estimate_edge_size_and_time (e, size,
3227 es->predicate ? NULL : min_size,
3228 time, REG_BR_PROB_BASE,
3229 known_vals, known_contexts, known_aggs,
3230 hints);
3235 /* Estimate the size and time needed to execute NODE, assuming the
3236 POSSIBLE_TRUTHS clause and the KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
3237 information about NODE's arguments. If non-NULL, also use the probability
3238 information present in the INLINE_PARAM_SUMMARY vector.
3239 Additionally determine the hints implied by the context. Finally compute
3240 the minimal size needed for the call that is independent of the call context
3241 and can be used for fast estimates. Return the values in RET_SIZE,
3242 RET_MIN_SIZE, RET_TIME and RET_HINTS. */
3244 static void
3245 estimate_node_size_and_time (struct cgraph_node *node,
3246 clause_t possible_truths,
3247 vec<tree> known_vals,
3248 vec<ipa_polymorphic_call_context> known_contexts,
3249 vec<ipa_agg_jump_function_p> known_aggs,
3250 int *ret_size, int *ret_min_size, int *ret_time,
3251 inline_hints *ret_hints,
3252 vec<inline_param_summary>
3253 inline_param_summary)
3255 struct inline_summary *info = inline_summaries->get (node);
3256 size_time_entry *e;
3257 int size = 0;
3258 int time = 0;
3259 int min_size = 0;
3260 inline_hints hints = 0;
3261 int i;
3263 if (dump_file && (dump_flags & TDF_DETAILS))
3265 bool found = false;
3266 fprintf (dump_file, " Estimating body: %s/%i\n"
3267 " Known to be false: ", node->name (),
3268 node->order);
3270 for (i = predicate_not_inlined_condition;
3271 i < (predicate_first_dynamic_condition
3272 + (int) vec_safe_length (info->conds)); i++)
3273 if (!(possible_truths & (1 << i)))
3275 if (found)
3276 fprintf (dump_file, ", ");
3277 found = true;
3278 dump_condition (dump_file, info->conds, i);
3282 for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
3283 if (evaluate_predicate (&e->predicate, possible_truths))
3285 size += e->size;
3286 gcc_checking_assert (e->time >= 0);
3287 gcc_checking_assert (time >= 0);
3288 if (!inline_param_summary.exists ())
3289 time += e->time;
3290 else
3292 int prob = predicate_probability (info->conds,
3293 &e->predicate,
3294 possible_truths,
3295 inline_param_summary);
3296 gcc_checking_assert (prob >= 0);
3297 gcc_checking_assert (prob <= REG_BR_PROB_BASE);
3298 time += apply_probability ((gcov_type) e->time, prob);
3300 if (time > MAX_TIME * INLINE_TIME_SCALE)
3301 time = MAX_TIME * INLINE_TIME_SCALE;
3302 gcc_checking_assert (time >= 0);
3305 gcc_checking_assert (true_predicate_p (&(*info->entry)[0].predicate));
3306 min_size = (*info->entry)[0].size;
3307 gcc_checking_assert (size >= 0);
3308 gcc_checking_assert (time >= 0);
3310 if (info->loop_iterations
3311 && !evaluate_predicate (info->loop_iterations, possible_truths))
3312 hints |= INLINE_HINT_loop_iterations;
3313 if (info->loop_stride
3314 && !evaluate_predicate (info->loop_stride, possible_truths))
3315 hints |= INLINE_HINT_loop_stride;
3316 if (info->array_index
3317 && !evaluate_predicate (info->array_index, possible_truths))
3318 hints |= INLINE_HINT_array_index;
3319 if (info->scc_no)
3320 hints |= INLINE_HINT_in_scc;
3321 if (DECL_DECLARED_INLINE_P (node->decl))
3322 hints |= INLINE_HINT_declared_inline;
3324 estimate_calls_size_and_time (node, &size, &min_size, &time, &hints, possible_truths,
3325 known_vals, known_contexts, known_aggs);
3326 gcc_checking_assert (size >= 0);
3327 gcc_checking_assert (time >= 0);
3328 time = RDIV (time, INLINE_TIME_SCALE);
3329 size = RDIV (size, INLINE_SIZE_SCALE);
3330 min_size = RDIV (min_size, INLINE_SIZE_SCALE);
3332 if (dump_file && (dump_flags & TDF_DETAILS))
3333 fprintf (dump_file, "\n size:%i time:%i\n", (int) size, (int) time);
3334 if (ret_time)
3335 *ret_time = time;
3336 if (ret_size)
3337 *ret_size = size;
3338 if (ret_min_size)
3339 *ret_min_size = min_size;
3340 if (ret_hints)
3341 *ret_hints = hints;
3342 return;
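/* Usage note added for illustration (not upstream text): callers such as
   do_estimate_edge_time below pass the edge's inline_param_summary vector,
   so each size_time_entry's time is weighted by the probability that the
   parameters its predicate depends on actually changed; min_size is simply
   the size of the always-true first entry plus the unconditional calls.  */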
3346 /* Estimate size and time needed to execute callee of EDGE assuming that
3347 parameters known to be constant at caller of EDGE are propagated.
3348 KNOWN_VALS and KNOWN_CONTEXTS are vectors of assumed known constant values
3349 and types for parameters. */
3351 void
3352 estimate_ipcp_clone_size_and_time (struct cgraph_node *node,
3353 vec<tree> known_vals,
3354 vec<ipa_polymorphic_call_context>
3355 known_contexts,
3356 vec<ipa_agg_jump_function_p> known_aggs,
3357 int *ret_size, int *ret_time,
3358 inline_hints *hints)
3360 clause_t clause;
3362 clause = evaluate_conditions_for_known_args (node, false, known_vals,
3363 known_aggs);
3364 estimate_node_size_and_time (node, clause, known_vals, known_contexts,
3365 known_aggs, ret_size, NULL, ret_time, hints, vNULL);
3368 /* Translate all conditions from callee representation into caller
3369 representation and symbolically evaluate predicate P into a new predicate.
3371 INFO is the inline_summary of the function we are adding the predicate to;
3372 CALLEE_INFO is the summary of the function predicate P comes from.
3373 OPERAND_MAP is an array mapping callee formal IDs to caller formal IDs.
3374 POSSIBLE_TRUTHS is a clause of all callee conditions that may be true in the
3375 caller context. TOPLEV_PREDICATE is the predicate under which the callee
3376 is executed. OFFSET_MAP is an array of offsets that need to be added to
3377 the conditions; a negative offset means that conditions relying on values
3378 passed by reference have to be discarded because they might not be preserved
3379 (and should be considered offset zero for other purposes). */
3381 static struct predicate
3382 remap_predicate (struct inline_summary *info,
3383 struct inline_summary *callee_info,
3384 struct predicate *p,
3385 vec<int> operand_map,
3386 vec<int> offset_map,
3387 clause_t possible_truths, struct predicate *toplev_predicate)
3389 int i;
3390 struct predicate out = true_predicate ();
3392 /* True predicate is easy. */
3393 if (true_predicate_p (p))
3394 return *toplev_predicate;
3395 for (i = 0; p->clause[i]; i++)
3397 clause_t clause = p->clause[i];
3398 int cond;
3399 struct predicate clause_predicate = false_predicate ();
3401 gcc_assert (i < MAX_CLAUSES);
3403 for (cond = 0; cond < NUM_CONDITIONS; cond++)
3404 /* Do we have a condition we can't disprove? */
3405 if (clause & possible_truths & (1 << cond))
3407 struct predicate cond_predicate;
3408 /* Work out if the condition can translate to predicate in the
3409 inlined function. */
3410 if (cond >= predicate_first_dynamic_condition)
3412 struct condition *c;
3414 c = &(*callee_info->conds)[cond
3416 - predicate_first_dynamic_condition];
3417 /* See if we can remap the condition operand to the caller's operand.
3418 Otherwise give up. */
3419 if (!operand_map.exists ()
3420 || (int) operand_map.length () <= c->operand_num
3421 || operand_map[c->operand_num] == -1
3422 /* TODO: For non-aggregate conditions, adding an offset is
3423 basically an arithmetic jump function processing which
3424 we should support in future. */
3425 || ((!c->agg_contents || !c->by_ref)
3426 && offset_map[c->operand_num] > 0)
3427 || (c->agg_contents && c->by_ref
3428 && offset_map[c->operand_num] < 0))
3429 cond_predicate = true_predicate ();
3430 else
3432 struct agg_position_info ap;
3433 HOST_WIDE_INT offset_delta = offset_map[c->operand_num];
3434 if (offset_delta < 0)
3436 gcc_checking_assert (!c->agg_contents || !c->by_ref);
3437 offset_delta = 0;
3439 gcc_assert (!c->agg_contents
3440 || c->by_ref || offset_delta == 0);
3441 ap.offset = c->offset + offset_delta;
3442 ap.agg_contents = c->agg_contents;
3443 ap.by_ref = c->by_ref;
3444 cond_predicate = add_condition (info,
3445 operand_map[c->operand_num],
3446 c->size, &ap, c->code,
3447 c->val);
3450 /* Fixed conditions remain the same; construct a single
3451 condition predicate. */
3452 else
3454 cond_predicate.clause[0] = 1 << cond;
3455 cond_predicate.clause[1] = 0;
3457 clause_predicate = or_predicates (info->conds, &clause_predicate,
3458 &cond_predicate);
3460 out = and_predicates (info->conds, &out, &clause_predicate);
3462 return and_predicates (info->conds, &out, toplev_predicate);
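/* Added illustration (a sketch, not upstream code).  If the callee's
   condition tests its formal #1 and the caller passes its own formal #3
   straight through (a NOP pass-through jump function), then
   operand_map[1] == 3 and the condition is rebuilt via add_condition on
   the caller's formal #3; when no mapping exists the condition
   conservatively becomes true.  */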
3466 /* Update summary information of inline clones after inlining.
3467 Compute peak stack usage. */
3469 static void
3470 inline_update_callee_summaries (struct cgraph_node *node, int depth)
3472 struct cgraph_edge *e;
3473 struct inline_summary *callee_info = inline_summaries->get (node);
3474 struct inline_summary *caller_info = inline_summaries->get (node->callers->caller);
3475 HOST_WIDE_INT peak;
3477 callee_info->stack_frame_offset
3478 = caller_info->stack_frame_offset
3479 + caller_info->estimated_self_stack_size;
3480 peak = callee_info->stack_frame_offset
3481 + callee_info->estimated_self_stack_size;
3482 if (inline_summaries->get (node->global.inlined_to)->estimated_stack_size < peak)
3483 inline_summaries->get (node->global.inlined_to)->estimated_stack_size = peak;
3484 ipa_propagate_frequency (node);
3485 for (e = node->callees; e; e = e->next_callee)
3487 if (!e->inline_failed)
3488 inline_update_callee_summaries (e->callee, depth);
3489 inline_edge_summary (e)->loop_depth += depth;
3491 for (e = node->indirect_calls; e; e = e->next_callee)
3492 inline_edge_summary (e)->loop_depth += depth;
3495 /* Update change_prob of EDGE after INLINED_EDGE has been inlined.
3496 When function A is inlined into B, and A calls C with a parameter that
3497 changes with probability PROB1, and that parameter is known to be a
3498 passthrough of an argument of B that changes with probability PROB2,
3499 the probability of change is now PROB1*PROB2. */
3501 static void
3502 remap_edge_change_prob (struct cgraph_edge *inlined_edge,
3503 struct cgraph_edge *edge)
3505 if (ipa_node_params_sum)
3507 int i;
3508 struct ipa_edge_args *args = IPA_EDGE_REF (edge);
3509 struct inline_edge_summary *es = inline_edge_summary (edge);
3510 struct inline_edge_summary *inlined_es
3511 = inline_edge_summary (inlined_edge);
3513 for (i = 0; i < ipa_get_cs_argument_count (args); i++)
3515 struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i);
3516 if (jfunc->type == IPA_JF_PASS_THROUGH
3517 && (ipa_get_jf_pass_through_formal_id (jfunc)
3518 < (int) inlined_es->param.length ()))
3520 int jf_formal_id = ipa_get_jf_pass_through_formal_id (jfunc);
3521 int prob1 = es->param[i].change_prob;
3522 int prob2 = inlined_es->param[jf_formal_id].change_prob;
3523 int prob = combine_probabilities (prob1, prob2);
3525 if (prob1 && prob2 && !prob)
3526 prob = 1;
3528 es->param[i].change_prob = prob;
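/* Added illustration (a sketch, not upstream code; exact rounding depends
   on combine_probabilities).  With prob1 == REG_BR_PROB_BASE / 2 and
   prob2 == REG_BR_PROB_BASE / 4 the combined probability is roughly
   REG_BR_PROB_BASE / 8; the clamping to 1 above keeps the fact that the
   value can still change from being rounded away.  */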
3534 /* Update edge summaries of NODE after INLINED_EDGE has been inlined.
3536 Remap predicates of callees of NODE. Rest of arguments match
3537 remap_predicate.
3539 Also update change probabilities. */
3541 static void
3542 remap_edge_summaries (struct cgraph_edge *inlined_edge,
3543 struct cgraph_node *node,
3544 struct inline_summary *info,
3545 struct inline_summary *callee_info,
3546 vec<int> operand_map,
3547 vec<int> offset_map,
3548 clause_t possible_truths,
3549 struct predicate *toplev_predicate)
3551 struct cgraph_edge *e, *next;
3552 for (e = node->callees; e; e = next)
3554 struct inline_edge_summary *es = inline_edge_summary (e);
3555 struct predicate p;
3556 next = e->next_callee;
3558 if (e->inline_failed)
3560 remap_edge_change_prob (inlined_edge, e);
3562 if (es->predicate)
3564 p = remap_predicate (info, callee_info,
3565 es->predicate, operand_map, offset_map,
3566 possible_truths, toplev_predicate);
3567 edge_set_predicate (e, &p);
3569 else
3570 edge_set_predicate (e, toplev_predicate);
3572 else
3573 remap_edge_summaries (inlined_edge, e->callee, info, callee_info,
3574 operand_map, offset_map, possible_truths,
3575 toplev_predicate);
3577 for (e = node->indirect_calls; e; e = next)
3579 struct inline_edge_summary *es = inline_edge_summary (e);
3580 struct predicate p;
3581 next = e->next_callee;
3583 remap_edge_change_prob (inlined_edge, e);
3584 if (es->predicate)
3586 p = remap_predicate (info, callee_info,
3587 es->predicate, operand_map, offset_map,
3588 possible_truths, toplev_predicate);
3589 edge_set_predicate (e, &p);
3591 else
3592 edge_set_predicate (e, toplev_predicate);
3596 /* Same as remap_predicate, but set result into hint *HINT. */
3598 static void
3599 remap_hint_predicate (struct inline_summary *info,
3600 struct inline_summary *callee_info,
3601 struct predicate **hint,
3602 vec<int> operand_map,
3603 vec<int> offset_map,
3604 clause_t possible_truths,
3605 struct predicate *toplev_predicate)
3607 predicate p;
3609 if (!*hint)
3610 return;
3611 p = remap_predicate (info, callee_info,
3612 *hint,
3613 operand_map, offset_map,
3614 possible_truths, toplev_predicate);
3615 if (!false_predicate_p (&p) && !true_predicate_p (&p))
3617 if (!*hint)
3618 set_hint_predicate (hint, p);
3619 else
3620 **hint = and_predicates (info->conds, *hint, &p);
3624 /* We inlined EDGE. Update summary of the function we inlined into. */
3626 void
3627 inline_merge_summary (struct cgraph_edge *edge)
3629 struct inline_summary *callee_info = inline_summaries->get (edge->callee);
3630 struct cgraph_node *to = (edge->caller->global.inlined_to
3631 ? edge->caller->global.inlined_to : edge->caller);
3632 struct inline_summary *info = inline_summaries->get (to);
3633 clause_t clause = 0; /* not_inline is known to be false. */
3634 size_time_entry *e;
3635 vec<int> operand_map = vNULL;
3636 vec<int> offset_map = vNULL;
3637 int i;
3638 struct predicate toplev_predicate;
3639 struct predicate true_p = true_predicate ();
3640 struct inline_edge_summary *es = inline_edge_summary (edge);
3642 if (es->predicate)
3643 toplev_predicate = *es->predicate;
3644 else
3645 toplev_predicate = true_predicate ();
3647 info->fp_expressions |= callee_info->fp_expressions;
3649 if (callee_info->conds)
3650 evaluate_properties_for_edge (edge, true, &clause, NULL, NULL, NULL);
3651 if (ipa_node_params_sum && callee_info->conds)
3653 struct ipa_edge_args *args = IPA_EDGE_REF (edge);
3654 int count = ipa_get_cs_argument_count (args);
3655 int i;
3657 if (count)
3659 operand_map.safe_grow_cleared (count);
3660 offset_map.safe_grow_cleared (count);
3662 for (i = 0; i < count; i++)
3664 struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i);
3665 int map = -1;
3667 /* TODO: handle non-NOPs when merging. */
3668 if (jfunc->type == IPA_JF_PASS_THROUGH)
3670 if (ipa_get_jf_pass_through_operation (jfunc) == NOP_EXPR)
3671 map = ipa_get_jf_pass_through_formal_id (jfunc);
3672 if (!ipa_get_jf_pass_through_agg_preserved (jfunc))
3673 offset_map[i] = -1;
3675 else if (jfunc->type == IPA_JF_ANCESTOR)
3677 HOST_WIDE_INT offset = ipa_get_jf_ancestor_offset (jfunc);
3678 if (offset >= 0 && offset < INT_MAX)
3680 map = ipa_get_jf_ancestor_formal_id (jfunc);
3681 if (!ipa_get_jf_ancestor_agg_preserved (jfunc))
3682 offset = -1;
3683 offset_map[i] = offset;
3686 operand_map[i] = map;
3687 gcc_assert (map < ipa_get_param_count (IPA_NODE_REF (to)));
3690 for (i = 0; vec_safe_iterate (callee_info->entry, i, &e); i++)
3692 struct predicate p = remap_predicate (info, callee_info,
3693 &e->predicate, operand_map,
3694 offset_map, clause,
3695 &toplev_predicate);
3696 if (!false_predicate_p (&p))
3698 gcov_type add_time = ((gcov_type) e->time * edge->frequency
3699 + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
3700 int prob = predicate_probability (callee_info->conds,
3701 &e->predicate,
3702 clause, es->param);
3703 add_time = apply_probability ((gcov_type) add_time, prob);
3704 if (add_time > MAX_TIME * INLINE_TIME_SCALE)
3705 add_time = MAX_TIME * INLINE_TIME_SCALE;
3706 if (prob != REG_BR_PROB_BASE
3707 && dump_file && (dump_flags & TDF_DETAILS))
3709 fprintf (dump_file, "\t\tScaling time by probability:%f\n",
3710 (double) prob / REG_BR_PROB_BASE);
3712 account_size_time (info, e->size, add_time, &p);
3715 remap_edge_summaries (edge, edge->callee, info, callee_info, operand_map,
3716 offset_map, clause, &toplev_predicate);
3717 remap_hint_predicate (info, callee_info,
3718 &callee_info->loop_iterations,
3719 operand_map, offset_map, clause, &toplev_predicate);
3720 remap_hint_predicate (info, callee_info,
3721 &callee_info->loop_stride,
3722 operand_map, offset_map, clause, &toplev_predicate);
3723 remap_hint_predicate (info, callee_info,
3724 &callee_info->array_index,
3725 operand_map, offset_map, clause, &toplev_predicate);
3727 inline_update_callee_summaries (edge->callee,
3728 inline_edge_summary (edge)->loop_depth);
3730 /* We do not maintain predicates of inlined edges, free it. */
3731 edge_set_predicate (edge, &true_p);
3732 /* Similarly remove param summaries. */
3733 es->param.release ();
3734 operand_map.release ();
3735 offset_map.release ();
3738 /* For performance reasons inline_merge_summary does not update the overall
3739 size and time. Recompute them here. */
3741 void
3742 inline_update_overall_summary (struct cgraph_node *node)
3744 struct inline_summary *info = inline_summaries->get (node);
3745 size_time_entry *e;
3746 int i;
3748 info->size = 0;
3749 info->time = 0;
3750 for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
3752 info->size += e->size, info->time += e->time;
3753 if (info->time > MAX_TIME * INLINE_TIME_SCALE)
3754 info->time = MAX_TIME * INLINE_TIME_SCALE;
3756 estimate_calls_size_and_time (node, &info->size, &info->min_size,
3757 &info->time, NULL,
3758 ~(clause_t) (1 << predicate_false_condition),
3759 vNULL, vNULL, vNULL);
3760 info->time = (info->time + INLINE_TIME_SCALE / 2) / INLINE_TIME_SCALE;
3761 info->size = (info->size + INLINE_SIZE_SCALE / 2) / INLINE_SIZE_SCALE;
3764 /* Return hints derived from EDGE. */
3766 simple_edge_hints (struct cgraph_edge *edge)
3768 int hints = 0;
3769 struct cgraph_node *to = (edge->caller->global.inlined_to
3770 ? edge->caller->global.inlined_to : edge->caller);
3771 struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
3772 if (inline_summaries->get (to)->scc_no
3773 && inline_summaries->get (to)->scc_no
3774 == inline_summaries->get (callee)->scc_no
3775 && !edge->recursive_p ())
3776 hints |= INLINE_HINT_same_scc;
3778 if (callee->lto_file_data && edge->caller->lto_file_data
3779 && edge->caller->lto_file_data != callee->lto_file_data
3780 && !callee->merged_comdat && !callee->icf_merged)
3781 hints |= INLINE_HINT_cross_module;
3783 return hints;
3786 /* Estimate the time cost for the caller when inlining EDGE.
3787 Only to be called via estimate_edge_time, which handles the
3788 caching mechanism.
3790 When caching, also update the cache entry. Compute both time and
3791 size, since we always need both metrics eventually. */
3794 do_estimate_edge_time (struct cgraph_edge *edge)
3796 int time;
3797 int size;
3798 inline_hints hints;
3799 struct cgraph_node *callee;
3800 clause_t clause;
3801 vec<tree> known_vals;
3802 vec<ipa_polymorphic_call_context> known_contexts;
3803 vec<ipa_agg_jump_function_p> known_aggs;
3804 struct inline_edge_summary *es = inline_edge_summary (edge);
3805 int min_size;
3807 callee = edge->callee->ultimate_alias_target ();
3809 gcc_checking_assert (edge->inline_failed);
3810 evaluate_properties_for_edge (edge, true,
3811 &clause, &known_vals, &known_contexts,
3812 &known_aggs);
3813 estimate_node_size_and_time (callee, clause, known_vals, known_contexts,
3814 known_aggs, &size, &min_size, &time, &hints, es->param);
3816 /* When we have profile feedback, we can quite safely identify hot
3817 edges and disable size limits for those. Don't do that when the
3818 probability that the caller will call the callee is low, however, since it
3819 may hurt optimization of the caller's hot path. */
3820 if (edge->count && edge->maybe_hot_p ()
3821 && (edge->count * 2
3822 > (edge->caller->global.inlined_to
3823 ? edge->caller->global.inlined_to->count : edge->caller->count)))
3824 hints |= INLINE_HINT_known_hot;
3826 known_vals.release ();
3827 known_contexts.release ();
3828 known_aggs.release ();
3829 gcc_checking_assert (size >= 0);
3830 gcc_checking_assert (time >= 0);
3832 /* When caching, update the cache entry. */
3833 if (edge_growth_cache.exists ())
3835 inline_summaries->get (edge->callee)->min_size = min_size;
3836 if ((int) edge_growth_cache.length () <= edge->uid)
3837 edge_growth_cache.safe_grow_cleared (symtab->edges_max_uid);
3838 edge_growth_cache[edge->uid].time = time + (time >= 0);
3840 edge_growth_cache[edge->uid].size = size + (size >= 0);
3841 hints |= simple_edge_hints (edge);
3842 edge_growth_cache[edge->uid].hints = hints + 1;
3844 return time;
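/* Note added for illustration (not upstream text): the cache entries above
   are stored with a bias -- time + (time >= 0), size + (size > 0) and
   hints + 1 -- so that a zero entry means "not computed yet";
   do_estimate_edge_size and do_estimate_edge_hints below subtract the bias
   again when they read the cache.  */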
3848 /* Return estimated callee growth after inlining EDGE.
3849 Only to be called via estimate_edge_size. */
3852 do_estimate_edge_size (struct cgraph_edge *edge)
3854 int size;
3855 struct cgraph_node *callee;
3856 clause_t clause;
3857 vec<tree> known_vals;
3858 vec<ipa_polymorphic_call_context> known_contexts;
3859 vec<ipa_agg_jump_function_p> known_aggs;
3861 /* When we do caching, use do_estimate_edge_time to populate the entry. */
3863 if (edge_growth_cache.exists ())
3865 do_estimate_edge_time (edge);
3866 size = edge_growth_cache[edge->uid].size;
3867 gcc_checking_assert (size);
3868 return size - (size > 0);
3871 callee = edge->callee->ultimate_alias_target ();
3873 /* Early inliner runs without caching, go ahead and do the dirty work. */
3874 gcc_checking_assert (edge->inline_failed);
3875 evaluate_properties_for_edge (edge, true,
3876 &clause, &known_vals, &known_contexts,
3877 &known_aggs);
3878 estimate_node_size_and_time (callee, clause, known_vals, known_contexts,
3879 known_aggs, &size, NULL, NULL, NULL, vNULL);
3880 known_vals.release ();
3881 known_contexts.release ();
3882 known_aggs.release ();
3883 return size;
3887 /* Estimate the hints for the caller when inlining EDGE.
3888 Only to be called via estimate_edge_hints. */
3890 inline_hints
3891 do_estimate_edge_hints (struct cgraph_edge *edge)
3893 inline_hints hints;
3894 struct cgraph_node *callee;
3895 clause_t clause;
3896 vec<tree> known_vals;
3897 vec<ipa_polymorphic_call_context> known_contexts;
3898 vec<ipa_agg_jump_function_p> known_aggs;
3900 /* When we do caching, use do_estimate_edge_time to populate the entry. */
3902 if (edge_growth_cache.exists ())
3904 do_estimate_edge_time (edge);
3905 hints = edge_growth_cache[edge->uid].hints;
3906 gcc_checking_assert (hints);
3907 return hints - 1;
3910 callee = edge->callee->ultimate_alias_target ();
3912 /* Early inliner runs without caching, go ahead and do the dirty work. */
3913 gcc_checking_assert (edge->inline_failed);
3914 evaluate_properties_for_edge (edge, true,
3915 &clause, &known_vals, &known_contexts,
3916 &known_aggs);
3917 estimate_node_size_and_time (callee, clause, known_vals, known_contexts,
3918 known_aggs, NULL, NULL, NULL, &hints, vNULL);
3919 known_vals.release ();
3920 known_contexts.release ();
3921 known_aggs.release ();
3922 hints |= simple_edge_hints (edge);
3923 return hints;
3927 /* Estimate self time of the function NODE after inlining EDGE. */
3930 estimate_time_after_inlining (struct cgraph_node *node,
3931 struct cgraph_edge *edge)
3933 struct inline_edge_summary *es = inline_edge_summary (edge);
3934 if (!es->predicate || !false_predicate_p (es->predicate))
3936 gcov_type time =
3937 inline_summaries->get (node)->time + estimate_edge_time (edge);
3938 if (time < 0)
3939 time = 0;
3940 if (time > MAX_TIME)
3941 time = MAX_TIME;
3942 return time;
3944 return inline_summaries->get (node)->time;
3948 /* Estimate the size of NODE after inlining EDGE which should be an
3949 edge to either NODE or a call inlined into NODE. */
3952 estimate_size_after_inlining (struct cgraph_node *node,
3953 struct cgraph_edge *edge)
3955 struct inline_edge_summary *es = inline_edge_summary (edge);
3956 if (!es->predicate || !false_predicate_p (es->predicate))
3958 int size = inline_summaries->get (node)->size + estimate_edge_growth (edge);
3959 gcc_assert (size >= 0);
3960 return size;
3962 return inline_summaries->get (node)->size;
3966 struct growth_data
3968 struct cgraph_node *node;
3969 bool self_recursive;
3970 bool uninlinable;
3971 int growth;
3975 /* Worker for estimate_growth. Collect growth for all callers. */
3977 static bool
3978 do_estimate_growth_1 (struct cgraph_node *node, void *data)
3980 struct cgraph_edge *e;
3981 struct growth_data *d = (struct growth_data *) data;
3983 for (e = node->callers; e; e = e->next_caller)
3985 gcc_checking_assert (e->inline_failed);
3987 if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
3989 d->uninlinable = true;
3990 continue;
3993 if (e->recursive_p ())
3995 d->self_recursive = true;
3996 continue;
3998 d->growth += estimate_edge_growth (e);
4000 return false;
4004 /* Estimate the growth caused by inlining NODE into all callers. */
4007 estimate_growth (struct cgraph_node *node)
4009 struct growth_data d = { node, false, false, 0 };
4010 struct inline_summary *info = inline_summaries->get (node);
4012 node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true);
4014 /* For self-recursive functions the growth estimation really should be
4015 infinity. We don't want to return very large values because growth
4016 plays various roles in fractions of the badness computation. Be sure
4017 not to return zero or negative growth. */
4018 if (d.self_recursive)
4019 d.growth = d.growth < info->size ? info->size : d.growth;
4020 else if (DECL_EXTERNAL (node->decl) || d.uninlinable)
4021 ;
4022 else
4024 if (node->will_be_removed_from_program_if_no_direct_calls_p ())
4025 d.growth -= info->size;
4026 /* COMDAT functions are very often not shared across multiple units
4027 since they come from various template instantiations.
4028 Take this into account. */
4029 else if (DECL_COMDAT (node->decl)
4030 && node->can_remove_if_no_direct_calls_p ())
4031 d.growth -= (info->size
4032 * (100 - PARAM_VALUE (PARAM_COMDAT_SHARING_PROBABILITY))
4033 + 50) / 100;
4036 return d.growth;
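The COMDAT branch above subtracts only the fraction of the body expected to disappear, scaling by an integer percentage and adding 50 before dividing by 100 so the division rounds to nearest. A small worked sketch follows; comdat_adjusted_growth and the literal 20% probability are hypothetical, standing in for PARAM_COMDAT_SHARING_PROBABILITY.

#include <cassert>

/* Subtract from GROWTH the part of SIZE we expect to save when the body
   can be removed, assuming it survives in SHARING_PROBABILITY percent of
   other units.  The "+ 50" makes the integer percentage round to nearest.  */
static int
comdat_adjusted_growth (int growth, int size, int sharing_probability)
{
  return growth - (size * (100 - sharing_probability) + 50) / 100;
}

int
main ()
{
  /* With a 20% sharing probability, 80% of a 33-byte body is expected to
     disappear: 33 * 80 = 2640, (2640 + 50) / 100 = 26, so 100 - 26 = 74.  */
  assert (comdat_adjusted_growth (100, 33, 20) == 74);
  return 0;
}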
4039 /* Return true if NODE is unremovable, has an uninlinable caller, or has more callers than *MAX_CALLERS. */
4041 static bool
4042 check_callers (cgraph_node *node, int *max_callers)
4044 ipa_ref *ref;
4046 if (!node->can_remove_if_no_direct_calls_and_refs_p ())
4047 return true;
4049 for (cgraph_edge *e = node->callers; e; e = e->next_caller)
4051 (*max_callers)--;
4052 if (!*max_callers
4053 || cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
4054 return true;
4057 FOR_EACH_ALIAS (node, ref)
4058 if (check_callers (dyn_cast <cgraph_node *> (ref->referring), max_callers))
4059 return true;
4061 return false;
4065 /* Make a cheap estimate of whether the growth of NODE is likely positive,
4066 knowing EDGE_GROWTH of one particular edge.
4067 We assume that most other edges will have similar growth
4068 and skip the computation if there are too many callers. */
4070 bool
4071 growth_likely_positive (struct cgraph_node *node,
4072 int edge_growth)
4074 int max_callers;
4075 struct cgraph_edge *e;
4076 gcc_checking_assert (edge_growth > 0);
4078 /* First quickly check if NODE is removable at all. */
4079 if (DECL_EXTERNAL (node->decl))
4080 return true;
4081 if (!node->can_remove_if_no_direct_calls_and_refs_p ()
4082 || node->address_taken)
4083 return true;
4085 max_callers = inline_summaries->get (node)->size * 4 / edge_growth + 2;
4087 for (e = node->callers; e; e = e->next_caller)
4089 max_callers--;
4090 if (!max_callers
4091 || cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
4092 return true;
4095 ipa_ref *ref;
4096 FOR_EACH_ALIAS (node, ref)
4097 if (check_callers (dyn_cast <cgraph_node *> (ref->referring), &max_callers))
4098 return true;
4100 /* Unlike for functions called once, we play unsafe with
4101 COMDATs. We can allow that since we know the functions
4102 in consideration are small (and thus the risk is small) and,
4103 moreover, the growth estimate already accounts for the fact that
4104 COMDAT functions may or may not disappear when eliminated from
4105 the current unit. With good probability, making the aggressive
4106 choice in all units is going to make the overall program
4107 smaller. */
4108 if (DECL_COMDAT (node->decl))
4110 if (!node->can_remove_if_no_direct_calls_p ())
4111 return true;
4113 else if (!node->will_be_removed_from_program_if_no_direct_calls_p ())
4114 return true;
4116 return estimate_growth (node) > 0;
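growth_likely_positive above avoids the full estimate_growth walk when there are many callers: it budgets roughly size * 4 / edge_growth + 2 callers, on the assumption that each caller costs about EDGE_GROWTH, and answers "likely positive" as soon as the budget runs out or a caller cannot be inlined away. The sketch below shows the same early-exit idea with hypothetical names (sketch_growth_likely_positive, caller_inlinable); it is an illustration of the heuristic, not the GCC routine.

#include <cstdio>
#include <vector>

/* Cheap check: assume every caller costs roughly EDGE_GROWTH, so once the
   number of callers exceeds node_size * 4 / edge_growth + 2 the growth is
   almost certainly positive and we stop scanning.  */
static bool
sketch_growth_likely_positive (int node_size, int edge_growth,
                               const std::vector<bool> &caller_inlinable)
{
  int max_callers = node_size * 4 / edge_growth + 2;
  for (bool inlinable : caller_inlinable)
    {
      max_callers--;
      if (!max_callers || !inlinable)
        return true;                 /* too many callers, or one can't go away */
    }
  return false;                      /* cheap scan inconclusive; a full
                                        estimate_growth would run here */
}

int
main ()
{
  std::vector<bool> few (2, true), many (50, true);
  printf ("few callers -> %d, many callers -> %d\n",
          (int) sketch_growth_likely_positive (40, 10, few),
          (int) sketch_growth_likely_positive (40, 10, many));
  return 0;
}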
4120 /* This function performs intraprocedural analysis of NODE that is required to
4121 inline indirect calls. */
4123 static void
4124 inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
4126 ipa_analyze_node (node);
4127 if (dump_file && (dump_flags & TDF_DETAILS))
4129 ipa_print_node_params (dump_file, node);
4130 ipa_print_node_jump_functions (dump_file, node);
4135 /* Compute the inline summary of NODE (size, time and related parameters). */
4137 void
4138 inline_analyze_function (struct cgraph_node *node)
4140 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
4142 if (dump_file)
4143 fprintf (dump_file, "\nAnalyzing function: %s/%u\n",
4144 node->name (), node->order);
4145 if (opt_for_fn (node->decl, optimize) && !node->thunk.thunk_p)
4146 inline_indirect_intraprocedural_analysis (node);
4147 compute_inline_parameters (node, false);
4148 if (!optimize)
4150 struct cgraph_edge *e;
4151 for (e = node->callees; e; e = e->next_callee)
4152 e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
4153 for (e = node->indirect_calls; e; e = e->next_callee)
4154 e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
4157 pop_cfun ();
4161 /* Called when a new function is inserted into the callgraph late. */
4163 void
4164 inline_summary_t::insert (struct cgraph_node *node, inline_summary *)
4166 inline_analyze_function (node);
4169 /* Compute inline summaries for all defined functions. */
4171 void
4172 inline_generate_summary (void)
4174 struct cgraph_node *node;
4176 FOR_EACH_DEFINED_FUNCTION (node)
4177 if (DECL_STRUCT_FUNCTION (node->decl))
4178 node->local.versionable = tree_versionable_function_p (node->decl);
4180 /* When not optimizing, do not bother to analyze. Inlining is still done
4181 because edge redirection needs to happen there. */
4182 if (!optimize && !flag_generate_lto && !flag_generate_offload && !flag_wpa)
4183 return;
4185 if (!inline_summaries)
4186 inline_summaries = (inline_summary_t*) inline_summary_t::create_ggc (symtab);
4188 inline_summaries->enable_insertion_hook ();
4190 ipa_register_cgraph_hooks ();
4191 inline_free_summary ();
4193 FOR_EACH_DEFINED_FUNCTION (node)
4194 if (!node->alias)
4195 inline_analyze_function (node);
4199 /* Read predicate from IB. */
4201 static struct predicate
4202 read_predicate (struct lto_input_block *ib)
4204 struct predicate out;
4205 clause_t clause;
4206 int k = 0;
4210 gcc_assert (k <= MAX_CLAUSES);
4211 clause = out.clause[k++] = streamer_read_uhwi (ib);
4213 while (clause);
4215 /* Zero-initialize the remaining clauses in OUT. */
4216 while (k <= MAX_CLAUSES)
4217 out.clause[k++] = 0;
4219 return out;
4223 /* Read inline summary for edge E from IB. */
4225 static void
4226 read_inline_edge_summary (struct lto_input_block *ib, struct cgraph_edge *e)
4228 struct inline_edge_summary *es = inline_edge_summary (e);
4229 struct predicate p;
4230 int length, i;
4232 es->call_stmt_size = streamer_read_uhwi (ib);
4233 es->call_stmt_time = streamer_read_uhwi (ib);
4234 es->loop_depth = streamer_read_uhwi (ib);
4235 p = read_predicate (ib);
4236 edge_set_predicate (e, &p);
4237 length = streamer_read_uhwi (ib);
4238 if (length)
4240 es->param.safe_grow_cleared (length);
4241 for (i = 0; i < length; i++)
4242 es->param[i].change_prob = streamer_read_uhwi (ib);
4247 /* Stream in inline summaries from the section. */
4249 static void
4250 inline_read_section (struct lto_file_decl_data *file_data, const char *data,
4251 size_t len)
4253 const struct lto_function_header *header =
4254 (const struct lto_function_header *) data;
4255 const int cfg_offset = sizeof (struct lto_function_header);
4256 const int main_offset = cfg_offset + header->cfg_size;
4257 const int string_offset = main_offset + header->main_size;
4258 struct data_in *data_in;
4259 unsigned int i, count2, j;
4260 unsigned int f_count;
4262 lto_input_block ib ((const char *) data + main_offset, header->main_size,
4263 file_data->mode_table);
4265 data_in =
4266 lto_data_in_create (file_data, (const char *) data + string_offset,
4267 header->string_size, vNULL);
4268 f_count = streamer_read_uhwi (&ib);
4269 for (i = 0; i < f_count; i++)
4271 unsigned int index;
4272 struct cgraph_node *node;
4273 struct inline_summary *info;
4274 lto_symtab_encoder_t encoder;
4275 struct bitpack_d bp;
4276 struct cgraph_edge *e;
4277 predicate p;
4279 index = streamer_read_uhwi (&ib);
4280 encoder = file_data->symtab_node_encoder;
4281 node = dyn_cast<cgraph_node *> (lto_symtab_encoder_deref (encoder,
4282 index));
4283 info = inline_summaries->get (node);
4285 info->estimated_stack_size
4286 = info->estimated_self_stack_size = streamer_read_uhwi (&ib);
4287 info->size = info->self_size = streamer_read_uhwi (&ib);
4288 info->time = info->self_time = streamer_read_uhwi (&ib);
4290 bp = streamer_read_bitpack (&ib);
4291 info->inlinable = bp_unpack_value (&bp, 1);
4292 info->contains_cilk_spawn = bp_unpack_value (&bp, 1);
4293 info->fp_expressions = bp_unpack_value (&bp, 1);
4295 count2 = streamer_read_uhwi (&ib);
4296 gcc_assert (!info->conds);
4297 for (j = 0; j < count2; j++)
4299 struct condition c;
4300 c.operand_num = streamer_read_uhwi (&ib);
4301 c.size = streamer_read_uhwi (&ib);
4302 c.code = (enum tree_code) streamer_read_uhwi (&ib);
4303 c.val = stream_read_tree (&ib, data_in);
4304 bp = streamer_read_bitpack (&ib);
4305 c.agg_contents = bp_unpack_value (&bp, 1);
4306 c.by_ref = bp_unpack_value (&bp, 1);
4307 if (c.agg_contents)
4308 c.offset = streamer_read_uhwi (&ib);
4309 vec_safe_push (info->conds, c);
4311 count2 = streamer_read_uhwi (&ib);
4312 gcc_assert (!info->entry);
4313 for (j = 0; j < count2; j++)
4315 struct size_time_entry e;
4317 e.size = streamer_read_uhwi (&ib);
4318 e.time = streamer_read_uhwi (&ib);
4319 e.predicate = read_predicate (&ib);
4321 vec_safe_push (info->entry, e);
4324 p = read_predicate (&ib);
4325 set_hint_predicate (&info->loop_iterations, p);
4326 p = read_predicate (&ib);
4327 set_hint_predicate (&info->loop_stride, p);
4328 p = read_predicate (&ib);
4329 set_hint_predicate (&info->array_index, p);
4330 for (e = node->callees; e; e = e->next_callee)
4331 read_inline_edge_summary (&ib, e);
4332 for (e = node->indirect_calls; e; e = e->next_callee)
4333 read_inline_edge_summary (&ib, e);
4336 lto_free_section_data (file_data, LTO_section_inline_summary, NULL, data,
4337 len);
4338 lto_data_in_delete (data_in);
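inline_read_section above locates the three parts of an LTO section purely from sizes recorded in the header: the cfg part starts right after the header, the main stream after the cfg part, and the string table after the main stream. The following toy program shows the same offset arithmetic on a byte buffer; sketch_header and the fake payload are hypothetical and much smaller than a real lto_function_header.

#include <cstdint>
#include <cstdio>
#include <cstring>

/* Hypothetical miniature of the section layout decoded above: a header
   giving the sizes of the cfg, main and string parts, from which the
   offsets of each part are derived.  */
struct sketch_header
{
  uint32_t cfg_size;
  uint32_t main_size;
  uint32_t string_size;
};

int
main ()
{
  /* Build a fake section: header, then 8 bytes of "cfg" data, 5 bytes of
     "main" data, and a 6-byte string table.  */
  sketch_header h = { 8, 5, 6 };
  char section[sizeof h + 8 + 5 + 6];
  memcpy (section, &h, sizeof h);
  memcpy (section + sizeof h + 8, "main!", 5);
  memcpy (section + sizeof h + 8 + 5, "hello", 6);

  size_t cfg_offset = sizeof (sketch_header);
  size_t main_offset = cfg_offset + h.cfg_size;
  size_t string_offset = main_offset + h.main_size;

  printf ("main starts with '%.5s', strings start with '%s'\n",
          section + main_offset, section + string_offset);
  return 0;
}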
4342 /* Read inline summaries. Jump functions are shared among ipa-cp
4343 and the inliner, so when ipa-cp is active, we don't need to read them
4344 a second time here. */
4346 void
4347 inline_read_summary (void)
4349 struct lto_file_decl_data **file_data_vec = lto_get_file_decl_data ();
4350 struct lto_file_decl_data *file_data;
4351 unsigned int j = 0;
4353 inline_summary_alloc ();
4355 while ((file_data = file_data_vec[j++]))
4357 size_t len;
4358 const char *data = lto_get_section_data (file_data,
4359 LTO_section_inline_summary,
4360 NULL, &len);
4361 if (data)
4362 inline_read_section (file_data, data, len);
4363 else
4364 /* Fatal error here. We do not want to support compiling ltrans units
4365 with a different version of the compiler or different flags than
4366 the WPA unit, so this should never happen. */
4367 fatal_error (input_location,
4368 "ipa inline summary is missing in input file");
4370 if (optimize)
4372 ipa_register_cgraph_hooks ();
4373 if (!flag_ipa_cp)
4374 ipa_prop_read_jump_functions ();
4377 gcc_assert (inline_summaries);
4378 inline_summaries->enable_insertion_hook ();
4382 /* Write predicate P to OB. */
4384 static void
4385 write_predicate (struct output_block *ob, struct predicate *p)
4387 int j;
4388 if (p)
4389 for (j = 0; p->clause[j]; j++)
4391 gcc_assert (j < MAX_CLAUSES);
4392 streamer_write_uhwi (ob, p->clause[j]);
4394 streamer_write_uhwi (ob, 0);
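write_predicate above emits the clauses of a predicate followed by a terminating zero, and read_predicate earlier in the file consumes values until it sees that zero and clears the unused tail. The round trip below demonstrates the format with a plain vector standing in for the LTO streamer; SKETCH_MAX_CLAUSES and the sketch_* helpers are hypothetical stand-ins for MAX_CLAUSES and the streamer API.

#include <cassert>
#include <cstdint>
#include <vector>

static const int SKETCH_MAX_CLAUSES = 8;

/* Emit the non-zero clauses of a predicate followed by a 0 terminator.  */
static void
sketch_write_predicate (std::vector<uint32_t> &stream, const uint32_t *clauses)
{
  for (int j = 0; clauses[j]; j++)
    {
      assert (j < SKETCH_MAX_CLAUSES);
      stream.push_back (clauses[j]);
    }
  stream.push_back (0);                /* terminator */
}

/* Read clauses until the 0 terminator, then zero the unused tail of OUT.  */
static void
sketch_read_predicate (const std::vector<uint32_t> &stream, size_t &pos,
                       uint32_t *out)
{
  int k = 0;
  uint32_t clause;
  do
    {
      assert (k <= SKETCH_MAX_CLAUSES);
      clause = out[k++] = stream[pos++];
    }
  while (clause);
  while (k <= SKETCH_MAX_CLAUSES)
    out[k++] = 0;
}

int
main ()
{
  uint32_t in[SKETCH_MAX_CLAUSES + 1] = { 0x5, 0x3, 0 };
  uint32_t out[SKETCH_MAX_CLAUSES + 1];
  std::vector<uint32_t> stream;
  size_t pos = 0;

  sketch_write_predicate (stream, in);
  sketch_read_predicate (stream, pos, out);
  assert (out[0] == 0x5 && out[1] == 0x3 && out[2] == 0);
  return 0;
}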
4398 /* Write inline summary for edge E to OB. */
4400 static void
4401 write_inline_edge_summary (struct output_block *ob, struct cgraph_edge *e)
4403 struct inline_edge_summary *es = inline_edge_summary (e);
4404 int i;
4406 streamer_write_uhwi (ob, es->call_stmt_size);
4407 streamer_write_uhwi (ob, es->call_stmt_time);
4408 streamer_write_uhwi (ob, es->loop_depth);
4409 write_predicate (ob, es->predicate);
4410 streamer_write_uhwi (ob, es->param.length ());
4411 for (i = 0; i < (int) es->param.length (); i++)
4412 streamer_write_uhwi (ob, es->param[i].change_prob);
4416 /* Write inline summaries for all nodes in the symbol table encoder.
4417 Jump functions are shared among ipa-cp and the inliner, so when ipa-cp is
4418 active, we don't need to write them twice. */
4420 void
4421 inline_write_summary (void)
4423 struct cgraph_node *node;
4424 struct output_block *ob = create_output_block (LTO_section_inline_summary);
4425 lto_symtab_encoder_t encoder = ob->decl_state->symtab_node_encoder;
4426 unsigned int count = 0;
4427 int i;
4429 for (i = 0; i < lto_symtab_encoder_size (encoder); i++)
4431 symtab_node *snode = lto_symtab_encoder_deref (encoder, i);
4432 cgraph_node *cnode = dyn_cast <cgraph_node *> (snode);
4433 if (cnode && cnode->definition && !cnode->alias)
4434 count++;
4436 streamer_write_uhwi (ob, count);
4438 for (i = 0; i < lto_symtab_encoder_size (encoder); i++)
4440 symtab_node *snode = lto_symtab_encoder_deref (encoder, i);
4441 cgraph_node *cnode = dyn_cast <cgraph_node *> (snode);
4442 if (cnode && (node = cnode)->definition && !node->alias)
4444 struct inline_summary *info = inline_summaries->get (node);
4445 struct bitpack_d bp;
4446 struct cgraph_edge *edge;
4447 int i;
4448 size_time_entry *e;
4449 struct condition *c;
4451 streamer_write_uhwi (ob,
4452 lto_symtab_encoder_encode (encoder,
4454 node));
4455 streamer_write_hwi (ob, info->estimated_self_stack_size);
4456 streamer_write_hwi (ob, info->self_size);
4457 streamer_write_hwi (ob, info->self_time);
4458 bp = bitpack_create (ob->main_stream);
4459 bp_pack_value (&bp, info->inlinable, 1);
4460 bp_pack_value (&bp, info->contains_cilk_spawn, 1);
4461 bp_pack_value (&bp, info->fp_expressions, 1);
4462 streamer_write_bitpack (&bp);
4463 streamer_write_uhwi (ob, vec_safe_length (info->conds));
4464 for (i = 0; vec_safe_iterate (info->conds, i, &c); i++)
4466 streamer_write_uhwi (ob, c->operand_num);
4467 streamer_write_uhwi (ob, c->size);
4468 streamer_write_uhwi (ob, c->code);
4469 stream_write_tree (ob, c->val, true);
4470 bp = bitpack_create (ob->main_stream);
4471 bp_pack_value (&bp, c->agg_contents, 1);
4472 bp_pack_value (&bp, c->by_ref, 1);
4473 streamer_write_bitpack (&bp);
4474 if (c->agg_contents)
4475 streamer_write_uhwi (ob, c->offset);
4477 streamer_write_uhwi (ob, vec_safe_length (info->entry));
4478 for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
4480 streamer_write_uhwi (ob, e->size);
4481 streamer_write_uhwi (ob, e->time);
4482 write_predicate (ob, &e->predicate);
4484 write_predicate (ob, info->loop_iterations);
4485 write_predicate (ob, info->loop_stride);
4486 write_predicate (ob, info->array_index);
4487 for (edge = node->callees; edge; edge = edge->next_callee)
4488 write_inline_edge_summary (ob, edge);
4489 for (edge = node->indirect_calls; edge; edge = edge->next_callee)
4490 write_inline_edge_summary (ob, edge);
4493 streamer_write_char_stream (ob->main_stream, 0);
4494 produce_asm (ob, NULL);
4495 destroy_output_block (ob);
4497 if (optimize && !flag_ipa_cp)
4498 ipa_prop_write_jump_functions ();
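The bitpack written above (inlinable, contains_cilk_spawn, fp_expressions) is unpacked in exactly the same order by inline_read_section; the stream itself carries no field names, so writer and reader must agree on the sequence. A minimal sketch of such a 1-bit packer follows, assuming a hypothetical sketch_bitpack type rather than GCC's bitpack_d.

#include <cassert>
#include <cstdint>

/* Hypothetical 1-bit packer: bits are appended at increasing positions on
   the write side and must be consumed in the same order on the read side.  */
struct sketch_bitpack
{
  uint64_t word = 0;
  int pos = 0;
};

static void
pack_bit (sketch_bitpack &bp, bool value)
{
  bp.word |= (uint64_t) value << bp.pos++;
}

static bool
unpack_bit (sketch_bitpack &bp)
{
  return (bp.word >> bp.pos++) & 1;
}

int
main ()
{
  sketch_bitpack w;
  pack_bit (w, true);    /* inlinable */
  pack_bit (w, false);   /* contains_cilk_spawn */
  pack_bit (w, true);    /* fp_expressions */

  sketch_bitpack r;
  r.word = w.word;       /* "stream" the packed word to the reader */
  assert (unpack_bit (r) == true);
  assert (unpack_bit (r) == false);
  assert (unpack_bit (r) == true);
  return 0;
}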
4502 /* Release inline summary. */
4504 void
4505 inline_free_summary (void)
4507 struct cgraph_node *node;
4508 if (edge_removal_hook_holder)
4509 symtab->remove_edge_removal_hook (edge_removal_hook_holder);
4510 edge_removal_hook_holder = NULL;
4511 if (edge_duplication_hook_holder)
4512 symtab->remove_edge_duplication_hook (edge_duplication_hook_holder);
4513 edge_duplication_hook_holder = NULL;
4514 if (!inline_edge_summary_vec.exists ())
4515 return;
4516 FOR_EACH_DEFINED_FUNCTION (node)
4517 if (!node->alias)
4518 reset_inline_summary (node, inline_summaries->get (node));
4519 inline_summaries->release ();
4520 inline_summaries = NULL;
4521 inline_edge_summary_vec.release ();
4522 edge_predicate_pool.release ();