Fix warnings occurred during profiledbootstrap on
[official-gcc.git] / gcc / ipa-inline-analysis.c
blob d74716306170fc91dbe290edf6a1761ccbc15869
1 /* Inlining decision heuristics.
2 Copyright (C) 2003-2015 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* Analysis used by the inliner and other passes limiting code size growth.
23 We estimate for each function
24 - function body size
25 - average function execution time
26 - inlining size benefit (that is how much of function body size
27 and its call sequence is expected to disappear by inlining)
28 - inlining time benefit
29 - function frame size
30 For each call
31 - call statement size and time
33 inline_summary data structures store the above information locally (i.e.
34 parameters of the function itself) and globally (i.e. parameters of
35 the function created by applying all the inline decisions already
36 present in the callgraph).
38 We provide an accessor to the inline_summary data structure and
39 basic logic updating the parameters when inlining is performed.
41 The summaries are context sensitive. Context means
42 1) partial assignment of known constant values of operands
43 2) whether function is inlined into the call or not.
44 It is easy to add more variants. To represent function size and time
45 that depend on context (i.e. they are known to be optimized away when
46 context is known either by inlining or from IP-CP and cloning),
47 we use predicates. Predicates are logical formulas in
48 conjunctive-disjunctive form consisting of clauses. Clauses are bitmaps
49 specifying what conditions must be true. Conditions are simple tests
50 of the form described above.
52 In order to make predicate (possibly) true, all of its clauses must
53 be (possibly) true. To make clause (possibly) true, one of conditions
54 it mentions must be (possibly) true. There are fixed bounds on
55 number of clauses and conditions and all the manipulation functions
56 are conservative in positive direction. I.e. we may lose precision
57 by thinking that predicate may be true even when it is not.
59 estimate_edge_size and estimate_edge_growth can be used to query
60 function size/time in the given context. inline_merge_summary merges
61 properties of caller and callee after inlining.
63 Finally pass_inline_parameters is exported. This is used to drive
64 computation of function parameters used by the early inliner. IPA
65 inliner performs analysis via its analyze_function method. */
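/* The representation described above can be illustrated by a small,
   self-contained sketch. The types and values below are simplified
   stand-ins for the real structures in this file (struct predicate,
   clause_t and evaluate_predicate); they only show how clause bitmaps
   encode a conjunctive-disjunctive formula and how a partial truth
   assignment can disprove it. */

#include <stdio.h>

typedef unsigned int sketch_clause_t;	/* One bit per condition.  */
#define SKETCH_MAX_CLAUSES 8

struct sketch_predicate
{
  /* Zero-terminated list of clauses; the predicate is the conjunction of
     its clauses and each clause is the disjunction of the conditions
     whose bits are set in it.  */
  sketch_clause_t clause[SKETCH_MAX_CLAUSES + 1];
};

/* Possibly true iff every clause contains at least one possibly-true
   condition; this mirrors evaluate_predicate later in this file.  */
static int
sketch_evaluate (const struct sketch_predicate *p,
		 sketch_clause_t possible_truths)
{
  for (int i = 0; p->clause[i]; i++)
    if (!(p->clause[i] & possible_truths))
      return 0;
  return 1;
}

int
main (void)
{
  /* (cond0 || cond2) && (cond1).  */
  struct sketch_predicate p = { { (1u << 0) | (1u << 2), 1u << 1, 0 } };

  /* cond0 and cond1 are possibly true: the predicate may hold.  */
  printf ("%d\n", sketch_evaluate (&p, (1u << 0) | (1u << 1)));
  /* Only cond2 is possibly true: the clause (cond1) is disproved.  */
  printf ("%d\n", sketch_evaluate (&p, 1u << 2));
  return 0;
}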
67 #include "config.h"
68 #include "system.h"
69 #include "coretypes.h"
70 #include "tm.h"
71 #include "hash-set.h"
72 #include "machmode.h"
73 #include "vec.h"
74 #include "double-int.h"
75 #include "input.h"
76 #include "alias.h"
77 #include "symtab.h"
78 #include "wide-int.h"
79 #include "inchash.h"
80 #include "real.h"
81 #include "tree.h"
82 #include "fold-const.h"
83 #include "stor-layout.h"
84 #include "stringpool.h"
85 #include "print-tree.h"
86 #include "tree-inline.h"
87 #include "langhooks.h"
88 #include "flags.h"
89 #include "diagnostic.h"
90 #include "gimple-pretty-print.h"
91 #include "params.h"
92 #include "tree-pass.h"
93 #include "coverage.h"
94 #include "predict.h"
95 #include "hard-reg-set.h"
96 #include "input.h"
97 #include "function.h"
98 #include "dominance.h"
99 #include "cfg.h"
100 #include "cfganal.h"
101 #include "basic-block.h"
102 #include "tree-ssa-alias.h"
103 #include "internal-fn.h"
104 #include "gimple-expr.h"
105 #include "is-a.h"
106 #include "gimple.h"
107 #include "gimple-iterator.h"
108 #include "gimple-ssa.h"
109 #include "tree-cfg.h"
110 #include "tree-phinodes.h"
111 #include "ssa-iterators.h"
112 #include "tree-ssanames.h"
113 #include "tree-ssa-loop-niter.h"
114 #include "tree-ssa-loop.h"
115 #include "hash-map.h"
116 #include "plugin-api.h"
117 #include "ipa-ref.h"
118 #include "cgraph.h"
119 #include "alloc-pool.h"
120 #include "symbol-summary.h"
121 #include "ipa-prop.h"
122 #include "lto-streamer.h"
123 #include "data-streamer.h"
124 #include "tree-streamer.h"
125 #include "ipa-inline.h"
126 #include "cfgloop.h"
127 #include "tree-scalar-evolution.h"
128 #include "ipa-utils.h"
129 #include "cilk.h"
130 #include "cfgexpand.h"
132 /* The estimated runtime of a function can easily run into huge numbers with many
133 nested loops. Be sure we can compute time * INLINE_SIZE_SCALE * 2 in an
134 integer. For anything larger we use gcov_type. */
135 #define MAX_TIME 500000
137 /* Number of bits in an integer, but we really want this to be stable across different
138 hosts. */
139 #define NUM_CONDITIONS 32
141 enum predicate_conditions
143 predicate_false_condition = 0,
144 predicate_not_inlined_condition = 1,
145 predicate_first_dynamic_condition = 2
148 /* Special condition code we use to represent a test that an operand is a compile-time
149 constant. */
150 #define IS_NOT_CONSTANT ERROR_MARK
151 /* Special condition code we use to represent a test that an operand is not changed
152 across invocations of the function. When an operand is IS_NOT_CONSTANT it is always
153 CHANGED; however, e.g. loop invariants can be NOT_CHANGED for a given percentage
154 of executions even when they are not compile-time constants. */
155 #define CHANGED IDENTIFIER_NODE
157 /* Holders of ipa cgraph hooks: */
158 static struct cgraph_2edge_hook_list *edge_duplication_hook_holder;
159 static struct cgraph_edge_hook_list *edge_removal_hook_holder;
160 static void inline_edge_removal_hook (struct cgraph_edge *, void *);
161 static void inline_edge_duplication_hook (struct cgraph_edge *,
162 struct cgraph_edge *, void *);
164 /* VECtor holding inline summaries.
165 In GGC memory because conditions might point to constant trees. */
166 function_summary <inline_summary *> *inline_summaries;
167 vec<inline_edge_summary_t> inline_edge_summary_vec;
169 /* Cached node/edge growths. */
170 vec<edge_growth_cache_entry> edge_growth_cache;
172 /* Edge predicates go here. */
173 static alloc_pool edge_predicate_pool;
175 /* Return true predicate (tautology).
176 We represent it by empty list of clauses. */
178 static inline struct predicate
179 true_predicate (void)
181 struct predicate p;
182 p.clause[0] = 0;
183 return p;
187 /* Return predicate testing single condition number COND. */
189 static inline struct predicate
190 single_cond_predicate (int cond)
192 struct predicate p;
193 p.clause[0] = 1 << cond;
194 p.clause[1] = 0;
195 return p;
199 /* Return the false predicate. Its first clause requires the false condition. */
201 static inline struct predicate
202 false_predicate (void)
204 return single_cond_predicate (predicate_false_condition);
208 /* Return true if P is (true). */
210 static inline bool
211 true_predicate_p (struct predicate *p)
213 return !p->clause[0];
217 /* Return true if P is (false). */
219 static inline bool
220 false_predicate_p (struct predicate *p)
222 if (p->clause[0] == (1 << predicate_false_condition))
224 gcc_checking_assert (!p->clause[1]
225 && p->clause[0] == 1 << predicate_false_condition);
226 return true;
228 return false;
232 /* Return predicate that is set true when function is not inlined. */
234 static inline struct predicate
235 not_inlined_predicate (void)
237 return single_cond_predicate (predicate_not_inlined_condition);
240 /* Simple description of whether a memory load or a condition refers to a load
241 from an aggregate and if so, how and where from in the aggregate.
242 Individual fields have the same meaning as fields with the same name in
243 struct condition. */
245 struct agg_position_info
247 HOST_WIDE_INT offset;
248 bool agg_contents;
249 bool by_ref;
252 /* Add condition to condition list CONDS. AGGPOS describes whether the used
253 operand is loaded from an aggregate and where in the aggregate it is. It can
254 be NULL, which means this is not a load from an aggregate. */
256 static struct predicate
257 add_condition (struct inline_summary *summary, int operand_num,
258 struct agg_position_info *aggpos,
259 enum tree_code code, tree val)
261 int i;
262 struct condition *c;
263 struct condition new_cond;
264 HOST_WIDE_INT offset;
265 bool agg_contents, by_ref;
267 if (aggpos)
269 offset = aggpos->offset;
270 agg_contents = aggpos->agg_contents;
271 by_ref = aggpos->by_ref;
273 else
275 offset = 0;
276 agg_contents = false;
277 by_ref = false;
280 gcc_checking_assert (operand_num >= 0);
281 for (i = 0; vec_safe_iterate (summary->conds, i, &c); i++)
283 if (c->operand_num == operand_num
284 && c->code == code
285 && c->val == val
286 && c->agg_contents == agg_contents
287 && (!agg_contents || (c->offset == offset && c->by_ref == by_ref)))
288 return single_cond_predicate (i + predicate_first_dynamic_condition);
290 /* Too many conditions. Give up and return constant true. */
291 if (i == NUM_CONDITIONS - predicate_first_dynamic_condition)
292 return true_predicate ();
294 new_cond.operand_num = operand_num;
295 new_cond.code = code;
296 new_cond.val = val;
297 new_cond.agg_contents = agg_contents;
298 new_cond.by_ref = by_ref;
299 new_cond.offset = offset;
300 vec_safe_push (summary->conds, new_cond);
301 return single_cond_predicate (i + predicate_first_dynamic_condition);
305 /* Add clause CLAUSE into the predicate P. */
307 static inline void
308 add_clause (conditions conditions, struct predicate *p, clause_t clause)
310 int i;
311 int i2;
312 int insert_here = -1;
313 int c1, c2;
315 /* True clause. */
316 if (!clause)
317 return;
319 /* False clause makes the whole predicate false. Kill the other variants. */
320 if (clause == (1 << predicate_false_condition))
322 p->clause[0] = (1 << predicate_false_condition);
323 p->clause[1] = 0;
324 return;
326 if (false_predicate_p (p))
327 return;
329 /* No one should be silly enough to add false into nontrivial clauses. */
330 gcc_checking_assert (!(clause & (1 << predicate_false_condition)));
332 /* Look where to insert the clause. At the same time prune out
333 clauses of P that are implied by the new clause and thus
334 redundant. */
335 for (i = 0, i2 = 0; i <= MAX_CLAUSES; i++)
337 p->clause[i2] = p->clause[i];
339 if (!p->clause[i])
340 break;
342 /* If p->clause[i] implies clause, there is nothing to add. */
343 if ((p->clause[i] & clause) == p->clause[i])
345 /* We had nothing to add, none of clauses should've become
346 redundant. */
347 gcc_checking_assert (i == i2);
348 return;
351 if (p->clause[i] < clause && insert_here < 0)
352 insert_here = i2;
354 /* If clause implies p->clause[i], then p->clause[i] becomes redundant.
355 Otherwise the p->clause[i] has to stay. */
356 if ((p->clause[i] & clause) != clause)
357 i2++;
360 /* Look for clauses that are obviously true. I.e.
361 op0 == 5 || op0 != 5. */
362 for (c1 = predicate_first_dynamic_condition; c1 < NUM_CONDITIONS; c1++)
364 condition *cc1;
365 if (!(clause & (1 << c1)))
366 continue;
367 cc1 = &(*conditions)[c1 - predicate_first_dynamic_condition];
368 /* We have no way to represent !CHANGED and !IS_NOT_CONSTANT
369 and thus there is no point for looking for them. */
370 if (cc1->code == CHANGED || cc1->code == IS_NOT_CONSTANT)
371 continue;
372 for (c2 = c1 + 1; c2 < NUM_CONDITIONS; c2++)
373 if (clause & (1 << c2))
375 condition *cc1 =
376 &(*conditions)[c1 - predicate_first_dynamic_condition];
377 condition *cc2 =
378 &(*conditions)[c2 - predicate_first_dynamic_condition];
379 if (cc1->operand_num == cc2->operand_num
380 && cc1->val == cc2->val
381 && cc2->code != IS_NOT_CONSTANT
382 && cc2->code != CHANGED
383 && cc1->code == invert_tree_comparison (cc2->code,
384 HONOR_NANS (cc1->val)))
385 return;
390 /* We ran out of variants. Be conservative in the positive direction. */
391 if (i2 == MAX_CLAUSES)
392 return;
393 /* Keep clauses in decreasing order. This makes equivalence testing easy. */
394 p->clause[i2 + 1] = 0;
395 if (insert_here >= 0)
396 for (; i2 > insert_here; i2--)
397 p->clause[i2] = p->clause[i2 - 1];
398 else
399 insert_here = i2;
400 p->clause[insert_here] = clause;
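/* With clause bitmaps the implication test above is a subset test: if an
   existing clause is 0b0010 (just c1) and the incoming clause is 0b0110
   (c1 || c2), then (0b0010 & 0b0110) == 0b0010, i.e. every condition of
   the existing clause also appears in the new one, so whenever the
   existing clause holds the new, weaker clause holds as well and there
   is nothing to add. Illustrative bit values only; real condition
   numbering starts at predicate_first_dynamic_condition. */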
404 /* Return P & P2. */
406 static struct predicate
407 and_predicates (conditions conditions,
408 struct predicate *p, struct predicate *p2)
410 struct predicate out = *p;
411 int i;
413 /* Avoid busy work. */
414 if (false_predicate_p (p2) || true_predicate_p (p))
415 return *p2;
416 if (false_predicate_p (p) || true_predicate_p (p2))
417 return *p;
419 /* See how far predicates match. */
420 for (i = 0; p->clause[i] && p->clause[i] == p2->clause[i]; i++)
422 gcc_checking_assert (i < MAX_CLAUSES);
425 /* Combine the rest of the predicates. */
426 for (; p2->clause[i]; i++)
428 gcc_checking_assert (i < MAX_CLAUSES);
429 add_clause (conditions, &out, p2->clause[i]);
431 return out;
435 /* Return true if predicates are obviously equal. */
437 static inline bool
438 predicates_equal_p (struct predicate *p, struct predicate *p2)
440 int i;
441 for (i = 0; p->clause[i]; i++)
443 gcc_checking_assert (i < MAX_CLAUSES);
444 gcc_checking_assert (p->clause[i] > p->clause[i + 1]);
445 gcc_checking_assert (!p2->clause[i]
446 || p2->clause[i] > p2->clause[i + 1]);
447 if (p->clause[i] != p2->clause[i])
448 return false;
450 return !p2->clause[i];
454 /* Return P | P2. */
456 static struct predicate
457 or_predicates (conditions conditions,
458 struct predicate *p, struct predicate *p2)
460 struct predicate out = true_predicate ();
461 int i, j;
463 /* Avoid busy work. */
464 if (false_predicate_p (p2) || true_predicate_p (p))
465 return *p;
466 if (false_predicate_p (p) || true_predicate_p (p2))
467 return *p2;
468 if (predicates_equal_p (p, p2))
469 return *p;
471 /* OK, combine the predicates. */
472 for (i = 0; p->clause[i]; i++)
473 for (j = 0; p2->clause[j]; j++)
475 gcc_checking_assert (i < MAX_CLAUSES && j < MAX_CLAUSES);
476 add_clause (conditions, &out, p->clause[i] | p2->clause[j]);
478 return out;
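/* A worked example with illustrative bit values (bit i standing for
   condition i): if P is (c0) && (c1), i.e. clauses {0b0001, 0b0010}, and
   P2 is (c2 || c3), i.e. the single clause {0b1100}, then and_predicates
   simply appends the clause lists, giving (c0) && (c1) && (c2 || c3)
   = {0b0001, 0b0010, 0b1100}, while or_predicates ORs every pair of
   clauses, giving (c0 || c2 || c3) && (c1 || c2 || c3)
   = {0b1101, 0b1110}, with add_clause pruning any clause implied by
   another. */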
482 /* Having partial truth assignment in POSSIBLE_TRUTHS, return false
483 if predicate P is known to be false. */
485 static bool
486 evaluate_predicate (struct predicate *p, clause_t possible_truths)
488 int i;
490 /* True remains true. */
491 if (true_predicate_p (p))
492 return true;
494 gcc_assert (!(possible_truths & (1 << predicate_false_condition)));
496 /* See if we can find clause we can disprove. */
497 for (i = 0; p->clause[i]; i++)
499 gcc_checking_assert (i < MAX_CLAUSES);
500 if (!(p->clause[i] & possible_truths))
501 return false;
503 return true;
506 /* Return the probability in range 0...REG_BR_PROB_BASE that the predicated
507 instruction will be recomputed per invocation of the inlined call. */
509 static int
510 predicate_probability (conditions conds,
511 struct predicate *p, clause_t possible_truths,
512 vec<inline_param_summary> inline_param_summary)
514 int i;
515 int combined_prob = REG_BR_PROB_BASE;
517 /* True remains true. */
518 if (true_predicate_p (p))
519 return REG_BR_PROB_BASE;
521 if (false_predicate_p (p))
522 return 0;
524 gcc_assert (!(possible_truths & (1 << predicate_false_condition)));
526 /* See if we can find clause we can disprove. */
527 for (i = 0; p->clause[i]; i++)
529 gcc_checking_assert (i < MAX_CLAUSES);
530 if (!(p->clause[i] & possible_truths))
531 return 0;
532 else
534 int this_prob = 0;
535 int i2;
536 if (!inline_param_summary.exists ())
537 return REG_BR_PROB_BASE;
538 for (i2 = 0; i2 < NUM_CONDITIONS; i2++)
539 if ((p->clause[i] & possible_truths) & (1 << i2))
541 if (i2 >= predicate_first_dynamic_condition)
543 condition *c =
544 &(*conds)[i2 - predicate_first_dynamic_condition];
545 if (c->code == CHANGED
546 && (c->operand_num <
547 (int) inline_param_summary.length ()))
549 int iprob =
550 inline_param_summary[c->operand_num].change_prob;
551 this_prob = MAX (this_prob, iprob);
553 else
554 this_prob = REG_BR_PROB_BASE;
556 else
557 this_prob = REG_BR_PROB_BASE;
559 combined_prob = MIN (this_prob, combined_prob);
560 if (!combined_prob)
561 return 0;
564 return combined_prob;
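/* A worked example with made-up numbers: for the predicate
   (op0 changed || op1 changed) && (op0 changed), where op0 is estimated
   to change in 30% of invocations and op1 in 60%, the first clause gets
   MAX (30%, 60%) = 60%, the second 30%, and the combined probability is
   MIN (60%, 30%) = 30% of REG_BR_PROB_BASE. */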
568 /* Dump conditional COND. */
570 static void
571 dump_condition (FILE *f, conditions conditions, int cond)
573 condition *c;
574 if (cond == predicate_false_condition)
575 fprintf (f, "false");
576 else if (cond == predicate_not_inlined_condition)
577 fprintf (f, "not inlined");
578 else
580 c = &(*conditions)[cond - predicate_first_dynamic_condition];
581 fprintf (f, "op%i", c->operand_num);
582 if (c->agg_contents)
583 fprintf (f, "[%soffset: " HOST_WIDE_INT_PRINT_DEC "]",
584 c->by_ref ? "ref " : "", c->offset);
585 if (c->code == IS_NOT_CONSTANT)
587 fprintf (f, " not constant");
588 return;
590 if (c->code == CHANGED)
592 fprintf (f, " changed");
593 return;
595 fprintf (f, " %s ", op_symbol_code (c->code));
596 print_generic_expr (f, c->val, 1);
601 /* Dump clause CLAUSE. */
603 static void
604 dump_clause (FILE *f, conditions conds, clause_t clause)
606 int i;
607 bool found = false;
608 fprintf (f, "(");
609 if (!clause)
610 fprintf (f, "true");
611 for (i = 0; i < NUM_CONDITIONS; i++)
612 if (clause & (1 << i))
614 if (found)
615 fprintf (f, " || ");
616 found = true;
617 dump_condition (f, conds, i);
619 fprintf (f, ")");
623 /* Dump predicate PREDICATE. */
625 static void
626 dump_predicate (FILE *f, conditions conds, struct predicate *pred)
628 int i;
629 if (true_predicate_p (pred))
630 dump_clause (f, conds, 0);
631 else
632 for (i = 0; pred->clause[i]; i++)
634 if (i)
635 fprintf (f, " && ");
636 dump_clause (f, conds, pred->clause[i]);
638 fprintf (f, "\n");
642 /* Dump inline hints. */
643 void
644 dump_inline_hints (FILE *f, inline_hints hints)
646 if (!hints)
647 return;
648 fprintf (f, "inline hints:");
649 if (hints & INLINE_HINT_indirect_call)
651 hints &= ~INLINE_HINT_indirect_call;
652 fprintf (f, " indirect_call");
654 if (hints & INLINE_HINT_loop_iterations)
656 hints &= ~INLINE_HINT_loop_iterations;
657 fprintf (f, " loop_iterations");
659 if (hints & INLINE_HINT_loop_stride)
661 hints &= ~INLINE_HINT_loop_stride;
662 fprintf (f, " loop_stride");
664 if (hints & INLINE_HINT_same_scc)
666 hints &= ~INLINE_HINT_same_scc;
667 fprintf (f, " same_scc");
669 if (hints & INLINE_HINT_in_scc)
671 hints &= ~INLINE_HINT_in_scc;
672 fprintf (f, " in_scc");
674 if (hints & INLINE_HINT_cross_module)
676 hints &= ~INLINE_HINT_cross_module;
677 fprintf (f, " cross_module");
679 if (hints & INLINE_HINT_declared_inline)
681 hints &= ~INLINE_HINT_declared_inline;
682 fprintf (f, " declared_inline");
684 if (hints & INLINE_HINT_array_index)
686 hints &= ~INLINE_HINT_array_index;
687 fprintf (f, " array_index");
689 if (hints & INLINE_HINT_known_hot)
691 hints &= ~INLINE_HINT_known_hot;
692 fprintf (f, " known_hot");
694 gcc_assert (!hints);
698 /* Record SIZE and TIME under condition PRED into the inline summary. */
700 static void
701 account_size_time (struct inline_summary *summary, int size, int time,
702 struct predicate *pred)
704 size_time_entry *e;
705 bool found = false;
706 int i;
708 if (false_predicate_p (pred))
709 return;
711 /* We need to create an initial empty unconditional clause, but otherwise
712 we don't need to account empty times and sizes. */
713 if (!size && !time && summary->entry)
714 return;
716 /* Watch overflow that might result from insane profiles. */
717 if (time > MAX_TIME * INLINE_TIME_SCALE)
718 time = MAX_TIME * INLINE_TIME_SCALE;
719 gcc_assert (time >= 0);
721 for (i = 0; vec_safe_iterate (summary->entry, i, &e); i++)
722 if (predicates_equal_p (&e->predicate, pred))
724 found = true;
725 break;
727 if (i == 256)
729 i = 0;
730 found = true;
731 e = &(*summary->entry)[0];
732 gcc_assert (!e->predicate.clause[0]);
733 if (dump_file && (dump_flags & TDF_DETAILS))
734 fprintf (dump_file,
735 "\t\tReached limit on number of entries, "
736 "ignoring the predicate.");
738 if (dump_file && (dump_flags & TDF_DETAILS) && (time || size))
740 fprintf (dump_file,
741 "\t\tAccounting size:%3.2f, time:%3.2f on %spredicate:",
742 ((double) size) / INLINE_SIZE_SCALE,
743 ((double) time) / INLINE_TIME_SCALE, found ? "" : "new ");
744 dump_predicate (dump_file, summary->conds, pred);
746 if (!found)
748 struct size_time_entry new_entry;
749 new_entry.size = size;
750 new_entry.time = time;
751 new_entry.predicate = *pred;
752 vec_safe_push (summary->entry, new_entry);
754 else
756 e->size += size;
757 e->time += time;
758 if (e->time > MAX_TIME * INLINE_TIME_SCALE)
759 e->time = MAX_TIME * INLINE_TIME_SCALE;
763 /* Set predicate for edge E. */
765 static void
766 edge_set_predicate (struct cgraph_edge *e, struct predicate *predicate)
768 struct inline_edge_summary *es = inline_edge_summary (e);
770 /* If the edge is determined to be never executed, redirect it
771 to BUILTIN_UNREACHABLE to keep the inliner from inlining into it. */
772 if (predicate && false_predicate_p (predicate) && e->callee)
774 struct cgraph_node *callee = !e->inline_failed ? e->callee : NULL;
776 e->redirect_callee (cgraph_node::get_create
777 (builtin_decl_implicit (BUILT_IN_UNREACHABLE)));
778 e->inline_failed = CIF_UNREACHABLE;
779 es->call_stmt_size = 0;
780 es->call_stmt_time = 0;
781 if (callee)
782 callee->remove_symbol_and_inline_clones ();
784 if (predicate && !true_predicate_p (predicate))
786 if (!es->predicate)
787 es->predicate = (struct predicate *) pool_alloc (edge_predicate_pool);
788 *es->predicate = *predicate;
790 else
792 if (es->predicate)
793 pool_free (edge_predicate_pool, es->predicate);
794 es->predicate = NULL;
798 /* Set predicate for hint *P. */
800 static void
801 set_hint_predicate (struct predicate **p, struct predicate new_predicate)
803 if (false_predicate_p (&new_predicate) || true_predicate_p (&new_predicate))
805 if (*p)
806 pool_free (edge_predicate_pool, *p);
807 *p = NULL;
809 else
811 if (!*p)
812 *p = (struct predicate *) pool_alloc (edge_predicate_pool);
813 **p = new_predicate;
818 /* KNOWN_VALS is partial mapping of parameters of NODE to constant values.
819 KNOWN_AGGS is a vector of aggregate jump functions for each parameter.
820 Return clause of possible truths. When INLINE_P is true, assume that we are
821 inlining.
823 ERROR_MARK means compile time invariant. */
825 static clause_t
826 evaluate_conditions_for_known_args (struct cgraph_node *node,
827 bool inline_p,
828 vec<tree> known_vals,
829 vec<ipa_agg_jump_function_p>
830 known_aggs)
832 clause_t clause = inline_p ? 0 : 1 << predicate_not_inlined_condition;
833 struct inline_summary *info = inline_summaries->get (node);
834 int i;
835 struct condition *c;
837 for (i = 0; vec_safe_iterate (info->conds, i, &c); i++)
839 tree val;
840 tree res;
842 /* We allow call stmt to have fewer arguments than the callee function
843 (especially for K&R style programs). So bound check here (we assume
844 known_aggs vector, if non-NULL, has the same length as
845 known_vals). */
846 gcc_checking_assert (!known_aggs.exists ()
847 || (known_vals.length () == known_aggs.length ()));
848 if (c->operand_num >= (int) known_vals.length ())
850 clause |= 1 << (i + predicate_first_dynamic_condition);
851 continue;
854 if (c->agg_contents)
856 struct ipa_agg_jump_function *agg;
858 if (c->code == CHANGED
859 && !c->by_ref
860 && (known_vals[c->operand_num] == error_mark_node))
861 continue;
863 if (known_aggs.exists ())
865 agg = known_aggs[c->operand_num];
866 val = ipa_find_agg_cst_for_param (agg, c->offset, c->by_ref);
868 else
869 val = NULL_TREE;
871 else
873 val = known_vals[c->operand_num];
874 if (val == error_mark_node && c->code != CHANGED)
875 val = NULL_TREE;
878 if (!val)
880 clause |= 1 << (i + predicate_first_dynamic_condition);
881 continue;
883 if (c->code == IS_NOT_CONSTANT || c->code == CHANGED)
884 continue;
886 if (operand_equal_p (TYPE_SIZE (TREE_TYPE (c->val)),
887 TYPE_SIZE (TREE_TYPE (val)), 0))
889 val = fold_unary (VIEW_CONVERT_EXPR, TREE_TYPE (c->val), val);
891 res = val
892 ? fold_binary_to_constant (c->code, boolean_type_node, val, c->val)
893 : NULL;
895 if (res && integer_zerop (res))
896 continue;
898 clause |= 1 << (i + predicate_first_dynamic_condition);
900 return clause;
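/* An illustrative example: suppose the summary has two conditions,
   condition 0 "op0 != 5" and condition 1 "op1 is IS_NOT_CONSTANT", and the
   caller passes the known constant 5 for op0 and nothing known for op1.
   The first condition folds to false (5 != 5 does not hold), so its bit
   stays clear, while the bit for the second condition is set because op1
   may well not be a constant; any predicate containing a clause that
   consists only of "op0 != 5" is therefore disproved in this context. */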
904 /* Work out what conditions might be true at invocation of E. */
906 static void
907 evaluate_properties_for_edge (struct cgraph_edge *e, bool inline_p,
908 clause_t *clause_ptr,
909 vec<tree> *known_vals_ptr,
910 vec<ipa_polymorphic_call_context>
911 *known_contexts_ptr,
912 vec<ipa_agg_jump_function_p> *known_aggs_ptr)
914 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
915 struct inline_summary *info = inline_summaries->get (callee);
916 vec<tree> known_vals = vNULL;
917 vec<ipa_agg_jump_function_p> known_aggs = vNULL;
919 if (clause_ptr)
920 *clause_ptr = inline_p ? 0 : 1 << predicate_not_inlined_condition;
921 if (known_vals_ptr)
922 known_vals_ptr->create (0);
923 if (known_contexts_ptr)
924 known_contexts_ptr->create (0);
926 if (ipa_node_params_sum
927 && !e->call_stmt_cannot_inline_p
928 && ((clause_ptr && info->conds) || known_vals_ptr || known_contexts_ptr))
930 struct ipa_node_params *parms_info;
931 struct ipa_edge_args *args = IPA_EDGE_REF (e);
932 struct inline_edge_summary *es = inline_edge_summary (e);
933 int i, count = ipa_get_cs_argument_count (args);
935 if (e->caller->global.inlined_to)
936 parms_info = IPA_NODE_REF (e->caller->global.inlined_to);
937 else
938 parms_info = IPA_NODE_REF (e->caller);
940 if (count && (info->conds || known_vals_ptr))
941 known_vals.safe_grow_cleared (count);
942 if (count && (info->conds || known_aggs_ptr))
943 known_aggs.safe_grow_cleared (count);
944 if (count && known_contexts_ptr)
945 known_contexts_ptr->safe_grow_cleared (count);
947 for (i = 0; i < count; i++)
949 struct ipa_jump_func *jf = ipa_get_ith_jump_func (args, i);
950 tree cst = ipa_value_from_jfunc (parms_info, jf);
952 if (!cst && e->call_stmt
953 && i < (int)gimple_call_num_args (e->call_stmt))
955 cst = gimple_call_arg (e->call_stmt, i);
956 if (!is_gimple_min_invariant (cst))
957 cst = NULL;
959 if (cst)
961 gcc_checking_assert (TREE_CODE (cst) != TREE_BINFO);
962 if (known_vals.exists ())
963 known_vals[i] = cst;
965 else if (inline_p && !es->param[i].change_prob)
966 known_vals[i] = error_mark_node;
968 if (known_contexts_ptr)
969 (*known_contexts_ptr)[i] = ipa_context_from_jfunc (parms_info, e,
970 i, jf);
971 /* TODO: When IPA-CP starts propagating and merging aggregate jump
972 functions, use its knowledge of the caller too, just like the
973 scalar case above. */
974 known_aggs[i] = &jf->agg;
977 else if (e->call_stmt && !e->call_stmt_cannot_inline_p
978 && ((clause_ptr && info->conds) || known_vals_ptr))
980 int i, count = (int)gimple_call_num_args (e->call_stmt);
982 if (count && (info->conds || known_vals_ptr))
983 known_vals.safe_grow_cleared (count);
984 for (i = 0; i < count; i++)
986 tree cst = gimple_call_arg (e->call_stmt, i);
987 if (!is_gimple_min_invariant (cst))
988 cst = NULL;
989 if (cst)
990 known_vals[i] = cst;
994 if (clause_ptr)
995 *clause_ptr = evaluate_conditions_for_known_args (callee, inline_p,
996 known_vals, known_aggs);
998 if (known_vals_ptr)
999 *known_vals_ptr = known_vals;
1000 else
1001 known_vals.release ();
1003 if (known_aggs_ptr)
1004 *known_aggs_ptr = known_aggs;
1005 else
1006 known_aggs.release ();
1010 /* Allocate the inline summary vector or resize it to cover all cgraph nodes. */
1012 static void
1013 inline_summary_alloc (void)
1015 if (!edge_removal_hook_holder)
1016 edge_removal_hook_holder =
1017 symtab->add_edge_removal_hook (&inline_edge_removal_hook, NULL);
1018 if (!edge_duplication_hook_holder)
1019 edge_duplication_hook_holder =
1020 symtab->add_edge_duplication_hook (&inline_edge_duplication_hook, NULL);
1022 if (!inline_summaries)
1023 inline_summaries = (inline_summary_t*) inline_summary_t::create_ggc (symtab);
1025 if (inline_edge_summary_vec.length () <= (unsigned) symtab->edges_max_uid)
1026 inline_edge_summary_vec.safe_grow_cleared (symtab->edges_max_uid + 1);
1027 if (!edge_predicate_pool)
1028 edge_predicate_pool = create_alloc_pool ("edge predicates",
1029 sizeof (struct predicate), 10);
1032 /* We are called multiple times for a given function; clear
1033 data from previous runs so they are not accumulated. */
1035 static void
1036 reset_inline_edge_summary (struct cgraph_edge *e)
1038 if (e->uid < (int) inline_edge_summary_vec.length ())
1040 struct inline_edge_summary *es = inline_edge_summary (e);
1042 es->call_stmt_size = es->call_stmt_time = 0;
1043 if (es->predicate)
1044 pool_free (edge_predicate_pool, es->predicate);
1045 es->predicate = NULL;
1046 es->param.release ();
1050 /* We are called multiple times for a given function; clear
1051 data from previous runs so they are not accumulated. */
1053 static void
1054 reset_inline_summary (struct cgraph_node *node,
1055 inline_summary *info)
1057 struct cgraph_edge *e;
1059 info->self_size = info->self_time = 0;
1060 info->estimated_stack_size = 0;
1061 info->estimated_self_stack_size = 0;
1062 info->stack_frame_offset = 0;
1063 info->size = 0;
1064 info->time = 0;
1065 info->growth = 0;
1066 info->scc_no = 0;
1067 if (info->loop_iterations)
1069 pool_free (edge_predicate_pool, info->loop_iterations);
1070 info->loop_iterations = NULL;
1072 if (info->loop_stride)
1074 pool_free (edge_predicate_pool, info->loop_stride);
1075 info->loop_stride = NULL;
1077 if (info->array_index)
1079 pool_free (edge_predicate_pool, info->array_index);
1080 info->array_index = NULL;
1082 vec_free (info->conds);
1083 vec_free (info->entry);
1084 for (e = node->callees; e; e = e->next_callee)
1085 reset_inline_edge_summary (e);
1086 for (e = node->indirect_calls; e; e = e->next_callee)
1087 reset_inline_edge_summary (e);
1090 /* Hook that is called by cgraph.c when a node is removed. */
1092 void
1093 inline_summary_t::remove (cgraph_node *node, inline_summary *info)
1095 reset_inline_summary (node, info);
1098 /* Remap predicate P of former function to be predicate of duplicated function.
1099 POSSIBLE_TRUTHS is clause of possible truths in the duplicated node,
1100 INFO is inline summary of the duplicated node. */
1102 static struct predicate
1103 remap_predicate_after_duplication (struct predicate *p,
1104 clause_t possible_truths,
1105 struct inline_summary *info)
1107 struct predicate new_predicate = true_predicate ();
1108 int j;
1109 for (j = 0; p->clause[j]; j++)
1110 if (!(possible_truths & p->clause[j]))
1112 new_predicate = false_predicate ();
1113 break;
1115 else
1116 add_clause (info->conds, &new_predicate,
1117 possible_truths & p->clause[j]);
1118 return new_predicate;
1121 /* Same as remap_predicate_after_duplication but handle hint predicate *P.
1122 Additionally take care of allocating a new memory slot for the updated predicate
1123 and set it to NULL when it becomes true or false (and thus uninteresting).
1126 static void
1127 remap_hint_predicate_after_duplication (struct predicate **p,
1128 clause_t possible_truths,
1129 struct inline_summary *info)
1131 struct predicate new_predicate;
1133 if (!*p)
1134 return;
1136 new_predicate = remap_predicate_after_duplication (*p,
1137 possible_truths, info);
1138 /* We do not want to free previous predicate; it is used by node origin. */
1139 *p = NULL;
1140 set_hint_predicate (p, new_predicate);
1144 /* Hook that is called by cgraph.c when a node is duplicated. */
1145 void
1146 inline_summary_t::duplicate (cgraph_node *src,
1147 cgraph_node *dst,
1148 inline_summary *,
1149 inline_summary *info)
1151 inline_summary_alloc ();
1152 memcpy (info, inline_summaries->get (src), sizeof (inline_summary));
1153 /* TODO: as an optimization, we may avoid copying conditions
1154 that are known to be false or true. */
1155 info->conds = vec_safe_copy (info->conds);
1157 /* When there are any replacements in the function body, see if we can figure
1158 out that something was optimized out. */
1159 if (ipa_node_params_sum && dst->clone.tree_map)
1161 vec<size_time_entry, va_gc> *entry = info->entry;
1162 /* Use SRC parm info since it may not be copied yet. */
1163 struct ipa_node_params *parms_info = IPA_NODE_REF (src);
1164 vec<tree> known_vals = vNULL;
1165 int count = ipa_get_param_count (parms_info);
1166 int i, j;
1167 clause_t possible_truths;
1168 struct predicate true_pred = true_predicate ();
1169 size_time_entry *e;
1170 int optimized_out_size = 0;
1171 bool inlined_to_p = false;
1172 struct cgraph_edge *edge;
1174 info->entry = 0;
1175 known_vals.safe_grow_cleared (count);
1176 for (i = 0; i < count; i++)
1178 struct ipa_replace_map *r;
1180 for (j = 0; vec_safe_iterate (dst->clone.tree_map, j, &r); j++)
1182 if (((!r->old_tree && r->parm_num == i)
1183 || (r->old_tree && r->old_tree == ipa_get_param (parms_info, i)))
1184 && r->replace_p && !r->ref_p)
1186 known_vals[i] = r->new_tree;
1187 break;
1191 possible_truths = evaluate_conditions_for_known_args (dst, false,
1192 known_vals,
1193 vNULL);
1194 known_vals.release ();
1196 account_size_time (info, 0, 0, &true_pred);
1198 /* Remap size_time vectors.
1199 Simplify the predicate by pruning out alternatives that are known
1200 to be false.
1201 TODO: as an optimization, we can also eliminate conditions known
1202 to be true. */
1203 for (i = 0; vec_safe_iterate (entry, i, &e); i++)
1205 struct predicate new_predicate;
1206 new_predicate = remap_predicate_after_duplication (&e->predicate,
1207 possible_truths,
1208 info);
1209 if (false_predicate_p (&new_predicate))
1210 optimized_out_size += e->size;
1211 else
1212 account_size_time (info, e->size, e->time, &new_predicate);
1215 /* Remap edge predicates with the same simplification as above.
1216 Also copy constantness arrays. */
1217 for (edge = dst->callees; edge; edge = edge->next_callee)
1219 struct predicate new_predicate;
1220 struct inline_edge_summary *es = inline_edge_summary (edge);
1222 if (!edge->inline_failed)
1223 inlined_to_p = true;
1224 if (!es->predicate)
1225 continue;
1226 new_predicate = remap_predicate_after_duplication (es->predicate,
1227 possible_truths,
1228 info);
1229 if (false_predicate_p (&new_predicate)
1230 && !false_predicate_p (es->predicate))
1232 optimized_out_size += es->call_stmt_size * INLINE_SIZE_SCALE;
1233 edge->frequency = 0;
1235 edge_set_predicate (edge, &new_predicate);
1238 /* Remap indirect edge predicates with the same simplification as above.
1239 Also copy constantness arrays. */
1240 for (edge = dst->indirect_calls; edge; edge = edge->next_callee)
1242 struct predicate new_predicate;
1243 struct inline_edge_summary *es = inline_edge_summary (edge);
1245 gcc_checking_assert (edge->inline_failed);
1246 if (!es->predicate)
1247 continue;
1248 new_predicate = remap_predicate_after_duplication (es->predicate,
1249 possible_truths,
1250 info);
1251 if (false_predicate_p (&new_predicate)
1252 && !false_predicate_p (es->predicate))
1254 optimized_out_size += es->call_stmt_size * INLINE_SIZE_SCALE;
1255 edge->frequency = 0;
1257 edge_set_predicate (edge, &new_predicate);
1259 remap_hint_predicate_after_duplication (&info->loop_iterations,
1260 possible_truths, info);
1261 remap_hint_predicate_after_duplication (&info->loop_stride,
1262 possible_truths, info);
1263 remap_hint_predicate_after_duplication (&info->array_index,
1264 possible_truths, info);
1266 /* If the inliner or someone after the inliner ever starts producing
1267 non-trivial clones, we will get into trouble with lack of information
1268 about updating self sizes, because size vectors already contain
1269 sizes of the callees. */
1270 gcc_assert (!inlined_to_p || !optimized_out_size);
1272 else
1274 info->entry = vec_safe_copy (info->entry);
1275 if (info->loop_iterations)
1277 predicate p = *info->loop_iterations;
1278 info->loop_iterations = NULL;
1279 set_hint_predicate (&info->loop_iterations, p);
1281 if (info->loop_stride)
1283 predicate p = *info->loop_stride;
1284 info->loop_stride = NULL;
1285 set_hint_predicate (&info->loop_stride, p);
1287 if (info->array_index)
1289 predicate p = *info->array_index;
1290 info->array_index = NULL;
1291 set_hint_predicate (&info->array_index, p);
1294 if (!dst->global.inlined_to)
1295 inline_update_overall_summary (dst);
1299 /* Hook that is called by cgraph.c when a node is duplicated. */
1301 static void
1302 inline_edge_duplication_hook (struct cgraph_edge *src,
1303 struct cgraph_edge *dst,
1304 ATTRIBUTE_UNUSED void *data)
1306 struct inline_edge_summary *info;
1307 struct inline_edge_summary *srcinfo;
1308 inline_summary_alloc ();
1309 info = inline_edge_summary (dst);
1310 srcinfo = inline_edge_summary (src);
1311 memcpy (info, srcinfo, sizeof (struct inline_edge_summary));
1312 info->predicate = NULL;
1313 edge_set_predicate (dst, srcinfo->predicate);
1314 info->param = srcinfo->param.copy ();
1315 if (!dst->indirect_unknown_callee && src->indirect_unknown_callee)
1317 info->call_stmt_size -= (eni_size_weights.indirect_call_cost
1318 - eni_size_weights.call_cost);
1319 info->call_stmt_time -= (eni_time_weights.indirect_call_cost
1320 - eni_time_weights.call_cost);
1325 /* Keep edge cache consistent across edge removal. */
1327 static void
1328 inline_edge_removal_hook (struct cgraph_edge *edge,
1329 void *data ATTRIBUTE_UNUSED)
1331 if (edge_growth_cache.exists ())
1332 reset_edge_growth_cache (edge);
1333 reset_inline_edge_summary (edge);
1337 /* Initialize growth caches. */
1339 void
1340 initialize_growth_caches (void)
1342 if (symtab->edges_max_uid)
1343 edge_growth_cache.safe_grow_cleared (symtab->edges_max_uid);
1347 /* Free growth caches. */
1349 void
1350 free_growth_caches (void)
1352 edge_growth_cache.release ();
1356 /* Dump edge summaries associated to NODE and recursively to all clones.
1357 Indent by INDENT. */
1359 static void
1360 dump_inline_edge_summary (FILE *f, int indent, struct cgraph_node *node,
1361 struct inline_summary *info)
1363 struct cgraph_edge *edge;
1364 for (edge = node->callees; edge; edge = edge->next_callee)
1366 struct inline_edge_summary *es = inline_edge_summary (edge);
1367 struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
1368 int i;
1370 fprintf (f,
1371 "%*s%s/%i %s\n%*s loop depth:%2i freq:%4i size:%2i"
1372 " time: %2i callee size:%2i stack:%2i",
1373 indent, "", callee->name (), callee->order,
1374 !edge->inline_failed
1375 ? "inlined" : cgraph_inline_failed_string (edge-> inline_failed),
1376 indent, "", es->loop_depth, edge->frequency,
1377 es->call_stmt_size, es->call_stmt_time,
1378 (int) inline_summaries->get (callee)->size / INLINE_SIZE_SCALE,
1379 (int) inline_summaries->get (callee)->estimated_stack_size);
1381 if (es->predicate)
1383 fprintf (f, " predicate: ");
1384 dump_predicate (f, info->conds, es->predicate);
1386 else
1387 fprintf (f, "\n");
1388 if (es->param.exists ())
1389 for (i = 0; i < (int) es->param.length (); i++)
1391 int prob = es->param[i].change_prob;
1393 if (!prob)
1394 fprintf (f, "%*s op%i is compile time invariant\n",
1395 indent + 2, "", i);
1396 else if (prob != REG_BR_PROB_BASE)
1397 fprintf (f, "%*s op%i change %f%% of time\n", indent + 2, "", i,
1398 prob * 100.0 / REG_BR_PROB_BASE);
1400 if (!edge->inline_failed)
1402 fprintf (f, "%*sStack frame offset %i, callee self size %i,"
1403 " callee size %i\n",
1404 indent + 2, "",
1405 (int) inline_summaries->get (callee)->stack_frame_offset,
1406 (int) inline_summaries->get (callee)->estimated_self_stack_size,
1407 (int) inline_summaries->get (callee)->estimated_stack_size);
1408 dump_inline_edge_summary (f, indent + 2, callee, info);
1411 for (edge = node->indirect_calls; edge; edge = edge->next_callee)
1413 struct inline_edge_summary *es = inline_edge_summary (edge);
1414 fprintf (f, "%*sindirect call loop depth:%2i freq:%4i size:%2i"
1415 " time: %2i",
1416 indent, "",
1417 es->loop_depth,
1418 edge->frequency, es->call_stmt_size, es->call_stmt_time);
1419 if (es->predicate)
1421 fprintf (f, "predicate: ");
1422 dump_predicate (f, info->conds, es->predicate);
1424 else
1425 fprintf (f, "\n");
1430 void
1431 dump_inline_summary (FILE *f, struct cgraph_node *node)
1433 if (node->definition)
1435 struct inline_summary *s = inline_summaries->get (node);
1436 size_time_entry *e;
1437 int i;
1438 fprintf (f, "Inline summary for %s/%i", node->name (),
1439 node->order);
1440 if (DECL_DISREGARD_INLINE_LIMITS (node->decl))
1441 fprintf (f, " always_inline");
1442 if (s->inlinable)
1443 fprintf (f, " inlinable");
1444 fprintf (f, "\n self time: %i\n", s->self_time);
1445 fprintf (f, " global time: %i\n", s->time);
1446 fprintf (f, " self size: %i\n", s->self_size);
1447 fprintf (f, " global size: %i\n", s->size);
1448 fprintf (f, " min size: %i\n", s->min_size);
1449 fprintf (f, " self stack: %i\n",
1450 (int) s->estimated_self_stack_size);
1451 fprintf (f, " global stack: %i\n", (int) s->estimated_stack_size);
1452 if (s->growth)
1453 fprintf (f, " estimated growth:%i\n", (int) s->growth);
1454 if (s->scc_no)
1455 fprintf (f, " In SCC: %i\n", (int) s->scc_no);
1456 for (i = 0; vec_safe_iterate (s->entry, i, &e); i++)
1458 fprintf (f, " size:%f, time:%f, predicate:",
1459 (double) e->size / INLINE_SIZE_SCALE,
1460 (double) e->time / INLINE_TIME_SCALE);
1461 dump_predicate (f, s->conds, &e->predicate);
1463 if (s->loop_iterations)
1465 fprintf (f, " loop iterations:");
1466 dump_predicate (f, s->conds, s->loop_iterations);
1468 if (s->loop_stride)
1470 fprintf (f, " loop stride:");
1471 dump_predicate (f, s->conds, s->loop_stride);
1473 if (s->array_index)
1475 fprintf (f, " array index:");
1476 dump_predicate (f, s->conds, s->array_index);
1478 fprintf (f, " calls:\n");
1479 dump_inline_edge_summary (f, 4, node, s);
1480 fprintf (f, "\n");
1484 DEBUG_FUNCTION void
1485 debug_inline_summary (struct cgraph_node *node)
1487 dump_inline_summary (stderr, node);
1490 void
1491 dump_inline_summaries (FILE *f)
1493 struct cgraph_node *node;
1495 FOR_EACH_DEFINED_FUNCTION (node)
1496 if (!node->global.inlined_to)
1497 dump_inline_summary (f, node);
1500 /* Give initial reasons why inlining would fail on EDGE. This gets either
1501 nullified or usually overwritten by more precise reasons later. */
1503 void
1504 initialize_inline_failed (struct cgraph_edge *e)
1506 struct cgraph_node *callee = e->callee;
1508 if (e->indirect_unknown_callee)
1509 e->inline_failed = CIF_INDIRECT_UNKNOWN_CALL;
1510 else if (!callee->definition)
1511 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
1512 else if (callee->local.redefined_extern_inline)
1513 e->inline_failed = CIF_REDEFINED_EXTERN_INLINE;
1514 else if (e->call_stmt_cannot_inline_p)
1515 e->inline_failed = CIF_MISMATCHED_ARGUMENTS;
1516 else if (cfun && fn_contains_cilk_spawn_p (cfun))
1517 /* We can't inline if the function is spawning a function. */
1518 e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
1519 else
1520 e->inline_failed = CIF_FUNCTION_NOT_CONSIDERED;
1523 /* Callback of walk_aliased_vdefs. Flags that it has been invoked to the
1524 boolean variable pointed to by DATA. */
1526 static bool
1527 mark_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef ATTRIBUTE_UNUSED,
1528 void *data)
1530 bool *b = (bool *) data;
1531 *b = true;
1532 return true;
1535 /* If OP refers to value of function parameter, return the corresponding
1536 parameter. */
1538 static tree
1539 unmodified_parm_1 (gimple stmt, tree op)
1541 /* SSA_NAME referring to parm default def? */
1542 if (TREE_CODE (op) == SSA_NAME
1543 && SSA_NAME_IS_DEFAULT_DEF (op)
1544 && TREE_CODE (SSA_NAME_VAR (op)) == PARM_DECL)
1545 return SSA_NAME_VAR (op);
1546 /* Non-SSA parm reference? */
1547 if (TREE_CODE (op) == PARM_DECL)
1549 bool modified = false;
1551 ao_ref refd;
1552 ao_ref_init (&refd, op);
1553 walk_aliased_vdefs (&refd, gimple_vuse (stmt), mark_modified, &modified,
1554 NULL);
1555 if (!modified)
1556 return op;
1558 return NULL_TREE;
1561 /* If OP refers to value of function parameter, return the corresponding
1562 parameter. Also traverse chains of SSA register assignments. */
1564 static tree
1565 unmodified_parm (gimple stmt, tree op)
1567 tree res = unmodified_parm_1 (stmt, op);
1568 if (res)
1569 return res;
1571 if (TREE_CODE (op) == SSA_NAME
1572 && !SSA_NAME_IS_DEFAULT_DEF (op)
1573 && gimple_assign_single_p (SSA_NAME_DEF_STMT (op)))
1574 return unmodified_parm (SSA_NAME_DEF_STMT (op),
1575 gimple_assign_rhs1 (SSA_NAME_DEF_STMT (op)));
1576 return NULL_TREE;
1579 /* If OP refers to a value of a function parameter or value loaded from an
1580 aggregate passed to a parameter (either by value or reference), return TRUE
1581 and store the number of the parameter to *INDEX_P and information whether
1582 and how it has been loaded from an aggregate into *AGGPOS. INFO describes
1583 the function parameters, STMT is the statement in which OP is used or
1584 loaded. */
1586 static bool
1587 unmodified_parm_or_parm_agg_item (struct ipa_node_params *info,
1588 gimple stmt, tree op, int *index_p,
1589 struct agg_position_info *aggpos)
1591 tree res = unmodified_parm_1 (stmt, op);
1593 gcc_checking_assert (aggpos);
1594 if (res)
1596 *index_p = ipa_get_param_decl_index (info, res);
1597 if (*index_p < 0)
1598 return false;
1599 aggpos->agg_contents = false;
1600 aggpos->by_ref = false;
1601 return true;
1604 if (TREE_CODE (op) == SSA_NAME)
1606 if (SSA_NAME_IS_DEFAULT_DEF (op)
1607 || !gimple_assign_single_p (SSA_NAME_DEF_STMT (op)))
1608 return false;
1609 stmt = SSA_NAME_DEF_STMT (op);
1610 op = gimple_assign_rhs1 (stmt);
1611 if (!REFERENCE_CLASS_P (op))
1612 return unmodified_parm_or_parm_agg_item (info, stmt, op, index_p,
1613 aggpos);
1616 aggpos->agg_contents = true;
1617 return ipa_load_from_parm_agg (info, stmt, op, index_p, &aggpos->offset,
1618 &aggpos->by_ref);
1621 /* See if statement might disappear after inlining.
1622 0 - means not eliminated
1623 1 - half of the statements go away
1624 2 - for sure it is eliminated.
1625 We are not terribly sophisticated, basically looking for simple abstraction
1626 penalty wrappers. */
1628 static int
1629 eliminated_by_inlining_prob (gimple stmt)
1631 enum gimple_code code = gimple_code (stmt);
1632 enum tree_code rhs_code;
1634 if (!optimize)
1635 return 0;
1637 switch (code)
1639 case GIMPLE_RETURN:
1640 return 2;
1641 case GIMPLE_ASSIGN:
1642 if (gimple_num_ops (stmt) != 2)
1643 return 0;
1645 rhs_code = gimple_assign_rhs_code (stmt);
1647 /* Casts of parameters, loads from parameters passed by reference
1648 and stores to return value or parameters are often free after
1649 inlining due to SRA and further combining.
1650 Assume that half of the statements go away. */
1651 if (CONVERT_EXPR_CODE_P (rhs_code)
1652 || rhs_code == VIEW_CONVERT_EXPR
1653 || rhs_code == ADDR_EXPR
1654 || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
1656 tree rhs = gimple_assign_rhs1 (stmt);
1657 tree lhs = gimple_assign_lhs (stmt);
1658 tree inner_rhs = get_base_address (rhs);
1659 tree inner_lhs = get_base_address (lhs);
1660 bool rhs_free = false;
1661 bool lhs_free = false;
1663 if (!inner_rhs)
1664 inner_rhs = rhs;
1665 if (!inner_lhs)
1666 inner_lhs = lhs;
1668 /* Reads of parameters are expected to be free. */
1669 if (unmodified_parm (stmt, inner_rhs))
1670 rhs_free = true;
1671 /* Match expressions of form &this->field. Those will most likely
1672 combine with something upstream after inlining. */
1673 else if (TREE_CODE (inner_rhs) == ADDR_EXPR)
1675 tree op = get_base_address (TREE_OPERAND (inner_rhs, 0));
1676 if (TREE_CODE (op) == PARM_DECL)
1677 rhs_free = true;
1678 else if (TREE_CODE (op) == MEM_REF
1679 && unmodified_parm (stmt, TREE_OPERAND (op, 0)))
1680 rhs_free = true;
1683 /* When parameter is not SSA register because its address is taken
1684 and it is just copied into one, the statement will be completely
1685 free after inlining (we will copy propagate backward). */
1686 if (rhs_free && is_gimple_reg (lhs))
1687 return 2;
1689 /* Reads of parameters passed by reference
1690 are expected to be free (i.e. optimized out after inlining). */
1691 if (TREE_CODE (inner_rhs) == MEM_REF
1692 && unmodified_parm (stmt, TREE_OPERAND (inner_rhs, 0)))
1693 rhs_free = true;
1695 /* Copying parameter passed by reference into gimple register is
1696 probably also going to copy propagate, but we can't be quite
1697 sure. */
1698 if (rhs_free && is_gimple_reg (lhs))
1699 lhs_free = true;
1701 /* Writes to parameters, parameters passed by value and return value
1702 (either directly or passed via invisible reference) are free.
1704 TODO: We ought to handle testcase like
1705 struct a {int a,b;};
1706 struct a
1707 retrurnsturct (void)
1709 struct a a ={1,2};
1710 return a;
1713 This translates into:
1715 retrurnsturct ()
1717 int a$b;
1718 int a$a;
1719 struct a a;
1720 struct a D.2739;
1722 <bb 2>:
1723 D.2739.a = 1;
1724 D.2739.b = 2;
1725 return D.2739;
1728 For that we either need to copy ipa-split logic detecting writes
1729 to return value. */
1730 if (TREE_CODE (inner_lhs) == PARM_DECL
1731 || TREE_CODE (inner_lhs) == RESULT_DECL
1732 || (TREE_CODE (inner_lhs) == MEM_REF
1733 && (unmodified_parm (stmt, TREE_OPERAND (inner_lhs, 0))
1734 || (TREE_CODE (TREE_OPERAND (inner_lhs, 0)) == SSA_NAME
1735 && SSA_NAME_VAR (TREE_OPERAND (inner_lhs, 0))
1736 && TREE_CODE (SSA_NAME_VAR (TREE_OPERAND
1737 (inner_lhs,
1738 0))) == RESULT_DECL))))
1739 lhs_free = true;
1740 if (lhs_free
1741 && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
1742 rhs_free = true;
1743 if (lhs_free && rhs_free)
1744 return 1;
1746 return 0;
1747 default:
1748 return 0;
1753 /* If BB ends with a conditional that we can turn into predicates, attach the
1754 corresponding predicates to the CFG edges. */
1756 static void
1757 set_cond_stmt_execution_predicate (struct ipa_node_params *info,
1758 struct inline_summary *summary,
1759 basic_block bb)
1761 gimple last;
1762 tree op;
1763 int index;
1764 struct agg_position_info aggpos;
1765 enum tree_code code, inverted_code;
1766 edge e;
1767 edge_iterator ei;
1768 gimple set_stmt;
1769 tree op2;
1771 last = last_stmt (bb);
1772 if (!last || gimple_code (last) != GIMPLE_COND)
1773 return;
1774 if (!is_gimple_ip_invariant (gimple_cond_rhs (last)))
1775 return;
1776 op = gimple_cond_lhs (last);
1777 /* TODO: handle conditionals like
1778 var = op0 < 4;
1779 if (var != 0). */
1780 if (unmodified_parm_or_parm_agg_item (info, last, op, &index, &aggpos))
1782 code = gimple_cond_code (last);
1783 inverted_code = invert_tree_comparison (code, HONOR_NANS (op));
1785 FOR_EACH_EDGE (e, ei, bb->succs)
1787 enum tree_code this_code = (e->flags & EDGE_TRUE_VALUE
1788 ? code : inverted_code);
1789 /* invert_tree_comparison will return ERROR_MARK on FP
1790 comparisons that are not EQ/NE instead of returning the proper
1791 unordered one. Be sure it is not confused with NON_CONSTANT. */
1792 if (this_code != ERROR_MARK)
1794 struct predicate p = add_condition (summary, index, &aggpos,
1795 this_code,
1796 gimple_cond_rhs (last));
1797 e->aux = pool_alloc (edge_predicate_pool);
1798 *(struct predicate *) e->aux = p;
1803 if (TREE_CODE (op) != SSA_NAME)
1804 return;
1805 /* Special case
1806 if (builtin_constant_p (op))
1807 constant_code
1808 else
1809 nonconstant_code.
1810 Here we can predicate nonconstant_code. We can't
1811 really handle constant_code since we have no predicate
1812 for this and also the constant code is not known to be
1813 optimized away when the inliner doesn't see that the operand is constant.
1814 Other optimizers might think otherwise. */
1815 if (gimple_cond_code (last) != NE_EXPR
1816 || !integer_zerop (gimple_cond_rhs (last)))
1817 return;
1818 set_stmt = SSA_NAME_DEF_STMT (op);
1819 if (!gimple_call_builtin_p (set_stmt, BUILT_IN_CONSTANT_P)
1820 || gimple_call_num_args (set_stmt) != 1)
1821 return;
1822 op2 = gimple_call_arg (set_stmt, 0);
1823 if (!unmodified_parm_or_parm_agg_item
1824 (info, set_stmt, op2, &index, &aggpos))
1825 return;
1826 FOR_EACH_EDGE (e, ei, bb->succs) if (e->flags & EDGE_FALSE_VALUE)
1828 struct predicate p = add_condition (summary, index, &aggpos,
1829 IS_NOT_CONSTANT, NULL_TREE);
1830 e->aux = pool_alloc (edge_predicate_pool);
1831 *(struct predicate *) e->aux = p;
1836 /* If BB ends with a switch that we can turn into predicates, attach the
1837 corresponding predicates to the CFG edges. */
1839 static void
1840 set_switch_stmt_execution_predicate (struct ipa_node_params *info,
1841 struct inline_summary *summary,
1842 basic_block bb)
1844 gimple lastg;
1845 tree op;
1846 int index;
1847 struct agg_position_info aggpos;
1848 edge e;
1849 edge_iterator ei;
1850 size_t n;
1851 size_t case_idx;
1853 lastg = last_stmt (bb);
1854 if (!lastg || gimple_code (lastg) != GIMPLE_SWITCH)
1855 return;
1856 gswitch *last = as_a <gswitch *> (lastg);
1857 op = gimple_switch_index (last);
1858 if (!unmodified_parm_or_parm_agg_item (info, last, op, &index, &aggpos))
1859 return;
1861 FOR_EACH_EDGE (e, ei, bb->succs)
1863 e->aux = pool_alloc (edge_predicate_pool);
1864 *(struct predicate *) e->aux = false_predicate ();
1866 n = gimple_switch_num_labels (last);
1867 for (case_idx = 0; case_idx < n; ++case_idx)
1869 tree cl = gimple_switch_label (last, case_idx);
1870 tree min, max;
1871 struct predicate p;
1873 e = find_edge (bb, label_to_block (CASE_LABEL (cl)));
1874 min = CASE_LOW (cl);
1875 max = CASE_HIGH (cl);
1877 /* For the default case we might want to construct a predicate that none
1878 of the cases is met, but it is a bit hard to do without having negations
1879 of conditionals handy. */
1880 if (!min && !max)
1881 p = true_predicate ();
1882 else if (!max)
1883 p = add_condition (summary, index, &aggpos, EQ_EXPR, min);
1884 else
1886 struct predicate p1, p2;
1887 p1 = add_condition (summary, index, &aggpos, GE_EXPR, min);
1888 p2 = add_condition (summary, index, &aggpos, LE_EXPR, max);
1889 p = and_predicates (summary->conds, &p1, &p2);
1891 *(struct predicate *) e->aux
1892 = or_predicates (summary->conds, &p, (struct predicate *) e->aux);
1897 /* For each BB in NODE attach to its AUX pointer predicate under
1898 which it is executable. */
1900 static void
1901 compute_bb_predicates (struct cgraph_node *node,
1902 struct ipa_node_params *parms_info,
1903 struct inline_summary *summary)
1905 struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
1906 bool done = false;
1907 basic_block bb;
1909 FOR_EACH_BB_FN (bb, my_function)
1911 set_cond_stmt_execution_predicate (parms_info, summary, bb);
1912 set_switch_stmt_execution_predicate (parms_info, summary, bb);
1915 /* Entry block is always executable. */
1916 ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
1917 = pool_alloc (edge_predicate_pool);
1918 *(struct predicate *) ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
1919 = true_predicate ();
1921 /* A simple dataflow propagation of predicates forward in the CFG.
1922 TODO: work in reverse postorder. */
1923 while (!done)
1925 done = true;
1926 FOR_EACH_BB_FN (bb, my_function)
1928 struct predicate p = false_predicate ();
1929 edge e;
1930 edge_iterator ei;
1931 FOR_EACH_EDGE (e, ei, bb->preds)
1933 if (e->src->aux)
1935 struct predicate this_bb_predicate
1936 = *(struct predicate *) e->src->aux;
1937 if (e->aux)
1938 this_bb_predicate
1939 = and_predicates (summary->conds, &this_bb_predicate,
1940 (struct predicate *) e->aux);
1941 p = or_predicates (summary->conds, &p, &this_bb_predicate);
1942 if (true_predicate_p (&p))
1943 break;
1946 if (false_predicate_p (&p))
1947 gcc_assert (!bb->aux);
1948 else
1950 if (!bb->aux)
1952 done = false;
1953 bb->aux = pool_alloc (edge_predicate_pool);
1954 *((struct predicate *) bb->aux) = p;
1956 else if (!predicates_equal_p (&p, (struct predicate *) bb->aux))
1958 /* This OR operation is needed to ensure monotonic data flow
1959 in the case we hit the limit on number of clauses and the
1960 and/or operations above give approximate answers. */
1961 p = or_predicates (summary->conds, &p, (struct predicate *)bb->aux);
1962 if (!predicates_equal_p (&p, (struct predicate *) bb->aux))
1964 done = false;
1965 *((struct predicate *) bb->aux) = p;
1974 /* We keep info about constantness of SSA names. */
1976 typedef struct predicate predicate_t;
1977 /* Return predicate specifying when the STMT might have result that is not
1978 a compile time constant. */
1980 static struct predicate
1981 will_be_nonconstant_expr_predicate (struct ipa_node_params *info,
1982 struct inline_summary *summary,
1983 tree expr,
1984 vec<predicate_t> nonconstant_names)
1986 tree parm;
1987 int index;
1989 while (UNARY_CLASS_P (expr))
1990 expr = TREE_OPERAND (expr, 0);
1992 parm = unmodified_parm (NULL, expr);
1993 if (parm && (index = ipa_get_param_decl_index (info, parm)) >= 0)
1994 return add_condition (summary, index, NULL, CHANGED, NULL_TREE);
1995 if (is_gimple_min_invariant (expr))
1996 return false_predicate ();
1997 if (TREE_CODE (expr) == SSA_NAME)
1998 return nonconstant_names[SSA_NAME_VERSION (expr)];
1999 if (BINARY_CLASS_P (expr) || COMPARISON_CLASS_P (expr))
2001 struct predicate p1 = will_be_nonconstant_expr_predicate
2002 (info, summary, TREE_OPERAND (expr, 0),
2003 nonconstant_names);
2004 struct predicate p2;
2005 if (true_predicate_p (&p1))
2006 return p1;
2007 p2 = will_be_nonconstant_expr_predicate (info, summary,
2008 TREE_OPERAND (expr, 1),
2009 nonconstant_names);
2010 return or_predicates (summary->conds, &p1, &p2);
2012 else if (TREE_CODE (expr) == COND_EXPR)
2014 struct predicate p1 = will_be_nonconstant_expr_predicate
2015 (info, summary, TREE_OPERAND (expr, 0),
2016 nonconstant_names);
2017 struct predicate p2;
2018 if (true_predicate_p (&p1))
2019 return p1;
2020 p2 = will_be_nonconstant_expr_predicate (info, summary,
2021 TREE_OPERAND (expr, 1),
2022 nonconstant_names);
2023 if (true_predicate_p (&p2))
2024 return p2;
2025 p1 = or_predicates (summary->conds, &p1, &p2);
2026 p2 = will_be_nonconstant_expr_predicate (info, summary,
2027 TREE_OPERAND (expr, 2),
2028 nonconstant_names);
2029 return or_predicates (summary->conds, &p1, &p2);
2031 else
2033 debug_tree (expr);
2034 gcc_unreachable ();
2036 return false_predicate ();
2040 /* Return a predicate specifying when the statement STMT might have a result that is not
2041 a compile time constant. */
2043 static struct predicate
2044 will_be_nonconstant_predicate (struct ipa_node_params *info,
2045 struct inline_summary *summary,
2046 gimple stmt,
2047 vec<predicate_t> nonconstant_names)
2049 struct predicate p = true_predicate ();
2050 ssa_op_iter iter;
2051 tree use;
2052 struct predicate op_non_const;
2053 bool is_load;
2054 int base_index;
2055 struct agg_position_info aggpos;
2057 /* Which statements might be optimized away
2058 when their arguments are constant. */
2059 if (gimple_code (stmt) != GIMPLE_ASSIGN
2060 && gimple_code (stmt) != GIMPLE_COND
2061 && gimple_code (stmt) != GIMPLE_SWITCH
2062 && (gimple_code (stmt) != GIMPLE_CALL
2063 || !(gimple_call_flags (stmt) & ECF_CONST)))
2064 return p;
2066 /* Stores will stay anyway. */
2067 if (gimple_store_p (stmt))
2068 return p;
2070 is_load = gimple_assign_load_p (stmt);
2072 /* Loads can be optimized when the value is known. */
2073 if (is_load)
2075 tree op;
2076 gcc_assert (gimple_assign_single_p (stmt));
2077 op = gimple_assign_rhs1 (stmt);
2078 if (!unmodified_parm_or_parm_agg_item (info, stmt, op, &base_index,
2079 &aggpos))
2080 return p;
2082 else
2083 base_index = -1;
2085 /* See if we understand all operands before we start
2086 adding conditionals. */
2087 FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
2089 tree parm = unmodified_parm (stmt, use);
2090 /* For arguments we can build a condition. */
2091 if (parm && ipa_get_param_decl_index (info, parm) >= 0)
2092 continue;
2093 if (TREE_CODE (use) != SSA_NAME)
2094 return p;
2095 /* If we know when the operand is constant,
2096 we can still say something useful. */
2097 if (!true_predicate_p (&nonconstant_names[SSA_NAME_VERSION (use)]))
2098 continue;
2099 return p;
2102 if (is_load)
2103 op_non_const =
2104 add_condition (summary, base_index, &aggpos, CHANGED, NULL);
2105 else
2106 op_non_const = false_predicate ();
2107 FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
2109 tree parm = unmodified_parm (stmt, use);
2110 int index;
2112 if (parm && (index = ipa_get_param_decl_index (info, parm)) >= 0)
2114 if (index != base_index)
2115 p = add_condition (summary, index, NULL, CHANGED, NULL_TREE);
2116 else
2117 continue;
2119 else
2120 p = nonconstant_names[SSA_NAME_VERSION (use)];
2121 op_non_const = or_predicates (summary->conds, &p, &op_non_const);
2123 if ((gimple_code (stmt) == GIMPLE_ASSIGN || gimple_code (stmt) == GIMPLE_CALL)
2124 && gimple_op (stmt, 0)
2125 && TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
2126 nonconstant_names[SSA_NAME_VERSION (gimple_op (stmt, 0))]
2127 = op_non_const;
2128 return op_non_const;
2131 struct record_modified_bb_info
2133 bitmap bb_set;
2134 gimple stmt;
2137 /* Callback of walk_aliased_vdefs. Records basic blocks where the value may be
2138 set except for info->stmt. */
2140 static bool
2141 record_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef, void *data)
2143 struct record_modified_bb_info *info =
2144 (struct record_modified_bb_info *) data;
2145 if (SSA_NAME_DEF_STMT (vdef) == info->stmt)
2146 return false;
2147 bitmap_set_bit (info->bb_set,
2148 SSA_NAME_IS_DEFAULT_DEF (vdef)
2149 ? ENTRY_BLOCK_PTR_FOR_FN (cfun)->index
2150 : gimple_bb (SSA_NAME_DEF_STMT (vdef))->index);
2151 return false;
2154 /* Return the probability (based on REG_BR_PROB_BASE) that the I-th parameter
2155 of STMT will change since the last invocation of STMT.
2157 Value 0 is reserved for compile time invariants.
2158 For common parameters it is REG_BR_PROB_BASE. For loop invariants it
2159 ought to be REG_BR_PROB_BASE / estimated_iters. */
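/* Illustrative example (not from the original source): assuming
   REG_BR_PROB_BASE is 10000, if the statement defining the value runs
   with frequency 100 while the basic block of the call runs with
   frequency 1000, the code below returns
   GCOV_COMPUTE_SCALE (100, 1000) == 1000, i.e. roughly a 10% chance that
   the parameter changed since the previous invocation.  */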
2161 static int
2162 param_change_prob (gimple stmt, int i)
2164 tree op = gimple_call_arg (stmt, i);
2165 basic_block bb = gimple_bb (stmt);
2166 tree base;
2168 /* Global invariants never change. */
2169 if (is_gimple_min_invariant (op))
2170 return 0;
2171 /* We would have to do non-trivial analysis to really work out what
2172 the probability of the value changing is (i.e. when the init statement
2173 is in a sibling loop of the call).
2175 We make a conservative estimate: when the call is executed N times more often
2176 than the statement defining the value, we take the frequency 1/N. */
2177 if (TREE_CODE (op) == SSA_NAME)
2179 int init_freq;
2181 if (!bb->frequency)
2182 return REG_BR_PROB_BASE;
2184 if (SSA_NAME_IS_DEFAULT_DEF (op))
2185 init_freq = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
2186 else
2187 init_freq = gimple_bb (SSA_NAME_DEF_STMT (op))->frequency;
2189 if (!init_freq)
2190 init_freq = 1;
2191 if (init_freq < bb->frequency)
2192 return MAX (GCOV_COMPUTE_SCALE (init_freq, bb->frequency), 1);
2193 else
2194 return REG_BR_PROB_BASE;
2197 base = get_base_address (op);
2198 if (base)
2200 ao_ref refd;
2201 int max;
2202 struct record_modified_bb_info info;
2203 bitmap_iterator bi;
2204 unsigned index;
2205 tree init = ctor_for_folding (base);
2207 if (init != error_mark_node)
2208 return 0;
2209 if (!bb->frequency)
2210 return REG_BR_PROB_BASE;
2211 ao_ref_init (&refd, op);
2212 info.stmt = stmt;
2213 info.bb_set = BITMAP_ALLOC (NULL);
2214 walk_aliased_vdefs (&refd, gimple_vuse (stmt), record_modified, &info,
2215 NULL);
2216 if (bitmap_bit_p (info.bb_set, bb->index))
2218 BITMAP_FREE (info.bb_set);
2219 return REG_BR_PROB_BASE;
2222 /* Assume that all memory is initialized at entry.
2223 TODO: Can we easily determine if the value is always defined
2224 and thus we may skip the entry block? */
2225 if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency)
2226 max = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
2227 else
2228 max = 1;
2230 EXECUTE_IF_SET_IN_BITMAP (info.bb_set, 0, index, bi)
2231 max = MIN (max, BASIC_BLOCK_FOR_FN (cfun, index)->frequency);
2233 BITMAP_FREE (info.bb_set);
2234 if (max < bb->frequency)
2235 return MAX (GCOV_COMPUTE_SCALE (max, bb->frequency), 1);
2236 else
2237 return REG_BR_PROB_BASE;
2239 return REG_BR_PROB_BASE;
2242 /* Find whether a basic block BB is the final block of a (half) diamond CFG
2243 sub-graph and whether the predicate the condition depends on is known. If so,
2244 return true and store the predicate in *P. */
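/* A rough sketch of the shape being recognized (hypothetical GIMPLE,
   for illustration only):

     bb2:  if (a_1 > 5) goto bb3; else goto bb4;   <- first_bb, the condition
     bb3:  ...                                     <- single pred and succ
     bb4:  ...                                     <- single pred and succ
     bb5:  x_2 = PHI <0(bb3), 1(bb4)>              <- BB handed to this function

   If so, *P receives the predicate under which the controlling operand
   (a_1 above) is not a compile time constant.  */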
2246 static bool
2247 phi_result_unknown_predicate (struct ipa_node_params *info,
2248 inline_summary *summary, basic_block bb,
2249 struct predicate *p,
2250 vec<predicate_t> nonconstant_names)
2252 edge e;
2253 edge_iterator ei;
2254 basic_block first_bb = NULL;
2255 gimple stmt;
2257 if (single_pred_p (bb))
2259 *p = false_predicate ();
2260 return true;
2263 FOR_EACH_EDGE (e, ei, bb->preds)
2265 if (single_succ_p (e->src))
2267 if (!single_pred_p (e->src))
2268 return false;
2269 if (!first_bb)
2270 first_bb = single_pred (e->src);
2271 else if (single_pred (e->src) != first_bb)
2272 return false;
2274 else
2276 if (!first_bb)
2277 first_bb = e->src;
2278 else if (e->src != first_bb)
2279 return false;
2283 if (!first_bb)
2284 return false;
2286 stmt = last_stmt (first_bb);
2287 if (!stmt
2288 || gimple_code (stmt) != GIMPLE_COND
2289 || !is_gimple_ip_invariant (gimple_cond_rhs (stmt)))
2290 return false;
2292 *p = will_be_nonconstant_expr_predicate (info, summary,
2293 gimple_cond_lhs (stmt),
2294 nonconstant_names);
2295 if (true_predicate_p (p))
2296 return false;
2297 else
2298 return true;
2301 /* Given a PHI statement in a function described by inline properties SUMMARY
2302 and *P being the predicate describing whether the selected PHI argument is
2303 known, store a predicate for the result of the PHI statement into
2304 NONCONSTANT_NAMES, if possible. */
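/* For example (illustrative only): for x_4 = PHI <a_2(bb3), 7(bb4)> the
   constant argument 7 contributes nothing, so the stored predicate is *P
   OR-ed with the nonconstant predicate of a_2.  */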
2306 static void
2307 predicate_for_phi_result (struct inline_summary *summary, gphi *phi,
2308 struct predicate *p,
2309 vec<predicate_t> nonconstant_names)
2311 unsigned i;
2313 for (i = 0; i < gimple_phi_num_args (phi); i++)
2315 tree arg = gimple_phi_arg (phi, i)->def;
2316 if (!is_gimple_min_invariant (arg))
2318 gcc_assert (TREE_CODE (arg) == SSA_NAME);
2319 *p = or_predicates (summary->conds, p,
2320 &nonconstant_names[SSA_NAME_VERSION (arg)]);
2321 if (true_predicate_p (p))
2322 return;
2326 if (dump_file && (dump_flags & TDF_DETAILS))
2328 fprintf (dump_file, "\t\tphi predicate: ");
2329 dump_predicate (dump_file, summary->conds, p);
2331 nonconstant_names[SSA_NAME_VERSION (gimple_phi_result (phi))] = *p;
2334 /* Return predicate specifying when array index in access OP becomes non-constant. */
2336 static struct predicate
2337 array_index_predicate (inline_summary *info,
2338 vec< predicate_t> nonconstant_names, tree op)
2340 struct predicate p = false_predicate ();
2341 while (handled_component_p (op))
2343 if (TREE_CODE (op) == ARRAY_REF || TREE_CODE (op) == ARRAY_RANGE_REF)
2345 if (TREE_CODE (TREE_OPERAND (op, 1)) == SSA_NAME)
2346 p = or_predicates (info->conds, &p,
2347 &nonconstant_names[SSA_NAME_VERSION
2348 (TREE_OPERAND (op, 1))]);
2350 op = TREE_OPERAND (op, 0);
2352 return p;
2355 /* For a typical usage of __builtin_expect (a<b, 1), we
2356 may introduce an extra relation stmt:
2357 With the builtin, we have
2358 t1 = a <= b;
2359 t2 = (long int) t1;
2360 t3 = __builtin_expect (t2, 1);
2361 if (t3 != 0)
2362 goto ...
2363 Without the builtin, we have
2364 if (a<=b)
2365 goto...
2366 This affects the size/time estimation and may have
2367 an impact on the early inlining.
2368 Here find this pattern and fix it up later. */
2370 static gimple
2371 find_foldable_builtin_expect (basic_block bb)
2373 gimple_stmt_iterator bsi;
2375 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
2377 gimple stmt = gsi_stmt (bsi);
2378 if (gimple_call_builtin_p (stmt, BUILT_IN_EXPECT)
2379 || (is_gimple_call (stmt)
2380 && gimple_call_internal_p (stmt)
2381 && gimple_call_internal_fn (stmt) == IFN_BUILTIN_EXPECT))
2383 tree var = gimple_call_lhs (stmt);
2384 tree arg = gimple_call_arg (stmt, 0);
2385 use_operand_p use_p;
2386 gimple use_stmt;
2387 bool match = false;
2388 bool done = false;
2390 if (!var || !arg)
2391 continue;
2392 gcc_assert (TREE_CODE (var) == SSA_NAME);
2394 while (TREE_CODE (arg) == SSA_NAME)
2396 gimple stmt_tmp = SSA_NAME_DEF_STMT (arg);
2397 if (!is_gimple_assign (stmt_tmp))
2398 break;
2399 switch (gimple_assign_rhs_code (stmt_tmp))
2401 case LT_EXPR:
2402 case LE_EXPR:
2403 case GT_EXPR:
2404 case GE_EXPR:
2405 case EQ_EXPR:
2406 case NE_EXPR:
2407 match = true;
2408 done = true;
2409 break;
2410 CASE_CONVERT:
2411 break;
2412 default:
2413 done = true;
2414 break;
2416 if (done)
2417 break;
2418 arg = gimple_assign_rhs1 (stmt_tmp);
2421 if (match && single_imm_use (var, &use_p, &use_stmt)
2422 && gimple_code (use_stmt) == GIMPLE_COND)
2423 return use_stmt;
2426 return NULL;
2429 /* Return true when the basic block contains only clobbers followed by RESX.
2430 Such BBs are kept around to make removal of dead stores possible in the
2431 presence of EH and will be optimized out by optimize_clobbers later in the
2432 game.
2434 NEED_EH is used to recurse in case the clobber has non-EH predecessors
2435 that can be clobber-only, too. When it is false, the RESX is not necessary
2436 at the end of the basic block. */
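/* For illustration (a hypothetical dump, not taken from the original
   source), a block recognized here typically looks like:

     <bb 13>:
       D.2345 ={v} {CLOBBER};
       D.2346 ={v} {CLOBBER};
       resx 2

   i.e. nothing but clobber statements followed by a RESX.  */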
2438 static bool
2439 clobber_only_eh_bb_p (basic_block bb, bool need_eh = true)
2441 gimple_stmt_iterator gsi = gsi_last_bb (bb);
2442 edge_iterator ei;
2443 edge e;
2445 if (need_eh)
2447 if (gsi_end_p (gsi))
2448 return false;
2449 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_RESX)
2450 return false;
2451 gsi_prev (&gsi);
2453 else if (!single_succ_p (bb))
2454 return false;
2456 for (; !gsi_end_p (gsi); gsi_prev (&gsi))
2458 gimple stmt = gsi_stmt (gsi);
2459 if (is_gimple_debug (stmt))
2460 continue;
2461 if (gimple_clobber_p (stmt))
2462 continue;
2463 if (gimple_code (stmt) == GIMPLE_LABEL)
2464 break;
2465 return false;
2468 /* See if all predecessors are either throws or clobber-only BBs. */
2469 FOR_EACH_EDGE (e, ei, bb->preds)
2470 if (!(e->flags & EDGE_EH)
2471 && !clobber_only_eh_bb_p (e->src, false))
2472 return false;
2474 return true;
2477 /* Compute function body size parameters for NODE.
2478 When EARLY is true, we compute only simple summaries without
2479 non-trivial predicates to drive the early inliner. */
2481 static void
2482 estimate_function_body_sizes (struct cgraph_node *node, bool early)
2484 gcov_type time = 0;
2485 /* Estimate static overhead for function prologue/epilogue and alignment. */
2486 int size = 2;
2487 /* Benefits are scaled by probability of elimination that is in range
2488 <0,2>. */
2489 basic_block bb;
2490 struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
2491 int freq;
2492 struct inline_summary *info = inline_summaries->get (node);
2493 struct predicate bb_predicate;
2494 struct ipa_node_params *parms_info = NULL;
2495 vec<predicate_t> nonconstant_names = vNULL;
2496 int nblocks, n;
2497 int *order;
2498 predicate array_index = true_predicate ();
2499 gimple fix_builtin_expect_stmt;
2501 info->conds = NULL;
2502 info->entry = NULL;
2504 /* When optimizing and analyzing for IPA inliner, initialize loop optimizer
2505 so we can produce proper inline hints.
2507 When optimizing and analyzing for early inliner, initialize node params
2508 so we can produce correct BB predicates. */
2510 if (opt_for_fn (node->decl, optimize))
2512 calculate_dominance_info (CDI_DOMINATORS);
2513 if (!early)
2514 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
2515 else
2517 ipa_check_create_node_params ();
2518 ipa_initialize_node_params (node);
2521 if (ipa_node_params_sum)
2523 parms_info = IPA_NODE_REF (node);
2524 nonconstant_names.safe_grow_cleared
2525 (SSANAMES (my_function)->length ());
2529 if (dump_file)
2530 fprintf (dump_file, "\nAnalyzing function body size: %s\n",
2531 node->name ());
2533 /* When we run into the maximal number of entries, we assign everything to the
2534 constant truth case. Be sure to have it in the list. */
2535 bb_predicate = true_predicate ();
2536 account_size_time (info, 0, 0, &bb_predicate);
2538 bb_predicate = not_inlined_predicate ();
2539 account_size_time (info, 2 * INLINE_SIZE_SCALE, 0, &bb_predicate);
2541 gcc_assert (my_function && my_function->cfg);
2542 if (parms_info)
2543 compute_bb_predicates (node, parms_info, info);
2544 gcc_assert (cfun == my_function);
2545 order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
2546 nblocks = pre_and_rev_post_order_compute (NULL, order, false);
2547 for (n = 0; n < nblocks; n++)
2549 bb = BASIC_BLOCK_FOR_FN (cfun, order[n]);
2550 freq = compute_call_stmt_bb_frequency (node->decl, bb);
2551 if (clobber_only_eh_bb_p (bb))
2553 if (dump_file && (dump_flags & TDF_DETAILS))
2554 fprintf (dump_file, "\n Ignoring BB %i;"
2555 " it will be optimized away by cleanup_clobbers\n",
2556 bb->index);
2557 continue;
2560 /* TODO: Obviously predicates can be propagated down across CFG. */
2561 if (parms_info)
2563 if (bb->aux)
2564 bb_predicate = *(struct predicate *) bb->aux;
2565 else
2566 bb_predicate = false_predicate ();
2568 else
2569 bb_predicate = true_predicate ();
2571 if (dump_file && (dump_flags & TDF_DETAILS))
2573 fprintf (dump_file, "\n BB %i predicate:", bb->index);
2574 dump_predicate (dump_file, info->conds, &bb_predicate);
2577 if (parms_info && nonconstant_names.exists ())
2579 struct predicate phi_predicate;
2580 bool first_phi = true;
2582 for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi);
2583 gsi_next (&bsi))
2585 if (first_phi
2586 && !phi_result_unknown_predicate (parms_info, info, bb,
2587 &phi_predicate,
2588 nonconstant_names))
2589 break;
2590 first_phi = false;
2591 if (dump_file && (dump_flags & TDF_DETAILS))
2593 fprintf (dump_file, " ");
2594 print_gimple_stmt (dump_file, gsi_stmt (bsi), 0, 0);
2596 predicate_for_phi_result (info, bsi.phi (), &phi_predicate,
2597 nonconstant_names);
2601 fix_builtin_expect_stmt = find_foldable_builtin_expect (bb);
2603 for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi);
2604 gsi_next (&bsi))
2606 gimple stmt = gsi_stmt (bsi);
2607 int this_size = estimate_num_insns (stmt, &eni_size_weights);
2608 int this_time = estimate_num_insns (stmt, &eni_time_weights);
2609 int prob;
2610 struct predicate will_be_nonconstant;
2612 /* This relation stmt should be folded after we remove the
2613 builtin_expect call. Adjust the cost here. */
2614 if (stmt == fix_builtin_expect_stmt)
2616 this_size--;
2617 this_time--;
2620 if (dump_file && (dump_flags & TDF_DETAILS))
2622 fprintf (dump_file, " ");
2623 print_gimple_stmt (dump_file, stmt, 0, 0);
2624 fprintf (dump_file, "\t\tfreq:%3.2f size:%3i time:%3i\n",
2625 ((double) freq) / CGRAPH_FREQ_BASE, this_size,
2626 this_time);
2629 if (gimple_assign_load_p (stmt) && nonconstant_names.exists ())
2631 struct predicate this_array_index;
2632 this_array_index =
2633 array_index_predicate (info, nonconstant_names,
2634 gimple_assign_rhs1 (stmt));
2635 if (!false_predicate_p (&this_array_index))
2636 array_index =
2637 and_predicates (info->conds, &array_index,
2638 &this_array_index);
2640 if (gimple_store_p (stmt) && nonconstant_names.exists ())
2642 struct predicate this_array_index;
2643 this_array_index =
2644 array_index_predicate (info, nonconstant_names,
2645 gimple_get_lhs (stmt));
2646 if (!false_predicate_p (&this_array_index))
2647 array_index =
2648 and_predicates (info->conds, &array_index,
2649 &this_array_index);
2653 if (is_gimple_call (stmt)
2654 && !gimple_call_internal_p (stmt))
2656 struct cgraph_edge *edge = node->get_edge (stmt);
2657 struct inline_edge_summary *es = inline_edge_summary (edge);
2659 /* Special case: results of BUILT_IN_CONSTANT_P will always be
2660 resolved as constant. We, however, don't want to optimize
2661 out the cgraph edges. */
2662 if (nonconstant_names.exists ()
2663 && gimple_call_builtin_p (stmt, BUILT_IN_CONSTANT_P)
2664 && gimple_call_lhs (stmt)
2665 && TREE_CODE (gimple_call_lhs (stmt)) == SSA_NAME)
2667 struct predicate false_p = false_predicate ();
2668 nonconstant_names[SSA_NAME_VERSION (gimple_call_lhs (stmt))]
2669 = false_p;
2671 if (ipa_node_params_sum)
2673 int count = gimple_call_num_args (stmt);
2674 int i;
2676 if (count)
2677 es->param.safe_grow_cleared (count);
2678 for (i = 0; i < count; i++)
2680 int prob = param_change_prob (stmt, i);
2681 gcc_assert (prob >= 0 && prob <= REG_BR_PROB_BASE);
2682 es->param[i].change_prob = prob;
2686 es->call_stmt_size = this_size;
2687 es->call_stmt_time = this_time;
2688 es->loop_depth = bb_loop_depth (bb);
2689 edge_set_predicate (edge, &bb_predicate);
2692 /* TODO: When a conditional jump or switch is known to be constant, but
2693 we did not translate it into the predicates, we really can account
2694 for just the maximum of the possible paths. */
2695 if (parms_info)
2696 will_be_nonconstant
2697 = will_be_nonconstant_predicate (parms_info, info,
2698 stmt, nonconstant_names);
2699 if (this_time || this_size)
2701 struct predicate p;
2703 this_time *= freq;
2705 prob = eliminated_by_inlining_prob (stmt);
2706 if (prob == 1 && dump_file && (dump_flags & TDF_DETAILS))
2707 fprintf (dump_file,
2708 "\t\t50%% will be eliminated by inlining\n");
2709 if (prob == 2 && dump_file && (dump_flags & TDF_DETAILS))
2710 fprintf (dump_file, "\t\tWill be eliminated by inlining\n");
2712 if (parms_info)
2713 p = and_predicates (info->conds, &bb_predicate,
2714 &will_be_nonconstant);
2715 else
2716 p = true_predicate ();
2718 if (!false_predicate_p (&p)
2719 || (is_gimple_call (stmt)
2720 && !false_predicate_p (&bb_predicate)))
2722 time += this_time;
2723 size += this_size;
2724 if (time > MAX_TIME * INLINE_TIME_SCALE)
2725 time = MAX_TIME * INLINE_TIME_SCALE;
2728 /* We account everything but the calls. Calls have their own
2729 size/time info attached to cgraph edges. This is necessary
2730 in order to make the cost disappear after inlining. */
2731 if (!is_gimple_call (stmt))
2733 if (prob)
2735 struct predicate ip = not_inlined_predicate ();
2736 ip = and_predicates (info->conds, &ip, &p);
2737 account_size_time (info, this_size * prob,
2738 this_time * prob, &ip);
2740 if (prob != 2)
2741 account_size_time (info, this_size * (2 - prob),
2742 this_time * (2 - prob), &p);
2745 gcc_assert (time >= 0);
2746 gcc_assert (size >= 0);
2750 set_hint_predicate (&inline_summaries->get (node)->array_index, array_index);
2751 time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
2752 if (time > MAX_TIME)
2753 time = MAX_TIME;
2754 free (order);
2756 if (nonconstant_names.exists () && !early)
2758 struct loop *loop;
2759 predicate loop_iterations = true_predicate ();
2760 predicate loop_stride = true_predicate ();
2762 if (dump_file && (dump_flags & TDF_DETAILS))
2763 flow_loops_dump (dump_file, NULL, 0);
2764 scev_initialize ();
2765 FOR_EACH_LOOP (loop, 0)
2767 vec<edge> exits;
2768 edge ex;
2769 unsigned int j, i;
2770 struct tree_niter_desc niter_desc;
2771 basic_block *body = get_loop_body (loop);
2772 bb_predicate = *(struct predicate *) loop->header->aux;
2774 exits = get_loop_exit_edges (loop);
2775 FOR_EACH_VEC_ELT (exits, j, ex)
2776 if (number_of_iterations_exit (loop, ex, &niter_desc, false)
2777 && !is_gimple_min_invariant (niter_desc.niter))
2779 predicate will_be_nonconstant
2780 = will_be_nonconstant_expr_predicate (parms_info, info,
2781 niter_desc.niter,
2782 nonconstant_names);
2783 if (!true_predicate_p (&will_be_nonconstant))
2784 will_be_nonconstant = and_predicates (info->conds,
2785 &bb_predicate,
2786 &will_be_nonconstant);
2787 if (!true_predicate_p (&will_be_nonconstant)
2788 && !false_predicate_p (&will_be_nonconstant))
2789 /* This is slightly imprecise. We may want to represent each
2790 loop with an independent predicate. */
2791 loop_iterations =
2792 and_predicates (info->conds, &loop_iterations,
2793 &will_be_nonconstant);
2795 exits.release ();
2797 for (i = 0; i < loop->num_nodes; i++)
2799 gimple_stmt_iterator gsi;
2800 bb_predicate = *(struct predicate *) body[i]->aux;
2801 for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi);
2802 gsi_next (&gsi))
2804 gimple stmt = gsi_stmt (gsi);
2805 affine_iv iv;
2806 ssa_op_iter iter;
2807 tree use;
2809 FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
2811 predicate will_be_nonconstant;
2813 if (!simple_iv
2814 (loop, loop_containing_stmt (stmt), use, &iv, true)
2815 || is_gimple_min_invariant (iv.step))
2816 continue;
2817 will_be_nonconstant
2818 = will_be_nonconstant_expr_predicate (parms_info, info,
2819 iv.step,
2820 nonconstant_names);
2821 if (!true_predicate_p (&will_be_nonconstant))
2822 will_be_nonconstant
2823 = and_predicates (info->conds,
2824 &bb_predicate,
2825 &will_be_nonconstant);
2826 if (!true_predicate_p (&will_be_nonconstant)
2827 && !false_predicate_p (&will_be_nonconstant))
2828 /* This is slightly imprecise. We may want to represent
2829 each loop with an independent predicate. */
2830 loop_stride =
2831 and_predicates (info->conds, &loop_stride,
2832 &will_be_nonconstant);
2836 free (body);
2838 set_hint_predicate (&inline_summaries->get (node)->loop_iterations,
2839 loop_iterations);
2840 set_hint_predicate (&inline_summaries->get (node)->loop_stride, loop_stride);
2841 scev_finalize ();
2843 FOR_ALL_BB_FN (bb, my_function)
2845 edge e;
2846 edge_iterator ei;
2848 if (bb->aux)
2849 pool_free (edge_predicate_pool, bb->aux);
2850 bb->aux = NULL;
2851 FOR_EACH_EDGE (e, ei, bb->succs)
2853 if (e->aux)
2854 pool_free (edge_predicate_pool, e->aux);
2855 e->aux = NULL;
2858 inline_summaries->get (node)->self_time = time;
2859 inline_summaries->get (node)->self_size = size;
2860 nonconstant_names.release ();
2861 if (opt_for_fn (node->decl, optimize))
2863 if (!early)
2864 loop_optimizer_finalize ();
2865 else if (!ipa_edge_args_vector)
2866 ipa_free_all_node_params ();
2867 free_dominance_info (CDI_DOMINATORS);
2869 if (dump_file)
2871 fprintf (dump_file, "\n");
2872 dump_inline_summary (dump_file, node);
2877 /* Compute parameters of functions used by the inliner.
2878 EARLY is true when we compute parameters for the early inliner. */
2880 void
2881 compute_inline_parameters (struct cgraph_node *node, bool early)
2883 HOST_WIDE_INT self_stack_size;
2884 struct cgraph_edge *e;
2885 struct inline_summary *info;
2887 gcc_assert (!node->global.inlined_to);
2889 inline_summary_alloc ();
2891 info = inline_summaries->get (node);
2892 reset_inline_summary (node, info);
2894 /* FIXME: Thunks are inlinable, but tree-inline doesn't know how to do that.
2895 Once this happens, we will need to predict call
2896 statement size more carefully. */
2897 if (node->thunk.thunk_p)
2899 struct inline_edge_summary *es = inline_edge_summary (node->callees);
2900 struct predicate t = true_predicate ();
2902 info->inlinable = 0;
2903 node->callees->call_stmt_cannot_inline_p = true;
2904 node->local.can_change_signature = false;
2905 es->call_stmt_time = 1;
2906 es->call_stmt_size = 1;
2907 account_size_time (info, 0, 0, &t);
2908 return;
2911 /* Even is_gimple_min_invariant relies on current_function_decl. */
2912 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
2914 /* Estimate the stack size for the function if we're optimizing. */
2915 self_stack_size = optimize ? estimated_stack_frame_size (node) : 0;
2916 info->estimated_self_stack_size = self_stack_size;
2917 info->estimated_stack_size = self_stack_size;
2918 info->stack_frame_offset = 0;
2920 /* Can this function be inlined at all? */
2921 if (!opt_for_fn (node->decl, optimize)
2922 && !lookup_attribute ("always_inline",
2923 DECL_ATTRIBUTES (node->decl)))
2924 info->inlinable = false;
2925 else
2926 info->inlinable = tree_inlinable_function_p (node->decl);
2928 /* Type attributes can use parameter indices to describe them. */
2929 if (TYPE_ATTRIBUTES (TREE_TYPE (node->decl)))
2930 node->local.can_change_signature = false;
2931 else
2933 /* Otherwise, inlinable functions can always change signature. */
2934 if (info->inlinable)
2935 node->local.can_change_signature = true;
2936 else
2938 /* Functions calling builtin_apply can not change signature. */
2939 for (e = node->callees; e; e = e->next_callee)
2941 tree cdecl = e->callee->decl;
2942 if (DECL_BUILT_IN (cdecl)
2943 && DECL_BUILT_IN_CLASS (cdecl) == BUILT_IN_NORMAL
2944 && (DECL_FUNCTION_CODE (cdecl) == BUILT_IN_APPLY_ARGS
2945 || DECL_FUNCTION_CODE (cdecl) == BUILT_IN_VA_START))
2946 break;
2948 node->local.can_change_signature = !e;
2951 estimate_function_body_sizes (node, early);
2953 for (e = node->callees; e; e = e->next_callee)
2954 if (e->callee->comdat_local_p ())
2955 break;
2956 node->calls_comdat_local = (e != NULL);
2958 /* Inlining characteristics are maintained by the cgraph_mark_inline. */
2959 info->time = info->self_time;
2960 info->size = info->self_size;
2961 info->stack_frame_offset = 0;
2962 info->estimated_stack_size = info->estimated_self_stack_size;
2963 #ifdef ENABLE_CHECKING
2964 inline_update_overall_summary (node);
2965 gcc_assert (info->time == info->self_time && info->size == info->self_size);
2966 #endif
2968 pop_cfun ();
2972 /* Compute parameters of functions used by inliner using
2973 current_function_decl. */
2975 static unsigned int
2976 compute_inline_parameters_for_current (void)
2978 compute_inline_parameters (cgraph_node::get (current_function_decl), true);
2979 return 0;
2982 namespace {
2984 const pass_data pass_data_inline_parameters =
2986 GIMPLE_PASS, /* type */
2987 "inline_param", /* name */
2988 OPTGROUP_INLINE, /* optinfo_flags */
2989 TV_INLINE_PARAMETERS, /* tv_id */
2990 0, /* properties_required */
2991 0, /* properties_provided */
2992 0, /* properties_destroyed */
2993 0, /* todo_flags_start */
2994 0, /* todo_flags_finish */
2997 class pass_inline_parameters : public gimple_opt_pass
2999 public:
3000 pass_inline_parameters (gcc::context *ctxt)
3001 : gimple_opt_pass (pass_data_inline_parameters, ctxt)
3004 /* opt_pass methods: */
3005 opt_pass * clone () { return new pass_inline_parameters (m_ctxt); }
3006 virtual unsigned int execute (function *)
3008 return compute_inline_parameters_for_current ();
3011 }; // class pass_inline_parameters
3013 } // anon namespace
3015 gimple_opt_pass *
3016 make_pass_inline_parameters (gcc::context *ctxt)
3018 return new pass_inline_parameters (ctxt);
3022 /* Estimate the benefit of devirtualizing indirect edge IE, provided KNOWN_VALS,
3023 KNOWN_CONTEXTS and KNOWN_AGGS. */
3025 static bool
3026 estimate_edge_devirt_benefit (struct cgraph_edge *ie,
3027 int *size, int *time,
3028 vec<tree> known_vals,
3029 vec<ipa_polymorphic_call_context> known_contexts,
3030 vec<ipa_agg_jump_function_p> known_aggs)
3032 tree target;
3033 struct cgraph_node *callee;
3034 struct inline_summary *isummary;
3035 enum availability avail;
3036 bool speculative;
3038 if (!known_vals.exists () && !known_contexts.exists ())
3039 return false;
3040 if (!opt_for_fn (ie->caller->decl, flag_indirect_inlining))
3041 return false;
3043 target = ipa_get_indirect_edge_target (ie, known_vals, known_contexts,
3044 known_aggs, &speculative);
3045 if (!target || speculative)
3046 return false;
3048 /* Account for difference in cost between indirect and direct calls. */
3049 *size -= (eni_size_weights.indirect_call_cost - eni_size_weights.call_cost);
3050 *time -= (eni_time_weights.indirect_call_cost - eni_time_weights.call_cost);
3051 gcc_checking_assert (*time >= 0);
3052 gcc_checking_assert (*size >= 0);
3054 callee = cgraph_node::get (target);
3055 if (!callee || !callee->definition)
3056 return false;
3057 callee = callee->function_symbol (&avail);
3058 if (avail < AVAIL_AVAILABLE)
3059 return false;
3060 isummary = inline_summaries->get (callee);
3061 return isummary->inlinable;
3064 /* Increase SIZE, MIN_SIZE (if non-NULL) and TIME for size and time needed to
3065 handle edge E with probability PROB.
3066 Set HINTS if edge may be devirtualized.
3067 KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS describe context of the call
3068 site. */
3070 static inline void
3071 estimate_edge_size_and_time (struct cgraph_edge *e, int *size, int *min_size,
3072 int *time,
3073 int prob,
3074 vec<tree> known_vals,
3075 vec<ipa_polymorphic_call_context> known_contexts,
3076 vec<ipa_agg_jump_function_p> known_aggs,
3077 inline_hints *hints)
3079 struct inline_edge_summary *es = inline_edge_summary (e);
3080 int call_size = es->call_stmt_size;
3081 int call_time = es->call_stmt_time;
3082 int cur_size;
3083 if (!e->callee
3084 && estimate_edge_devirt_benefit (e, &call_size, &call_time,
3085 known_vals, known_contexts, known_aggs)
3086 && hints && e->maybe_hot_p ())
3087 *hints |= INLINE_HINT_indirect_call;
3088 cur_size = call_size * INLINE_SIZE_SCALE;
3089 *size += cur_size;
3090 if (min_size)
3091 *min_size += cur_size;
3092 *time += apply_probability ((gcov_type) call_time, prob)
3093 * e->frequency * (INLINE_TIME_SCALE / CGRAPH_FREQ_BASE);
3094 if (*time > MAX_TIME * INLINE_TIME_SCALE)
3095 *time = MAX_TIME * INLINE_TIME_SCALE;
3100 /* Increase SIZE, MIN_SIZE and TIME for size and time needed to handle all
3101 calls in NODE. POSSIBLE_TRUTHS, KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
3102 describe context of the call site. */
3104 static void
3105 estimate_calls_size_and_time (struct cgraph_node *node, int *size,
3106 int *min_size, int *time,
3107 inline_hints *hints,
3108 clause_t possible_truths,
3109 vec<tree> known_vals,
3110 vec<ipa_polymorphic_call_context> known_contexts,
3111 vec<ipa_agg_jump_function_p> known_aggs)
3113 struct cgraph_edge *e;
3114 for (e = node->callees; e; e = e->next_callee)
3116 struct inline_edge_summary *es = inline_edge_summary (e);
3118 /* Do not care about zero sized builtins. */
3119 if (e->inline_failed && !es->call_stmt_size)
3121 gcc_checking_assert (!es->call_stmt_time);
3122 continue;
3124 if (!es->predicate
3125 || evaluate_predicate (es->predicate, possible_truths))
3127 if (e->inline_failed)
3129 /* Predicates of calls shall not use NOT_CHANGED codes,
3130 so we do not need to compute probabilities. */
3131 estimate_edge_size_and_time (e, size,
3132 es->predicate ? NULL : min_size,
3133 time, REG_BR_PROB_BASE,
3134 known_vals, known_contexts,
3135 known_aggs, hints);
3137 else
3138 estimate_calls_size_and_time (e->callee, size, min_size, time,
3139 hints,
3140 possible_truths,
3141 known_vals, known_contexts,
3142 known_aggs);
3145 for (e = node->indirect_calls; e; e = e->next_callee)
3147 struct inline_edge_summary *es = inline_edge_summary (e);
3148 if (!es->predicate
3149 || evaluate_predicate (es->predicate, possible_truths))
3150 estimate_edge_size_and_time (e, size,
3151 es->predicate ? NULL : min_size,
3152 time, REG_BR_PROB_BASE,
3153 known_vals, known_contexts, known_aggs,
3154 hints);
3159 /* Estimate the size and time needed to execute NODE assuming the
3160 POSSIBLE_TRUTHS clause, and KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
3161 information about NODE's arguments. If non-NULL, also use the probability
3162 information present in the INLINE_PARAM_SUMMARY vector.
3163 Additionally determine hints implied by the context. Finally compute the
3164 minimal size needed for the call that is independent of the call context and
3165 can be used for fast estimates. Return the values in RET_SIZE,
3166 RET_MIN_SIZE, RET_TIME and RET_HINTS. */
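/* A condensed sketch of the computation below (illustrative only):

     size = sum of e->size over size_time entries whose predicate may be
            true under POSSIBLE_TRUTHS;
     time = sum of e->time over the same entries, each scaled by
            predicate_probability () when INLINE_PARAM_SUMMARY is given,
            capped at MAX_TIME * INLINE_TIME_SCALE;

   followed by estimate_calls_size_and_time () for the call sites and a
   final rescaling by INLINE_SIZE_SCALE and INLINE_TIME_SCALE.  */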
3168 static void
3169 estimate_node_size_and_time (struct cgraph_node *node,
3170 clause_t possible_truths,
3171 vec<tree> known_vals,
3172 vec<ipa_polymorphic_call_context> known_contexts,
3173 vec<ipa_agg_jump_function_p> known_aggs,
3174 int *ret_size, int *ret_min_size, int *ret_time,
3175 inline_hints *ret_hints,
3176 vec<inline_param_summary>
3177 inline_param_summary)
3179 struct inline_summary *info = inline_summaries->get (node);
3180 size_time_entry *e;
3181 int size = 0;
3182 int time = 0;
3183 int min_size = 0;
3184 inline_hints hints = 0;
3185 int i;
3187 if (dump_file && (dump_flags & TDF_DETAILS))
3189 bool found = false;
3190 fprintf (dump_file, " Estimating body: %s/%i\n"
3191 " Known to be false: ", node->name (),
3192 node->order);
3194 for (i = predicate_not_inlined_condition;
3195 i < (predicate_first_dynamic_condition
3196 + (int) vec_safe_length (info->conds)); i++)
3197 if (!(possible_truths & (1 << i)))
3199 if (found)
3200 fprintf (dump_file, ", ");
3201 found = true;
3202 dump_condition (dump_file, info->conds, i);
3206 for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
3207 if (evaluate_predicate (&e->predicate, possible_truths))
3209 size += e->size;
3210 gcc_checking_assert (e->time >= 0);
3211 gcc_checking_assert (time >= 0);
3212 if (!inline_param_summary.exists ())
3213 time += e->time;
3214 else
3216 int prob = predicate_probability (info->conds,
3217 &e->predicate,
3218 possible_truths,
3219 inline_param_summary);
3220 gcc_checking_assert (prob >= 0);
3221 gcc_checking_assert (prob <= REG_BR_PROB_BASE);
3222 time += apply_probability ((gcov_type) e->time, prob);
3224 if (time > MAX_TIME * INLINE_TIME_SCALE)
3225 time = MAX_TIME * INLINE_TIME_SCALE;
3226 gcc_checking_assert (time >= 0);
3229 gcc_checking_assert (true_predicate_p (&(*info->entry)[0].predicate));
3230 min_size = (*info->entry)[0].size;
3231 gcc_checking_assert (size >= 0);
3232 gcc_checking_assert (time >= 0);
3234 if (info->loop_iterations
3235 && !evaluate_predicate (info->loop_iterations, possible_truths))
3236 hints |= INLINE_HINT_loop_iterations;
3237 if (info->loop_stride
3238 && !evaluate_predicate (info->loop_stride, possible_truths))
3239 hints |= INLINE_HINT_loop_stride;
3240 if (info->array_index
3241 && !evaluate_predicate (info->array_index, possible_truths))
3242 hints |= INLINE_HINT_array_index;
3243 if (info->scc_no)
3244 hints |= INLINE_HINT_in_scc;
3245 if (DECL_DECLARED_INLINE_P (node->decl))
3246 hints |= INLINE_HINT_declared_inline;
3248 estimate_calls_size_and_time (node, &size, &min_size, &time, &hints, possible_truths,
3249 known_vals, known_contexts, known_aggs);
3250 gcc_checking_assert (size >= 0);
3251 gcc_checking_assert (time >= 0);
3252 time = RDIV (time, INLINE_TIME_SCALE);
3253 size = RDIV (size, INLINE_SIZE_SCALE);
3254 min_size = RDIV (min_size, INLINE_SIZE_SCALE);
3256 if (dump_file && (dump_flags & TDF_DETAILS))
3257 fprintf (dump_file, "\n size:%i time:%i\n", (int) size, (int) time);
3258 if (ret_time)
3259 *ret_time = time;
3260 if (ret_size)
3261 *ret_size = size;
3262 if (ret_min_size)
3263 *ret_min_size = min_size;
3264 if (ret_hints)
3265 *ret_hints = hints;
3266 return;
3270 /* Estimate the size and time needed to execute NODE assuming that
3271 parameters known to be constant in its callers are propagated.
3272 KNOWN_VALS and KNOWN_CONTEXTS are vectors of assumed known constant values
3273 and types for parameters. */
3275 void
3276 estimate_ipcp_clone_size_and_time (struct cgraph_node *node,
3277 vec<tree> known_vals,
3278 vec<ipa_polymorphic_call_context>
3279 known_contexts,
3280 vec<ipa_agg_jump_function_p> known_aggs,
3281 int *ret_size, int *ret_time,
3282 inline_hints *hints)
3284 clause_t clause;
3286 clause = evaluate_conditions_for_known_args (node, false, known_vals,
3287 known_aggs);
3288 estimate_node_size_and_time (node, clause, known_vals, known_contexts,
3289 known_aggs, ret_size, NULL, ret_time, hints, vNULL);
3292 /* Translate all conditions from the callee representation into the caller
3293 representation and symbolically evaluate predicate P into a new predicate.
3295 INFO is the inline_summary of the function we are adding the predicate into,
3296 CALLEE_INFO is the summary of the function predicate P is from. OPERAND_MAP is
3297 an array mapping callee formal IDs to caller formal IDs. POSSIBLE_TRUTHS is a
3298 clause of all callee conditions that may be true in the caller context.
3299 TOPLEV_PREDICATE is the predicate under which the callee is executed.
3300 OFFSET_MAP is an array of offsets that need to be added to the conditions;
3301 a negative offset means that conditions relying on values passed by reference
3302 have to be discarded because they might not be preserved (and should be
3303 considered offset zero for other purposes). */
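/* A small worked example (illustrative, not from the original source):
   if the callee predicate P contains the condition "param 1 changed",
   OPERAND_MAP[1] == 3 and OFFSET_MAP[1] == 0, the condition is rebuilt
   via add_condition as "param 3 changed" in the caller's summary, and
   the resulting predicate is finally ANDed with TOPLEV_PREDICATE so it
   only applies when the inlined call itself is executed.  */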
3305 static struct predicate
3306 remap_predicate (struct inline_summary *info,
3307 struct inline_summary *callee_info,
3308 struct predicate *p,
3309 vec<int> operand_map,
3310 vec<int> offset_map,
3311 clause_t possible_truths, struct predicate *toplev_predicate)
3313 int i;
3314 struct predicate out = true_predicate ();
3316 /* True predicate is easy. */
3317 if (true_predicate_p (p))
3318 return *toplev_predicate;
3319 for (i = 0; p->clause[i]; i++)
3321 clause_t clause = p->clause[i];
3322 int cond;
3323 struct predicate clause_predicate = false_predicate ();
3325 gcc_assert (i < MAX_CLAUSES);
3327 for (cond = 0; cond < NUM_CONDITIONS; cond++)
3328 /* Do we have condition we can't disprove? */
3329 if (clause & possible_truths & (1 << cond))
3331 struct predicate cond_predicate;
3332 /* Work out if the condition can translate to predicate in the
3333 inlined function. */
3334 if (cond >= predicate_first_dynamic_condition)
3336 struct condition *c;
3338 c = &(*callee_info->conds)[cond
3340 predicate_first_dynamic_condition];
3341 /* See if we can remap condition operand to caller's operand.
3342 Otherwise give up. */
3343 if (!operand_map.exists ()
3344 || (int) operand_map.length () <= c->operand_num
3345 || operand_map[c->operand_num] == -1
3346 /* TODO: For non-aggregate conditions, adding an offset is
3347 basically an arithmetic jump function processing which
3348 we should support in future. */
3349 || ((!c->agg_contents || !c->by_ref)
3350 && offset_map[c->operand_num] > 0)
3351 || (c->agg_contents && c->by_ref
3352 && offset_map[c->operand_num] < 0))
3353 cond_predicate = true_predicate ();
3354 else
3356 struct agg_position_info ap;
3357 HOST_WIDE_INT offset_delta = offset_map[c->operand_num];
3358 if (offset_delta < 0)
3360 gcc_checking_assert (!c->agg_contents || !c->by_ref);
3361 offset_delta = 0;
3363 gcc_assert (!c->agg_contents
3364 || c->by_ref || offset_delta == 0);
3365 ap.offset = c->offset + offset_delta;
3366 ap.agg_contents = c->agg_contents;
3367 ap.by_ref = c->by_ref;
3368 cond_predicate = add_condition (info,
3369 operand_map[c->operand_num],
3370 &ap, c->code, c->val);
3373 /* Fixed conditions remain the same; construct a single
3374 condition predicate. */
3375 else
3377 cond_predicate.clause[0] = 1 << cond;
3378 cond_predicate.clause[1] = 0;
3380 clause_predicate = or_predicates (info->conds, &clause_predicate,
3381 &cond_predicate);
3383 out = and_predicates (info->conds, &out, &clause_predicate);
3385 return and_predicates (info->conds, &out, toplev_predicate);
3389 /* Update summary information of inline clones after inlining.
3390 Compute peak stack usage. */
3392 static void
3393 inline_update_callee_summaries (struct cgraph_node *node, int depth)
3395 struct cgraph_edge *e;
3396 struct inline_summary *callee_info = inline_summaries->get (node);
3397 struct inline_summary *caller_info = inline_summaries->get (node->callers->caller);
3398 HOST_WIDE_INT peak;
3400 callee_info->stack_frame_offset
3401 = caller_info->stack_frame_offset
3402 + caller_info->estimated_self_stack_size;
3403 peak = callee_info->stack_frame_offset
3404 + callee_info->estimated_self_stack_size;
3405 if (inline_summaries->get (node->global.inlined_to)->estimated_stack_size < peak)
3406 inline_summaries->get (node->global.inlined_to)->estimated_stack_size = peak;
3407 ipa_propagate_frequency (node);
3408 for (e = node->callees; e; e = e->next_callee)
3410 if (!e->inline_failed)
3411 inline_update_callee_summaries (e->callee, depth);
3412 inline_edge_summary (e)->loop_depth += depth;
3414 for (e = node->indirect_calls; e; e = e->next_callee)
3415 inline_edge_summary (e)->loop_depth += depth;
3418 /* Update change_prob of EDGE after INLINED_EDGE has been inlined.
3419 When function A is inlined in B and A calls C with a parameter that
3420 changes with probability PROB1 and that parameter is known to be a passthrough
3421 of an argument of B that changes with probability PROB2, the probability
3422 of change is now PROB1*PROB2. */
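/* Worked example (illustrative only): assuming REG_BR_PROB_BASE is 10000,
   PROB1 == 5000 (50%) and PROB2 == 2000 (20%) combine to
   combine_probabilities (5000, 2000) == 1000, i.e. 10%.  The code below
   additionally rounds a nonzero product up to 1 so that a still-possible
   change is never reported as impossible.  */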
3424 static void
3425 remap_edge_change_prob (struct cgraph_edge *inlined_edge,
3426 struct cgraph_edge *edge)
3428 if (ipa_node_params_sum)
3430 int i;
3431 struct ipa_edge_args *args = IPA_EDGE_REF (edge);
3432 struct inline_edge_summary *es = inline_edge_summary (edge);
3433 struct inline_edge_summary *inlined_es
3434 = inline_edge_summary (inlined_edge);
3436 for (i = 0; i < ipa_get_cs_argument_count (args); i++)
3438 struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i);
3439 if (jfunc->type == IPA_JF_PASS_THROUGH
3440 && (ipa_get_jf_pass_through_formal_id (jfunc)
3441 < (int) inlined_es->param.length ()))
3443 int jf_formal_id = ipa_get_jf_pass_through_formal_id (jfunc);
3444 int prob1 = es->param[i].change_prob;
3445 int prob2 = inlined_es->param[jf_formal_id].change_prob;
3446 int prob = combine_probabilities (prob1, prob2);
3448 if (prob1 && prob2 && !prob)
3449 prob = 1;
3451 es->param[i].change_prob = prob;
3457 /* Update edge summaries of NODE after INLINED_EDGE has been inlined.
3459 Remap predicates of callees of NODE. The rest of the arguments match
3460 remap_predicate.
3462 Also update change probabilities. */
3464 static void
3465 remap_edge_summaries (struct cgraph_edge *inlined_edge,
3466 struct cgraph_node *node,
3467 struct inline_summary *info,
3468 struct inline_summary *callee_info,
3469 vec<int> operand_map,
3470 vec<int> offset_map,
3471 clause_t possible_truths,
3472 struct predicate *toplev_predicate)
3474 struct cgraph_edge *e;
3475 for (e = node->callees; e; e = e->next_callee)
3477 struct inline_edge_summary *es = inline_edge_summary (e);
3478 struct predicate p;
3480 if (e->inline_failed)
3482 remap_edge_change_prob (inlined_edge, e);
3484 if (es->predicate)
3486 p = remap_predicate (info, callee_info,
3487 es->predicate, operand_map, offset_map,
3488 possible_truths, toplev_predicate);
3489 edge_set_predicate (e, &p);
3490 /* TODO: We should remove the edge for code that will be
3491 optimized out, but we need to keep verifiers and tree-inline
3492 happy. Make it cold for now. */
3493 if (false_predicate_p (&p))
3495 e->count = 0;
3496 e->frequency = 0;
3499 else
3500 edge_set_predicate (e, toplev_predicate);
3502 else
3503 remap_edge_summaries (inlined_edge, e->callee, info, callee_info,
3504 operand_map, offset_map, possible_truths,
3505 toplev_predicate);
3507 for (e = node->indirect_calls; e; e = e->next_callee)
3509 struct inline_edge_summary *es = inline_edge_summary (e);
3510 struct predicate p;
3512 remap_edge_change_prob (inlined_edge, e);
3513 if (es->predicate)
3515 p = remap_predicate (info, callee_info,
3516 es->predicate, operand_map, offset_map,
3517 possible_truths, toplev_predicate);
3518 edge_set_predicate (e, &p);
3519 /* TODO: We should remove the edge for code that will be optimized
3520 out, but we need to keep verifiers and tree-inline happy.
3521 Make it cold for now. */
3522 if (false_predicate_p (&p))
3524 e->count = 0;
3525 e->frequency = 0;
3528 else
3529 edge_set_predicate (e, toplev_predicate);
3533 /* Same as remap_predicate, but set result into hint *HINT. */
3535 static void
3536 remap_hint_predicate (struct inline_summary *info,
3537 struct inline_summary *callee_info,
3538 struct predicate **hint,
3539 vec<int> operand_map,
3540 vec<int> offset_map,
3541 clause_t possible_truths,
3542 struct predicate *toplev_predicate)
3544 predicate p;
3546 if (!*hint)
3547 return;
3548 p = remap_predicate (info, callee_info,
3549 *hint,
3550 operand_map, offset_map,
3551 possible_truths, toplev_predicate);
3552 if (!false_predicate_p (&p) && !true_predicate_p (&p))
3554 if (!*hint)
3555 set_hint_predicate (hint, p);
3556 else
3557 **hint = and_predicates (info->conds, *hint, &p);
3561 /* We inlined EDGE. Update summary of the function we inlined into. */
3563 void
3564 inline_merge_summary (struct cgraph_edge *edge)
3566 struct inline_summary *callee_info = inline_summaries->get (edge->callee);
3567 struct cgraph_node *to = (edge->caller->global.inlined_to
3568 ? edge->caller->global.inlined_to : edge->caller);
3569 struct inline_summary *info = inline_summaries->get (to);
3570 clause_t clause = 0; /* not_inline is known to be false. */
3571 size_time_entry *e;
3572 vec<int> operand_map = vNULL;
3573 vec<int> offset_map = vNULL;
3574 int i;
3575 struct predicate toplev_predicate;
3576 struct predicate true_p = true_predicate ();
3577 struct inline_edge_summary *es = inline_edge_summary (edge);
3579 if (es->predicate)
3580 toplev_predicate = *es->predicate;
3581 else
3582 toplev_predicate = true_predicate ();
3584 if (callee_info->conds)
3585 evaluate_properties_for_edge (edge, true, &clause, NULL, NULL, NULL);
3586 if (ipa_node_params_sum && callee_info->conds)
3588 struct ipa_edge_args *args = IPA_EDGE_REF (edge);
3589 int count = ipa_get_cs_argument_count (args);
3590 int i;
3592 if (count)
3594 operand_map.safe_grow_cleared (count);
3595 offset_map.safe_grow_cleared (count);
3597 for (i = 0; i < count; i++)
3599 struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i);
3600 int map = -1;
3602 /* TODO: handle non-NOPs when merging. */
3603 if (jfunc->type == IPA_JF_PASS_THROUGH)
3605 if (ipa_get_jf_pass_through_operation (jfunc) == NOP_EXPR)
3606 map = ipa_get_jf_pass_through_formal_id (jfunc);
3607 if (!ipa_get_jf_pass_through_agg_preserved (jfunc))
3608 offset_map[i] = -1;
3610 else if (jfunc->type == IPA_JF_ANCESTOR)
3612 HOST_WIDE_INT offset = ipa_get_jf_ancestor_offset (jfunc);
3613 if (offset >= 0 && offset < INT_MAX)
3615 map = ipa_get_jf_ancestor_formal_id (jfunc);
3616 if (!ipa_get_jf_ancestor_agg_preserved (jfunc))
3617 offset = -1;
3618 offset_map[i] = offset;
3621 operand_map[i] = map;
3622 gcc_assert (map < ipa_get_param_count (IPA_NODE_REF (to)));
3625 for (i = 0; vec_safe_iterate (callee_info->entry, i, &e); i++)
3627 struct predicate p = remap_predicate (info, callee_info,
3628 &e->predicate, operand_map,
3629 offset_map, clause,
3630 &toplev_predicate);
3631 if (!false_predicate_p (&p))
3633 gcov_type add_time = ((gcov_type) e->time * edge->frequency
3634 + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
3635 int prob = predicate_probability (callee_info->conds,
3636 &e->predicate,
3637 clause, es->param);
3638 add_time = apply_probability ((gcov_type) add_time, prob);
3639 if (add_time > MAX_TIME * INLINE_TIME_SCALE)
3640 add_time = MAX_TIME * INLINE_TIME_SCALE;
3641 if (prob != REG_BR_PROB_BASE
3642 && dump_file && (dump_flags & TDF_DETAILS))
3644 fprintf (dump_file, "\t\tScaling time by probability:%f\n",
3645 (double) prob / REG_BR_PROB_BASE);
3647 account_size_time (info, e->size, add_time, &p);
3650 remap_edge_summaries (edge, edge->callee, info, callee_info, operand_map,
3651 offset_map, clause, &toplev_predicate);
3652 remap_hint_predicate (info, callee_info,
3653 &callee_info->loop_iterations,
3654 operand_map, offset_map, clause, &toplev_predicate);
3655 remap_hint_predicate (info, callee_info,
3656 &callee_info->loop_stride,
3657 operand_map, offset_map, clause, &toplev_predicate);
3658 remap_hint_predicate (info, callee_info,
3659 &callee_info->array_index,
3660 operand_map, offset_map, clause, &toplev_predicate);
3662 inline_update_callee_summaries (edge->callee,
3663 inline_edge_summary (edge)->loop_depth);
3665 /* We do not maintain predicates of inlined edges; free it. */
3666 edge_set_predicate (edge, &true_p);
3667 /* Similarly remove param summaries. */
3668 es->param.release ();
3669 operand_map.release ();
3670 offset_map.release ();
3673 /* For performance reasons inline_merge_summary does not update overall size
3674 and time. Recompute them. */
3676 void
3677 inline_update_overall_summary (struct cgraph_node *node)
3679 struct inline_summary *info = inline_summaries->get (node);
3680 size_time_entry *e;
3681 int i;
3683 info->size = 0;
3684 info->time = 0;
3685 for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
3687 info->size += e->size, info->time += e->time;
3688 if (info->time > MAX_TIME * INLINE_TIME_SCALE)
3689 info->time = MAX_TIME * INLINE_TIME_SCALE;
3691 estimate_calls_size_and_time (node, &info->size, &info->min_size,
3692 &info->time, NULL,
3693 ~(clause_t) (1 << predicate_false_condition),
3694 vNULL, vNULL, vNULL);
3695 info->time = (info->time + INLINE_TIME_SCALE / 2) / INLINE_TIME_SCALE;
3696 info->size = (info->size + INLINE_SIZE_SCALE / 2) / INLINE_SIZE_SCALE;
3699 /* Return hints derived from EDGE. */
3701 simple_edge_hints (struct cgraph_edge *edge)
3703 int hints = 0;
3704 struct cgraph_node *to = (edge->caller->global.inlined_to
3705 ? edge->caller->global.inlined_to : edge->caller);
3706 struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
3707 if (inline_summaries->get (to)->scc_no
3708 && inline_summaries->get (to)->scc_no
3709 == inline_summaries->get (callee)->scc_no
3710 && !edge->recursive_p ())
3711 hints |= INLINE_HINT_same_scc;
3713 if (callee->lto_file_data && edge->caller->lto_file_data
3714 && edge->caller->lto_file_data != callee->lto_file_data
3715 && !callee->merged)
3716 hints |= INLINE_HINT_cross_module;
3718 return hints;
3721 /* Estimate the time cost for the caller when inlining EDGE.
3722 Only to be called via estimate_edge_time, which handles the
3723 caching mechanism.
3725 When caching, also update the cache entry. Compute both time and
3726 size, since we always need both metrics eventually. */
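/* Note on the cache encoding used below (derived from the code, stated
   here for clarity): entries are biased so that 0 means "not computed";
   time and size are stored with a +1 bias for non-negative values and
   hints as hints + 1, and the readers in do_estimate_edge_size and
   do_estimate_edge_hints subtract the bias again.  */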
3729 do_estimate_edge_time (struct cgraph_edge *edge)
3731 int time;
3732 int size;
3733 inline_hints hints;
3734 struct cgraph_node *callee;
3735 clause_t clause;
3736 vec<tree> known_vals;
3737 vec<ipa_polymorphic_call_context> known_contexts;
3738 vec<ipa_agg_jump_function_p> known_aggs;
3739 struct inline_edge_summary *es = inline_edge_summary (edge);
3740 int min_size;
3742 callee = edge->callee->ultimate_alias_target ();
3744 gcc_checking_assert (edge->inline_failed);
3745 evaluate_properties_for_edge (edge, true,
3746 &clause, &known_vals, &known_contexts,
3747 &known_aggs);
3748 estimate_node_size_and_time (callee, clause, known_vals, known_contexts,
3749 known_aggs, &size, &min_size, &time, &hints, es->param);
3751 /* When we have profile feedback, we can quite safely identify hot
3752 edges and for those we disable size limits. Don't do that when the
3753 probability that the caller will call the callee is low, however, since it
3754 may hurt optimization of the caller's hot path. */
3755 if (edge->count && edge->maybe_hot_p ()
3756 && (edge->count * 2
3757 > (edge->caller->global.inlined_to
3758 ? edge->caller->global.inlined_to->count : edge->caller->count)))
3759 hints |= INLINE_HINT_known_hot;
3761 known_vals.release ();
3762 known_contexts.release ();
3763 known_aggs.release ();
3764 gcc_checking_assert (size >= 0);
3765 gcc_checking_assert (time >= 0);
3767 /* When caching, update the cache entry. */
3768 if (edge_growth_cache.exists ())
3770 inline_summaries->get (edge->callee)->min_size = min_size;
3771 if ((int) edge_growth_cache.length () <= edge->uid)
3772 edge_growth_cache.safe_grow_cleared (symtab->edges_max_uid);
3773 edge_growth_cache[edge->uid].time = time + (time >= 0);
3775 edge_growth_cache[edge->uid].size = size + (size >= 0);
3776 hints |= simple_edge_hints (edge);
3777 edge_growth_cache[edge->uid].hints = hints + 1;
3779 return time;
3783 /* Return estimated callee growth after inlining EDGE.
3784 Only to be called via estimate_edge_size. */
3787 do_estimate_edge_size (struct cgraph_edge *edge)
3789 int size;
3790 struct cgraph_node *callee;
3791 clause_t clause;
3792 vec<tree> known_vals;
3793 vec<ipa_polymorphic_call_context> known_contexts;
3794 vec<ipa_agg_jump_function_p> known_aggs;
3796 /* When we do caching, use do_estimate_edge_time to populate the entry. */
3798 if (edge_growth_cache.exists ())
3800 do_estimate_edge_time (edge);
3801 size = edge_growth_cache[edge->uid].size;
3802 gcc_checking_assert (size);
3803 return size - (size > 0);
3806 callee = edge->callee->ultimate_alias_target ();
3808 /* The early inliner runs without caching; go ahead and do the dirty work. */
3809 gcc_checking_assert (edge->inline_failed);
3810 evaluate_properties_for_edge (edge, true,
3811 &clause, &known_vals, &known_contexts,
3812 &known_aggs);
3813 estimate_node_size_and_time (callee, clause, known_vals, known_contexts,
3814 known_aggs, &size, NULL, NULL, NULL, vNULL);
3815 known_vals.release ();
3816 known_contexts.release ();
3817 known_aggs.release ();
3818 return size;
3822 /* Estimate the hints for the caller when inlining EDGE.
3823 Only to be called via estimate_edge_hints. */
3825 inline_hints
3826 do_estimate_edge_hints (struct cgraph_edge *edge)
3828 inline_hints hints;
3829 struct cgraph_node *callee;
3830 clause_t clause;
3831 vec<tree> known_vals;
3832 vec<ipa_polymorphic_call_context> known_contexts;
3833 vec<ipa_agg_jump_function_p> known_aggs;
3835 /* When we do caching, use do_estimate_edge_time to populate the entry. */
3837 if (edge_growth_cache.exists ())
3839 do_estimate_edge_time (edge);
3840 hints = edge_growth_cache[edge->uid].hints;
3841 gcc_checking_assert (hints);
3842 return hints - 1;
3845 callee = edge->callee->ultimate_alias_target ();
3847 /* The early inliner runs without caching; go ahead and do the dirty work. */
3848 gcc_checking_assert (edge->inline_failed);
3849 evaluate_properties_for_edge (edge, true,
3850 &clause, &known_vals, &known_contexts,
3851 &known_aggs);
3852 estimate_node_size_and_time (callee, clause, known_vals, known_contexts,
3853 known_aggs, NULL, NULL, NULL, &hints, vNULL);
3854 known_vals.release ();
3855 known_contexts.release ();
3856 known_aggs.release ();
3857 hints |= simple_edge_hints (edge);
3858 return hints;

/* Estimate self time of the function NODE after inlining EDGE.  */

int
estimate_time_after_inlining (struct cgraph_node *node,
			      struct cgraph_edge *edge)
{
  struct inline_edge_summary *es = inline_edge_summary (edge);
  if (!es->predicate || !false_predicate_p (es->predicate))
    {
      gcov_type time =
	inline_summaries->get (node)->time + estimate_edge_time (edge);
      if (time < 0)
	time = 0;
      if (time > MAX_TIME)
	time = MAX_TIME;
      return time;
    }
  return inline_summaries->get (node)->time;
}

/* Estimate the size of NODE after inlining EDGE which should be an
   edge to either NODE or a call inlined into NODE.  */

int
estimate_size_after_inlining (struct cgraph_node *node,
			      struct cgraph_edge *edge)
{
  struct inline_edge_summary *es = inline_edge_summary (edge);
  if (!es->predicate || !false_predicate_p (es->predicate))
    {
      int size = inline_summaries->get (node)->size + estimate_edge_growth (edge);
      gcc_assert (size >= 0);
      return size;
    }
  return inline_summaries->get (node)->size;
}
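
/* Both estimators above treat a call whose predicate is known false as
   already dead: NODE's current time/size is returned unchanged instead of
   adding the edge's estimate on top of it.  */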

struct growth_data
{
  struct cgraph_node *node;
  bool self_recursive;
  bool uninlinable;
  int growth;
};
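
/* GROWTH accumulates the estimated size change over all not-yet-inlined
   callers of NODE, while SELF_RECURSIVE and UNINLINABLE record whether a
   recursive or never-inlinable caller edge was seen; estimate_growth uses
   these flags to adjust the final value.  */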

/* Worker for do_estimate_growth.  Collect growth for all callers.  */

static bool
do_estimate_growth_1 (struct cgraph_node *node, void *data)
{
  struct cgraph_edge *e;
  struct growth_data *d = (struct growth_data *) data;

  for (e = node->callers; e; e = e->next_caller)
    {
      gcc_checking_assert (e->inline_failed);

      if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
	{
	  d->uninlinable = true;
	  continue;
	}

      if (e->recursive_p ())
	{
	  d->self_recursive = true;
	  continue;
	}
      d->growth += estimate_edge_growth (e);
    }
  return false;
}

/* Estimate the growth caused by inlining NODE into all of its callers.  */

int
estimate_growth (struct cgraph_node *node)
{
  struct growth_data d = { node, false, false, 0 };
  struct inline_summary *info = inline_summaries->get (node);

  node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true);

  /* For self recursive functions the growth estimation really should be
     infinity.  We don't want to return very large values because the growth
     plays various roles in badness computation fractions.  Be sure not to
     return zero or negative growth.  */
  if (d.self_recursive)
    d.growth = d.growth < info->size ? info->size : d.growth;
  else if (DECL_EXTERNAL (node->decl) || d.uninlinable)
    ;
  else
    {
      if (node->will_be_removed_from_program_if_no_direct_calls_p ())
	d.growth -= info->size;
      /* COMDAT functions are very often not shared across multiple units
	 since they come from various template instantiations.
	 Take this into account.  */
      else if (DECL_COMDAT (node->decl)
	       && node->can_remove_if_no_direct_calls_p ())
	d.growth -= (info->size
		     * (100 - PARAM_VALUE (PARAM_COMDAT_SHARING_PROBABILITY))
		     + 50) / 100;
    }

  return d.growth;
}
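
/* The COMDAT adjustment above rounds to the nearest percent.  For example,
   with info->size == 100 and PARAM_COMDAT_SHARING_PROBABILITY == 20, the
   amount subtracted is (100 * 80 + 50) / 100 == 80, i.e. the offline body
   is expected to disappear from roughly 80% of the units that emit it.  */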

/* Walk the callers of NODE (and of its aliases), decrementing *MAX_CALLERS
   for each one.  Return true once the budget is exhausted or a caller edge
   that can never be inlined is found.  */

static bool
check_callers (cgraph_node *node, int *max_callers)
{
  ipa_ref *ref;

  for (cgraph_edge *e = node->callers; e; e = e->next_caller)
    {
      (*max_callers)--;
      if (!*max_callers
	  || cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
	return true;
    }

  FOR_EACH_ALIAS (node, ref)
    if (check_callers (dyn_cast <cgraph_node *> (ref->referring), max_callers))
      return true;

  return false;
}
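
/* This is the helper used by growth_likely_positive below: the shared
   *MAX_CALLERS budget lets the walk over callers and aliases stop as soon
   as the function is clearly called from too many places.  */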

/* Make a cheap estimate of whether the growth of NODE is likely positive,
   given EDGE_GROWTH of one particular edge.
   We assume that most other edges will have similar growth and skip the
   full computation if there are too many callers.  */

bool
growth_likely_positive (struct cgraph_node *node,
			int edge_growth)
{
  int max_callers;
  struct cgraph_edge *e;
  gcc_checking_assert (edge_growth > 0);

  if (DECL_EXTERNAL (node->decl))
    return true;
  /* Unlike for functions called once, we play it unsafe with
     COMDATs.  We can allow that since we know the functions
     in consideration are small (and thus the risk is small) and
     moreover the growth estimate already accounts for the fact
     that COMDAT functions may or may not disappear when eliminated
     from the current unit.  With good probability, making the
     aggressive choice in all units is going to make the overall
     program smaller.  */
  if (DECL_COMDAT (node->decl))
    {
      if (!node->can_remove_if_no_direct_calls_p ())
	return true;
    }
  else if (!node->will_be_removed_from_program_if_no_direct_calls_p ())
    return true;
  max_callers = inline_summaries->get (node)->size * 4 / edge_growth + 2;

  for (e = node->callers; e; e = e->next_caller)
    {
      max_callers--;
      if (!max_callers
	  || cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
	return true;
    }

  ipa_ref *ref;
  FOR_EACH_ALIAS (node, ref)
    if (check_callers (dyn_cast <cgraph_node *> (ref->referring), &max_callers))
      return true;

  return estimate_growth (node) > 0;
}
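
/* A sketch of the budget above, assuming every caller grows by roughly
   EDGE_GROWTH: once more than size * 4 / edge_growth + 2 callers exist, the
   accumulated growth already exceeds about four times the body size, which
   removing the offline copy can never pay back, so we answer "likely
   positive" without doing the full estimate_growth walk.  */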

/* This function performs the intraprocedural analysis of NODE that is
   required to inline indirect calls.  */

static void
inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
{
  ipa_analyze_node (node);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      ipa_print_node_params (dump_file, node);
      ipa_print_node_jump_functions (dump_file, node);
    }
}

/* Compute the inline parameters (note function body size and time) of
   NODE.  */

void
inline_analyze_function (struct cgraph_node *node)
{
  push_cfun (DECL_STRUCT_FUNCTION (node->decl));

  if (dump_file)
    fprintf (dump_file, "\nAnalyzing function: %s/%u\n",
	     node->name (), node->order);
  if (opt_for_fn (node->decl, optimize) && !node->thunk.thunk_p)
    inline_indirect_intraprocedural_analysis (node);
  compute_inline_parameters (node, false);
  if (!optimize)
    {
      struct cgraph_edge *e;
      for (e = node->callees; e; e = e->next_callee)
	{
	  if (e->inline_failed == CIF_FUNCTION_NOT_CONSIDERED)
	    e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
	  e->call_stmt_cannot_inline_p = true;
	}
      for (e = node->indirect_calls; e; e = e->next_callee)
	{
	  if (e->inline_failed == CIF_FUNCTION_NOT_CONSIDERED)
	    e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
	  e->call_stmt_cannot_inline_p = true;
	}
    }

  pop_cfun ();
}
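
/* For bodies compiled without optimization the loops above downgrade
   CIF_FUNCTION_NOT_CONSIDERED to CIF_FUNCTION_NOT_OPTIMIZED and mark every
   call statement as impossible to inline, so later passes will not try to
   inline into the unoptimized body.  */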

/* Called when a new function is inserted into the callgraph late.  */

void
inline_summary_t::insert (struct cgraph_node *node, inline_summary *)
{
  inline_analyze_function (node);
}

/* Compute inline parameters for all functions in the compilation unit.  */

void
inline_generate_summary (void)
{
  struct cgraph_node *node;

  /* When not optimizing, do not bother to analyze.  Inlining is still done
     because edge redirection needs to happen there.  */
  if (!optimize && !flag_generate_lto && !flag_generate_offload && !flag_wpa)
    return;

  if (!inline_summaries)
    inline_summaries = (inline_summary_t*) inline_summary_t::create_ggc (symtab);

  inline_summaries->enable_insertion_hook ();

  ipa_register_cgraph_hooks ();
  inline_free_summary ();

  FOR_EACH_DEFINED_FUNCTION (node)
    if (!node->alias)
      inline_analyze_function (node);
}
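
/* The insertion hook enabled above ensures that functions added to the
   callgraph after this point are analyzed on the fly via
   inline_summary_t::insert.  */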

/* Read predicate from IB.  */

static struct predicate
read_predicate (struct lto_input_block *ib)
{
  struct predicate out;
  clause_t clause;
  int k = 0;

  do
    {
      gcc_assert (k <= MAX_CLAUSES);
      clause = out.clause[k++] = streamer_read_uhwi (ib);
    }
  while (clause);

  /* Zero-initialize the remaining clauses in OUT.  */
  while (k <= MAX_CLAUSES)
    out.clause[k++] = 0;

  return out;
}
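
/* The on-disk encoding is a sequence of non-zero clause words terminated by
   a zero; write_predicate below emits exactly this form, and the do/while
   loop above consumes clauses until it sees the terminating zero.  */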

/* Read inline summary for edge E from IB.  */

static void
read_inline_edge_summary (struct lto_input_block *ib, struct cgraph_edge *e)
{
  struct inline_edge_summary *es = inline_edge_summary (e);
  struct predicate p;
  int length, i;

  es->call_stmt_size = streamer_read_uhwi (ib);
  es->call_stmt_time = streamer_read_uhwi (ib);
  es->loop_depth = streamer_read_uhwi (ib);
  p = read_predicate (ib);
  edge_set_predicate (e, &p);
  length = streamer_read_uhwi (ib);
  if (length)
    {
      es->param.safe_grow_cleared (length);
      for (i = 0; i < length; i++)
	es->param[i].change_prob = streamer_read_uhwi (ib);
    }
}
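
/* The field order here must mirror write_inline_edge_summary: call
   statement size, call statement time, loop depth, the edge predicate and
   finally the per-parameter change probabilities.  */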

/* Stream in inline summaries from the section.  */

static void
inline_read_section (struct lto_file_decl_data *file_data, const char *data,
		     size_t len)
{
  const struct lto_function_header *header =
    (const struct lto_function_header *) data;
  const int cfg_offset = sizeof (struct lto_function_header);
  const int main_offset = cfg_offset + header->cfg_size;
  const int string_offset = main_offset + header->main_size;
  struct data_in *data_in;
  unsigned int i, count2, j;
  unsigned int f_count;

  lto_input_block ib ((const char *) data + main_offset, header->main_size,
		      file_data->mode_table);

  data_in =
    lto_data_in_create (file_data, (const char *) data + string_offset,
			header->string_size, vNULL);
  f_count = streamer_read_uhwi (&ib);
  for (i = 0; i < f_count; i++)
    {
      unsigned int index;
      struct cgraph_node *node;
      struct inline_summary *info;
      lto_symtab_encoder_t encoder;
      struct bitpack_d bp;
      struct cgraph_edge *e;
      predicate p;

      index = streamer_read_uhwi (&ib);
      encoder = file_data->symtab_node_encoder;
      node = dyn_cast<cgraph_node *> (lto_symtab_encoder_deref (encoder,
								index));
      info = inline_summaries->get (node);

      info->estimated_stack_size
	= info->estimated_self_stack_size = streamer_read_uhwi (&ib);
      info->size = info->self_size = streamer_read_uhwi (&ib);
      info->time = info->self_time = streamer_read_uhwi (&ib);

      bp = streamer_read_bitpack (&ib);
      info->inlinable = bp_unpack_value (&bp, 1);

      count2 = streamer_read_uhwi (&ib);
      gcc_assert (!info->conds);
      for (j = 0; j < count2; j++)
	{
	  struct condition c;
	  c.operand_num = streamer_read_uhwi (&ib);
	  c.code = (enum tree_code) streamer_read_uhwi (&ib);
	  c.val = stream_read_tree (&ib, data_in);
	  bp = streamer_read_bitpack (&ib);
	  c.agg_contents = bp_unpack_value (&bp, 1);
	  c.by_ref = bp_unpack_value (&bp, 1);
	  if (c.agg_contents)
	    c.offset = streamer_read_uhwi (&ib);
	  vec_safe_push (info->conds, c);
	}
      count2 = streamer_read_uhwi (&ib);
      gcc_assert (!info->entry);
      for (j = 0; j < count2; j++)
	{
	  struct size_time_entry e;

	  e.size = streamer_read_uhwi (&ib);
	  e.time = streamer_read_uhwi (&ib);
	  e.predicate = read_predicate (&ib);

	  vec_safe_push (info->entry, e);
	}

      p = read_predicate (&ib);
      set_hint_predicate (&info->loop_iterations, p);
      p = read_predicate (&ib);
      set_hint_predicate (&info->loop_stride, p);
      p = read_predicate (&ib);
      set_hint_predicate (&info->array_index, p);
      for (e = node->callees; e; e = e->next_callee)
	read_inline_edge_summary (&ib, e);
      for (e = node->indirect_calls; e; e = e->next_callee)
	read_inline_edge_summary (&ib, e);
    }

  lto_free_section_data (file_data, LTO_section_inline_summary, NULL, data,
			 len);
  lto_data_in_delete (data_in);
}
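
/* Per-node record layout consumed above (and produced by
   inline_write_summary): node index, self stack size, self size, self time,
   an "inlinable" bit, the condition table, the size/time entries with their
   predicates, the three hint predicates and, last, one edge summary per
   direct and indirect call.  */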

/* Read inline summaries.  Jump functions are shared among ipa-cp
   and the inliner, so when ipa-cp is active, we don't need to read them
   twice.  */

void
inline_read_summary (void)
{
  struct lto_file_decl_data **file_data_vec = lto_get_file_decl_data ();
  struct lto_file_decl_data *file_data;
  unsigned int j = 0;

  inline_summary_alloc ();

  while ((file_data = file_data_vec[j++]))
    {
      size_t len;
      const char *data = lto_get_section_data (file_data,
					       LTO_section_inline_summary,
					       NULL, &len);
      if (data)
	inline_read_section (file_data, data, len);
      else
	/* Fatal error here.  We do not want to support compiling ltrans units
	   with a different version of the compiler or with different flags
	   than the WPA unit, so this should never happen.  */
	fatal_error (input_location,
		     "ipa inline summary is missing in input file");
    }
  if (optimize)
    {
      ipa_register_cgraph_hooks ();
      if (!flag_ipa_cp)
	ipa_prop_read_jump_functions ();
    }

  gcc_assert (inline_summaries);
  inline_summaries->enable_insertion_hook ();
}

/* Write predicate P to OB.  */

static void
write_predicate (struct output_block *ob, struct predicate *p)
{
  int j;
  if (p)
    for (j = 0; p->clause[j]; j++)
      {
	gcc_assert (j < MAX_CLAUSES);
	streamer_write_uhwi (ob, p->clause[j]);
      }
  streamer_write_uhwi (ob, 0);
}
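
/* Note the unconditional trailing zero: a NULL predicate is streamed as a
   single zero word, which read_predicate reads back as a predicate with no
   clauses.  */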

/* Write inline summary for edge E to OB.  */

static void
write_inline_edge_summary (struct output_block *ob, struct cgraph_edge *e)
{
  struct inline_edge_summary *es = inline_edge_summary (e);
  int i;

  streamer_write_uhwi (ob, es->call_stmt_size);
  streamer_write_uhwi (ob, es->call_stmt_time);
  streamer_write_uhwi (ob, es->loop_depth);
  write_predicate (ob, es->predicate);
  streamer_write_uhwi (ob, es->param.length ());
  for (i = 0; i < (int) es->param.length (); i++)
    streamer_write_uhwi (ob, es->param[i].change_prob);
}

/* Write inline summaries of all functions in the symbol table encoder.
   Jump functions are shared among ipa-cp and the inliner, so when ipa-cp is
   active, we don't need to write them twice.  */

void
inline_write_summary (void)
{
  struct cgraph_node *node;
  struct output_block *ob = create_output_block (LTO_section_inline_summary);
  lto_symtab_encoder_t encoder = ob->decl_state->symtab_node_encoder;
  unsigned int count = 0;
  int i;

  for (i = 0; i < lto_symtab_encoder_size (encoder); i++)
    {
      symtab_node *snode = lto_symtab_encoder_deref (encoder, i);
      cgraph_node *cnode = dyn_cast <cgraph_node *> (snode);
      if (cnode && cnode->definition && !cnode->alias)
	count++;
    }
  streamer_write_uhwi (ob, count);

  for (i = 0; i < lto_symtab_encoder_size (encoder); i++)
    {
      symtab_node *snode = lto_symtab_encoder_deref (encoder, i);
      cgraph_node *cnode = dyn_cast <cgraph_node *> (snode);
      if (cnode && (node = cnode)->definition && !node->alias)
	{
	  struct inline_summary *info = inline_summaries->get (node);
	  struct bitpack_d bp;
	  struct cgraph_edge *edge;
	  int i;
	  size_time_entry *e;
	  struct condition *c;

	  streamer_write_uhwi (ob,
			       lto_symtab_encoder_encode (encoder, node));
	  streamer_write_hwi (ob, info->estimated_self_stack_size);
	  streamer_write_hwi (ob, info->self_size);
	  streamer_write_hwi (ob, info->self_time);
	  bp = bitpack_create (ob->main_stream);
	  bp_pack_value (&bp, info->inlinable, 1);
	  streamer_write_bitpack (&bp);
	  streamer_write_uhwi (ob, vec_safe_length (info->conds));
	  for (i = 0; vec_safe_iterate (info->conds, i, &c); i++)
	    {
	      streamer_write_uhwi (ob, c->operand_num);
	      streamer_write_uhwi (ob, c->code);
	      stream_write_tree (ob, c->val, true);
	      bp = bitpack_create (ob->main_stream);
	      bp_pack_value (&bp, c->agg_contents, 1);
	      bp_pack_value (&bp, c->by_ref, 1);
	      streamer_write_bitpack (&bp);
	      if (c->agg_contents)
		streamer_write_uhwi (ob, c->offset);
	    }
	  streamer_write_uhwi (ob, vec_safe_length (info->entry));
	  for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
	    {
	      streamer_write_uhwi (ob, e->size);
	      streamer_write_uhwi (ob, e->time);
	      write_predicate (ob, &e->predicate);
	    }
	  write_predicate (ob, info->loop_iterations);
	  write_predicate (ob, info->loop_stride);
	  write_predicate (ob, info->array_index);
	  for (edge = node->callees; edge; edge = edge->next_callee)
	    write_inline_edge_summary (ob, edge);
	  for (edge = node->indirect_calls; edge; edge = edge->next_callee)
	    write_inline_edge_summary (ob, edge);
	}
    }
  streamer_write_char_stream (ob->main_stream, 0);
  produce_asm (ob, NULL);
  destroy_output_block (ob);

  if (optimize && !flag_ipa_cp)
    ipa_prop_write_jump_functions ();
}

/* Release inline summary.  */

void
inline_free_summary (void)
{
  struct cgraph_node *node;
  if (edge_removal_hook_holder)
    symtab->remove_edge_removal_hook (edge_removal_hook_holder);
  edge_removal_hook_holder = NULL;
  if (edge_duplication_hook_holder)
    symtab->remove_edge_duplication_hook (edge_duplication_hook_holder);
  edge_duplication_hook_holder = NULL;
  if (!inline_edge_summary_vec.exists ())
    return;
  FOR_EACH_DEFINED_FUNCTION (node)
    if (!node->alias)
      reset_inline_summary (node, inline_summaries->get (node));
  inline_summaries->release ();
  inline_summaries = NULL;
  inline_edge_summary_vec.release ();
  if (edge_predicate_pool)
    free_alloc_pool (edge_predicate_pool);
  edge_predicate_pool = 0;
}