gcc/ipa-inline-analysis.c
1 /* Inlining decision heuristics.
2 Copyright (C) 2003-2016 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* Analysis used by the inliner and other passes limiting code size growth.
23 We estimate for each function
24 - function body size
25 - average function execution time
26 - inlining size benefit (that is how much of function body size
27 and its call sequence is expected to disappear by inlining)
28 - inlining time benefit
29 - function frame size
30 For each call
31 - call statement size and time
33 inline_summary data structures store the above information locally (i.e.
34 parameters of the function itself) and globally (i.e. parameters of
35 the function created by applying all the inline decisions already
36 present in the callgraph).
38 We provide an accessor to the inline_summary data structure and
39 basic logic updating the parameters when inlining is performed.
41 The summaries are context sensitive. Context means
42 1) partial assignment of known constant values of operands
43 2) whether function is inlined into the call or not.
44 It is easy to add more variants. To represent function size and time
45 that depend on context (i.e. are known to be optimized away when
46 context is known either by inlining or from IP-CP and cloning),
47 we use predicates. Predicates are logical formulas in
48 conjunctive-disjunctive form consisting of clauses. Clauses are bitmaps
49 specifying what conditions must be true. Conditions are simple tests
50 of the form described above.
52 In order to make predicate (possibly) true, all of its clauses must
53 be (possibly) true. To make clause (possibly) true, one of conditions
54 it mentions must be (possibly) true. There are fixed bounds on
55 number of clauses and conditions and all the manipulation functions
56 are conservative in positive direction. I.e. we may lose precision
57 by thinking that predicate may be true even when it is not.
59 estimate_edge_size and estimate_edge_growth can be used to query
60 function size/time in the given context. inline_merge_summary merges
61 properties of caller and callee after inlining.
63 Finally pass_inline_parameters is exported. This is used to drive
64 computation of function parameters used by the early inliner. IPA
65 inliner performs analysis via its analyze_function method. */
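/* An illustrative sketch of the representation described above (the
   condition numbers used here are only examples): suppose dynamic condition
   2 is "op0 == 5" and dynamic condition 3 is "op1 changed".  A clause is a
   bitmap of conditions, so the disjunction "op0 == 5 || op1 changed" is the
   single clause (1 << 2) | (1 << 3), while the conjunction
   "op0 == 5 && op1 changed" is stored as two one-bit clauses.  The true
   predicate is the empty clause list (clause[0] == 0) and the false
   predicate consists of the single clause 1 << predicate_false_condition.  */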
67 #include "config.h"
68 #include "system.h"
69 #include "coretypes.h"
70 #include "backend.h"
71 #include "tree.h"
72 #include "gimple.h"
73 #include "alloc-pool.h"
74 #include "tree-pass.h"
75 #include "ssa.h"
76 #include "tree-streamer.h"
77 #include "cgraph.h"
78 #include "diagnostic.h"
79 #include "fold-const.h"
80 #include "print-tree.h"
81 #include "tree-inline.h"
82 #include "gimple-pretty-print.h"
83 #include "params.h"
84 #include "cfganal.h"
85 #include "gimple-iterator.h"
86 #include "tree-cfg.h"
87 #include "tree-ssa-loop-niter.h"
88 #include "tree-ssa-loop.h"
89 #include "symbol-summary.h"
90 #include "ipa-prop.h"
91 #include "ipa-inline.h"
92 #include "cfgloop.h"
93 #include "tree-scalar-evolution.h"
94 #include "ipa-utils.h"
95 #include "cilk.h"
96 #include "cfgexpand.h"
97 #include "gimplify.h"
99 /* Estimated runtime of a function can easily run into huge numbers with many
100 nested loops. Be sure we can compute time * INLINE_SIZE_SCALE * 2 in an
101 integer. For anything larger we use gcov_type. */
102 #define MAX_TIME 500000
104 /* Number of bits in integer, but we really want to be stable across different
105 hosts. */
106 #define NUM_CONDITIONS 32
108 enum predicate_conditions
110 predicate_false_condition = 0,
111 predicate_not_inlined_condition = 1,
112 predicate_first_dynamic_condition = 2
115 /* Special condition code we use to represent a test that an operand is compile time
116 constant. */
117 #define IS_NOT_CONSTANT ERROR_MARK
118 /* Special condition code we use to represent a test that an operand is not changed
119 across invocations of the function. When an operand IS_NOT_CONSTANT it is always
120 CHANGED; however, e.g. loop invariants can be NOT_CHANGED for a given percentage
121 of executions even when they are not compile time constants. */
122 #define CHANGED IDENTIFIER_NODE
124 /* Holders of ipa cgraph hooks: */
125 static struct cgraph_2edge_hook_list *edge_duplication_hook_holder;
126 static struct cgraph_edge_hook_list *edge_removal_hook_holder;
127 static void inline_edge_removal_hook (struct cgraph_edge *, void *);
128 static void inline_edge_duplication_hook (struct cgraph_edge *,
129 struct cgraph_edge *, void *);
131 /* VECtor holding inline summaries.
132 In GGC memory because conditions might point to constant trees. */
133 function_summary <inline_summary *> *inline_summaries;
134 vec<inline_edge_summary_t> inline_edge_summary_vec;
136 /* Cached node/edge growths. */
137 vec<edge_growth_cache_entry> edge_growth_cache;
139 /* Edge predicates go here. */
140 static object_allocator<predicate> edge_predicate_pool ("edge predicates");
142 /* Return true predicate (tautology).
143 We represent it by empty list of clauses. */
145 static inline struct predicate
146 true_predicate (void)
148 struct predicate p;
149 p.clause[0] = 0;
150 return p;
154 /* Return predicate testing single condition number COND. */
156 static inline struct predicate
157 single_cond_predicate (int cond)
159 struct predicate p;
160 p.clause[0] = 1 << cond;
161 p.clause[1] = 0;
162 return p;
166 /* Return false predicate. Its first clause requires the false condition. */
168 static inline struct predicate
169 false_predicate (void)
171 return single_cond_predicate (predicate_false_condition);
175 /* Return true if P is (true). */
177 static inline bool
178 true_predicate_p (struct predicate *p)
180 return !p->clause[0];
184 /* Return true if P is (false). */
186 static inline bool
187 false_predicate_p (struct predicate *p)
189 if (p->clause[0] == (1 << predicate_false_condition))
191 gcc_checking_assert (!p->clause[1]
192 && p->clause[0] == 1 << predicate_false_condition);
193 return true;
195 return false;
199 /* Return predicate that is set true when function is not inlined. */
201 static inline struct predicate
202 not_inlined_predicate (void)
204 return single_cond_predicate (predicate_not_inlined_condition);
207 /* Simple description of whether a memory load or a condition refers to a load
208 from an aggregate and if so, how and where from in the aggregate.
209 Individual fields have the same meaning as fields with the same name in
210 struct condition. */
212 struct agg_position_info
214 HOST_WIDE_INT offset;
215 bool agg_contents;
216 bool by_ref;
219 /* Add condition to condition list SUMMARY. OPERAND_NUM, SIZE, CODE and VAL
220 correspond to fields of condition structure. AGGPOS describes whether the
221 used operand is loaded from an aggregate and where in the aggregate it is.
222 It can be NULL, which means this is not a load from an aggregate. */
224 static struct predicate
225 add_condition (struct inline_summary *summary, int operand_num,
226 HOST_WIDE_INT size, struct agg_position_info *aggpos,
227 enum tree_code code, tree val)
229 int i;
230 struct condition *c;
231 struct condition new_cond;
232 HOST_WIDE_INT offset;
233 bool agg_contents, by_ref;
235 if (aggpos)
237 offset = aggpos->offset;
238 agg_contents = aggpos->agg_contents;
239 by_ref = aggpos->by_ref;
241 else
243 offset = 0;
244 agg_contents = false;
245 by_ref = false;
248 gcc_checking_assert (operand_num >= 0);
249 for (i = 0; vec_safe_iterate (summary->conds, i, &c); i++)
251 if (c->operand_num == operand_num
252 && c->size == size
253 && c->code == code
254 && c->val == val
255 && c->agg_contents == agg_contents
256 && (!agg_contents || (c->offset == offset && c->by_ref == by_ref)))
257 return single_cond_predicate (i + predicate_first_dynamic_condition);
259 /* Too many conditions. Give up and return constant true. */
260 if (i == NUM_CONDITIONS - predicate_first_dynamic_condition)
261 return true_predicate ();
263 new_cond.operand_num = operand_num;
264 new_cond.code = code;
265 new_cond.val = val;
266 new_cond.agg_contents = agg_contents;
267 new_cond.by_ref = by_ref;
268 new_cond.offset = offset;
269 new_cond.size = size;
270 vec_safe_push (summary->conds, new_cond);
271 return single_cond_predicate (i + predicate_first_dynamic_condition);
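/* A small usage sketch (the operand number, size and value below are only
   examples): calling
     add_condition (summary, 0, 32, NULL, EQ_EXPR, val);
   twice with identical arguments yields the same single-condition predicate
   both times, because the second call finds the matching entry already
   present in summary->conds.  Once NUM_CONDITIONS entries are used up,
   further distinct conditions conservatively degrade to the true
   predicate.  */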
275 /* Add clause CLAUSE into the predicate P. */
277 static inline void
278 add_clause (conditions conditions, struct predicate *p, clause_t clause)
280 int i;
281 int i2;
282 int insert_here = -1;
283 int c1, c2;
285 /* True clause. */
286 if (!clause)
287 return;
289 /* False clause makes the whole predicate false. Kill the other variants. */
290 if (clause == (1 << predicate_false_condition))
292 p->clause[0] = (1 << predicate_false_condition);
293 p->clause[1] = 0;
294 return;
296 if (false_predicate_p (p))
297 return;
299 /* No one should be silly enough to add false into nontrivial clauses. */
300 gcc_checking_assert (!(clause & (1 << predicate_false_condition)));
302 /* Look where to insert the clause. At the same time prune out
303 clauses of P that are implied by the new clause and thus
304 redundant. */
305 for (i = 0, i2 = 0; i <= MAX_CLAUSES; i++)
307 p->clause[i2] = p->clause[i];
309 if (!p->clause[i])
310 break;
312 /* If p->clause[i] implies clause, there is nothing to add. */
313 if ((p->clause[i] & clause) == p->clause[i])
315 /* We had nothing to add, none of clauses should've become
316 redundant. */
317 gcc_checking_assert (i == i2);
318 return;
321 if (p->clause[i] < clause && insert_here < 0)
322 insert_here = i2;
324 /* If clause implies p->clause[i], then p->clause[i] becomes redundant.
325 Otherwise the p->clause[i] has to stay. */
326 if ((p->clause[i] & clause) != clause)
327 i2++;
330 /* Look for clauses that are obviously true. I.e.
331 op0 == 5 || op0 != 5. */
332 for (c1 = predicate_first_dynamic_condition; c1 < NUM_CONDITIONS; c1++)
334 condition *cc1;
335 if (!(clause & (1 << c1)))
336 continue;
337 cc1 = &(*conditions)[c1 - predicate_first_dynamic_condition];
338 /* We have no way to represent !CHANGED and !IS_NOT_CONSTANT
339 and thus there is no point for looking for them. */
340 if (cc1->code == CHANGED || cc1->code == IS_NOT_CONSTANT)
341 continue;
342 for (c2 = c1 + 1; c2 < NUM_CONDITIONS; c2++)
343 if (clause & (1 << c2))
345 condition *cc1 =
346 &(*conditions)[c1 - predicate_first_dynamic_condition];
347 condition *cc2 =
348 &(*conditions)[c2 - predicate_first_dynamic_condition];
349 if (cc1->operand_num == cc2->operand_num
350 && cc1->val == cc2->val
351 && cc2->code != IS_NOT_CONSTANT
352 && cc2->code != CHANGED
353 && cc1->code == invert_tree_comparison (cc2->code,
354 HONOR_NANS (cc1->val)))
355 return;
360 /* We ran out of variants. Be conservative in the positive direction. */
361 if (i2 == MAX_CLAUSES)
362 return;
363 /* Keep clauses in decreasing order. This makes equivalence testing easy. */
364 p->clause[i2 + 1] = 0;
365 if (insert_here >= 0)
366 for (; i2 > insert_here; i2--)
367 p->clause[i2] = p->clause[i2 - 1];
368 else
369 insert_here = i2;
370 p->clause[insert_here] = clause;
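/* For illustration, using hypothetical dynamic conditions c2 and c3 (bits 2
   and 3): if P already contains the clause (1 << 2) | (1 << 3), i.e.
   "c2 || c3", then adding the clause 1 << 2 drops the wider clause as
   redundant, since (c2 || c3) && c2 is just c2.  Conversely, adding
   (1 << 2) | (1 << 3) to a predicate that already contains 1 << 2 is a
   no-op.  A clause pairing a condition with its exact negation, such as
   "op0 == 5 || op0 != 5", is recognized as a tautology and never added.  */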
374 /* Return P & P2. */
376 static struct predicate
377 and_predicates (conditions conditions,
378 struct predicate *p, struct predicate *p2)
380 struct predicate out = *p;
381 int i;
383 /* Avoid busy work. */
384 if (false_predicate_p (p2) || true_predicate_p (p))
385 return *p2;
386 if (false_predicate_p (p) || true_predicate_p (p2))
387 return *p;
389 /* See how far predicates match. */
390 for (i = 0; p->clause[i] && p->clause[i] == p2->clause[i]; i++)
392 gcc_checking_assert (i < MAX_CLAUSES);
395 /* Combine the rest of the predicates. */
396 for (; p2->clause[i]; i++)
398 gcc_checking_assert (i < MAX_CLAUSES);
399 add_clause (conditions, &out, p2->clause[i]);
401 return out;
405 /* Return true if predicates are obviously equal. */
407 static inline bool
408 predicates_equal_p (struct predicate *p, struct predicate *p2)
410 int i;
411 for (i = 0; p->clause[i]; i++)
413 gcc_checking_assert (i < MAX_CLAUSES);
414 gcc_checking_assert (p->clause[i] > p->clause[i + 1]);
415 gcc_checking_assert (!p2->clause[i]
416 || p2->clause[i] > p2->clause[i + 1]);
417 if (p->clause[i] != p2->clause[i])
418 return false;
420 return !p2->clause[i];
424 /* Return P | P2. */
426 static struct predicate
427 or_predicates (conditions conditions,
428 struct predicate *p, struct predicate *p2)
430 struct predicate out = true_predicate ();
431 int i, j;
433 /* Avoid busy work. */
434 if (false_predicate_p (p2) || true_predicate_p (p))
435 return *p;
436 if (false_predicate_p (p) || true_predicate_p (p2))
437 return *p2;
438 if (predicates_equal_p (p, p2))
439 return *p;
441 /* OK, combine the predicates. */
442 for (i = 0; p->clause[i]; i++)
443 for (j = 0; p2->clause[j]; j++)
445 gcc_checking_assert (i < MAX_CLAUSES && j < MAX_CLAUSES);
446 add_clause (conditions, &out, p->clause[i] | p2->clause[j]);
448 return out;
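/* Worked example: with A, B and C standing for arbitrary clauses, or-ing
   the predicate "A && B" with the predicate "C" distributes into
   "(A || C) && (B || C)"; every pairwise union of clauses goes through
   add_clause, which also prunes any redundancy the distribution creates.  */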
452 /* Given the partial truth assignment in POSSIBLE_TRUTHS, return false
453 if predicate P is known to be false. */
455 static bool
456 evaluate_predicate (struct predicate *p, clause_t possible_truths)
458 int i;
460 /* True remains true. */
461 if (true_predicate_p (p))
462 return true;
464 gcc_assert (!(possible_truths & (1 << predicate_false_condition)));
466 /* See if we can find a clause we can disprove. */
467 for (i = 0; p->clause[i]; i++)
469 gcc_checking_assert (i < MAX_CLAUSES);
470 if (!(p->clause[i] & possible_truths))
471 return false;
473 return true;
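/* For instance, if POSSIBLE_TRUTHS has only bit 2 set (some hypothetical
   condition c2 may be true), a predicate whose only clause is 1 << 3
   evaluates to false, while a predicate with the clause (1 << 2) | (1 << 3)
   remains possibly true because that clause intersects POSSIBLE_TRUTHS.  */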
476 /* Return the probability in range 0...REG_BR_PROB_BASE that the predicated
477 instruction will be recomputed per invocation of the inlined call. */
479 static int
480 predicate_probability (conditions conds,
481 struct predicate *p, clause_t possible_truths,
482 vec<inline_param_summary> inline_param_summary)
484 int i;
485 int combined_prob = REG_BR_PROB_BASE;
487 /* True remains true. */
488 if (true_predicate_p (p))
489 return REG_BR_PROB_BASE;
491 if (false_predicate_p (p))
492 return 0;
494 gcc_assert (!(possible_truths & (1 << predicate_false_condition)));
496 /* See if we can find a clause we can disprove. */
497 for (i = 0; p->clause[i]; i++)
499 gcc_checking_assert (i < MAX_CLAUSES);
500 if (!(p->clause[i] & possible_truths))
501 return 0;
502 else
504 int this_prob = 0;
505 int i2;
506 if (!inline_param_summary.exists ())
507 return REG_BR_PROB_BASE;
508 for (i2 = 0; i2 < NUM_CONDITIONS; i2++)
509 if ((p->clause[i] & possible_truths) & (1 << i2))
511 if (i2 >= predicate_first_dynamic_condition)
513 condition *c =
514 &(*conds)[i2 - predicate_first_dynamic_condition];
515 if (c->code == CHANGED
516 && (c->operand_num <
517 (int) inline_param_summary.length ()))
519 int iprob =
520 inline_param_summary[c->operand_num].change_prob;
521 this_prob = MAX (this_prob, iprob);
523 else
524 this_prob = REG_BR_PROB_BASE;
526 else
527 this_prob = REG_BR_PROB_BASE;
529 combined_prob = MIN (this_prob, combined_prob);
530 if (!combined_prob)
531 return 0;
534 return combined_prob;
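/* Sketch of the computation: for each clause we take the maximum change
   probability over its possibly-true CHANGED conditions (any other
   possibly-true condition counts as REG_BR_PROB_BASE) and the result is the
   minimum over all clauses.  E.g. a predicate with the single hypothetical
   clause "op0 changed", where op0 changes on half of the invocations,
   yields REG_BR_PROB_BASE / 2.  */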
538 /* Dump conditional COND. */
540 static void
541 dump_condition (FILE *f, conditions conditions, int cond)
543 condition *c;
544 if (cond == predicate_false_condition)
545 fprintf (f, "false");
546 else if (cond == predicate_not_inlined_condition)
547 fprintf (f, "not inlined");
548 else
550 c = &(*conditions)[cond - predicate_first_dynamic_condition];
551 fprintf (f, "op%i", c->operand_num);
552 if (c->agg_contents)
553 fprintf (f, "[%soffset: " HOST_WIDE_INT_PRINT_DEC "]",
554 c->by_ref ? "ref " : "", c->offset);
555 if (c->code == IS_NOT_CONSTANT)
557 fprintf (f, " not constant");
558 return;
560 if (c->code == CHANGED)
562 fprintf (f, " changed");
563 return;
565 fprintf (f, " %s ", op_symbol_code (c->code));
566 print_generic_expr (f, c->val, 1);
571 /* Dump clause CLAUSE. */
573 static void
574 dump_clause (FILE *f, conditions conds, clause_t clause)
576 int i;
577 bool found = false;
578 fprintf (f, "(");
579 if (!clause)
580 fprintf (f, "true");
581 for (i = 0; i < NUM_CONDITIONS; i++)
582 if (clause & (1 << i))
584 if (found)
585 fprintf (f, " || ");
586 found = true;
587 dump_condition (f, conds, i);
589 fprintf (f, ")");
593 /* Dump predicate PREDICATE. */
595 static void
596 dump_predicate (FILE *f, conditions conds, struct predicate *pred)
598 int i;
599 if (true_predicate_p (pred))
600 dump_clause (f, conds, 0);
601 else
602 for (i = 0; pred->clause[i]; i++)
604 if (i)
605 fprintf (f, " && ");
606 dump_clause (f, conds, pred->clause[i]);
608 fprintf (f, "\n");
612 /* Dump inline hints. */
613 void
614 dump_inline_hints (FILE *f, inline_hints hints)
616 if (!hints)
617 return;
618 fprintf (f, "inline hints:");
619 if (hints & INLINE_HINT_indirect_call)
621 hints &= ~INLINE_HINT_indirect_call;
622 fprintf (f, " indirect_call");
624 if (hints & INLINE_HINT_loop_iterations)
626 hints &= ~INLINE_HINT_loop_iterations;
627 fprintf (f, " loop_iterations");
629 if (hints & INLINE_HINT_loop_stride)
631 hints &= ~INLINE_HINT_loop_stride;
632 fprintf (f, " loop_stride");
634 if (hints & INLINE_HINT_same_scc)
636 hints &= ~INLINE_HINT_same_scc;
637 fprintf (f, " same_scc");
639 if (hints & INLINE_HINT_in_scc)
641 hints &= ~INLINE_HINT_in_scc;
642 fprintf (f, " in_scc");
644 if (hints & INLINE_HINT_cross_module)
646 hints &= ~INLINE_HINT_cross_module;
647 fprintf (f, " cross_module");
649 if (hints & INLINE_HINT_declared_inline)
651 hints &= ~INLINE_HINT_declared_inline;
652 fprintf (f, " declared_inline");
654 if (hints & INLINE_HINT_array_index)
656 hints &= ~INLINE_HINT_array_index;
657 fprintf (f, " array_index");
659 if (hints & INLINE_HINT_known_hot)
661 hints &= ~INLINE_HINT_known_hot;
662 fprintf (f, " known_hot");
664 gcc_assert (!hints);
668 /* Record SIZE and TIME under condition PRED into the inline summary. */
670 static void
671 account_size_time (struct inline_summary *summary, int size, int time,
672 struct predicate *pred)
674 size_time_entry *e;
675 bool found = false;
676 int i;
678 if (false_predicate_p (pred))
679 return;
681 /* We need to create an initial empty unconditional clause, but otherwise
682 we don't need to account empty times and sizes. */
683 if (!size && !time && summary->entry)
684 return;
686 /* Watch overflow that might result from insane profiles. */
687 if (time > MAX_TIME * INLINE_TIME_SCALE)
688 time = MAX_TIME * INLINE_TIME_SCALE;
689 gcc_assert (time >= 0);
691 for (i = 0; vec_safe_iterate (summary->entry, i, &e); i++)
692 if (predicates_equal_p (&e->predicate, pred))
694 found = true;
695 break;
697 if (i == 256)
699 i = 0;
700 found = true;
701 e = &(*summary->entry)[0];
702 gcc_assert (!e->predicate.clause[0]);
703 if (dump_file && (dump_flags & TDF_DETAILS))
704 fprintf (dump_file,
705 "\t\tReached limit on number of entries, "
706 "ignoring the predicate.");
708 if (dump_file && (dump_flags & TDF_DETAILS) && (time || size))
710 fprintf (dump_file,
711 "\t\tAccounting size:%3.2f, time:%3.2f on %spredicate:",
712 ((double) size) / INLINE_SIZE_SCALE,
713 ((double) time) / INLINE_TIME_SCALE, found ? "" : "new ");
714 dump_predicate (dump_file, summary->conds, pred);
716 if (!found)
718 struct size_time_entry new_entry;
719 new_entry.size = size;
720 new_entry.time = time;
721 new_entry.predicate = *pred;
722 vec_safe_push (summary->entry, new_entry);
724 else
726 e->size += size;
727 e->time += time;
728 if (e->time > MAX_TIME * INLINE_TIME_SCALE)
729 e->time = MAX_TIME * INLINE_TIME_SCALE;
733 /* We proved E to be unreachable, redirect it to __builtin_unreachable. */
735 static struct cgraph_edge *
736 redirect_to_unreachable (struct cgraph_edge *e)
738 struct cgraph_node *callee = !e->inline_failed ? e->callee : NULL;
739 struct cgraph_node *target = cgraph_node::get_create
740 (builtin_decl_implicit (BUILT_IN_UNREACHABLE));
742 if (e->speculative)
743 e = e->resolve_speculation (target->decl);
744 else if (!e->callee)
745 e->make_direct (target);
746 else
747 e->redirect_callee (target);
748 struct inline_edge_summary *es = inline_edge_summary (e);
749 e->inline_failed = CIF_UNREACHABLE;
750 e->frequency = 0;
751 e->count = 0;
752 es->call_stmt_size = 0;
753 es->call_stmt_time = 0;
754 if (callee)
755 callee->remove_symbol_and_inline_clones ();
756 return e;
759 /* Set predicate for edge E. */
761 static void
762 edge_set_predicate (struct cgraph_edge *e, struct predicate *predicate)
764 /* If the edge is determined to be never executed, redirect it
765 to BUILTIN_UNREACHABLE to save inliner from inlining into it. */
766 if (predicate && false_predicate_p (predicate)
767 /* When handling speculative edges, we need to do the redirection
768 just once. Do it always on the direct edge, so we do not
769 attempt to resolve speculation while duplicating the edge. */
770 && (!e->speculative || e->callee))
771 e = redirect_to_unreachable (e);
773 struct inline_edge_summary *es = inline_edge_summary (e);
774 if (predicate && !true_predicate_p (predicate))
776 if (!es->predicate)
777 es->predicate = edge_predicate_pool.allocate ();
778 *es->predicate = *predicate;
780 else
782 if (es->predicate)
783 edge_predicate_pool.remove (es->predicate);
784 es->predicate = NULL;
788 /* Set predicate for hint *P. */
790 static void
791 set_hint_predicate (struct predicate **p, struct predicate new_predicate)
793 if (false_predicate_p (&new_predicate) || true_predicate_p (&new_predicate))
795 if (*p)
796 edge_predicate_pool.remove (*p);
797 *p = NULL;
799 else
801 if (!*p)
802 *p = edge_predicate_pool.allocate ();
803 **p = new_predicate;
808 /* KNOWN_VALS is partial mapping of parameters of NODE to constant values.
809 KNOWN_AGGS is a vector of aggregate jump functions for each parameter.
810 Return clause of possible truths. When INLINE_P is true, assume that we are
811 inlining.
813 ERROR_MARK means compile time invariant. */
815 static clause_t
816 evaluate_conditions_for_known_args (struct cgraph_node *node,
817 bool inline_p,
818 vec<tree> known_vals,
819 vec<ipa_agg_jump_function_p>
820 known_aggs)
822 clause_t clause = inline_p ? 0 : 1 << predicate_not_inlined_condition;
823 struct inline_summary *info = inline_summaries->get (node);
824 int i;
825 struct condition *c;
827 for (i = 0; vec_safe_iterate (info->conds, i, &c); i++)
829 tree val;
830 tree res;
832 /* We allow call stmt to have fewer arguments than the callee function
833 (especially for K&R style programs). So bound check here (we assume
834 known_aggs vector, if non-NULL, has the same length as
835 known_vals). */
836 gcc_checking_assert (!known_aggs.exists ()
837 || (known_vals.length () == known_aggs.length ()));
838 if (c->operand_num >= (int) known_vals.length ())
840 clause |= 1 << (i + predicate_first_dynamic_condition);
841 continue;
844 if (c->agg_contents)
846 struct ipa_agg_jump_function *agg;
848 if (c->code == CHANGED
849 && !c->by_ref
850 && (known_vals[c->operand_num] == error_mark_node))
851 continue;
853 if (known_aggs.exists ())
855 agg = known_aggs[c->operand_num];
856 val = ipa_find_agg_cst_for_param (agg, known_vals[c->operand_num],
857 c->offset, c->by_ref);
859 else
860 val = NULL_TREE;
862 else
864 val = known_vals[c->operand_num];
865 if (val == error_mark_node && c->code != CHANGED)
866 val = NULL_TREE;
869 if (!val)
871 clause |= 1 << (i + predicate_first_dynamic_condition);
872 continue;
874 if (c->code == CHANGED)
875 continue;
877 if (tree_to_shwi (TYPE_SIZE (TREE_TYPE (val))) != c->size)
879 clause |= 1 << (i + predicate_first_dynamic_condition);
880 continue;
882 if (c->code == IS_NOT_CONSTANT)
883 continue;
885 val = fold_unary (VIEW_CONVERT_EXPR, TREE_TYPE (c->val), val);
886 res = val
887 ? fold_binary_to_constant (c->code, boolean_type_node, val, c->val)
888 : NULL;
890 if (res && integer_zerop (res))
891 continue;
893 clause |= 1 << (i + predicate_first_dynamic_condition);
895 return clause;
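/* Illustration (the condition and values are hypothetical): if the only
   dynamic condition is "op0 == 5" and KNOWN_VALS[0] is the constant 5, the
   comparison folds to true and the corresponding bit is set in the returned
   clause; if KNOWN_VALS[0] is 7, it folds to false and the bit stays clear;
   if the value of op0 is unknown, the bit is set conservatively.  When
   INLINE_P is false, the predicate_not_inlined_condition bit is set as
   well.  */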
899 /* Work out what conditions might be true at invocation of E. */
901 static void
902 evaluate_properties_for_edge (struct cgraph_edge *e, bool inline_p,
903 clause_t *clause_ptr,
904 vec<tree> *known_vals_ptr,
905 vec<ipa_polymorphic_call_context>
906 *known_contexts_ptr,
907 vec<ipa_agg_jump_function_p> *known_aggs_ptr)
909 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
910 struct inline_summary *info = inline_summaries->get (callee);
911 vec<tree> known_vals = vNULL;
912 vec<ipa_agg_jump_function_p> known_aggs = vNULL;
914 if (clause_ptr)
915 *clause_ptr = inline_p ? 0 : 1 << predicate_not_inlined_condition;
916 if (known_vals_ptr)
917 known_vals_ptr->create (0);
918 if (known_contexts_ptr)
919 known_contexts_ptr->create (0);
921 if (ipa_node_params_sum
922 && !e->call_stmt_cannot_inline_p
923 && ((clause_ptr && info->conds) || known_vals_ptr || known_contexts_ptr))
925 struct ipa_node_params *parms_info;
926 struct ipa_edge_args *args = IPA_EDGE_REF (e);
927 struct inline_edge_summary *es = inline_edge_summary (e);
928 int i, count = ipa_get_cs_argument_count (args);
930 if (e->caller->global.inlined_to)
931 parms_info = IPA_NODE_REF (e->caller->global.inlined_to);
932 else
933 parms_info = IPA_NODE_REF (e->caller);
935 if (count && (info->conds || known_vals_ptr))
936 known_vals.safe_grow_cleared (count);
937 if (count && (info->conds || known_aggs_ptr))
938 known_aggs.safe_grow_cleared (count);
939 if (count && known_contexts_ptr)
940 known_contexts_ptr->safe_grow_cleared (count);
942 for (i = 0; i < count; i++)
944 struct ipa_jump_func *jf = ipa_get_ith_jump_func (args, i);
945 tree cst = ipa_value_from_jfunc (parms_info, jf);
947 if (!cst && e->call_stmt
948 && i < (int)gimple_call_num_args (e->call_stmt))
950 cst = gimple_call_arg (e->call_stmt, i);
951 if (!is_gimple_min_invariant (cst))
952 cst = NULL;
954 if (cst)
956 gcc_checking_assert (TREE_CODE (cst) != TREE_BINFO);
957 if (known_vals.exists ())
958 known_vals[i] = cst;
960 else if (inline_p && !es->param[i].change_prob)
961 known_vals[i] = error_mark_node;
963 if (known_contexts_ptr)
964 (*known_contexts_ptr)[i] = ipa_context_from_jfunc (parms_info, e,
965 i, jf);
966 /* TODO: When IPA-CP starts propagating and merging aggregate jump
967 functions, use its knowledge of the caller too, just like the
968 scalar case above. */
969 known_aggs[i] = &jf->agg;
972 else if (e->call_stmt && !e->call_stmt_cannot_inline_p
973 && ((clause_ptr && info->conds) || known_vals_ptr))
975 int i, count = (int)gimple_call_num_args (e->call_stmt);
977 if (count && (info->conds || known_vals_ptr))
978 known_vals.safe_grow_cleared (count);
979 for (i = 0; i < count; i++)
981 tree cst = gimple_call_arg (e->call_stmt, i);
982 if (!is_gimple_min_invariant (cst))
983 cst = NULL;
984 if (cst)
985 known_vals[i] = cst;
989 if (clause_ptr)
990 *clause_ptr = evaluate_conditions_for_known_args (callee, inline_p,
991 known_vals, known_aggs);
993 if (known_vals_ptr)
994 *known_vals_ptr = known_vals;
995 else
996 known_vals.release ();
998 if (known_aggs_ptr)
999 *known_aggs_ptr = known_aggs;
1000 else
1001 known_aggs.release ();
1005 /* Allocate the inline summary vector or resize it to cover all cgraph nodes. */
1007 static void
1008 inline_summary_alloc (void)
1010 if (!edge_removal_hook_holder)
1011 edge_removal_hook_holder =
1012 symtab->add_edge_removal_hook (&inline_edge_removal_hook, NULL);
1013 if (!edge_duplication_hook_holder)
1014 edge_duplication_hook_holder =
1015 symtab->add_edge_duplication_hook (&inline_edge_duplication_hook, NULL);
1017 if (!inline_summaries)
1018 inline_summaries = (inline_summary_t*) inline_summary_t::create_ggc (symtab);
1020 if (inline_edge_summary_vec.length () <= (unsigned) symtab->edges_max_uid)
1021 inline_edge_summary_vec.safe_grow_cleared (symtab->edges_max_uid + 1);
1024 /* We are called multiple times for a given function; clear
1025 data from the previous run so they are not accumulated. */
1027 static void
1028 reset_inline_edge_summary (struct cgraph_edge *e)
1030 if (e->uid < (int) inline_edge_summary_vec.length ())
1032 struct inline_edge_summary *es = inline_edge_summary (e);
1034 es->call_stmt_size = es->call_stmt_time = 0;
1035 if (es->predicate)
1036 edge_predicate_pool.remove (es->predicate);
1037 es->predicate = NULL;
1038 es->param.release ();
1042 /* We are called multiple times for a given function; clear
1043 data from the previous run so they are not accumulated. */
1045 static void
1046 reset_inline_summary (struct cgraph_node *node,
1047 inline_summary *info)
1049 struct cgraph_edge *e;
1051 info->self_size = info->self_time = 0;
1052 info->estimated_stack_size = 0;
1053 info->estimated_self_stack_size = 0;
1054 info->stack_frame_offset = 0;
1055 info->size = 0;
1056 info->time = 0;
1057 info->growth = 0;
1058 info->scc_no = 0;
1059 if (info->loop_iterations)
1061 edge_predicate_pool.remove (info->loop_iterations);
1062 info->loop_iterations = NULL;
1064 if (info->loop_stride)
1066 edge_predicate_pool.remove (info->loop_stride);
1067 info->loop_stride = NULL;
1069 if (info->array_index)
1071 edge_predicate_pool.remove (info->array_index);
1072 info->array_index = NULL;
1074 vec_free (info->conds);
1075 vec_free (info->entry);
1076 for (e = node->callees; e; e = e->next_callee)
1077 reset_inline_edge_summary (e);
1078 for (e = node->indirect_calls; e; e = e->next_callee)
1079 reset_inline_edge_summary (e);
1080 info->fp_expressions = false;
1083 /* Hook that is called by cgraph.c when a node is removed. */
1085 void
1086 inline_summary_t::remove (cgraph_node *node, inline_summary *info)
1088 reset_inline_summary (node, info);
1091 /* Remap predicate P of former function to be predicate of duplicated function.
1092 POSSIBLE_TRUTHS is clause of possible truths in the duplicated node,
1093 INFO is inline summary of the duplicated node. */
1095 static struct predicate
1096 remap_predicate_after_duplication (struct predicate *p,
1097 clause_t possible_truths,
1098 struct inline_summary *info)
1100 struct predicate new_predicate = true_predicate ();
1101 int j;
1102 for (j = 0; p->clause[j]; j++)
1103 if (!(possible_truths & p->clause[j]))
1105 new_predicate = false_predicate ();
1106 break;
1108 else
1109 add_clause (info->conds, &new_predicate,
1110 possible_truths & p->clause[j]);
1111 return new_predicate;
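/* For example, suppose POSSIBLE_TRUTHS of the duplicated node has bits 2
   and 4 set (hypothetical conditions).  A source clause (1 << 2) | (1 << 3)
   is narrowed to just 1 << 2 in the remapped predicate, while a source
   clause 1 << 3, which does not intersect POSSIBLE_TRUTHS at all, makes the
   whole remapped predicate false.  */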
1114 /* Same as remap_predicate_after_duplication but handle hint predicate *P.
1115 Additionally care about allocating new memory slot for updated predicate
1116 and set it to NULL when it becomes true or false (and thus uninteresting).
1119 static void
1120 remap_hint_predicate_after_duplication (struct predicate **p,
1121 clause_t possible_truths,
1122 struct inline_summary *info)
1124 struct predicate new_predicate;
1126 if (!*p)
1127 return;
1129 new_predicate = remap_predicate_after_duplication (*p,
1130 possible_truths, info);
1131 /* We do not want to free previous predicate; it is used by node origin. */
1132 *p = NULL;
1133 set_hint_predicate (p, new_predicate);
1137 /* Hook that is called by cgraph.c when a node is duplicated. */
1138 void
1139 inline_summary_t::duplicate (cgraph_node *src,
1140 cgraph_node *dst,
1141 inline_summary *,
1142 inline_summary *info)
1144 inline_summary_alloc ();
1145 memcpy (info, inline_summaries->get (src), sizeof (inline_summary));
1146 /* TODO: as an optimization, we may avoid copying conditions
1147 that are known to be false or true. */
1148 info->conds = vec_safe_copy (info->conds);
1150 /* When there are any replacements in the function body, see if we can figure
1151 out that something was optimized out. */
1152 if (ipa_node_params_sum && dst->clone.tree_map)
1154 vec<size_time_entry, va_gc> *entry = info->entry;
1155 /* Use SRC parm info since it may not be copied yet. */
1156 struct ipa_node_params *parms_info = IPA_NODE_REF (src);
1157 vec<tree> known_vals = vNULL;
1158 int count = ipa_get_param_count (parms_info);
1159 int i, j;
1160 clause_t possible_truths;
1161 struct predicate true_pred = true_predicate ();
1162 size_time_entry *e;
1163 int optimized_out_size = 0;
1164 bool inlined_to_p = false;
1165 struct cgraph_edge *edge, *next;
1167 info->entry = 0;
1168 known_vals.safe_grow_cleared (count);
1169 for (i = 0; i < count; i++)
1171 struct ipa_replace_map *r;
1173 for (j = 0; vec_safe_iterate (dst->clone.tree_map, j, &r); j++)
1175 if (((!r->old_tree && r->parm_num == i)
1176 || (r->old_tree && r->old_tree == ipa_get_param (parms_info, i)))
1177 && r->replace_p && !r->ref_p)
1179 known_vals[i] = r->new_tree;
1180 break;
1184 possible_truths = evaluate_conditions_for_known_args (dst, false,
1185 known_vals,
1186 vNULL);
1187 known_vals.release ();
1189 account_size_time (info, 0, 0, &true_pred);
1191 /* Remap size_time vectors.
1192 Simplify the predicate by pruning out alternatives that are known
1193 to be false.
1194 TODO: as an optimization, we can also eliminate conditions known
1195 to be true. */
1196 for (i = 0; vec_safe_iterate (entry, i, &e); i++)
1198 struct predicate new_predicate;
1199 new_predicate = remap_predicate_after_duplication (&e->predicate,
1200 possible_truths,
1201 info);
1202 if (false_predicate_p (&new_predicate))
1203 optimized_out_size += e->size;
1204 else
1205 account_size_time (info, e->size, e->time, &new_predicate);
1208 /* Remap edge predicates with the same simplification as above.
1209 Also copy constantness arrays. */
1210 for (edge = dst->callees; edge; edge = next)
1212 struct predicate new_predicate;
1213 struct inline_edge_summary *es = inline_edge_summary (edge);
1214 next = edge->next_callee;
1216 if (!edge->inline_failed)
1217 inlined_to_p = true;
1218 if (!es->predicate)
1219 continue;
1220 new_predicate = remap_predicate_after_duplication (es->predicate,
1221 possible_truths,
1222 info);
1223 if (false_predicate_p (&new_predicate)
1224 && !false_predicate_p (es->predicate))
1225 optimized_out_size += es->call_stmt_size * INLINE_SIZE_SCALE;
1226 edge_set_predicate (edge, &new_predicate);
1229 /* Remap indirect edge predicates with the same simplification as above.
1230 Also copy constantness arrays. */
1231 for (edge = dst->indirect_calls; edge; edge = next)
1233 struct predicate new_predicate;
1234 struct inline_edge_summary *es = inline_edge_summary (edge);
1235 next = edge->next_callee;
1237 gcc_checking_assert (edge->inline_failed);
1238 if (!es->predicate)
1239 continue;
1240 new_predicate = remap_predicate_after_duplication (es->predicate,
1241 possible_truths,
1242 info);
1243 if (false_predicate_p (&new_predicate)
1244 && !false_predicate_p (es->predicate))
1245 optimized_out_size += es->call_stmt_size * INLINE_SIZE_SCALE;
1246 edge_set_predicate (edge, &new_predicate);
1248 remap_hint_predicate_after_duplication (&info->loop_iterations,
1249 possible_truths, info);
1250 remap_hint_predicate_after_duplication (&info->loop_stride,
1251 possible_truths, info);
1252 remap_hint_predicate_after_duplication (&info->array_index,
1253 possible_truths, info);
1255 /* If the inliner or anything after the inliner ever starts producing
1256 non-trivial clones, we will get in trouble with the lack of information
1257 about updating self sizes, because the size vectors already contain
1258 sizes of the callees. */
1259 gcc_assert (!inlined_to_p || !optimized_out_size);
1261 else
1263 info->entry = vec_safe_copy (info->entry);
1264 if (info->loop_iterations)
1266 predicate p = *info->loop_iterations;
1267 info->loop_iterations = NULL;
1268 set_hint_predicate (&info->loop_iterations, p);
1270 if (info->loop_stride)
1272 predicate p = *info->loop_stride;
1273 info->loop_stride = NULL;
1274 set_hint_predicate (&info->loop_stride, p);
1276 if (info->array_index)
1278 predicate p = *info->array_index;
1279 info->array_index = NULL;
1280 set_hint_predicate (&info->array_index, p);
1283 if (!dst->global.inlined_to)
1284 inline_update_overall_summary (dst);
1288 /* Hook that is called by cgraph.c when a node is duplicated. */
1290 static void
1291 inline_edge_duplication_hook (struct cgraph_edge *src,
1292 struct cgraph_edge *dst,
1293 ATTRIBUTE_UNUSED void *data)
1295 struct inline_edge_summary *info;
1296 struct inline_edge_summary *srcinfo;
1297 inline_summary_alloc ();
1298 info = inline_edge_summary (dst);
1299 srcinfo = inline_edge_summary (src);
1300 memcpy (info, srcinfo, sizeof (struct inline_edge_summary));
1301 info->predicate = NULL;
1302 edge_set_predicate (dst, srcinfo->predicate);
1303 info->param = srcinfo->param.copy ();
1304 if (!dst->indirect_unknown_callee && src->indirect_unknown_callee)
1306 info->call_stmt_size -= (eni_size_weights.indirect_call_cost
1307 - eni_size_weights.call_cost);
1308 info->call_stmt_time -= (eni_time_weights.indirect_call_cost
1309 - eni_time_weights.call_cost);
1314 /* Keep edge cache consistent across edge removal. */
1316 static void
1317 inline_edge_removal_hook (struct cgraph_edge *edge,
1318 void *data ATTRIBUTE_UNUSED)
1320 if (edge_growth_cache.exists ())
1321 reset_edge_growth_cache (edge);
1322 reset_inline_edge_summary (edge);
1326 /* Initialize growth caches. */
1328 void
1329 initialize_growth_caches (void)
1331 if (symtab->edges_max_uid)
1332 edge_growth_cache.safe_grow_cleared (symtab->edges_max_uid);
1336 /* Free growth caches. */
1338 void
1339 free_growth_caches (void)
1341 edge_growth_cache.release ();
1345 /* Dump edge summaries associated to NODE and recursively to all clones.
1346 Indent by INDENT. */
1348 static void
1349 dump_inline_edge_summary (FILE *f, int indent, struct cgraph_node *node,
1350 struct inline_summary *info)
1352 struct cgraph_edge *edge;
1353 for (edge = node->callees; edge; edge = edge->next_callee)
1355 struct inline_edge_summary *es = inline_edge_summary (edge);
1356 struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
1357 int i;
1359 fprintf (f,
1360 "%*s%s/%i %s\n%*s loop depth:%2i freq:%4i size:%2i"
1361 " time: %2i callee size:%2i stack:%2i",
1362 indent, "", callee->name (), callee->order,
1363 !edge->inline_failed
1364 ? "inlined" : cgraph_inline_failed_string (edge-> inline_failed),
1365 indent, "", es->loop_depth, edge->frequency,
1366 es->call_stmt_size, es->call_stmt_time,
1367 (int) inline_summaries->get (callee)->size / INLINE_SIZE_SCALE,
1368 (int) inline_summaries->get (callee)->estimated_stack_size);
1370 if (es->predicate)
1372 fprintf (f, " predicate: ");
1373 dump_predicate (f, info->conds, es->predicate);
1375 else
1376 fprintf (f, "\n");
1377 if (es->param.exists ())
1378 for (i = 0; i < (int) es->param.length (); i++)
1380 int prob = es->param[i].change_prob;
1382 if (!prob)
1383 fprintf (f, "%*s op%i is compile time invariant\n",
1384 indent + 2, "", i);
1385 else if (prob != REG_BR_PROB_BASE)
1386 fprintf (f, "%*s op%i change %f%% of time\n", indent + 2, "", i,
1387 prob * 100.0 / REG_BR_PROB_BASE);
1389 if (!edge->inline_failed)
1391 fprintf (f, "%*sStack frame offset %i, callee self size %i,"
1392 " callee size %i\n",
1393 indent + 2, "",
1394 (int) inline_summaries->get (callee)->stack_frame_offset,
1395 (int) inline_summaries->get (callee)->estimated_self_stack_size,
1396 (int) inline_summaries->get (callee)->estimated_stack_size);
1397 dump_inline_edge_summary (f, indent + 2, callee, info);
1400 for (edge = node->indirect_calls; edge; edge = edge->next_callee)
1402 struct inline_edge_summary *es = inline_edge_summary (edge);
1403 fprintf (f, "%*sindirect call loop depth:%2i freq:%4i size:%2i"
1404 " time: %2i",
1405 indent, "",
1406 es->loop_depth,
1407 edge->frequency, es->call_stmt_size, es->call_stmt_time);
1408 if (es->predicate)
1410 fprintf (f, "predicate: ");
1411 dump_predicate (f, info->conds, es->predicate);
1413 else
1414 fprintf (f, "\n");
1419 void
1420 dump_inline_summary (FILE *f, struct cgraph_node *node)
1422 if (node->definition)
1424 struct inline_summary *s = inline_summaries->get (node);
1425 size_time_entry *e;
1426 int i;
1427 fprintf (f, "Inline summary for %s/%i", node->name (),
1428 node->order);
1429 if (DECL_DISREGARD_INLINE_LIMITS (node->decl))
1430 fprintf (f, " always_inline");
1431 if (s->inlinable)
1432 fprintf (f, " inlinable");
1433 if (s->contains_cilk_spawn)
1434 fprintf (f, " contains_cilk_spawn");
1435 if (s->fp_expressions)
1436 fprintf (f, " fp_expression");
1437 fprintf (f, "\n self time: %i\n", s->self_time);
1438 fprintf (f, " global time: %i\n", s->time);
1439 fprintf (f, " self size: %i\n", s->self_size);
1440 fprintf (f, " global size: %i\n", s->size);
1441 fprintf (f, " min size: %i\n", s->min_size);
1442 fprintf (f, " self stack: %i\n",
1443 (int) s->estimated_self_stack_size);
1444 fprintf (f, " global stack: %i\n", (int) s->estimated_stack_size);
1445 if (s->growth)
1446 fprintf (f, " estimated growth:%i\n", (int) s->growth);
1447 if (s->scc_no)
1448 fprintf (f, " In SCC: %i\n", (int) s->scc_no);
1449 for (i = 0; vec_safe_iterate (s->entry, i, &e); i++)
1451 fprintf (f, " size:%f, time:%f, predicate:",
1452 (double) e->size / INLINE_SIZE_SCALE,
1453 (double) e->time / INLINE_TIME_SCALE);
1454 dump_predicate (f, s->conds, &e->predicate);
1456 if (s->loop_iterations)
1458 fprintf (f, " loop iterations:");
1459 dump_predicate (f, s->conds, s->loop_iterations);
1461 if (s->loop_stride)
1463 fprintf (f, " loop stride:");
1464 dump_predicate (f, s->conds, s->loop_stride);
1466 if (s->array_index)
1468 fprintf (f, " array index:");
1469 dump_predicate (f, s->conds, s->array_index);
1471 fprintf (f, " calls:\n");
1472 dump_inline_edge_summary (f, 4, node, s);
1473 fprintf (f, "\n");
1477 DEBUG_FUNCTION void
1478 debug_inline_summary (struct cgraph_node *node)
1480 dump_inline_summary (stderr, node);
1483 void
1484 dump_inline_summaries (FILE *f)
1486 struct cgraph_node *node;
1488 FOR_EACH_DEFINED_FUNCTION (node)
1489 if (!node->global.inlined_to)
1490 dump_inline_summary (f, node);
1493 /* Give initial reasons why inlining would fail on EDGE. This gets either
1494 nullified or usually overwritten by more precise reasons later. */
1496 void
1497 initialize_inline_failed (struct cgraph_edge *e)
1499 struct cgraph_node *callee = e->callee;
1501 if (e->inline_failed && e->inline_failed != CIF_BODY_NOT_AVAILABLE
1502 && cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
1504 else if (e->indirect_unknown_callee)
1505 e->inline_failed = CIF_INDIRECT_UNKNOWN_CALL;
1506 else if (!callee->definition)
1507 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
1508 else if (callee->local.redefined_extern_inline)
1509 e->inline_failed = CIF_REDEFINED_EXTERN_INLINE;
1510 else if (cfun && fn_contains_cilk_spawn_p (cfun))
1511 /* We can't inline if the function is spawning a function. */
1512 e->inline_failed = CIF_CILK_SPAWN;
1513 else
1514 e->inline_failed = CIF_FUNCTION_NOT_CONSIDERED;
1515 gcc_checking_assert (!e->call_stmt_cannot_inline_p
1516 || cgraph_inline_failed_type (e->inline_failed)
1517 == CIF_FINAL_ERROR);
1520 /* Callback of walk_aliased_vdefs. Flags that it has been invoked to the
1521 boolean variable pointed to by DATA. */
1523 static bool
1524 mark_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef ATTRIBUTE_UNUSED,
1525 void *data)
1527 bool *b = (bool *) data;
1528 *b = true;
1529 return true;
1532 /* If OP refers to value of function parameter, return the corresponding
1533 parameter. If non-NULL, the size of the memory load (or the SSA_NAME of the
1534 PARM_DECL) will be stored to *SIZE_P in that case too. */
1536 static tree
1537 unmodified_parm_1 (gimple *stmt, tree op, HOST_WIDE_INT *size_p)
1539 /* SSA_NAME referring to parm default def? */
1540 if (TREE_CODE (op) == SSA_NAME
1541 && SSA_NAME_IS_DEFAULT_DEF (op)
1542 && TREE_CODE (SSA_NAME_VAR (op)) == PARM_DECL)
1544 if (size_p)
1545 *size_p = tree_to_shwi (TYPE_SIZE (TREE_TYPE (op)));
1546 return SSA_NAME_VAR (op);
1548 /* Non-SSA parm reference? */
1549 if (TREE_CODE (op) == PARM_DECL)
1551 bool modified = false;
1553 ao_ref refd;
1554 ao_ref_init (&refd, op);
1555 walk_aliased_vdefs (&refd, gimple_vuse (stmt), mark_modified, &modified,
1556 NULL);
1557 if (!modified)
1559 if (size_p)
1560 *size_p = tree_to_shwi (TYPE_SIZE (TREE_TYPE (op)));
1561 return op;
1564 return NULL_TREE;
1567 /* If OP refers to value of function parameter, return the corresponding
1568 parameter. Also traverse chains of SSA register assignments. If non-NULL,
1569 the size of the memory load (or the SSA_NAME of the PARM_DECL) will be
1570 stored to *SIZE_P in that case too. */
1572 static tree
1573 unmodified_parm (gimple *stmt, tree op, HOST_WIDE_INT *size_p)
1575 tree res = unmodified_parm_1 (stmt, op, size_p);
1576 if (res)
1577 return res;
1579 if (TREE_CODE (op) == SSA_NAME
1580 && !SSA_NAME_IS_DEFAULT_DEF (op)
1581 && gimple_assign_single_p (SSA_NAME_DEF_STMT (op)))
1582 return unmodified_parm (SSA_NAME_DEF_STMT (op),
1583 gimple_assign_rhs1 (SSA_NAME_DEF_STMT (op)),
1584 size_p);
1585 return NULL_TREE;
1588 /* If OP refers to a value of a function parameter or value loaded from an
1589 aggregate passed to a parameter (either by value or reference), return TRUE
1590 and store the number of the parameter to *INDEX_P, the access size into
1591 *SIZE_P, and information whether and how it has been loaded from an
1592 aggregate into *AGGPOS. INFO describes the function parameters, STMT is the
1593 statement in which OP is used or loaded. */
1595 static bool
1596 unmodified_parm_or_parm_agg_item (struct ipa_func_body_info *fbi,
1597 gimple *stmt, tree op, int *index_p,
1598 HOST_WIDE_INT *size_p,
1599 struct agg_position_info *aggpos)
1601 tree res = unmodified_parm_1 (stmt, op, size_p);
1603 gcc_checking_assert (aggpos);
1604 if (res)
1606 *index_p = ipa_get_param_decl_index (fbi->info, res);
1607 if (*index_p < 0)
1608 return false;
1609 aggpos->agg_contents = false;
1610 aggpos->by_ref = false;
1611 return true;
1614 if (TREE_CODE (op) == SSA_NAME)
1616 if (SSA_NAME_IS_DEFAULT_DEF (op)
1617 || !gimple_assign_single_p (SSA_NAME_DEF_STMT (op)))
1618 return false;
1619 stmt = SSA_NAME_DEF_STMT (op);
1620 op = gimple_assign_rhs1 (stmt);
1621 if (!REFERENCE_CLASS_P (op))
1622 return unmodified_parm_or_parm_agg_item (fbi, stmt, op, index_p, size_p,
1623 aggpos);
1626 aggpos->agg_contents = true;
1627 return ipa_load_from_parm_agg (fbi, fbi->info->descriptors,
1628 stmt, op, index_p, &aggpos->offset,
1629 size_p, &aggpos->by_ref);
1632 /* See if statement might disappear after inlining.
1633 0 - means not eliminated
1634 1 - half of the statements go away
1635 2 - for sure it is eliminated.
1636 We are not terribly sophisticated, basically looking for simple abstraction
1637 penalty wrappers. */
1639 static int
1640 eliminated_by_inlining_prob (gimple *stmt)
1642 enum gimple_code code = gimple_code (stmt);
1643 enum tree_code rhs_code;
1645 if (!optimize)
1646 return 0;
1648 switch (code)
1650 case GIMPLE_RETURN:
1651 return 2;
1652 case GIMPLE_ASSIGN:
1653 if (gimple_num_ops (stmt) != 2)
1654 return 0;
1656 rhs_code = gimple_assign_rhs_code (stmt);
1658 /* Casts of parameters, loads from parameters passed by reference
1659 and stores to return value or parameters are often free after
1660 inlining due to SRA and further combining.
1661 Assume that half of the statements go away. */
1662 if (CONVERT_EXPR_CODE_P (rhs_code)
1663 || rhs_code == VIEW_CONVERT_EXPR
1664 || rhs_code == ADDR_EXPR
1665 || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
1667 tree rhs = gimple_assign_rhs1 (stmt);
1668 tree lhs = gimple_assign_lhs (stmt);
1669 tree inner_rhs = get_base_address (rhs);
1670 tree inner_lhs = get_base_address (lhs);
1671 bool rhs_free = false;
1672 bool lhs_free = false;
1674 if (!inner_rhs)
1675 inner_rhs = rhs;
1676 if (!inner_lhs)
1677 inner_lhs = lhs;
1679 /* Reads of parameters are expected to be free. */
1680 if (unmodified_parm (stmt, inner_rhs, NULL))
1681 rhs_free = true;
1682 /* Match expressions of form &this->field. Those will most likely
1683 combine with something upstream after inlining. */
1684 else if (TREE_CODE (inner_rhs) == ADDR_EXPR)
1686 tree op = get_base_address (TREE_OPERAND (inner_rhs, 0));
1687 if (TREE_CODE (op) == PARM_DECL)
1688 rhs_free = true;
1689 else if (TREE_CODE (op) == MEM_REF
1690 && unmodified_parm (stmt, TREE_OPERAND (op, 0), NULL))
1691 rhs_free = true;
1694 /* When parameter is not SSA register because its address is taken
1695 and it is just copied into one, the statement will be completely
1696 free after inlining (we will copy propagate backward). */
1697 if (rhs_free && is_gimple_reg (lhs))
1698 return 2;
1700 /* Reads of parameters passed by reference
1701 are expected to be free (i.e. optimized out after inlining). */
1702 if (TREE_CODE (inner_rhs) == MEM_REF
1703 && unmodified_parm (stmt, TREE_OPERAND (inner_rhs, 0), NULL))
1704 rhs_free = true;
1706 /* Copying parameter passed by reference into gimple register is
1707 probably also going to copy propagate, but we can't be quite
1708 sure. */
1709 if (rhs_free && is_gimple_reg (lhs))
1710 lhs_free = true;
1712 /* Writes to parameters, parameters passed by value and return value
1713 (either directly or passed via invisible reference) are free.
1715 TODO: We ought to handle testcase like
1716 struct a {int a,b;};
1717 struct a
1718 returnstruct (void)
1720 struct a a ={1,2};
1721 return a;
1724 This translates into:
1726 returnstruct ()
1728 int a$b;
1729 int a$a;
1730 struct a a;
1731 struct a D.2739;
1733 <bb 2>:
1734 D.2739.a = 1;
1735 D.2739.b = 2;
1736 return D.2739;
1739 For that we would need to copy the ipa-split logic detecting writes
1740 to the return value. */
1741 if (TREE_CODE (inner_lhs) == PARM_DECL
1742 || TREE_CODE (inner_lhs) == RESULT_DECL
1743 || (TREE_CODE (inner_lhs) == MEM_REF
1744 && (unmodified_parm (stmt, TREE_OPERAND (inner_lhs, 0), NULL)
1745 || (TREE_CODE (TREE_OPERAND (inner_lhs, 0)) == SSA_NAME
1746 && SSA_NAME_VAR (TREE_OPERAND (inner_lhs, 0))
1747 && TREE_CODE (SSA_NAME_VAR (TREE_OPERAND
1748 (inner_lhs,
1749 0))) == RESULT_DECL))))
1750 lhs_free = true;
1751 if (lhs_free
1752 && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
1753 rhs_free = true;
1754 if (lhs_free && rhs_free)
1755 return 1;
1757 return 0;
1758 default:
1759 return 0;
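/* A typical case this is meant to catch is a trivial accessor such as

     int get_x (struct s *p) { return p->x; }

   (a made-up example): the load through the parameter passed by reference
   and the return statement are exactly the kind of statements scored here
   as likely to disappear once the body is inlined.  */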
1764 /* If BB ends by a conditional we can turn into predicates, attach corresponding
1765 predicates to the CFG edges. */
1767 static void
1768 set_cond_stmt_execution_predicate (struct ipa_func_body_info *fbi,
1769 struct inline_summary *summary,
1770 basic_block bb)
1772 gimple *last;
1773 tree op;
1774 int index;
1775 HOST_WIDE_INT size;
1776 struct agg_position_info aggpos;
1777 enum tree_code code, inverted_code;
1778 edge e;
1779 edge_iterator ei;
1780 gimple *set_stmt;
1781 tree op2;
1783 last = last_stmt (bb);
1784 if (!last || gimple_code (last) != GIMPLE_COND)
1785 return;
1786 if (!is_gimple_ip_invariant (gimple_cond_rhs (last)))
1787 return;
1788 op = gimple_cond_lhs (last);
1789 /* TODO: handle conditionals like
1790 var = op0 < 4;
1791 if (var != 0). */
1792 if (unmodified_parm_or_parm_agg_item (fbi, last, op, &index, &size, &aggpos))
1794 code = gimple_cond_code (last);
1795 inverted_code = invert_tree_comparison (code, HONOR_NANS (op));
1797 FOR_EACH_EDGE (e, ei, bb->succs)
1799 enum tree_code this_code = (e->flags & EDGE_TRUE_VALUE
1800 ? code : inverted_code);
1801 /* invert_tree_comparison will return ERROR_MARK on FP
1802 comparisons that are not EQ/NE instead of returning the proper
1803 unordered one. Be sure it is not confused with NON_CONSTANT. */
1804 if (this_code != ERROR_MARK)
1806 struct predicate p
1807 = add_condition (summary, index, size, &aggpos, this_code,
1808 unshare_expr_without_location
1809 (gimple_cond_rhs (last)));
1810 e->aux = edge_predicate_pool.allocate ();
1811 *(struct predicate *) e->aux = p;
1816 if (TREE_CODE (op) != SSA_NAME)
1817 return;
1818 /* Special case
1819 if (builtin_constant_p (op))
1820 constant_code
1821 else
1822 nonconstant_code.
1823 Here we can predicate nonconstant_code. We can't
1824 really handle constant_code since we have no predicate
1825 for this and also the constant code is not known to be
1826 optimized away when the inliner doesn't see the operand is constant.
1827 Other optimizers might think otherwise. */
1828 if (gimple_cond_code (last) != NE_EXPR
1829 || !integer_zerop (gimple_cond_rhs (last)))
1830 return;
1831 set_stmt = SSA_NAME_DEF_STMT (op);
1832 if (!gimple_call_builtin_p (set_stmt, BUILT_IN_CONSTANT_P)
1833 || gimple_call_num_args (set_stmt) != 1)
1834 return;
1835 op2 = gimple_call_arg (set_stmt, 0);
1836 if (!unmodified_parm_or_parm_agg_item (fbi, set_stmt, op2, &index, &size,
1837 &aggpos))
1838 return;
1839 FOR_EACH_EDGE (e, ei, bb->succs) if (e->flags & EDGE_FALSE_VALUE)
1841 struct predicate p = add_condition (summary, index, size, &aggpos,
1842 IS_NOT_CONSTANT, NULL_TREE);
1843 e->aux = edge_predicate_pool.allocate ();
1844 *(struct predicate *) e->aux = p;
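/* For instance (a made-up conditional): given

     if (param0 < 4)

   where param0 is an unmodified parameter, the edge taken when the
   condition is true receives the predicate "op0 < 4" and the false edge the
   inverted predicate "op0 >= 4".  For the builtin_constant_p idiom handled
   above, only the false edge gets a predicate ("op0 not constant").  */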
1849 /* If BB ends by a switch we can turn into predicates, attach corresponding
1850 predicates to the CFG edges. */
1852 static void
1853 set_switch_stmt_execution_predicate (struct ipa_func_body_info *fbi,
1854 struct inline_summary *summary,
1855 basic_block bb)
1857 gimple *lastg;
1858 tree op;
1859 int index;
1860 HOST_WIDE_INT size;
1861 struct agg_position_info aggpos;
1862 edge e;
1863 edge_iterator ei;
1864 size_t n;
1865 size_t case_idx;
1867 lastg = last_stmt (bb);
1868 if (!lastg || gimple_code (lastg) != GIMPLE_SWITCH)
1869 return;
1870 gswitch *last = as_a <gswitch *> (lastg);
1871 op = gimple_switch_index (last);
1872 if (!unmodified_parm_or_parm_agg_item (fbi, last, op, &index, &size, &aggpos))
1873 return;
1875 FOR_EACH_EDGE (e, ei, bb->succs)
1877 e->aux = edge_predicate_pool.allocate ();
1878 *(struct predicate *) e->aux = false_predicate ();
1880 n = gimple_switch_num_labels (last);
1881 for (case_idx = 0; case_idx < n; ++case_idx)
1883 tree cl = gimple_switch_label (last, case_idx);
1884 tree min, max;
1885 struct predicate p;
1887 e = find_edge (bb, label_to_block (CASE_LABEL (cl)));
1888 min = CASE_LOW (cl);
1889 max = CASE_HIGH (cl);
1891 /* For default we might want to construct predicate that none
1892 of the cases is met, but it is a bit hard to do without having negations
1893 of conditionals handy. */
1894 if (!min && !max)
1895 p = true_predicate ();
1896 else if (!max)
1897 p = add_condition (summary, index, size, &aggpos, EQ_EXPR,
1898 unshare_expr_without_location (min));
1899 else
1901 struct predicate p1, p2;
1902 p1 = add_condition (summary, index, size, &aggpos, GE_EXPR,
1903 unshare_expr_without_location (min));
1904 p2 = add_condition (summary, index, size, &aggpos, LE_EXPR,
1905 unshare_expr_without_location (max));
1906 p = and_predicates (summary->conds, &p1, &p2);
1908 *(struct predicate *) e->aux
1909 = or_predicates (summary->conds, &p, (struct predicate *) e->aux);
1914 /* For each BB in NODE attach to its AUX pointer predicate under
1915 which it is executable. */
1917 static void
1918 compute_bb_predicates (struct ipa_func_body_info *fbi,
1919 struct cgraph_node *node,
1920 struct inline_summary *summary)
1922 struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
1923 bool done = false;
1924 basic_block bb;
1926 FOR_EACH_BB_FN (bb, my_function)
1928 set_cond_stmt_execution_predicate (fbi, summary, bb);
1929 set_switch_stmt_execution_predicate (fbi, summary, bb);
1932 /* Entry block is always executable. */
1933 ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
1934 = edge_predicate_pool.allocate ();
1935 *(struct predicate *) ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
1936 = true_predicate ();
1938 /* A simple dataflow propagation of predicates forward in the CFG.
1939 TODO: work in reverse postorder. */
1940 while (!done)
1942 done = true;
1943 FOR_EACH_BB_FN (bb, my_function)
1945 struct predicate p = false_predicate ();
1946 edge e;
1947 edge_iterator ei;
1948 FOR_EACH_EDGE (e, ei, bb->preds)
1950 if (e->src->aux)
1952 struct predicate this_bb_predicate
1953 = *(struct predicate *) e->src->aux;
1954 if (e->aux)
1955 this_bb_predicate
1956 = and_predicates (summary->conds, &this_bb_predicate,
1957 (struct predicate *) e->aux);
1958 p = or_predicates (summary->conds, &p, &this_bb_predicate);
1959 if (true_predicate_p (&p))
1960 break;
1963 if (false_predicate_p (&p))
1964 gcc_assert (!bb->aux);
1965 else
1967 if (!bb->aux)
1969 done = false;
1970 bb->aux = edge_predicate_pool.allocate ();
1971 *((struct predicate *) bb->aux) = p;
1973 else if (!predicates_equal_p (&p, (struct predicate *) bb->aux))
1975 /* This OR operation is needed to ensure monotonic data flow
1976 in the case we hit the limit on number of clauses and the
1977 and/or operations above give approximate answers. */
1978 p = or_predicates (summary->conds, &p, (struct predicate *)bb->aux);
1979 if (!predicates_equal_p (&p, (struct predicate *) bb->aux))
1981 done = false;
1982 *((struct predicate *) bb->aux) = p;
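/* Illustrative sketch (not part of GCC): the propagation above is an ordinary
   forward dataflow fixpoint.  With plain booleans instead of predicates it
   looks like the helper below: a block is considered executable when some
   predecessor is executable through an edge that may be taken.  The arrays,
   the EXAMPLE_MAX_PREDS limit and the function name are hypothetical, not GCC
   data structures.  */

#define EXAMPLE_MAX_PREDS 8

static void
example_propagate_reachability (int n_blocks,
                                const int pred[][EXAMPLE_MAX_PREDS],
                                const int n_preds[],
                                const int edge_may_be_taken[][EXAMPLE_MAX_PREDS],
                                int reachable[])
{
  int changed = 1;

  reachable[0] = 1;  /* The entry block is always executable.  */
  while (changed)
    {
      changed = 0;
      for (int bb = 1; bb < n_blocks; bb++)
        {
          int r = 0;
          /* OR over incoming edges of (predecessor reachable AND edge taken).  */
          for (int i = 0; i < n_preds[bb]; i++)
            if (reachable[pred[bb][i]] && edge_may_be_taken[bb][i])
              r = 1;
          if (r && !reachable[bb])
            {
              reachable[bb] = 1;
              changed = 1;
            }
        }
    }
}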
1991 /* We keep info about constantness of SSA names. */
1993 typedef struct predicate predicate_t;
1994 /* Return a predicate specifying when the result of expression EXPR might not
1995 be a compile time constant. */
1997 static struct predicate
1998 will_be_nonconstant_expr_predicate (struct ipa_node_params *info,
1999 struct inline_summary *summary,
2000 tree expr,
2001 vec<predicate_t> nonconstant_names)
2003 tree parm;
2004 int index;
2005 HOST_WIDE_INT size;
2007 while (UNARY_CLASS_P (expr))
2008 expr = TREE_OPERAND (expr, 0);
2010 parm = unmodified_parm (NULL, expr, &size);
2011 if (parm && (index = ipa_get_param_decl_index (info, parm)) >= 0)
2012 return add_condition (summary, index, size, NULL, CHANGED, NULL_TREE);
2013 if (is_gimple_min_invariant (expr))
2014 return false_predicate ();
2015 if (TREE_CODE (expr) == SSA_NAME)
2016 return nonconstant_names[SSA_NAME_VERSION (expr)];
2017 if (BINARY_CLASS_P (expr) || COMPARISON_CLASS_P (expr))
2019 struct predicate p1 = will_be_nonconstant_expr_predicate
2020 (info, summary, TREE_OPERAND (expr, 0),
2021 nonconstant_names);
2022 struct predicate p2;
2023 if (true_predicate_p (&p1))
2024 return p1;
2025 p2 = will_be_nonconstant_expr_predicate (info, summary,
2026 TREE_OPERAND (expr, 1),
2027 nonconstant_names);
2028 return or_predicates (summary->conds, &p1, &p2);
2030 else if (TREE_CODE (expr) == COND_EXPR)
2032 struct predicate p1 = will_be_nonconstant_expr_predicate
2033 (info, summary, TREE_OPERAND (expr, 0),
2034 nonconstant_names);
2035 struct predicate p2;
2036 if (true_predicate_p (&p1))
2037 return p1;
2038 p2 = will_be_nonconstant_expr_predicate (info, summary,
2039 TREE_OPERAND (expr, 1),
2040 nonconstant_names);
2041 if (true_predicate_p (&p2))
2042 return p2;
2043 p1 = or_predicates (summary->conds, &p1, &p2);
2044 p2 = will_be_nonconstant_expr_predicate (info, summary,
2045 TREE_OPERAND (expr, 2),
2046 nonconstant_names);
2047 return or_predicates (summary->conds, &p1, &p2);
2049 else
2051 debug_tree (expr);
2052 gcc_unreachable ();
2054 return false_predicate ();
2058 /* Return a predicate specifying when STMT might have a result that is not
2059 a compile time constant. */
2061 static struct predicate
2062 will_be_nonconstant_predicate (struct ipa_func_body_info *fbi,
2063 struct inline_summary *summary,
2064 gimple *stmt,
2065 vec<predicate_t> nonconstant_names)
2067 struct predicate p = true_predicate ();
2068 ssa_op_iter iter;
2069 tree use;
2070 struct predicate op_non_const;
2071 bool is_load;
2072 int base_index;
2073 HOST_WIDE_INT size;
2074 struct agg_position_info aggpos;
2076 /* What statements might be optimized away
2077 when their arguments are constant. */
2078 if (gimple_code (stmt) != GIMPLE_ASSIGN
2079 && gimple_code (stmt) != GIMPLE_COND
2080 && gimple_code (stmt) != GIMPLE_SWITCH
2081 && (gimple_code (stmt) != GIMPLE_CALL
2082 || !(gimple_call_flags (stmt) & ECF_CONST)))
2083 return p;
2085 /* Stores will stay anyway. */
2086 if (gimple_store_p (stmt))
2087 return p;
2089 is_load = gimple_assign_load_p (stmt);
2091 /* Loads can be optimized when the value is known. */
2092 if (is_load)
2094 tree op;
2095 gcc_assert (gimple_assign_single_p (stmt));
2096 op = gimple_assign_rhs1 (stmt);
2097 if (!unmodified_parm_or_parm_agg_item (fbi, stmt, op, &base_index, &size,
2098 &aggpos))
2099 return p;
2101 else
2102 base_index = -1;
2104 /* See if we understand all operands before we start
2105 adding conditionals. */
2106 FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
2108 tree parm = unmodified_parm (stmt, use, NULL);
2109 /* For arguments we can build a condition. */
2110 if (parm && ipa_get_param_decl_index (fbi->info, parm) >= 0)
2111 continue;
2112 if (TREE_CODE (use) != SSA_NAME)
2113 return p;
2114 /* If we know when the operand is constant,
2115 we can still say something useful. */
2116 if (!true_predicate_p (&nonconstant_names[SSA_NAME_VERSION (use)]))
2117 continue;
2118 return p;
2121 if (is_load)
2122 op_non_const =
2123 add_condition (summary, base_index, size, &aggpos, CHANGED, NULL);
2124 else
2125 op_non_const = false_predicate ();
2126 FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
2128 HOST_WIDE_INT size;
2129 tree parm = unmodified_parm (stmt, use, &size);
2130 int index;
2132 if (parm && (index = ipa_get_param_decl_index (fbi->info, parm)) >= 0)
2134 if (index != base_index)
2135 p = add_condition (summary, index, size, NULL, CHANGED, NULL_TREE);
2136 else
2137 continue;
2139 else
2140 p = nonconstant_names[SSA_NAME_VERSION (use)];
2141 op_non_const = or_predicates (summary->conds, &p, &op_non_const);
2143 if ((gimple_code (stmt) == GIMPLE_ASSIGN || gimple_code (stmt) == GIMPLE_CALL)
2144 && gimple_op (stmt, 0)
2145 && TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
2146 nonconstant_names[SSA_NAME_VERSION (gimple_op (stmt, 0))]
2147 = op_non_const;
2148 return op_non_const;
2151 struct record_modified_bb_info
2153 bitmap bb_set;
2154 gimple *stmt;
2157 /* Callback of walk_aliased_vdefs. Records basic blocks where the value may be
2158 set except for info->stmt. */
2160 static bool
2161 record_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef, void *data)
2163 struct record_modified_bb_info *info =
2164 (struct record_modified_bb_info *) data;
2165 if (SSA_NAME_DEF_STMT (vdef) == info->stmt)
2166 return false;
2167 bitmap_set_bit (info->bb_set,
2168 SSA_NAME_IS_DEFAULT_DEF (vdef)
2169 ? ENTRY_BLOCK_PTR_FOR_FN (cfun)->index
2170 : gimple_bb (SSA_NAME_DEF_STMT (vdef))->index);
2171 return false;
2174 /* Return the probability (based on REG_BR_PROB_BASE) that the I-th parameter
2175 of STMT will have changed since the last invocation of STMT.
2177 Value 0 is reserved for compile time invariants.
2178 For common parameters it is REG_BR_PROB_BASE. For loop invariants it
2179 ought to be REG_BR_PROB_BASE / estimated_iters. */
2181 static int
2182 param_change_prob (gimple *stmt, int i)
2184 tree op = gimple_call_arg (stmt, i);
2185 basic_block bb = gimple_bb (stmt);
2187 if (TREE_CODE (op) == WITH_SIZE_EXPR)
2188 op = TREE_OPERAND (op, 0);
2190 tree base = get_base_address (op);
2192 /* Global invariants never change. */
2193 if (is_gimple_min_invariant (base))
2194 return 0;
2196 /* We would have to do non-trivial analysis to really work out what
2197 the probability of the value changing is (i.e. when the init statement
2198 is in a sibling loop of the call).
2200 We make a conservative estimate: when the call is executed N times more often
2201 than the statement defining the value, we take the frequency 1/N. */
2202 if (TREE_CODE (base) == SSA_NAME)
2204 int init_freq;
2206 if (!bb->frequency)
2207 return REG_BR_PROB_BASE;
2209 if (SSA_NAME_IS_DEFAULT_DEF (base))
2210 init_freq = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
2211 else
2212 init_freq = gimple_bb (SSA_NAME_DEF_STMT (base))->frequency;
2214 if (!init_freq)
2215 init_freq = 1;
2216 if (init_freq < bb->frequency)
2217 return MAX (GCOV_COMPUTE_SCALE (init_freq, bb->frequency), 1);
2218 else
2219 return REG_BR_PROB_BASE;
2221 else
2223 ao_ref refd;
2224 int max;
2225 struct record_modified_bb_info info;
2226 bitmap_iterator bi;
2227 unsigned index;
2228 tree init = ctor_for_folding (base);
2230 if (init != error_mark_node)
2231 return 0;
2232 if (!bb->frequency)
2233 return REG_BR_PROB_BASE;
2234 ao_ref_init (&refd, op);
2235 info.stmt = stmt;
2236 info.bb_set = BITMAP_ALLOC (NULL);
2237 walk_aliased_vdefs (&refd, gimple_vuse (stmt), record_modified, &info,
2238 NULL);
2239 if (bitmap_bit_p (info.bb_set, bb->index))
2241 BITMAP_FREE (info.bb_set);
2242 return REG_BR_PROB_BASE;
2245 /* Assume that all memory is initialized at entry.
2246 TODO: Can we easily determine if the value is always defined
2247 and thus skip the entry block? */
2248 if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency)
2249 max = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
2250 else
2251 max = 1;
2253 EXECUTE_IF_SET_IN_BITMAP (info.bb_set, 0, index, bi)
2254 max = MIN (max, BASIC_BLOCK_FOR_FN (cfun, index)->frequency);
2256 BITMAP_FREE (info.bb_set);
2257 if (max < bb->frequency)
2258 return MAX (GCOV_COMPUTE_SCALE (max, bb->frequency), 1);
2259 else
2260 return REG_BR_PROB_BASE;
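/* Illustrative sketch (not part of GCC): the estimate above reduces to
   scaling a probability base by init_freq / call_freq and clamping the
   result to at least 1.  The constant and the helper name are simplified
   stand-ins for REG_BR_PROB_BASE and GCOV_COMPUTE_SCALE, not the real
   macros.  */

static int
example_param_change_prob (int init_freq, int call_freq)
{
  const int prob_base = 10000;  /* Stand-in for REG_BR_PROB_BASE.  */

  /* An unknown call frequency gives no information; assume the worst.  */
  if (call_freq == 0)
    return prob_base;
  if (init_freq == 0)
    init_freq = 1;

  /* When the value is defined less often than the call executes, scale the
     probability accordingly, rounding to nearest and never returning 0.  */
  if (init_freq < call_freq)
    {
      long long scaled = ((long long) init_freq * prob_base + call_freq / 2)
                         / call_freq;
      return scaled < 1 ? 1 : (int) scaled;
    }
  return prob_base;
}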
2264 /* Find whether a basic block BB is the final block of a (half) diamond CFG
2265 sub-graph and if the predicate the condition depends on is known. If so,
2266 return true and store the predicate in *P. */
2268 static bool
2269 phi_result_unknown_predicate (struct ipa_node_params *info,
2270 inline_summary *summary, basic_block bb,
2271 struct predicate *p,
2272 vec<predicate_t> nonconstant_names)
2274 edge e;
2275 edge_iterator ei;
2276 basic_block first_bb = NULL;
2277 gimple *stmt;
2279 if (single_pred_p (bb))
2281 *p = false_predicate ();
2282 return true;
2285 FOR_EACH_EDGE (e, ei, bb->preds)
2287 if (single_succ_p (e->src))
2289 if (!single_pred_p (e->src))
2290 return false;
2291 if (!first_bb)
2292 first_bb = single_pred (e->src);
2293 else if (single_pred (e->src) != first_bb)
2294 return false;
2296 else
2298 if (!first_bb)
2299 first_bb = e->src;
2300 else if (e->src != first_bb)
2301 return false;
2305 if (!first_bb)
2306 return false;
2308 stmt = last_stmt (first_bb);
2309 if (!stmt
2310 || gimple_code (stmt) != GIMPLE_COND
2311 || !is_gimple_ip_invariant (gimple_cond_rhs (stmt)))
2312 return false;
2314 *p = will_be_nonconstant_expr_predicate (info, summary,
2315 gimple_cond_lhs (stmt),
2316 nonconstant_names);
2317 if (true_predicate_p (p))
2318 return false;
2319 else
2320 return true;
2323 /* Given a PHI statement in a function described by inline properties SUMMARY
2324 and *P being the predicate describing whether the selected PHI argument is
2325 known, store a predicate for the result of the PHI statement into
2326 NONCONSTANT_NAMES, if possible. */
2328 static void
2329 predicate_for_phi_result (struct inline_summary *summary, gphi *phi,
2330 struct predicate *p,
2331 vec<predicate_t> nonconstant_names)
2333 unsigned i;
2335 for (i = 0; i < gimple_phi_num_args (phi); i++)
2337 tree arg = gimple_phi_arg (phi, i)->def;
2338 if (!is_gimple_min_invariant (arg))
2340 gcc_assert (TREE_CODE (arg) == SSA_NAME);
2341 *p = or_predicates (summary->conds, p,
2342 &nonconstant_names[SSA_NAME_VERSION (arg)]);
2343 if (true_predicate_p (p))
2344 return;
2348 if (dump_file && (dump_flags & TDF_DETAILS))
2350 fprintf (dump_file, "\t\tphi predicate: ");
2351 dump_predicate (dump_file, summary->conds, p);
2353 nonconstant_names[SSA_NAME_VERSION (gimple_phi_result (phi))] = *p;
2356 /* Return predicate specifying when array index in access OP becomes non-constant. */
2358 static struct predicate
2359 array_index_predicate (inline_summary *info,
2360 vec< predicate_t> nonconstant_names, tree op)
2362 struct predicate p = false_predicate ();
2363 while (handled_component_p (op))
2365 if (TREE_CODE (op) == ARRAY_REF || TREE_CODE (op) == ARRAY_RANGE_REF)
2367 if (TREE_CODE (TREE_OPERAND (op, 1)) == SSA_NAME)
2368 p = or_predicates (info->conds, &p,
2369 &nonconstant_names[SSA_NAME_VERSION
2370 (TREE_OPERAND (op, 1))]);
2372 op = TREE_OPERAND (op, 0);
2374 return p;
2377 /* For a typical usage of __builtin_expect (a<b, 1), we
2378 may introduce an extra relation stmt:
2379 With the builtin, we have
2380 t1 = a <= b;
2381 t2 = (long int) t1;
2382 t3 = __builtin_expect (t2, 1);
2383 if (t3 != 0)
2384 goto ...
2385 Without the builtin, we have
2386 if (a<=b)
2387 goto...
2388 This affects the size/time estimation and may have
2389 an impact on early inlining.
2390 We look for this pattern here and fix it up later. */
2392 static gimple *
2393 find_foldable_builtin_expect (basic_block bb)
2395 gimple_stmt_iterator bsi;
2397 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
2399 gimple *stmt = gsi_stmt (bsi);
2400 if (gimple_call_builtin_p (stmt, BUILT_IN_EXPECT)
2401 || (is_gimple_call (stmt)
2402 && gimple_call_internal_p (stmt)
2403 && gimple_call_internal_fn (stmt) == IFN_BUILTIN_EXPECT))
2405 tree var = gimple_call_lhs (stmt);
2406 tree arg = gimple_call_arg (stmt, 0);
2407 use_operand_p use_p;
2408 gimple *use_stmt;
2409 bool match = false;
2410 bool done = false;
2412 if (!var || !arg)
2413 continue;
2414 gcc_assert (TREE_CODE (var) == SSA_NAME);
2416 while (TREE_CODE (arg) == SSA_NAME)
2418 gimple *stmt_tmp = SSA_NAME_DEF_STMT (arg);
2419 if (!is_gimple_assign (stmt_tmp))
2420 break;
2421 switch (gimple_assign_rhs_code (stmt_tmp))
2423 case LT_EXPR:
2424 case LE_EXPR:
2425 case GT_EXPR:
2426 case GE_EXPR:
2427 case EQ_EXPR:
2428 case NE_EXPR:
2429 match = true;
2430 done = true;
2431 break;
2432 CASE_CONVERT:
2433 break;
2434 default:
2435 done = true;
2436 break;
2438 if (done)
2439 break;
2440 arg = gimple_assign_rhs1 (stmt_tmp);
2443 if (match && single_imm_use (var, &use_p, &use_stmt)
2444 && gimple_code (use_stmt) == GIMPLE_COND)
2445 return use_stmt;
2448 return NULL;
2451 /* Return true when the basic block contains only clobbers followed by RESX.
2452 Such BBs are kept around to make removal of dead stores possible in the
2453 presence of EH and will be optimized out by optimize_clobbers later in the
2454 game.
2456 NEED_EH is used to recurse in case the clobber has non-EH predecessors
2457 that can be clobber-only, too. When it is false, the RESX is not necessary
2458 at the end of the basic block. */
2460 static bool
2461 clobber_only_eh_bb_p (basic_block bb, bool need_eh = true)
2463 gimple_stmt_iterator gsi = gsi_last_bb (bb);
2464 edge_iterator ei;
2465 edge e;
2467 if (need_eh)
2469 if (gsi_end_p (gsi))
2470 return false;
2471 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_RESX)
2472 return false;
2473 gsi_prev (&gsi);
2475 else if (!single_succ_p (bb))
2476 return false;
2478 for (; !gsi_end_p (gsi); gsi_prev (&gsi))
2480 gimple *stmt = gsi_stmt (gsi);
2481 if (is_gimple_debug (stmt))
2482 continue;
2483 if (gimple_clobber_p (stmt))
2484 continue;
2485 if (gimple_code (stmt) == GIMPLE_LABEL)
2486 break;
2487 return false;
2490 /* See if all predecessors are either throws or clobber-only BBs. */
2491 FOR_EACH_EDGE (e, ei, bb->preds)
2492 if (!(e->flags & EDGE_EH)
2493 && !clobber_only_eh_bb_p (e->src, false))
2494 return false;
2496 return true;
2499 /* Return true if STMT computes a floating point expression that may be
2500 affected by -ffast-math and similar flags. */
2502 static bool
2503 fp_expression_p (gimple *stmt)
2505 ssa_op_iter i;
2506 tree op;
2508 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF|SSA_OP_USE)
2509 if (FLOAT_TYPE_P (TREE_TYPE (op)))
2510 return true;
2511 return false;
2514 /* Compute function body size parameters for NODE.
2515 When EARLY is true, we compute only simple summaries without
2516 non-trivial predicates to drive the early inliner. */
2518 static void
2519 estimate_function_body_sizes (struct cgraph_node *node, bool early)
2521 gcov_type time = 0;
2522 /* Estimate static overhead for function prologue/epilogue and alignment. */
2523 int size = 2;
2524 /* Benefits are scaled by probability of elimination that is in range
2525 <0,2>. */
2526 basic_block bb;
2527 struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
2528 int freq;
2529 struct inline_summary *info = inline_summaries->get (node);
2530 struct predicate bb_predicate;
2531 struct ipa_func_body_info fbi;
2532 vec<predicate_t> nonconstant_names = vNULL;
2533 int nblocks, n;
2534 int *order;
2535 predicate array_index = true_predicate ();
2536 gimple *fix_builtin_expect_stmt;
2538 gcc_assert (my_function && my_function->cfg);
2539 gcc_assert (cfun == my_function);
2541 memset(&fbi, 0, sizeof(fbi));
2542 info->conds = NULL;
2543 info->entry = NULL;
2545 /* When optimizing and analyzing for IPA inliner, initialize loop optimizer
2546 so we can produce proper inline hints.
2548 When optimizing and analyzing for early inliner, initialize node params
2549 so we can produce correct BB predicates. */
2551 if (opt_for_fn (node->decl, optimize))
2553 calculate_dominance_info (CDI_DOMINATORS);
2554 if (!early)
2555 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
2556 else
2558 ipa_check_create_node_params ();
2559 ipa_initialize_node_params (node);
2562 if (ipa_node_params_sum)
2564 fbi.node = node;
2565 fbi.info = IPA_NODE_REF (node);
2566 fbi.bb_infos = vNULL;
2567 fbi.bb_infos.safe_grow_cleared (last_basic_block_for_fn (cfun));
2568 fbi.param_count = count_formal_params(node->decl);
2569 nonconstant_names.safe_grow_cleared
2570 (SSANAMES (my_function)->length ());
2574 if (dump_file)
2575 fprintf (dump_file, "\nAnalyzing function body size: %s\n",
2576 node->name ());
2578 /* When we run into the maximal number of entries, we assign everything to the
2579 constant truth case. Be sure to have it in the list. */
2580 bb_predicate = true_predicate ();
2581 account_size_time (info, 0, 0, &bb_predicate);
2583 bb_predicate = not_inlined_predicate ();
2584 account_size_time (info, 2 * INLINE_SIZE_SCALE, 0, &bb_predicate);
2586 if (fbi.info)
2587 compute_bb_predicates (&fbi, node, info);
2588 order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
2589 nblocks = pre_and_rev_post_order_compute (NULL, order, false);
2590 for (n = 0; n < nblocks; n++)
2592 bb = BASIC_BLOCK_FOR_FN (cfun, order[n]);
2593 freq = compute_call_stmt_bb_frequency (node->decl, bb);
2594 if (clobber_only_eh_bb_p (bb))
2596 if (dump_file && (dump_flags & TDF_DETAILS))
2597 fprintf (dump_file, "\n Ignoring BB %i;"
2598 " it will be optimized away by cleanup_clobbers\n",
2599 bb->index);
2600 continue;
2603 /* TODO: Obviously predicates can be propagated down across CFG. */
2604 if (fbi.info)
2606 if (bb->aux)
2607 bb_predicate = *(struct predicate *) bb->aux;
2608 else
2609 bb_predicate = false_predicate ();
2611 else
2612 bb_predicate = true_predicate ();
2614 if (dump_file && (dump_flags & TDF_DETAILS))
2616 fprintf (dump_file, "\n BB %i predicate:", bb->index);
2617 dump_predicate (dump_file, info->conds, &bb_predicate);
2620 if (fbi.info && nonconstant_names.exists ())
2622 struct predicate phi_predicate;
2623 bool first_phi = true;
2625 for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi);
2626 gsi_next (&bsi))
2628 if (first_phi
2629 && !phi_result_unknown_predicate (fbi.info, info, bb,
2630 &phi_predicate,
2631 nonconstant_names))
2632 break;
2633 first_phi = false;
2634 if (dump_file && (dump_flags & TDF_DETAILS))
2636 fprintf (dump_file, " ");
2637 print_gimple_stmt (dump_file, gsi_stmt (bsi), 0, 0);
2639 predicate_for_phi_result (info, bsi.phi (), &phi_predicate,
2640 nonconstant_names);
2644 fix_builtin_expect_stmt = find_foldable_builtin_expect (bb);
2646 for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi);
2647 gsi_next (&bsi))
2649 gimple *stmt = gsi_stmt (bsi);
2650 int this_size = estimate_num_insns (stmt, &eni_size_weights);
2651 int this_time = estimate_num_insns (stmt, &eni_time_weights);
2652 int prob;
2653 struct predicate will_be_nonconstant;
2655 /* This relation stmt should be folded after we remove
2656 the builtin_expect call. Adjust the cost here. */
2657 if (stmt == fix_builtin_expect_stmt)
2659 this_size--;
2660 this_time--;
2663 if (dump_file && (dump_flags & TDF_DETAILS))
2665 fprintf (dump_file, " ");
2666 print_gimple_stmt (dump_file, stmt, 0, 0);
2667 fprintf (dump_file, "\t\tfreq:%3.2f size:%3i time:%3i\n",
2668 ((double) freq) / CGRAPH_FREQ_BASE, this_size,
2669 this_time);
2672 if (gimple_assign_load_p (stmt) && nonconstant_names.exists ())
2674 struct predicate this_array_index;
2675 this_array_index =
2676 array_index_predicate (info, nonconstant_names,
2677 gimple_assign_rhs1 (stmt));
2678 if (!false_predicate_p (&this_array_index))
2679 array_index =
2680 and_predicates (info->conds, &array_index,
2681 &this_array_index);
2683 if (gimple_store_p (stmt) && nonconstant_names.exists ())
2685 struct predicate this_array_index;
2686 this_array_index =
2687 array_index_predicate (info, nonconstant_names,
2688 gimple_get_lhs (stmt));
2689 if (!false_predicate_p (&this_array_index))
2690 array_index =
2691 and_predicates (info->conds, &array_index,
2692 &this_array_index);
2696 if (is_gimple_call (stmt)
2697 && !gimple_call_internal_p (stmt))
2699 struct cgraph_edge *edge = node->get_edge (stmt);
2700 struct inline_edge_summary *es = inline_edge_summary (edge);
2702 /* Special case: results of BUILT_IN_CONSTANT_P will always be
2703 resolved as constant. We don't, however, want to optimize
2704 out the cgraph edges. */
2705 if (nonconstant_names.exists ()
2706 && gimple_call_builtin_p (stmt, BUILT_IN_CONSTANT_P)
2707 && gimple_call_lhs (stmt)
2708 && TREE_CODE (gimple_call_lhs (stmt)) == SSA_NAME)
2710 struct predicate false_p = false_predicate ();
2711 nonconstant_names[SSA_NAME_VERSION (gimple_call_lhs (stmt))]
2712 = false_p;
2714 if (ipa_node_params_sum)
2716 int count = gimple_call_num_args (stmt);
2717 int i;
2719 if (count)
2720 es->param.safe_grow_cleared (count);
2721 for (i = 0; i < count; i++)
2723 int prob = param_change_prob (stmt, i);
2724 gcc_assert (prob >= 0 && prob <= REG_BR_PROB_BASE);
2725 es->param[i].change_prob = prob;
2729 es->call_stmt_size = this_size;
2730 es->call_stmt_time = this_time;
2731 es->loop_depth = bb_loop_depth (bb);
2732 edge_set_predicate (edge, &bb_predicate);
2735 /* TODO: When a conditional jump or switch is known to be constant, but
2736 we did not translate it into the predicates, we really can account
2737 for just the maximum of the possible paths. */
2738 if (fbi.info)
2739 will_be_nonconstant
2740 = will_be_nonconstant_predicate (&fbi, info,
2741 stmt, nonconstant_names);
2742 if (this_time || this_size)
2744 struct predicate p;
2746 this_time *= freq;
2748 prob = eliminated_by_inlining_prob (stmt);
2749 if (prob == 1 && dump_file && (dump_flags & TDF_DETAILS))
2750 fprintf (dump_file,
2751 "\t\t50%% will be eliminated by inlining\n");
2752 if (prob == 2 && dump_file && (dump_flags & TDF_DETAILS))
2753 fprintf (dump_file, "\t\tWill be eliminated by inlining\n");
2755 if (fbi.info)
2756 p = and_predicates (info->conds, &bb_predicate,
2757 &will_be_nonconstant);
2758 else
2759 p = true_predicate ();
2761 if (!false_predicate_p (&p)
2762 || (is_gimple_call (stmt)
2763 && !false_predicate_p (&bb_predicate)))
2765 time += this_time;
2766 size += this_size;
2767 if (time > MAX_TIME * INLINE_TIME_SCALE)
2768 time = MAX_TIME * INLINE_TIME_SCALE;
2771 /* We account everything but the calls. Calls have their own
2772 size/time info attached to cgraph edges. This is necessary
2773 in order to make the cost disappear after inlining. */
2774 if (!is_gimple_call (stmt))
2776 if (prob)
2778 struct predicate ip = not_inlined_predicate ();
2779 ip = and_predicates (info->conds, &ip, &p);
2780 account_size_time (info, this_size * prob,
2781 this_time * prob, &ip);
2783 if (prob != 2)
2784 account_size_time (info, this_size * (2 - prob),
2785 this_time * (2 - prob), &p);
2788 if (!info->fp_expressions && fp_expression_p (stmt))
2790 info->fp_expressions = true;
2791 if (dump_file)
2792 fprintf (dump_file, " fp_expression set\n");
2795 gcc_assert (time >= 0);
2796 gcc_assert (size >= 0);
2800 set_hint_predicate (&inline_summaries->get (node)->array_index, array_index);
2801 time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
2802 if (time > MAX_TIME)
2803 time = MAX_TIME;
2804 free (order);
2806 if (nonconstant_names.exists () && !early)
2808 struct loop *loop;
2809 predicate loop_iterations = true_predicate ();
2810 predicate loop_stride = true_predicate ();
2812 if (dump_file && (dump_flags & TDF_DETAILS))
2813 flow_loops_dump (dump_file, NULL, 0);
2814 scev_initialize ();
2815 FOR_EACH_LOOP (loop, 0)
2817 vec<edge> exits;
2818 edge ex;
2819 unsigned int j;
2820 struct tree_niter_desc niter_desc;
2821 bb_predicate = *(struct predicate *) loop->header->aux;
2823 exits = get_loop_exit_edges (loop);
2824 FOR_EACH_VEC_ELT (exits, j, ex)
2825 if (number_of_iterations_exit (loop, ex, &niter_desc, false)
2826 && !is_gimple_min_invariant (niter_desc.niter))
2828 predicate will_be_nonconstant
2829 = will_be_nonconstant_expr_predicate (fbi.info, info,
2830 niter_desc.niter,
2831 nonconstant_names);
2832 if (!true_predicate_p (&will_be_nonconstant))
2833 will_be_nonconstant = and_predicates (info->conds,
2834 &bb_predicate,
2835 &will_be_nonconstant);
2836 if (!true_predicate_p (&will_be_nonconstant)
2837 && !false_predicate_p (&will_be_nonconstant))
2838 /* This is slightly imprecise. We may want to represent each
2839 loop with an independent predicate. */
2840 loop_iterations =
2841 and_predicates (info->conds, &loop_iterations,
2842 &will_be_nonconstant);
2844 exits.release ();
2847 /* To avoid quadratic behavior we analyze stride predicates only
2848 with respect to the containing loop. Thus we simply iterate
2849 over all defs in the outermost loop body. */
2850 for (loop = loops_for_fn (cfun)->tree_root->inner;
2851 loop != NULL; loop = loop->next)
2853 basic_block *body = get_loop_body (loop);
2854 for (unsigned i = 0; i < loop->num_nodes; i++)
2856 gimple_stmt_iterator gsi;
2857 bb_predicate = *(struct predicate *) body[i]->aux;
2858 for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi);
2859 gsi_next (&gsi))
2861 gimple *stmt = gsi_stmt (gsi);
2863 if (!is_gimple_assign (stmt))
2864 continue;
2866 tree def = gimple_assign_lhs (stmt);
2867 if (TREE_CODE (def) != SSA_NAME)
2868 continue;
2870 affine_iv iv;
2871 if (!simple_iv (loop_containing_stmt (stmt),
2872 loop_containing_stmt (stmt),
2873 def, &iv, true)
2874 || is_gimple_min_invariant (iv.step))
2875 continue;
2877 predicate will_be_nonconstant
2878 = will_be_nonconstant_expr_predicate (fbi.info, info,
2879 iv.step,
2880 nonconstant_names);
2881 if (!true_predicate_p (&will_be_nonconstant))
2882 will_be_nonconstant
2883 = and_predicates (info->conds, &bb_predicate,
2884 &will_be_nonconstant);
2885 if (!true_predicate_p (&will_be_nonconstant)
2886 && !false_predicate_p (&will_be_nonconstant))
2887 /* This is slightly imprecise. We may want to represent
2888 each loop with an independent predicate. */
2889 loop_stride = and_predicates (info->conds, &loop_stride,
2890 &will_be_nonconstant);
2893 free (body);
2895 set_hint_predicate (&inline_summaries->get (node)->loop_iterations,
2896 loop_iterations);
2897 set_hint_predicate (&inline_summaries->get (node)->loop_stride,
2898 loop_stride);
2899 scev_finalize ();
2901 FOR_ALL_BB_FN (bb, my_function)
2903 edge e;
2904 edge_iterator ei;
2906 if (bb->aux)
2907 edge_predicate_pool.remove ((predicate *)bb->aux);
2908 bb->aux = NULL;
2909 FOR_EACH_EDGE (e, ei, bb->succs)
2911 if (e->aux)
2912 edge_predicate_pool.remove ((predicate *) e->aux);
2913 e->aux = NULL;
2916 inline_summaries->get (node)->self_time = time;
2917 inline_summaries->get (node)->self_size = size;
2918 nonconstant_names.release ();
2919 ipa_release_body_info (&fbi);
2920 if (opt_for_fn (node->decl, optimize))
2922 if (!early)
2923 loop_optimizer_finalize ();
2924 else if (!ipa_edge_args_vector)
2925 ipa_free_all_node_params ();
2926 free_dominance_info (CDI_DOMINATORS);
2928 if (dump_file)
2930 fprintf (dump_file, "\n");
2931 dump_inline_summary (dump_file, node);
2936 /* Compute parameters of functions used by the inliner.
2937 EARLY is true when we compute parameters for the early inliner. */
2939 void
2940 compute_inline_parameters (struct cgraph_node *node, bool early)
2942 HOST_WIDE_INT self_stack_size;
2943 struct cgraph_edge *e;
2944 struct inline_summary *info;
2946 gcc_assert (!node->global.inlined_to);
2948 inline_summary_alloc ();
2950 info = inline_summaries->get (node);
2951 reset_inline_summary (node, info);
2953 /* Estimate the stack size for the function if we're optimizing. */
2954 self_stack_size = optimize && !node->thunk.thunk_p
2955 ? estimated_stack_frame_size (node) : 0;
2956 info->estimated_self_stack_size = self_stack_size;
2957 info->estimated_stack_size = self_stack_size;
2958 info->stack_frame_offset = 0;
2960 if (node->thunk.thunk_p)
2962 struct inline_edge_summary *es = inline_edge_summary (node->callees);
2963 struct predicate t = true_predicate ();
2965 node->local.can_change_signature = false;
2966 es->call_stmt_size = eni_size_weights.call_cost;
2967 es->call_stmt_time = eni_time_weights.call_cost;
2968 account_size_time (info, INLINE_SIZE_SCALE * 2,
2969 INLINE_TIME_SCALE * 2, &t);
2970 t = not_inlined_predicate ();
2971 account_size_time (info, 2 * INLINE_SIZE_SCALE, 0, &t);
2972 inline_update_overall_summary (node);
2973 info->self_size = info->size;
2974 info->self_time = info->time;
2975 /* We cannot inline instrumentation clones. */
2976 if (node->thunk.add_pointer_bounds_args)
2978 info->inlinable = false;
2979 node->callees->inline_failed = CIF_CHKP;
2981 else
2982 info->inlinable = true;
2984 else
2986 /* Even is_gimple_min_invariant relies on current_function_decl. */
2987 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
2989 /* Can this function be inlined at all? */
2990 if (!opt_for_fn (node->decl, optimize)
2991 && !lookup_attribute ("always_inline",
2992 DECL_ATTRIBUTES (node->decl)))
2993 info->inlinable = false;
2994 else
2995 info->inlinable = tree_inlinable_function_p (node->decl);
2997 info->contains_cilk_spawn = fn_contains_cilk_spawn_p (cfun);
2999 /* Type attributes can use parameter indices to describe them. */
3000 if (TYPE_ATTRIBUTES (TREE_TYPE (node->decl)))
3001 node->local.can_change_signature = false;
3002 else
3004 /* Otherwise, inlinable functions always can change signature. */
3005 if (info->inlinable)
3006 node->local.can_change_signature = true;
3007 else
3009 /* Functions calling builtin_apply cannot change their signature. */
3010 for (e = node->callees; e; e = e->next_callee)
3012 tree cdecl = e->callee->decl;
3013 if (DECL_BUILT_IN (cdecl)
3014 && DECL_BUILT_IN_CLASS (cdecl) == BUILT_IN_NORMAL
3015 && (DECL_FUNCTION_CODE (cdecl) == BUILT_IN_APPLY_ARGS
3016 || DECL_FUNCTION_CODE (cdecl) == BUILT_IN_VA_START))
3017 break;
3019 node->local.can_change_signature = !e;
3022 /* Functions called by instrumentation thunk can't change signature
3023 because instrumentation thunk modification is not supported. */
3024 if (node->local.can_change_signature)
3025 for (e = node->callers; e; e = e->next_caller)
3026 if (e->caller->thunk.thunk_p
3027 && e->caller->thunk.add_pointer_bounds_args)
3029 node->local.can_change_signature = false;
3030 break;
3032 estimate_function_body_sizes (node, early);
3033 pop_cfun ();
3035 for (e = node->callees; e; e = e->next_callee)
3036 if (e->callee->comdat_local_p ())
3037 break;
3038 node->calls_comdat_local = (e != NULL);
3040 /* Inlining characteristics are maintained by the cgraph_mark_inline. */
3041 info->time = info->self_time;
3042 info->size = info->self_size;
3043 info->stack_frame_offset = 0;
3044 info->estimated_stack_size = info->estimated_self_stack_size;
3045 if (flag_checking)
3047 inline_update_overall_summary (node);
3048 gcc_assert (info->time == info->self_time
3049 && info->size == info->self_size);
3054 /* Compute parameters of functions used by inliner using
3055 current_function_decl. */
3057 static unsigned int
3058 compute_inline_parameters_for_current (void)
3060 compute_inline_parameters (cgraph_node::get (current_function_decl), true);
3061 return 0;
3064 namespace {
3066 const pass_data pass_data_inline_parameters =
3068 GIMPLE_PASS, /* type */
3069 "inline_param", /* name */
3070 OPTGROUP_INLINE, /* optinfo_flags */
3071 TV_INLINE_PARAMETERS, /* tv_id */
3072 0, /* properties_required */
3073 0, /* properties_provided */
3074 0, /* properties_destroyed */
3075 0, /* todo_flags_start */
3076 0, /* todo_flags_finish */
3079 class pass_inline_parameters : public gimple_opt_pass
3081 public:
3082 pass_inline_parameters (gcc::context *ctxt)
3083 : gimple_opt_pass (pass_data_inline_parameters, ctxt)
3086 /* opt_pass methods: */
3087 opt_pass * clone () { return new pass_inline_parameters (m_ctxt); }
3088 virtual unsigned int execute (function *)
3090 return compute_inline_parameters_for_current ();
3093 }; // class pass_inline_parameters
3095 } // anon namespace
3097 gimple_opt_pass *
3098 make_pass_inline_parameters (gcc::context *ctxt)
3100 return new pass_inline_parameters (ctxt);
3104 /* Estimate the benefit of devirtualizing indirect edge IE, provided
3105 KNOWN_VALS, KNOWN_CONTEXTS and KNOWN_AGGS. */
3107 static bool
3108 estimate_edge_devirt_benefit (struct cgraph_edge *ie,
3109 int *size, int *time,
3110 vec<tree> known_vals,
3111 vec<ipa_polymorphic_call_context> known_contexts,
3112 vec<ipa_agg_jump_function_p> known_aggs)
3114 tree target;
3115 struct cgraph_node *callee;
3116 struct inline_summary *isummary;
3117 enum availability avail;
3118 bool speculative;
3120 if (!known_vals.exists () && !known_contexts.exists ())
3121 return false;
3122 if (!opt_for_fn (ie->caller->decl, flag_indirect_inlining))
3123 return false;
3125 target = ipa_get_indirect_edge_target (ie, known_vals, known_contexts,
3126 known_aggs, &speculative);
3127 if (!target || speculative)
3128 return false;
3130 /* Account for difference in cost between indirect and direct calls. */
3131 *size -= (eni_size_weights.indirect_call_cost - eni_size_weights.call_cost);
3132 *time -= (eni_time_weights.indirect_call_cost - eni_time_weights.call_cost);
3133 gcc_checking_assert (*time >= 0);
3134 gcc_checking_assert (*size >= 0);
3136 callee = cgraph_node::get (target);
3137 if (!callee || !callee->definition)
3138 return false;
3139 callee = callee->function_symbol (&avail);
3140 if (avail < AVAIL_AVAILABLE)
3141 return false;
3142 isummary = inline_summaries->get (callee);
3143 return isummary->inlinable;
3146 /* Increase SIZE, MIN_SIZE (if non-NULL) and TIME for size and time needed to
3147 handle edge E with probability PROB.
3148 Set HINTS if edge may be devirtualized.
3149 KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS describe context of the call
3150 site. */
3152 static inline void
3153 estimate_edge_size_and_time (struct cgraph_edge *e, int *size, int *min_size,
3154 int *time,
3155 int prob,
3156 vec<tree> known_vals,
3157 vec<ipa_polymorphic_call_context> known_contexts,
3158 vec<ipa_agg_jump_function_p> known_aggs,
3159 inline_hints *hints)
3161 struct inline_edge_summary *es = inline_edge_summary (e);
3162 int call_size = es->call_stmt_size;
3163 int call_time = es->call_stmt_time;
3164 int cur_size;
3165 if (!e->callee
3166 && estimate_edge_devirt_benefit (e, &call_size, &call_time,
3167 known_vals, known_contexts, known_aggs)
3168 && hints && e->maybe_hot_p ())
3169 *hints |= INLINE_HINT_indirect_call;
3170 cur_size = call_size * INLINE_SIZE_SCALE;
3171 *size += cur_size;
3172 if (min_size)
3173 *min_size += cur_size;
3174 *time += apply_probability ((gcov_type) call_time, prob)
3175 * e->frequency * (INLINE_TIME_SCALE / CGRAPH_FREQ_BASE);
3176 if (*time > MAX_TIME * INLINE_TIME_SCALE)
3177 *time = MAX_TIME * INLINE_TIME_SCALE;
3182 /* Increase SIZE, MIN_SIZE and TIME for size and time needed to handle all
3183 calls in NODE. POSSIBLE_TRUTHS, KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
3184 describe context of the call site. */
3186 static void
3187 estimate_calls_size_and_time (struct cgraph_node *node, int *size,
3188 int *min_size, int *time,
3189 inline_hints *hints,
3190 clause_t possible_truths,
3191 vec<tree> known_vals,
3192 vec<ipa_polymorphic_call_context> known_contexts,
3193 vec<ipa_agg_jump_function_p> known_aggs)
3195 struct cgraph_edge *e;
3196 for (e = node->callees; e; e = e->next_callee)
3198 if (inline_edge_summary_vec.length () <= (unsigned) e->uid)
3199 continue;
3201 struct inline_edge_summary *es = inline_edge_summary (e);
3203 /* Do not care about zero sized builtins. */
3204 if (e->inline_failed && !es->call_stmt_size)
3206 gcc_checking_assert (!es->call_stmt_time);
3207 continue;
3209 if (!es->predicate
3210 || evaluate_predicate (es->predicate, possible_truths))
3212 if (e->inline_failed)
3214 /* Predicates of calls shall not use NOT_CHANGED codes,
3215 so we do not need to compute probabilities. */
3216 estimate_edge_size_and_time (e, size,
3217 es->predicate ? NULL : min_size,
3218 time, REG_BR_PROB_BASE,
3219 known_vals, known_contexts,
3220 known_aggs, hints);
3222 else
3223 estimate_calls_size_and_time (e->callee, size, min_size, time,
3224 hints,
3225 possible_truths,
3226 known_vals, known_contexts,
3227 known_aggs);
3230 for (e = node->indirect_calls; e; e = e->next_callee)
3232 if (inline_edge_summary_vec.length () <= (unsigned) e->uid)
3233 continue;
3235 struct inline_edge_summary *es = inline_edge_summary (e);
3236 if (!es->predicate
3237 || evaluate_predicate (es->predicate, possible_truths))
3238 estimate_edge_size_and_time (e, size,
3239 es->predicate ? NULL : min_size,
3240 time, REG_BR_PROB_BASE,
3241 known_vals, known_contexts, known_aggs,
3242 hints);
3247 /* Estimate size and time needed to execute NODE assuming
3248 POSSIBLE_TRUTHS clause, and KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
3249 information about NODE's arguments. If non-NULL use also probability
3250 information about NODE's arguments. If non-NULL, also use the probability
3251 information present in the INLINE_PARAM_SUMMARY vector.
3252 Additionally, determine the hints implied by the context. Finally, compute
3253 the minimal size needed for the call that is independent of the call context and
3254 RET_MIN_SIZE, RET_TIME and RET_HINTS. */
3256 static void
3257 estimate_node_size_and_time (struct cgraph_node *node,
3258 clause_t possible_truths,
3259 vec<tree> known_vals,
3260 vec<ipa_polymorphic_call_context> known_contexts,
3261 vec<ipa_agg_jump_function_p> known_aggs,
3262 int *ret_size, int *ret_min_size, int *ret_time,
3263 inline_hints *ret_hints,
3264 vec<inline_param_summary>
3265 inline_param_summary)
3267 struct inline_summary *info = inline_summaries->get (node);
3268 size_time_entry *e;
3269 int size = 0;
3270 int time = 0;
3271 int min_size = 0;
3272 inline_hints hints = 0;
3273 int i;
3275 if (dump_file && (dump_flags & TDF_DETAILS))
3277 bool found = false;
3278 fprintf (dump_file, " Estimating body: %s/%i\n"
3279 " Known to be false: ", node->name (),
3280 node->order);
3282 for (i = predicate_not_inlined_condition;
3283 i < (predicate_first_dynamic_condition
3284 + (int) vec_safe_length (info->conds)); i++)
3285 if (!(possible_truths & (1 << i)))
3287 if (found)
3288 fprintf (dump_file, ", ");
3289 found = true;
3290 dump_condition (dump_file, info->conds, i);
3294 for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
3295 if (evaluate_predicate (&e->predicate, possible_truths))
3297 size += e->size;
3298 gcc_checking_assert (e->time >= 0);
3299 gcc_checking_assert (time >= 0);
3300 if (!inline_param_summary.exists ())
3301 time += e->time;
3302 else
3304 int prob = predicate_probability (info->conds,
3305 &e->predicate,
3306 possible_truths,
3307 inline_param_summary);
3308 gcc_checking_assert (prob >= 0);
3309 gcc_checking_assert (prob <= REG_BR_PROB_BASE);
3310 time += apply_probability ((gcov_type) e->time, prob);
3312 if (time > MAX_TIME * INLINE_TIME_SCALE)
3313 time = MAX_TIME * INLINE_TIME_SCALE;
3314 gcc_checking_assert (time >= 0);
3317 gcc_checking_assert (true_predicate_p (&(*info->entry)[0].predicate));
3318 min_size = (*info->entry)[0].size;
3319 gcc_checking_assert (size >= 0);
3320 gcc_checking_assert (time >= 0);
3322 if (info->loop_iterations
3323 && !evaluate_predicate (info->loop_iterations, possible_truths))
3324 hints |= INLINE_HINT_loop_iterations;
3325 if (info->loop_stride
3326 && !evaluate_predicate (info->loop_stride, possible_truths))
3327 hints |= INLINE_HINT_loop_stride;
3328 if (info->array_index
3329 && !evaluate_predicate (info->array_index, possible_truths))
3330 hints |= INLINE_HINT_array_index;
3331 if (info->scc_no)
3332 hints |= INLINE_HINT_in_scc;
3333 if (DECL_DECLARED_INLINE_P (node->decl))
3334 hints |= INLINE_HINT_declared_inline;
3336 estimate_calls_size_and_time (node, &size, &min_size, &time, &hints, possible_truths,
3337 known_vals, known_contexts, known_aggs);
3338 gcc_checking_assert (size >= 0);
3339 gcc_checking_assert (time >= 0);
3340 time = RDIV (time, INLINE_TIME_SCALE);
3341 size = RDIV (size, INLINE_SIZE_SCALE);
3342 min_size = RDIV (min_size, INLINE_SIZE_SCALE);
3344 if (dump_file && (dump_flags & TDF_DETAILS))
3345 fprintf (dump_file, "\n size:%i time:%i\n", (int) size, (int) time);
3346 if (ret_time)
3347 *ret_time = time;
3348 if (ret_size)
3349 *ret_size = size;
3350 if (ret_min_size)
3351 *ret_min_size = min_size;
3352 if (ret_hints)
3353 *ret_hints = hints;
3354 return;
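/* Illustrative sketch (not part of GCC): the accumulation loop above can be
   pictured with plain integers.  Each size/time entry is guarded by a
   predicate; entries whose predicate may still be true contribute their size
   unconditionally and their time scaled by an optional probability, with the
   running time capped.  example_entry, example_accumulate and the constants
   are hypothetical stand-ins, not the real inline summary types.  */

struct example_entry
{
  int size;
  int time;
  int may_be_true;  /* Result of evaluating the entry's predicate.  */
  int prob;         /* Probability on a 0..prob_base scale.  */
};

static void
example_accumulate (const struct example_entry *entries, int n,
                    int *ret_size, int *ret_time)
{
  const int prob_base = 10000;   /* Stand-in for REG_BR_PROB_BASE.  */
  const int max_time = 1000000;  /* Stand-in for MAX_TIME * INLINE_TIME_SCALE.  */
  int size = 0, time = 0, i;

  for (i = 0; i < n; i++)
    if (entries[i].may_be_true)
      {
        size += entries[i].size;
        /* Time is weighted by the probability, rounding to nearest.  */
        time += (int) (((long long) entries[i].time * entries[i].prob
                        + prob_base / 2) / prob_base);
        if (time > max_time)
          time = max_time;
      }
  *ret_size = size;
  *ret_time = time;
}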
3358 /* Estimate size and time needed to execute callee of EDGE assuming that
3359 parameters known to be constant at caller of EDGE are propagated.
3360 KNOWN_VALS and KNOWN_CONTEXTS are vectors of assumed known constant values
3361 and types for parameters. */
3363 void
3364 estimate_ipcp_clone_size_and_time (struct cgraph_node *node,
3365 vec<tree> known_vals,
3366 vec<ipa_polymorphic_call_context>
3367 known_contexts,
3368 vec<ipa_agg_jump_function_p> known_aggs,
3369 int *ret_size, int *ret_time,
3370 inline_hints *hints)
3372 clause_t clause;
3374 clause = evaluate_conditions_for_known_args (node, false, known_vals,
3375 known_aggs);
3376 estimate_node_size_and_time (node, clause, known_vals, known_contexts,
3377 known_aggs, ret_size, NULL, ret_time, hints, vNULL);
3380 /* Translate all conditions from callee representation into caller
3381 representation and symbolically evaluate predicate P into a new predicate.
3383 INFO is the inline_summary of the function we are adding the predicate into,
3384 CALLEE_INFO is the summary of the function predicate P is from. OPERAND_MAP is
3385 an array mapping callee formal IDs to caller formal IDs. POSSIBLE_TRUTHS is a
3386 clause of all callee conditions that may be true in the caller context.
3387 TOPLEV_PREDICATE is the predicate under which the callee is executed. OFFSET_MAP
3388 is an array of offsets that need to be added to conditions; a negative offset means that
3389 conditions relying on values passed by reference have to be discarded
3390 because they might not be preserved (and should be considered offset zero
3391 for other purposes). */
3393 static struct predicate
3394 remap_predicate (struct inline_summary *info,
3395 struct inline_summary *callee_info,
3396 struct predicate *p,
3397 vec<int> operand_map,
3398 vec<int> offset_map,
3399 clause_t possible_truths, struct predicate *toplev_predicate)
3401 int i;
3402 struct predicate out = true_predicate ();
3404 /* True predicate is easy. */
3405 if (true_predicate_p (p))
3406 return *toplev_predicate;
3407 for (i = 0; p->clause[i]; i++)
3409 clause_t clause = p->clause[i];
3410 int cond;
3411 struct predicate clause_predicate = false_predicate ();
3413 gcc_assert (i < MAX_CLAUSES);
3415 for (cond = 0; cond < NUM_CONDITIONS; cond++)
3416 /* Do we have condition we can't disprove? */
3417 if (clause & possible_truths & (1 << cond))
3419 struct predicate cond_predicate;
3420 /* Work out if the condition can translate to predicate in the
3421 inlined function. */
3422 if (cond >= predicate_first_dynamic_condition)
3424 struct condition *c;
3426 c = &(*callee_info->conds)[cond
3428 predicate_first_dynamic_condition];
3429 /* See if we can remap condition operand to caller's operand.
3430 Otherwise give up. */
3431 if (!operand_map.exists ()
3432 || (int) operand_map.length () <= c->operand_num
3433 || operand_map[c->operand_num] == -1
3434 /* TODO: For non-aggregate conditions, adding an offset is
3435 basically an arithmetic jump function processing which
3436 we should support in future. */
3437 || ((!c->agg_contents || !c->by_ref)
3438 && offset_map[c->operand_num] > 0)
3439 || (c->agg_contents && c->by_ref
3440 && offset_map[c->operand_num] < 0))
3441 cond_predicate = true_predicate ();
3442 else
3444 struct agg_position_info ap;
3445 HOST_WIDE_INT offset_delta = offset_map[c->operand_num];
3446 if (offset_delta < 0)
3448 gcc_checking_assert (!c->agg_contents || !c->by_ref);
3449 offset_delta = 0;
3451 gcc_assert (!c->agg_contents
3452 || c->by_ref || offset_delta == 0);
3453 ap.offset = c->offset + offset_delta;
3454 ap.agg_contents = c->agg_contents;
3455 ap.by_ref = c->by_ref;
3456 cond_predicate = add_condition (info,
3457 operand_map[c->operand_num],
3458 c->size, &ap, c->code,
3459 c->val);
3462 /* Fixed conditions remain the same; construct a single
3463 condition predicate. */
3464 else
3466 cond_predicate.clause[0] = 1 << cond;
3467 cond_predicate.clause[1] = 0;
3469 clause_predicate = or_predicates (info->conds, &clause_predicate,
3470 &cond_predicate);
3472 out = and_predicates (info->conds, &out, &clause_predicate);
3474 return and_predicates (info->conds, &out, toplev_predicate);
3478 /* Update summary information of inline clones after inlining.
3479 Compute peak stack usage. */
3481 static void
3482 inline_update_callee_summaries (struct cgraph_node *node, int depth)
3484 struct cgraph_edge *e;
3485 struct inline_summary *callee_info = inline_summaries->get (node);
3486 struct inline_summary *caller_info = inline_summaries->get (node->callers->caller);
3487 HOST_WIDE_INT peak;
3489 callee_info->stack_frame_offset
3490 = caller_info->stack_frame_offset
3491 + caller_info->estimated_self_stack_size;
3492 peak = callee_info->stack_frame_offset
3493 + callee_info->estimated_self_stack_size;
3494 if (inline_summaries->get (node->global.inlined_to)->estimated_stack_size < peak)
3495 inline_summaries->get (node->global.inlined_to)->estimated_stack_size = peak;
3496 ipa_propagate_frequency (node);
3497 for (e = node->callees; e; e = e->next_callee)
3499 if (!e->inline_failed)
3500 inline_update_callee_summaries (e->callee, depth);
3501 inline_edge_summary (e)->loop_depth += depth;
3503 for (e = node->indirect_calls; e; e = e->next_callee)
3504 inline_edge_summary (e)->loop_depth += depth;
3507 /* Update change_prob of EDGE after INLINED_EDGE has been inlined.
3508 When function A is inlined in B and A calls C with a parameter that
3509 changes with probability PROB1, and C is known to be a passthrough
3510 of an argument of B that changes with probability PROB2, the probability
3511 of change is now PROB1*PROB2. */
3513 static void
3514 remap_edge_change_prob (struct cgraph_edge *inlined_edge,
3515 struct cgraph_edge *edge)
3517 if (ipa_node_params_sum)
3519 int i;
3520 struct ipa_edge_args *args = IPA_EDGE_REF (edge);
3521 struct inline_edge_summary *es = inline_edge_summary (edge);
3522 struct inline_edge_summary *inlined_es
3523 = inline_edge_summary (inlined_edge);
3525 for (i = 0; i < ipa_get_cs_argument_count (args); i++)
3527 struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i);
3528 if (jfunc->type == IPA_JF_PASS_THROUGH
3529 && (ipa_get_jf_pass_through_formal_id (jfunc)
3530 < (int) inlined_es->param.length ()))
3532 int jf_formal_id = ipa_get_jf_pass_through_formal_id (jfunc);
3533 int prob1 = es->param[i].change_prob;
3534 int prob2 = inlined_es->param[jf_formal_id].change_prob;
3535 int prob = combine_probabilities (prob1, prob2);
3537 if (prob1 && prob2 && !prob)
3538 prob = 1;
3540 es->param[i].change_prob = prob;
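/* Illustrative sketch (not part of GCC): combining two change probabilities
   expressed on a common base means multiplying them and dividing by the base
   once, then making sure two nonzero inputs never round down to zero, as the
   code above does.  prob_base is a stand-in for REG_BR_PROB_BASE and the
   helper is only an approximation of combine_probabilities.  */

static int
example_combine_probabilities (int prob1, int prob2)
{
  const int prob_base = 10000;  /* Stand-in for REG_BR_PROB_BASE.  */
  int prob = (int) (((long long) prob1 * prob2 + prob_base / 2) / prob_base);

  /* Keep a trace of the fact that both inputs were nonzero.  */
  if (prob1 && prob2 && !prob)
    prob = 1;
  return prob;
}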
3546 /* Update edge summaries of NODE after INLINED_EDGE has been inlined.
3548 Remap predicates of callees of NODE. Rest of arguments match
3549 remap_predicate.
3551 Also update change probabilities. */
3553 static void
3554 remap_edge_summaries (struct cgraph_edge *inlined_edge,
3555 struct cgraph_node *node,
3556 struct inline_summary *info,
3557 struct inline_summary *callee_info,
3558 vec<int> operand_map,
3559 vec<int> offset_map,
3560 clause_t possible_truths,
3561 struct predicate *toplev_predicate)
3563 struct cgraph_edge *e, *next;
3564 for (e = node->callees; e; e = next)
3566 struct inline_edge_summary *es = inline_edge_summary (e);
3567 struct predicate p;
3568 next = e->next_callee;
3570 if (e->inline_failed)
3572 remap_edge_change_prob (inlined_edge, e);
3574 if (es->predicate)
3576 p = remap_predicate (info, callee_info,
3577 es->predicate, operand_map, offset_map,
3578 possible_truths, toplev_predicate);
3579 edge_set_predicate (e, &p);
3581 else
3582 edge_set_predicate (e, toplev_predicate);
3584 else
3585 remap_edge_summaries (inlined_edge, e->callee, info, callee_info,
3586 operand_map, offset_map, possible_truths,
3587 toplev_predicate);
3589 for (e = node->indirect_calls; e; e = next)
3591 struct inline_edge_summary *es = inline_edge_summary (e);
3592 struct predicate p;
3593 next = e->next_callee;
3595 remap_edge_change_prob (inlined_edge, e);
3596 if (es->predicate)
3598 p = remap_predicate (info, callee_info,
3599 es->predicate, operand_map, offset_map,
3600 possible_truths, toplev_predicate);
3601 edge_set_predicate (e, &p);
3603 else
3604 edge_set_predicate (e, toplev_predicate);
3608 /* Same as remap_predicate, but set result into hint *HINT. */
3610 static void
3611 remap_hint_predicate (struct inline_summary *info,
3612 struct inline_summary *callee_info,
3613 struct predicate **hint,
3614 vec<int> operand_map,
3615 vec<int> offset_map,
3616 clause_t possible_truths,
3617 struct predicate *toplev_predicate)
3619 predicate p;
3621 if (!*hint)
3622 return;
3623 p = remap_predicate (info, callee_info,
3624 *hint,
3625 operand_map, offset_map,
3626 possible_truths, toplev_predicate);
3627 if (!false_predicate_p (&p) && !true_predicate_p (&p))
3629 if (!*hint)
3630 set_hint_predicate (hint, p);
3631 else
3632 **hint = and_predicates (info->conds, *hint, &p);
3636 /* We inlined EDGE. Update summary of the function we inlined into. */
3638 void
3639 inline_merge_summary (struct cgraph_edge *edge)
3641 struct inline_summary *callee_info = inline_summaries->get (edge->callee);
3642 struct cgraph_node *to = (edge->caller->global.inlined_to
3643 ? edge->caller->global.inlined_to : edge->caller);
3644 struct inline_summary *info = inline_summaries->get (to);
3645 clause_t clause = 0; /* not_inline is known to be false. */
3646 size_time_entry *e;
3647 vec<int> operand_map = vNULL;
3648 vec<int> offset_map = vNULL;
3649 int i;
3650 struct predicate toplev_predicate;
3651 struct predicate true_p = true_predicate ();
3652 struct inline_edge_summary *es = inline_edge_summary (edge);
3654 if (es->predicate)
3655 toplev_predicate = *es->predicate;
3656 else
3657 toplev_predicate = true_predicate ();
3659 info->fp_expressions |= callee_info->fp_expressions;
3661 if (callee_info->conds)
3662 evaluate_properties_for_edge (edge, true, &clause, NULL, NULL, NULL);
3663 if (ipa_node_params_sum && callee_info->conds)
3665 struct ipa_edge_args *args = IPA_EDGE_REF (edge);
3666 int count = ipa_get_cs_argument_count (args);
3667 int i;
3669 if (count)
3671 operand_map.safe_grow_cleared (count);
3672 offset_map.safe_grow_cleared (count);
3674 for (i = 0; i < count; i++)
3676 struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i);
3677 int map = -1;
3679 /* TODO: handle non-NOPs when merging. */
3680 if (jfunc->type == IPA_JF_PASS_THROUGH)
3682 if (ipa_get_jf_pass_through_operation (jfunc) == NOP_EXPR)
3683 map = ipa_get_jf_pass_through_formal_id (jfunc);
3684 if (!ipa_get_jf_pass_through_agg_preserved (jfunc))
3685 offset_map[i] = -1;
3687 else if (jfunc->type == IPA_JF_ANCESTOR)
3689 HOST_WIDE_INT offset = ipa_get_jf_ancestor_offset (jfunc);
3690 if (offset >= 0 && offset < INT_MAX)
3692 map = ipa_get_jf_ancestor_formal_id (jfunc);
3693 if (!ipa_get_jf_ancestor_agg_preserved (jfunc))
3694 offset = -1;
3695 offset_map[i] = offset;
3698 operand_map[i] = map;
3699 gcc_assert (map < ipa_get_param_count (IPA_NODE_REF (to)));
3702 for (i = 0; vec_safe_iterate (callee_info->entry, i, &e); i++)
3704 struct predicate p = remap_predicate (info, callee_info,
3705 &e->predicate, operand_map,
3706 offset_map, clause,
3707 &toplev_predicate);
3708 if (!false_predicate_p (&p))
3710 gcov_type add_time = ((gcov_type) e->time * edge->frequency
3711 + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
3712 int prob = predicate_probability (callee_info->conds,
3713 &e->predicate,
3714 clause, es->param);
3715 add_time = apply_probability ((gcov_type) add_time, prob);
3716 if (add_time > MAX_TIME * INLINE_TIME_SCALE)
3717 add_time = MAX_TIME * INLINE_TIME_SCALE;
3718 if (prob != REG_BR_PROB_BASE
3719 && dump_file && (dump_flags & TDF_DETAILS))
3721 fprintf (dump_file, "\t\tScaling time by probability:%f\n",
3722 (double) prob / REG_BR_PROB_BASE);
3724 account_size_time (info, e->size, add_time, &p);
3727 remap_edge_summaries (edge, edge->callee, info, callee_info, operand_map,
3728 offset_map, clause, &toplev_predicate);
3729 remap_hint_predicate (info, callee_info,
3730 &callee_info->loop_iterations,
3731 operand_map, offset_map, clause, &toplev_predicate);
3732 remap_hint_predicate (info, callee_info,
3733 &callee_info->loop_stride,
3734 operand_map, offset_map, clause, &toplev_predicate);
3735 remap_hint_predicate (info, callee_info,
3736 &callee_info->array_index,
3737 operand_map, offset_map, clause, &toplev_predicate);
3739 inline_update_callee_summaries (edge->callee,
3740 inline_edge_summary (edge)->loop_depth);
3742 /* We do not maintain predicates of inlined edges, so free this one. */
3743 edge_set_predicate (edge, &true_p);
3744 /* Similarly remove param summaries. */
3745 es->param.release ();
3746 operand_map.release ();
3747 offset_map.release ();
3750 /* For performance reasons inline_merge_summary does not update the overall
3751 size and time. Recompute them here. */
3753 void
3754 inline_update_overall_summary (struct cgraph_node *node)
3756 struct inline_summary *info = inline_summaries->get (node);
3757 size_time_entry *e;
3758 int i;
3760 info->size = 0;
3761 info->time = 0;
3762 for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
3764 info->size += e->size, info->time += e->time;
3765 if (info->time > MAX_TIME * INLINE_TIME_SCALE)
3766 info->time = MAX_TIME * INLINE_TIME_SCALE;
3768 estimate_calls_size_and_time (node, &info->size, &info->min_size,
3769 &info->time, NULL,
3770 ~(clause_t) (1 << predicate_false_condition),
3771 vNULL, vNULL, vNULL);
3772 info->time = (info->time + INLINE_TIME_SCALE / 2) / INLINE_TIME_SCALE;
3773 info->size = (info->size + INLINE_SIZE_SCALE / 2) / INLINE_SIZE_SCALE;
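/* Illustrative sketch (not part of GCC): the final divisions above convert the
   internally scaled size and time back to unscaled units with round-to-nearest
   semantics.  The helper below shows the arithmetic in isolation; the scale
   argument stands in for INLINE_SIZE_SCALE or INLINE_TIME_SCALE.  */

static int
example_unscale (int scaled_value, int scale)
{
  /* Adding half the divisor before dividing rounds to nearest instead of
     truncating towards zero (for non-negative values).  */
  return (scaled_value + scale / 2) / scale;
}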
3776 /* Return hints derived from EDGE. */
3778 simple_edge_hints (struct cgraph_edge *edge)
3780 int hints = 0;
3781 struct cgraph_node *to = (edge->caller->global.inlined_to
3782 ? edge->caller->global.inlined_to : edge->caller);
3783 struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
3784 if (inline_summaries->get (to)->scc_no
3785 && inline_summaries->get (to)->scc_no
3786 == inline_summaries->get (callee)->scc_no
3787 && !edge->recursive_p ())
3788 hints |= INLINE_HINT_same_scc;
3790 if (callee->lto_file_data && edge->caller->lto_file_data
3791 && edge->caller->lto_file_data != callee->lto_file_data
3792 && !callee->merged_comdat && !callee->icf_merged)
3793 hints |= INLINE_HINT_cross_module;
3795 return hints;
3798 /* Estimate the time cost for the caller when inlining EDGE.
3799 Only to be called via estimate_edge_time, which handles the
3800 caching mechanism.
3802 When caching, also update the cache entry. Compute both time and
3803 size, since we always need both metrics eventually. */
3806 do_estimate_edge_time (struct cgraph_edge *edge)
3808 int time;
3809 int size;
3810 inline_hints hints;
3811 struct cgraph_node *callee;
3812 clause_t clause;
3813 vec<tree> known_vals;
3814 vec<ipa_polymorphic_call_context> known_contexts;
3815 vec<ipa_agg_jump_function_p> known_aggs;
3816 struct inline_edge_summary *es = inline_edge_summary (edge);
3817 int min_size;
3819 callee = edge->callee->ultimate_alias_target ();
3821 gcc_checking_assert (edge->inline_failed);
3822 evaluate_properties_for_edge (edge, true,
3823 &clause, &known_vals, &known_contexts,
3824 &known_aggs);
3825 estimate_node_size_and_time (callee, clause, known_vals, known_contexts,
3826 known_aggs, &size, &min_size, &time, &hints, es->param);
3828 /* When we have profile feedback, we can quite safely identify hot
3829 edges and for those we disable size limits. Don't do that when
3830 probability that caller will call the callee is low however, since it
3831 may hurt optimization of the caller's hot path. */
3832 if (edge->count && edge->maybe_hot_p ()
3833 && (edge->count * 2
3834 > (edge->caller->global.inlined_to
3835 ? edge->caller->global.inlined_to->count : edge->caller->count)))
3836 hints |= INLINE_HINT_known_hot;
3838 known_vals.release ();
3839 known_contexts.release ();
3840 known_aggs.release ();
3841 gcc_checking_assert (size >= 0);
3842 gcc_checking_assert (time >= 0);
3844 /* When caching, update the cache entry. */
3845 if (edge_growth_cache.exists ())
3846 {
3847 inline_summaries->get (edge->callee)->min_size = min_size;
3848 if ((int) edge_growth_cache.length () <= edge->uid)
3849 edge_growth_cache.safe_grow_cleared (symtab->edges_max_uid);
3850 edge_growth_cache[edge->uid].time = time + (time >= 0);
3852 edge_growth_cache[edge->uid].size = size + (size >= 0);
3853 hints |= simple_edge_hints (edge);
3854 edge_growth_cache[edge->uid].hints = hints + 1;
3855 }
3856 return time;
3860 /* Return estimated callee growth after inlining EDGE.
3861 Only to be called via estimate_edge_size. */
3864 do_estimate_edge_size (struct cgraph_edge *edge)
3866 int size;
3867 struct cgraph_node *callee;
3868 clause_t clause;
3869 vec<tree> known_vals;
3870 vec<ipa_polymorphic_call_context> known_contexts;
3871 vec<ipa_agg_jump_function_p> known_aggs;
3873 /* When we do caching, use do_estimate_edge_time to populate the entry. */
3875 if (edge_growth_cache.exists ())
3876 {
3877 do_estimate_edge_time (edge);
3878 size = edge_growth_cache[edge->uid].size;
3879 gcc_checking_assert (size);
3880 return size - (size > 0);
3881 }
3883 callee = edge->callee->ultimate_alias_target ();
3885 /* Early inliner runs without caching, go ahead and do the dirty work. */
3886 gcc_checking_assert (edge->inline_failed);
3887 evaluate_properties_for_edge (edge, true,
3888 &clause, &known_vals, &known_contexts,
3889 &known_aggs);
3890 estimate_node_size_and_time (callee, clause, known_vals, known_contexts,
3891 known_aggs, &size, NULL, NULL, NULL, vNULL);
3892 known_vals.release ();
3893 known_contexts.release ();
3894 known_aggs.release ();
3895 return size;
3899 /* Return inline hints of EDGE.
3900 Only to be called via estimate_edge_hints. */
3902 inline_hints
3903 do_estimate_edge_hints (struct cgraph_edge *edge)
3905 inline_hints hints;
3906 struct cgraph_node *callee;
3907 clause_t clause;
3908 vec<tree> known_vals;
3909 vec<ipa_polymorphic_call_context> known_contexts;
3910 vec<ipa_agg_jump_function_p> known_aggs;
3912 /* When we do caching, use do_estimate_edge_time to populate the entry. */
3914 if (edge_growth_cache.exists ())
3915 {
3916 do_estimate_edge_time (edge);
3917 hints = edge_growth_cache[edge->uid].hints;
3918 gcc_checking_assert (hints);
3919 return hints - 1;
3920 }
3922 callee = edge->callee->ultimate_alias_target ();
3924 /* Early inliner runs without caching, go ahead and do the dirty work. */
3925 gcc_checking_assert (edge->inline_failed);
3926 evaluate_properties_for_edge (edge, true,
3927 &clause, &known_vals, &known_contexts,
3928 &known_aggs);
3929 estimate_node_size_and_time (callee, clause, known_vals, known_contexts,
3930 known_aggs, NULL, NULL, NULL, &hints, vNULL);
3931 known_vals.release ();
3932 known_contexts.release ();
3933 known_aggs.release ();
3934 hints |= simple_edge_hints (edge);
3935 return hints;
3939 /* Estimate self time of the function NODE after inlining EDGE. */
3942 estimate_time_after_inlining (struct cgraph_node *node,
3943 struct cgraph_edge *edge)
3945 struct inline_edge_summary *es = inline_edge_summary (edge);
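/* If the call is predicted to be optimized away (its predicate is false),
the node's current time estimate already applies. */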
3946 if (!es->predicate || !false_predicate_p (es->predicate))
3948 gcov_type time =
3949 inline_summaries->get (node)->time + estimate_edge_time (edge);
3950 if (time < 0)
3951 time = 0;
3952 if (time > MAX_TIME)
3953 time = MAX_TIME;
3954 return time;
3956 return inline_summaries->get (node)->time;
3960 /* Estimate the size of NODE after inlining EDGE which should be an
3961 edge to either NODE or a call inlined into NODE. */
3964 estimate_size_after_inlining (struct cgraph_node *node,
3965 struct cgraph_edge *edge)
3967 struct inline_edge_summary *es = inline_edge_summary (edge);
3968 if (!es->predicate || !false_predicate_p (es->predicate))
3970 int size = inline_summaries->get (node)->size + estimate_edge_growth (edge);
3971 gcc_assert (size >= 0);
3972 return size;
3974 return inline_summaries->get (node)->size;
3978 struct growth_data
3980 struct cgraph_node *node;
3981 bool self_recursive;
3982 bool uninlinable;
3983 int growth;
3987 /* Worker for estimate_growth.  Collect growth for all callers. */
3989 static bool
3990 do_estimate_growth_1 (struct cgraph_node *node, void *data)
3992 struct cgraph_edge *e;
3993 struct growth_data *d = (struct growth_data *) data;
3995 for (e = node->callers; e; e = e->next_caller)
3997 gcc_checking_assert (e->inline_failed);
3999 if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
4001 d->uninlinable = true;
4002 continue;
4005 if (e->recursive_p ())
4007 d->self_recursive = true;
4008 continue;
4010 d->growth += estimate_edge_growth (e);
4012 return false;
4016 /* Estimate the growth caused by inlining NODE into all callers. */
4019 estimate_growth (struct cgraph_node *node)
4021 struct growth_data d = { node, false, false, 0 };
4022 struct inline_summary *info = inline_summaries->get (node);
4024 node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true);
4026 /* For self-recursive functions the growth estimate really should be
4027 infinity.  We do not want to return very large values, because the growth
4028 enters various fractions in the badness computation.  Be sure not to
4029 return zero or negative growths. */
4030 if (d.self_recursive)
4031 d.growth = d.growth < info->size ? info->size : d.growth;
4032 else if (DECL_EXTERNAL (node->decl) || d.uninlinable)
4033 ;
4034 else
4036 if (node->will_be_removed_from_program_if_no_direct_calls_p ())
4037 d.growth -= info->size;
4038 /* COMDAT functions are very often not shared across multiple units
4039 since they come from various template instantiations.
4040 Take this into account. */
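/* Credit (100 - comdat-sharing-probability) percent of the body size,
rounded to the nearest unit, as likely removable. */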
4041 else if (DECL_COMDAT (node->decl)
4042 && node->can_remove_if_no_direct_calls_p ())
4043 d.growth -= (info->size
4044 * (100 - PARAM_VALUE (PARAM_COMDAT_SHARING_PROBABILITY))
4045 + 50) / 100;
4048 return d.growth;
4051 /* Decrease *MAX_CALLERS by the number of callers of NODE (and of its aliases) and return true if NODE cannot be removed, an uninlinable caller is found, or the limit is reached. */
4053 static bool
4054 check_callers (cgraph_node *node, int *max_callers)
4056 ipa_ref *ref;
4058 if (!node->can_remove_if_no_direct_calls_and_refs_p ())
4059 return true;
4061 for (cgraph_edge *e = node->callers; e; e = e->next_caller)
4063 (*max_callers)--;
4064 if (!*max_callers
4065 || cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
4066 return true;
4069 FOR_EACH_ALIAS (node, ref)
4070 if (check_callers (dyn_cast <cgraph_node *> (ref->referring), max_callers))
4071 return true;
4073 return false;
4077 /* Cheaply estimate whether the growth of NODE is likely positive, knowing
4078 EDGE_GROWTH of one particular edge.
4079 We assume that most other edges will have similar growth
4080 and skip the precise computation if there are too many callers. */
4082 bool
4083 growth_likely_positive (struct cgraph_node *node,
4084 int edge_growth)
4086 int max_callers;
4087 struct cgraph_edge *e;
4088 gcc_checking_assert (edge_growth > 0);
4090 /* First quickly check if NODE is removable at all. */
4091 if (DECL_EXTERNAL (node->decl))
4092 return true;
4093 if (!node->can_remove_if_no_direct_calls_and_refs_p ()
4094 || node->address_taken)
4095 return true;
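/* Cap the number of callers examined at roughly the point where their
combined growth (at EDGE_GROWTH each) would reach four times the body size;
past that, assume the growth is positive without the precise estimate. */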
4097 max_callers = inline_summaries->get (node)->size * 4 / edge_growth + 2;
4099 for (e = node->callers; e; e = e->next_caller)
4101 max_callers--;
4102 if (!max_callers
4103 || cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
4104 return true;
4107 ipa_ref *ref;
4108 FOR_EACH_ALIAS (node, ref)
4109 if (check_callers (dyn_cast <cgraph_node *> (ref->referring), &max_callers))
4110 return true;
4112 /* Unlike for functions called once, we play unsafe with
4113 COMDATs.  We can allow that since we know functions
4114 in consideration are small (and thus the risk is small) and
4115 moreover the growth estimates already account for the fact that
4116 COMDAT functions may or may not disappear when eliminated from
4117 the current unit.  With good probability, making the aggressive
4118 choice in all units is going to make the overall program
4119 smaller. */
4120 if (DECL_COMDAT (node->decl))
4122 if (!node->can_remove_if_no_direct_calls_p ())
4123 return true;
4125 else if (!node->will_be_removed_from_program_if_no_direct_calls_p ())
4126 return true;
4128 return estimate_growth (node) > 0;
4132 /* This function performs intraprocedural analysis in NODE that is required to
4133 inline indirect calls. */
4135 static void
4136 inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
4138 ipa_analyze_node (node);
4139 if (dump_file && (dump_flags & TDF_DETAILS))
4141 ipa_print_node_params (dump_file, node);
4142 ipa_print_node_jump_functions (dump_file, node);
4147 /* Compute inline parameters (body size, time, etc.) of NODE. */
4149 void
4150 inline_analyze_function (struct cgraph_node *node)
4152 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
4154 if (dump_file)
4155 fprintf (dump_file, "\nAnalyzing function: %s/%u\n",
4156 node->name (), node->order);
4157 if (opt_for_fn (node->decl, optimize) && !node->thunk.thunk_p)
4158 inline_indirect_intraprocedural_analysis (node);
4159 compute_inline_parameters (node, false);
4160 if (!optimize)
4162 struct cgraph_edge *e;
4163 for (e = node->callees; e; e = e->next_callee)
4164 e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
4165 for (e = node->indirect_calls; e; e = e->next_callee)
4166 e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
4169 pop_cfun ();
4173 /* Called when new function is inserted to callgraph late. */
4175 void
4176 inline_summary_t::insert (struct cgraph_node *node, inline_summary *)
4178 inline_analyze_function (node);
4181 /* Compute inline summaries for all functions in the compilation unit. */
4183 void
4184 inline_generate_summary (void)
4186 struct cgraph_node *node;
4188 FOR_EACH_DEFINED_FUNCTION (node)
4189 if (DECL_STRUCT_FUNCTION (node->decl))
4190 node->local.versionable = tree_versionable_function_p (node->decl);
4192 /* When not optimizing, do not bother to analyze. Inlining is still done
4193 because edge redirection needs to happen there. */
4194 if (!optimize && !flag_generate_lto && !flag_generate_offload && !flag_wpa)
4195 return;
4197 if (!inline_summaries)
4198 inline_summaries = (inline_summary_t*) inline_summary_t::create_ggc (symtab);
4200 inline_summaries->enable_insertion_hook ();
4202 ipa_register_cgraph_hooks ();
4203 inline_free_summary ();
4205 FOR_EACH_DEFINED_FUNCTION (node)
4206 if (!node->alias)
4207 inline_analyze_function (node);
4211 /* Read predicate from IB. */
4213 static struct predicate
4214 read_predicate (struct lto_input_block *ib)
4216 struct predicate out;
4217 clause_t clause;
4218 int k = 0;
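/* Clauses are streamed as a sequence of nonzero values terminated by a zero;
see write_predicate below. */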
4220 do
4221 {
4222 gcc_assert (k <= MAX_CLAUSES);
4223 clause = out.clause[k++] = streamer_read_uhwi (ib);
4224 }
4225 while (clause);
4227 /* Zero-initialize the remaining clauses in OUT. */
4228 while (k <= MAX_CLAUSES)
4229 out.clause[k++] = 0;
4231 return out;
4235 /* Read inline summary for edge E from IB. */
4237 static void
4238 read_inline_edge_summary (struct lto_input_block *ib, struct cgraph_edge *e)
4240 struct inline_edge_summary *es = inline_edge_summary (e);
4241 struct predicate p;
4242 int length, i;
4244 es->call_stmt_size = streamer_read_uhwi (ib);
4245 es->call_stmt_time = streamer_read_uhwi (ib);
4246 es->loop_depth = streamer_read_uhwi (ib);
4247 p = read_predicate (ib);
4248 edge_set_predicate (e, &p);
4249 length = streamer_read_uhwi (ib);
4250 if (length)
4252 es->param.safe_grow_cleared (length);
4253 for (i = 0; i < length; i++)
4254 es->param[i].change_prob = streamer_read_uhwi (ib);
4259 /* Stream in inline summaries from the section. */
4261 static void
4262 inline_read_section (struct lto_file_decl_data *file_data, const char *data,
4263 size_t len)
4265 const struct lto_function_header *header =
4266 (const struct lto_function_header *) data;
4267 const int cfg_offset = sizeof (struct lto_function_header);
4268 const int main_offset = cfg_offset + header->cfg_size;
4269 const int string_offset = main_offset + header->main_size;
4270 struct data_in *data_in;
4271 unsigned int i, count2, j;
4272 unsigned int f_count;
4274 lto_input_block ib ((const char *) data + main_offset, header->main_size,
4275 file_data->mode_table);
4277 data_in =
4278 lto_data_in_create (file_data, (const char *) data + string_offset,
4279 header->string_size, vNULL);
4280 f_count = streamer_read_uhwi (&ib);
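/* Each record mirrors the layout produced by inline_write_summary below:
node index, stack size, self size and time, a flag bitpack, the condition
and size/time entry vectors, the hint predicates and the per-edge
summaries. */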
4281 for (i = 0; i < f_count; i++)
4283 unsigned int index;
4284 struct cgraph_node *node;
4285 struct inline_summary *info;
4286 lto_symtab_encoder_t encoder;
4287 struct bitpack_d bp;
4288 struct cgraph_edge *e;
4289 predicate p;
4291 index = streamer_read_uhwi (&ib);
4292 encoder = file_data->symtab_node_encoder;
4293 node = dyn_cast<cgraph_node *> (lto_symtab_encoder_deref (encoder,
4294 index));
4295 info = inline_summaries->get (node);
4297 info->estimated_stack_size
4298 = info->estimated_self_stack_size = streamer_read_uhwi (&ib);
4299 info->size = info->self_size = streamer_read_uhwi (&ib);
4300 info->time = info->self_time = streamer_read_uhwi (&ib);
4302 bp = streamer_read_bitpack (&ib);
4303 info->inlinable = bp_unpack_value (&bp, 1);
4304 info->contains_cilk_spawn = bp_unpack_value (&bp, 1);
4305 info->fp_expressions = bp_unpack_value (&bp, 1);
4307 count2 = streamer_read_uhwi (&ib);
4308 gcc_assert (!info->conds);
4309 for (j = 0; j < count2; j++)
4311 struct condition c;
4312 c.operand_num = streamer_read_uhwi (&ib);
4313 c.size = streamer_read_uhwi (&ib);
4314 c.code = (enum tree_code) streamer_read_uhwi (&ib);
4315 c.val = stream_read_tree (&ib, data_in);
4316 bp = streamer_read_bitpack (&ib);
4317 c.agg_contents = bp_unpack_value (&bp, 1);
4318 c.by_ref = bp_unpack_value (&bp, 1);
4319 if (c.agg_contents)
4320 c.offset = streamer_read_uhwi (&ib);
4321 vec_safe_push (info->conds, c);
4323 count2 = streamer_read_uhwi (&ib);
4324 gcc_assert (!info->entry);
4325 for (j = 0; j < count2; j++)
4327 struct size_time_entry e;
4329 e.size = streamer_read_uhwi (&ib);
4330 e.time = streamer_read_uhwi (&ib);
4331 e.predicate = read_predicate (&ib);
4333 vec_safe_push (info->entry, e);
4336 p = read_predicate (&ib);
4337 set_hint_predicate (&info->loop_iterations, p);
4338 p = read_predicate (&ib);
4339 set_hint_predicate (&info->loop_stride, p);
4340 p = read_predicate (&ib);
4341 set_hint_predicate (&info->array_index, p);
4342 for (e = node->callees; e; e = e->next_callee)
4343 read_inline_edge_summary (&ib, e);
4344 for (e = node->indirect_calls; e; e = e->next_callee)
4345 read_inline_edge_summary (&ib, e);
4348 lto_free_section_data (file_data, LTO_section_inline_summary, NULL, data,
4349 len);
4350 lto_data_in_delete (data_in);
4354 /* Read inline summary.  Jump functions are shared among ipa-cp
4355 and the inliner, so when ipa-cp is active, we do not need to read them
4356 again here. */
4358 void
4359 inline_read_summary (void)
4361 struct lto_file_decl_data **file_data_vec = lto_get_file_decl_data ();
4362 struct lto_file_decl_data *file_data;
4363 unsigned int j = 0;
4365 inline_summary_alloc ();
4367 while ((file_data = file_data_vec[j++]))
4369 size_t len;
4370 const char *data = lto_get_section_data (file_data,
4371 LTO_section_inline_summary,
4372 NULL, &len);
4373 if (data)
4374 inline_read_section (file_data, data, len);
4375 else
4376 /* Fatal error here. We do not want to support compiling ltrans units
4377 with a different version of the compiler or different flags than the WPA
4378 unit, so this should never happen. */
4379 fatal_error (input_location,
4380 "ipa inline summary is missing in input file");
4382 if (optimize)
4384 ipa_register_cgraph_hooks ();
4385 if (!flag_ipa_cp)
4386 ipa_prop_read_jump_functions ();
4389 gcc_assert (inline_summaries);
4390 inline_summaries->enable_insertion_hook ();
4394 /* Write predicate P to OB. */
4396 static void
4397 write_predicate (struct output_block *ob, struct predicate *p)
4399 int j;
4400 if (p)
4401 for (j = 0; p->clause[j]; j++)
4402 {
4403 gcc_assert (j < MAX_CLAUSES);
4404 streamer_write_uhwi (ob, p->clause[j]);
4405 }
4406 streamer_write_uhwi (ob, 0);
4410 /* Write inline summary for edge E to OB. */
4412 static void
4413 write_inline_edge_summary (struct output_block *ob, struct cgraph_edge *e)
4415 struct inline_edge_summary *es = inline_edge_summary (e);
4416 int i;
4418 streamer_write_uhwi (ob, es->call_stmt_size);
4419 streamer_write_uhwi (ob, es->call_stmt_time);
4420 streamer_write_uhwi (ob, es->loop_depth);
4421 write_predicate (ob, es->predicate);
4422 streamer_write_uhwi (ob, es->param.length ());
4423 for (i = 0; i < (int) es->param.length (); i++)
4424 streamer_write_uhwi (ob, es->param[i].change_prob);
4428 /* Write inline summaries for nodes in SET.
4429 Jump functions are shared among ipa-cp and inliner, so when ipa-cp is
4430 active, we don't need to write them twice. */
4432 void
4433 inline_write_summary (void)
4435 struct output_block *ob = create_output_block (LTO_section_inline_summary);
4436 lto_symtab_encoder_t encoder = ob->decl_state->symtab_node_encoder;
4437 unsigned int count = 0;
4438 int i;
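/* First count the analyzed nodes so the count can be streamed ahead of the
per-node records. */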
4440 for (i = 0; i < lto_symtab_encoder_size (encoder); i++)
4442 symtab_node *snode = lto_symtab_encoder_deref (encoder, i);
4443 cgraph_node *cnode = dyn_cast <cgraph_node *> (snode);
4444 if (cnode && cnode->definition && !cnode->alias)
4445 count++;
4447 streamer_write_uhwi (ob, count);
4449 for (i = 0; i < lto_symtab_encoder_size (encoder); i++)
4451 symtab_node *snode = lto_symtab_encoder_deref (encoder, i);
4452 cgraph_node *cnode = dyn_cast <cgraph_node *> (snode);
4453 if (cnode && cnode->definition && !cnode->alias)
4455 struct inline_summary *info = inline_summaries->get (cnode);
4456 struct bitpack_d bp;
4457 struct cgraph_edge *edge;
4458 int i;
4459 size_time_entry *e;
4460 struct condition *c;
4462 streamer_write_uhwi (ob, lto_symtab_encoder_encode (encoder, cnode));
4463 streamer_write_hwi (ob, info->estimated_self_stack_size);
4464 streamer_write_hwi (ob, info->self_size);
4465 streamer_write_hwi (ob, info->self_time);
4466 bp = bitpack_create (ob->main_stream);
4467 bp_pack_value (&bp, info->inlinable, 1);
4468 bp_pack_value (&bp, info->contains_cilk_spawn, 1);
4469 bp_pack_value (&bp, info->fp_expressions, 1);
4470 streamer_write_bitpack (&bp);
4471 streamer_write_uhwi (ob, vec_safe_length (info->conds));
4472 for (i = 0; vec_safe_iterate (info->conds, i, &c); i++)
4474 streamer_write_uhwi (ob, c->operand_num);
4475 streamer_write_uhwi (ob, c->size);
4476 streamer_write_uhwi (ob, c->code);
4477 stream_write_tree (ob, c->val, true);
4478 bp = bitpack_create (ob->main_stream);
4479 bp_pack_value (&bp, c->agg_contents, 1);
4480 bp_pack_value (&bp, c->by_ref, 1);
4481 streamer_write_bitpack (&bp);
4482 if (c->agg_contents)
4483 streamer_write_uhwi (ob, c->offset);
4485 streamer_write_uhwi (ob, vec_safe_length (info->entry));
4486 for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
4488 streamer_write_uhwi (ob, e->size);
4489 streamer_write_uhwi (ob, e->time);
4490 write_predicate (ob, &e->predicate);
4492 write_predicate (ob, info->loop_iterations);
4493 write_predicate (ob, info->loop_stride);
4494 write_predicate (ob, info->array_index);
4495 for (edge = cnode->callees; edge; edge = edge->next_callee)
4496 write_inline_edge_summary (ob, edge);
4497 for (edge = cnode->indirect_calls; edge; edge = edge->next_callee)
4498 write_inline_edge_summary (ob, edge);
4501 streamer_write_char_stream (ob->main_stream, 0);
4502 produce_asm (ob, NULL);
4503 destroy_output_block (ob);
4505 if (optimize && !flag_ipa_cp)
4506 ipa_prop_write_jump_functions ();
4510 /* Release inline summary. */
4512 void
4513 inline_free_summary (void)
4515 struct cgraph_node *node;
4516 if (edge_removal_hook_holder)
4517 symtab->remove_edge_removal_hook (edge_removal_hook_holder);
4518 edge_removal_hook_holder = NULL;
4519 if (edge_duplication_hook_holder)
4520 symtab->remove_edge_duplication_hook (edge_duplication_hook_holder);
4521 edge_duplication_hook_holder = NULL;
4522 if (!inline_edge_summary_vec.exists ())
4523 return;
4524 FOR_EACH_DEFINED_FUNCTION (node)
4525 if (!node->alias)
4526 reset_inline_summary (node, inline_summaries->get (node));
4527 inline_summaries->release ();
4528 inline_summaries = NULL;
4529 inline_edge_summary_vec.release ();
4530 edge_predicate_pool.release ();