gcc/ipa-inline-analysis.c
1 /* Inlining decision heuristics.
2 Copyright (C) 2003-2016 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* Analysis used by the inliner and other passes limiting code size growth.
23 We estimate for each function
24 - function body size
25 - average function execution time
26 - inlining size benefit (that is how much of function body size
27 and its call sequence is expected to disappear by inlining)
28 - inlining time benefit
29 - function frame size
30 For each call
31 - call statement size and time
33 inline_summary data structures store the above information locally (i.e.
34 parameters of the function itself) and globally (i.e. parameters of
35 the function created by applying all the inline decisions already
36 present in the callgraph).
38 We provide an accessor to the inline_summary data structure and
39 basic logic updating the parameters when inlining is performed.
41 The summaries are context sensitive. Context means
42 1) partial assignment of known constant values of operands
43 2) whether function is inlined into the call or not.
44 It is easy to add more variants. To represent function size and time
45 that depend on context (i.e. they are known to be optimized away when
46 context is known either by inlining or from IP-CP and cloning),
47 we use predicates. Predicates are logical formulas in
48 conjunctive-disjunctive form consisting of clauses. Clauses are bitmaps
49 specifying which conditions must be true. Conditions are simple tests
50 of the form described above.
52 In order to make predicate (possibly) true, all of its clauses must
53 be (possibly) true. To make clause (possibly) true, one of conditions
54 it mentions must be (possibly) true. There are fixed bounds on the
55 number of clauses and conditions, and all the manipulation functions
56 are conservative in the positive direction, i.e. we may lose precision
57 by thinking that a predicate may be true even when it is not.
59 estimate_edge_size and estimate_edge_growth can be used to query
60 function size/time in the given context. inline_merge_summary merges
61 properties of caller and callee after inlining.
63 Finally pass_inline_parameters is exported. This is used to drive
64 computation of function parameters used by the early inliner. The IPA
65 inliner performs analysis via its analyze_function method. */
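/* Illustrative sketch (added for exposition, not part of the original
   sources): with predicate_false_condition == 0,
   predicate_not_inlined_condition == 1 and two recorded dynamic conditions
   c0 = "op0 changed" (bit 2) and c1 = "op1 == 0" (bit 3), the predicate

       (op0 changed) && (op1 == 0 || not inlined)

   is stored as a zero-terminated vector of clause bitmaps kept in
   decreasing order:

       clause[0] = (1 << 3) | (1 << 1) = 0xa    op1 == 0 || not inlined
       clause[1] = (1 << 2)            = 0x4    op0 changed
       clause[2] = 0                            terminator

   A true predicate is the empty vector (clause[0] == 0) and a false
   predicate is the single clause 1 << predicate_false_condition.  */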
67 #include "config.h"
68 #include "system.h"
69 #include "coretypes.h"
70 #include "backend.h"
71 #include "tree.h"
72 #include "gimple.h"
73 #include "alloc-pool.h"
74 #include "tree-pass.h"
75 #include "ssa.h"
76 #include "tree-streamer.h"
77 #include "cgraph.h"
78 #include "diagnostic.h"
79 #include "fold-const.h"
80 #include "print-tree.h"
81 #include "tree-inline.h"
82 #include "gimple-pretty-print.h"
83 #include "params.h"
84 #include "cfganal.h"
85 #include "gimple-iterator.h"
86 #include "tree-cfg.h"
87 #include "tree-ssa-loop-niter.h"
88 #include "tree-ssa-loop.h"
89 #include "symbol-summary.h"
90 #include "ipa-prop.h"
91 #include "ipa-inline.h"
92 #include "cfgloop.h"
93 #include "tree-scalar-evolution.h"
94 #include "ipa-utils.h"
95 #include "cilk.h"
96 #include "cfgexpand.h"
97 #include "gimplify.h"
99 /* The estimated runtime of a function can easily run into huge numbers with many
100 nested loops. Be sure we can compute time * INLINE_SIZE_SCALE * 2 in an
101 integer. For anything larger we use gcov_type. */
102 #define MAX_TIME 500000
104 /* Number of bits in integer, but we really want to be stable across different
105 hosts. */
106 #define NUM_CONDITIONS 32
108 enum predicate_conditions
110 predicate_false_condition = 0,
111 predicate_not_inlined_condition = 1,
112 predicate_first_dynamic_condition = 2
115 /* Special condition code we use to represent test that operand is compile time
116 constant. */
117 #define IS_NOT_CONSTANT ERROR_MARK
118 /* Special condition code we use to represent the test that an operand is not changed
119 across the invocation of the function. When an operand IS_NOT_CONSTANT it is always
120 CHANGED; however, e.g. loop invariants can be NOT_CHANGED for a given percentage
121 of executions even when they are not compile time constants. */
122 #define CHANGED IDENTIFIER_NODE
124 /* Holders of ipa cgraph hooks: */
125 static struct cgraph_2edge_hook_list *edge_duplication_hook_holder;
126 static struct cgraph_edge_hook_list *edge_removal_hook_holder;
127 static void inline_edge_removal_hook (struct cgraph_edge *, void *);
128 static void inline_edge_duplication_hook (struct cgraph_edge *,
129 struct cgraph_edge *, void *);
131 /* VECtor holding inline summaries.
132 In GGC memory because conditions might point to constant trees. */
133 function_summary <inline_summary *> *inline_summaries;
134 vec<inline_edge_summary_t> inline_edge_summary_vec;
136 /* Cached node/edge growths. */
137 vec<edge_growth_cache_entry> edge_growth_cache;
139 /* Edge predicates go here. */
140 static object_allocator<predicate> edge_predicate_pool ("edge predicates");
142 /* Return true predicate (tautology).
143 We represent it by an empty list of clauses. */
145 static inline struct predicate
146 true_predicate (void)
148 struct predicate p;
149 p.clause[0] = 0;
150 return p;
154 /* Return predicate testing single condition number COND. */
156 static inline struct predicate
157 single_cond_predicate (int cond)
159 struct predicate p;
160 p.clause[0] = 1 << cond;
161 p.clause[1] = 0;
162 return p;
166 /* Return the false predicate. The first clause requires the false condition. */
168 static inline struct predicate
169 false_predicate (void)
171 return single_cond_predicate (predicate_false_condition);
175 /* Return true if P is (true). */
177 static inline bool
178 true_predicate_p (struct predicate *p)
180 return !p->clause[0];
184 /* Return true if P is (false). */
186 static inline bool
187 false_predicate_p (struct predicate *p)
189 if (p->clause[0] == (1 << predicate_false_condition))
191 gcc_checking_assert (!p->clause[1]
192 && p->clause[0] == 1 << predicate_false_condition);
193 return true;
195 return false;
199 /* Return predicate that is set true when function is not inlined. */
201 static inline struct predicate
202 not_inlined_predicate (void)
204 return single_cond_predicate (predicate_not_inlined_condition);
207 /* Simple description of whether a memory load or a condition refers to a load
208 from an aggregate and if so, how and where from in the aggregate.
209 Individual fields have the same meaning as fields with the same name in
210 struct condition. */
212 struct agg_position_info
214 HOST_WIDE_INT offset;
215 bool agg_contents;
216 bool by_ref;
219 /* Add a condition to the condition list of SUMMARY. AGGPOS describes whether the used
220 operand is loaded from an aggregate and where in the aggregate it is. It can
221 be NULL, which means this is not a load from an aggregate. */
223 static struct predicate
224 add_condition (struct inline_summary *summary, int operand_num,
225 struct agg_position_info *aggpos,
226 enum tree_code code, tree val)
228 int i;
229 struct condition *c;
230 struct condition new_cond;
231 HOST_WIDE_INT offset;
232 bool agg_contents, by_ref;
234 if (aggpos)
236 offset = aggpos->offset;
237 agg_contents = aggpos->agg_contents;
238 by_ref = aggpos->by_ref;
240 else
242 offset = 0;
243 agg_contents = false;
244 by_ref = false;
247 gcc_checking_assert (operand_num >= 0);
248 for (i = 0; vec_safe_iterate (summary->conds, i, &c); i++)
250 if (c->operand_num == operand_num
251 && c->code == code
252 && c->val == val
253 && c->agg_contents == agg_contents
254 && (!agg_contents || (c->offset == offset && c->by_ref == by_ref)))
255 return single_cond_predicate (i + predicate_first_dynamic_condition);
257 /* Too many conditions. Give up and return constant true. */
258 if (i == NUM_CONDITIONS - predicate_first_dynamic_condition)
259 return true_predicate ();
261 new_cond.operand_num = operand_num;
262 new_cond.code = code;
263 new_cond.val = val;
264 new_cond.agg_contents = agg_contents;
265 new_cond.by_ref = by_ref;
266 new_cond.offset = offset;
267 vec_safe_push (summary->conds, new_cond);
268 return single_cond_predicate (i + predicate_first_dynamic_condition);
272 /* Add clause CLAUSE into the predicate P. */
274 static inline void
275 add_clause (conditions conditions, struct predicate *p, clause_t clause)
277 int i;
278 int i2;
279 int insert_here = -1;
280 int c1, c2;
282 /* True clause. */
283 if (!clause)
284 return;
286 /* False clause makes the whole predicate false. Kill the other variants. */
287 if (clause == (1 << predicate_false_condition))
289 p->clause[0] = (1 << predicate_false_condition);
290 p->clause[1] = 0;
291 return;
293 if (false_predicate_p (p))
294 return;
296 /* No one should be silly enough to add false into nontrivial clauses. */
297 gcc_checking_assert (!(clause & (1 << predicate_false_condition)));
299 /* Look where to insert the clause. At the same time prune out
300 clauses of P that are implied by the new clause and thus
301 redundant. */
302 for (i = 0, i2 = 0; i <= MAX_CLAUSES; i++)
304 p->clause[i2] = p->clause[i];
306 if (!p->clause[i])
307 break;
309 /* If p->clause[i] implies clause, there is nothing to add. */
310 if ((p->clause[i] & clause) == p->clause[i])
312 /* We had nothing to add, so none of the clauses should have become
313 redundant. */
314 gcc_checking_assert (i == i2);
315 return;
318 if (p->clause[i] < clause && insert_here < 0)
319 insert_here = i2;
321 /* If clause implies p->clause[i], then p->clause[i] becomes redundant.
322 Otherwise the p->clause[i] has to stay. */
323 if ((p->clause[i] & clause) != clause)
324 i2++;
327 /* Look for clauses that are obviously true. I.e.
328 op0 == 5 || op0 != 5. */
329 for (c1 = predicate_first_dynamic_condition; c1 < NUM_CONDITIONS; c1++)
331 condition *cc1;
332 if (!(clause & (1 << c1)))
333 continue;
334 cc1 = &(*conditions)[c1 - predicate_first_dynamic_condition];
335 /* We have no way to represent !CHANGED and !IS_NOT_CONSTANT
336 and thus there is no point in looking for them. */
337 if (cc1->code == CHANGED || cc1->code == IS_NOT_CONSTANT)
338 continue;
339 for (c2 = c1 + 1; c2 < NUM_CONDITIONS; c2++)
340 if (clause & (1 << c2))
342 condition *cc1 =
343 &(*conditions)[c1 - predicate_first_dynamic_condition];
344 condition *cc2 =
345 &(*conditions)[c2 - predicate_first_dynamic_condition];
346 if (cc1->operand_num == cc2->operand_num
347 && cc1->val == cc2->val
348 && cc2->code != IS_NOT_CONSTANT
349 && cc2->code != CHANGED
350 && cc1->code == invert_tree_comparison (cc2->code,
351 HONOR_NANS (cc1->val)))
352 return;
357 /* We ran out of variants. Be conservative in the positive direction. */
358 if (i2 == MAX_CLAUSES)
359 return;
360 /* Keep clauses in decreasing order. This makes equivalence testing easy. */
361 p->clause[i2 + 1] = 0;
362 if (insert_here >= 0)
363 for (; i2 > insert_here; i2--)
364 p->clause[i2] = p->clause[i2 - 1];
365 else
366 insert_here = i2;
367 p->clause[insert_here] = clause;
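/* Worked example for add_clause (for exposition only, not part of the
   original sources): let c1, c2 and c3 be conditions with bits 2, 3 and 4.
   Starting from P = (c1) && (c2 || c3), i.e. clause[] = {0x18, 0x4, 0},
   adding the clause (c2) = 0x8 finds that 0x8 implies 0x18
   ((0x18 & 0x8) == 0x8), so the weaker clause 0x18 is dropped and 0x8 is
   inserted in decreasing order, giving P = (c2) && (c1),
   i.e. clause[] = {0x8, 0x4, 0}.  */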
371 /* Return P & P2. */
373 static struct predicate
374 and_predicates (conditions conditions,
375 struct predicate *p, struct predicate *p2)
377 struct predicate out = *p;
378 int i;
380 /* Avoid busy work. */
381 if (false_predicate_p (p2) || true_predicate_p (p))
382 return *p2;
383 if (false_predicate_p (p) || true_predicate_p (p2))
384 return *p;
386 /* See how far predicates match. */
387 for (i = 0; p->clause[i] && p->clause[i] == p2->clause[i]; i++)
389 gcc_checking_assert (i < MAX_CLAUSES);
392 /* Combine the rest of the predicates. */
393 for (; p2->clause[i]; i++)
395 gcc_checking_assert (i < MAX_CLAUSES);
396 add_clause (conditions, &out, p2->clause[i]);
398 return out;
402 /* Return true if predicates are obviously equal. */
404 static inline bool
405 predicates_equal_p (struct predicate *p, struct predicate *p2)
407 int i;
408 for (i = 0; p->clause[i]; i++)
410 gcc_checking_assert (i < MAX_CLAUSES);
411 gcc_checking_assert (p->clause[i] > p->clause[i + 1]);
412 gcc_checking_assert (!p2->clause[i]
413 || p2->clause[i] > p2->clause[i + 1]);
414 if (p->clause[i] != p2->clause[i])
415 return false;
417 return !p2->clause[i];
421 /* Return P | P2. */
423 static struct predicate
424 or_predicates (conditions conditions,
425 struct predicate *p, struct predicate *p2)
427 struct predicate out = true_predicate ();
428 int i, j;
430 /* Avoid busy work. */
431 if (false_predicate_p (p2) || true_predicate_p (p))
432 return *p;
433 if (false_predicate_p (p) || true_predicate_p (p2))
434 return *p2;
435 if (predicates_equal_p (p, p2))
436 return *p;
438 /* OK, combine the predicates. */
439 for (i = 0; p->clause[i]; i++)
440 for (j = 0; p2->clause[j]; j++)
442 gcc_checking_assert (i < MAX_CLAUSES && j < MAX_CLAUSES);
443 add_clause (conditions, &out, p->clause[i] | p2->clause[j]);
445 return out;
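/* Worked example for or_predicates (for exposition only): OR distributes
   over the conjunctive form, so with P = (A) && (B) and P2 = (C) the
   result is (A || C) && (B || C); every clause of P is OR-ed bitwise with
   every clause of P2 and the results are combined via add_clause, which
   also prunes redundant clauses and clauses that are trivially true.  */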
449 /* Given the partial truth assignment in POSSIBLE_TRUTHS, return false
450 if predicate P is known to be false. */
452 static bool
453 evaluate_predicate (struct predicate *p, clause_t possible_truths)
455 int i;
457 /* True remains true. */
458 if (true_predicate_p (p))
459 return true;
461 gcc_assert (!(possible_truths & (1 << predicate_false_condition)));
463 /* See if we can find a clause we can disprove. */
464 for (i = 0; p->clause[i]; i++)
466 gcc_checking_assert (i < MAX_CLAUSES);
467 if (!(p->clause[i] & possible_truths))
468 return false;
470 return true;
473 /* Return the probability in range 0...REG_BR_PROB_BASE that the predicated
474 instruction will be recomputed per invocation of the inlined call. */
476 static int
477 predicate_probability (conditions conds,
478 struct predicate *p, clause_t possible_truths,
479 vec<inline_param_summary> inline_param_summary)
481 int i;
482 int combined_prob = REG_BR_PROB_BASE;
484 /* True remains true. */
485 if (true_predicate_p (p))
486 return REG_BR_PROB_BASE;
488 if (false_predicate_p (p))
489 return 0;
491 gcc_assert (!(possible_truths & (1 << predicate_false_condition)));
493 /* See if we can find a clause we can disprove. */
494 for (i = 0; p->clause[i]; i++)
496 gcc_checking_assert (i < MAX_CLAUSES);
497 if (!(p->clause[i] & possible_truths))
498 return 0;
499 else
501 int this_prob = 0;
502 int i2;
503 if (!inline_param_summary.exists ())
504 return REG_BR_PROB_BASE;
505 for (i2 = 0; i2 < NUM_CONDITIONS; i2++)
506 if ((p->clause[i] & possible_truths) & (1 << i2))
508 if (i2 >= predicate_first_dynamic_condition)
510 condition *c =
511 &(*conds)[i2 - predicate_first_dynamic_condition];
512 if (c->code == CHANGED
513 && (c->operand_num <
514 (int) inline_param_summary.length ()))
516 int iprob =
517 inline_param_summary[c->operand_num].change_prob;
518 this_prob = MAX (this_prob, iprob);
520 else
521 this_prob = REG_BR_PROB_BASE;
523 else
524 this_prob = REG_BR_PROB_BASE;
526 combined_prob = MIN (this_prob, combined_prob);
527 if (!combined_prob)
528 return 0;
531 return combined_prob;
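/* Illustrative example for predicate_probability (for exposition only,
   the values are made up): for the one-clause predicate (op0 changed),
   when the bit of that condition is among POSSIBLE_TRUTHS and the
   caller's inline_param_summary[0].change_prob is REG_BR_PROB_BASE / 2,
   the clause probability is REG_BR_PROB_BASE / 2 and so is the combined
   result, i.e. the guarded statement is expected to be re-evaluated in
   roughly half of the invocations of the inlined call.  */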
535 /* Dump conditional COND. */
537 static void
538 dump_condition (FILE *f, conditions conditions, int cond)
540 condition *c;
541 if (cond == predicate_false_condition)
542 fprintf (f, "false");
543 else if (cond == predicate_not_inlined_condition)
544 fprintf (f, "not inlined");
545 else
547 c = &(*conditions)[cond - predicate_first_dynamic_condition];
548 fprintf (f, "op%i", c->operand_num);
549 if (c->agg_contents)
550 fprintf (f, "[%soffset: " HOST_WIDE_INT_PRINT_DEC "]",
551 c->by_ref ? "ref " : "", c->offset);
552 if (c->code == IS_NOT_CONSTANT)
554 fprintf (f, " not constant");
555 return;
557 if (c->code == CHANGED)
559 fprintf (f, " changed");
560 return;
562 fprintf (f, " %s ", op_symbol_code (c->code));
563 print_generic_expr (f, c->val, 1);
568 /* Dump clause CLAUSE. */
570 static void
571 dump_clause (FILE *f, conditions conds, clause_t clause)
573 int i;
574 bool found = false;
575 fprintf (f, "(");
576 if (!clause)
577 fprintf (f, "true");
578 for (i = 0; i < NUM_CONDITIONS; i++)
579 if (clause & (1 << i))
581 if (found)
582 fprintf (f, " || ");
583 found = true;
584 dump_condition (f, conds, i);
586 fprintf (f, ")");
590 /* Dump predicate PREDICATE. */
592 static void
593 dump_predicate (FILE *f, conditions conds, struct predicate *pred)
595 int i;
596 if (true_predicate_p (pred))
597 dump_clause (f, conds, 0);
598 else
599 for (i = 0; pred->clause[i]; i++)
601 if (i)
602 fprintf (f, " && ");
603 dump_clause (f, conds, pred->clause[i]);
605 fprintf (f, "\n");
609 /* Dump inline hints. */
610 void
611 dump_inline_hints (FILE *f, inline_hints hints)
613 if (!hints)
614 return;
615 fprintf (f, "inline hints:");
616 if (hints & INLINE_HINT_indirect_call)
618 hints &= ~INLINE_HINT_indirect_call;
619 fprintf (f, " indirect_call");
621 if (hints & INLINE_HINT_loop_iterations)
623 hints &= ~INLINE_HINT_loop_iterations;
624 fprintf (f, " loop_iterations");
626 if (hints & INLINE_HINT_loop_stride)
628 hints &= ~INLINE_HINT_loop_stride;
629 fprintf (f, " loop_stride");
631 if (hints & INLINE_HINT_same_scc)
633 hints &= ~INLINE_HINT_same_scc;
634 fprintf (f, " same_scc");
636 if (hints & INLINE_HINT_in_scc)
638 hints &= ~INLINE_HINT_in_scc;
639 fprintf (f, " in_scc");
641 if (hints & INLINE_HINT_cross_module)
643 hints &= ~INLINE_HINT_cross_module;
644 fprintf (f, " cross_module");
646 if (hints & INLINE_HINT_declared_inline)
648 hints &= ~INLINE_HINT_declared_inline;
649 fprintf (f, " declared_inline");
651 if (hints & INLINE_HINT_array_index)
653 hints &= ~INLINE_HINT_array_index;
654 fprintf (f, " array_index");
656 if (hints & INLINE_HINT_known_hot)
658 hints &= ~INLINE_HINT_known_hot;
659 fprintf (f, " known_hot");
661 gcc_assert (!hints);
665 /* Record SIZE and TIME under condition PRED into the inline summary. */
667 static void
668 account_size_time (struct inline_summary *summary, int size, int time,
669 struct predicate *pred)
671 size_time_entry *e;
672 bool found = false;
673 int i;
675 if (false_predicate_p (pred))
676 return;
678 /* We need to create an initial empty unconditional clause, but otherwise
679 we don't need to account empty times and sizes. */
680 if (!size && !time && summary->entry)
681 return;
683 /* Watch overflow that might result from insane profiles. */
684 if (time > MAX_TIME * INLINE_TIME_SCALE)
685 time = MAX_TIME * INLINE_TIME_SCALE;
686 gcc_assert (time >= 0);
688 for (i = 0; vec_safe_iterate (summary->entry, i, &e); i++)
689 if (predicates_equal_p (&e->predicate, pred))
691 found = true;
692 break;
694 if (i == 256)
696 i = 0;
697 found = true;
698 e = &(*summary->entry)[0];
699 gcc_assert (!e->predicate.clause[0]);
700 if (dump_file && (dump_flags & TDF_DETAILS))
701 fprintf (dump_file,
702 "\t\tReached limit on number of entries, "
703 "ignoring the predicate.");
705 if (dump_file && (dump_flags & TDF_DETAILS) && (time || size))
707 fprintf (dump_file,
708 "\t\tAccounting size:%3.2f, time:%3.2f on %spredicate:",
709 ((double) size) / INLINE_SIZE_SCALE,
710 ((double) time) / INLINE_TIME_SCALE, found ? "" : "new ");
711 dump_predicate (dump_file, summary->conds, pred);
713 if (!found)
715 struct size_time_entry new_entry;
716 new_entry.size = size;
717 new_entry.time = time;
718 new_entry.predicate = *pred;
719 vec_safe_push (summary->entry, new_entry);
721 else
723 e->size += size;
724 e->time += time;
725 if (e->time > MAX_TIME * INLINE_TIME_SCALE)
726 e->time = MAX_TIME * INLINE_TIME_SCALE;
730 /* We proved E to be unreachable, redirect it to __builtin_unreachable. */
732 static struct cgraph_edge *
733 redirect_to_unreachable (struct cgraph_edge *e)
735 struct cgraph_node *callee = !e->inline_failed ? e->callee : NULL;
736 struct cgraph_node *target = cgraph_node::get_create
737 (builtin_decl_implicit (BUILT_IN_UNREACHABLE));
739 if (e->speculative)
740 e = e->resolve_speculation (target->decl);
741 else if (!e->callee)
742 e->make_direct (target);
743 else
744 e->redirect_callee (target);
745 struct inline_edge_summary *es = inline_edge_summary (e);
746 e->inline_failed = CIF_UNREACHABLE;
747 e->frequency = 0;
748 e->count = 0;
749 es->call_stmt_size = 0;
750 es->call_stmt_time = 0;
751 if (callee)
752 callee->remove_symbol_and_inline_clones ();
753 return e;
756 /* Set predicate for edge E. */
758 static void
759 edge_set_predicate (struct cgraph_edge *e, struct predicate *predicate)
761 /* If the edge is determined to be never executed, redirect it
762 to BUILTIN_UNREACHABLE to save inliner from inlining into it. */
763 if (predicate && false_predicate_p (predicate)
764 /* When handling speculative edges, we need to do the redirection
765 just once. Do it always on the direct edge, so we do not
766 attempt to resolve speculation while duplicating the edge. */
767 && (!e->speculative || e->callee))
768 e = redirect_to_unreachable (e);
770 struct inline_edge_summary *es = inline_edge_summary (e);
771 if (predicate && !true_predicate_p (predicate))
773 if (!es->predicate)
774 es->predicate = edge_predicate_pool.allocate ();
775 *es->predicate = *predicate;
777 else
779 if (es->predicate)
780 edge_predicate_pool.remove (es->predicate);
781 es->predicate = NULL;
785 /* Set predicate for hint *P. */
787 static void
788 set_hint_predicate (struct predicate **p, struct predicate new_predicate)
790 if (false_predicate_p (&new_predicate) || true_predicate_p (&new_predicate))
792 if (*p)
793 edge_predicate_pool.remove (*p);
794 *p = NULL;
796 else
798 if (!*p)
799 *p = edge_predicate_pool.allocate ();
800 **p = new_predicate;
805 /* KNOWN_VALS is a partial mapping of parameters of NODE to constant values.
806 KNOWN_AGGS is a vector of aggregate jump functions for each parameter.
807 Return the clause of possible truths. When INLINE_P is true, assume that we are
808 inlining.
810 ERROR_MARK means compile time invariant. */
812 static clause_t
813 evaluate_conditions_for_known_args (struct cgraph_node *node,
814 bool inline_p,
815 vec<tree> known_vals,
816 vec<ipa_agg_jump_function_p>
817 known_aggs)
819 clause_t clause = inline_p ? 0 : 1 << predicate_not_inlined_condition;
820 struct inline_summary *info = inline_summaries->get (node);
821 int i;
822 struct condition *c;
824 for (i = 0; vec_safe_iterate (info->conds, i, &c); i++)
826 tree val;
827 tree res;
829 /* We allow call stmt to have fewer arguments than the callee function
830 (especially for K&R style programs). So bound check here (we assume
831 known_aggs vector, if non-NULL, has the same length as
832 known_vals). */
833 gcc_checking_assert (!known_aggs.exists ()
834 || (known_vals.length () == known_aggs.length ()));
835 if (c->operand_num >= (int) known_vals.length ())
837 clause |= 1 << (i + predicate_first_dynamic_condition);
838 continue;
841 if (c->agg_contents)
843 struct ipa_agg_jump_function *agg;
845 if (c->code == CHANGED
846 && !c->by_ref
847 && (known_vals[c->operand_num] == error_mark_node))
848 continue;
850 if (known_aggs.exists ())
852 agg = known_aggs[c->operand_num];
853 val = ipa_find_agg_cst_for_param (agg, c->offset, c->by_ref);
855 else
856 val = NULL_TREE;
858 else
860 val = known_vals[c->operand_num];
861 if (val == error_mark_node && c->code != CHANGED)
862 val = NULL_TREE;
865 if (!val)
867 clause |= 1 << (i + predicate_first_dynamic_condition);
868 continue;
870 if (c->code == IS_NOT_CONSTANT || c->code == CHANGED)
871 continue;
873 if (operand_equal_p (TYPE_SIZE (TREE_TYPE (c->val)),
874 TYPE_SIZE (TREE_TYPE (val)), 0))
876 val = fold_unary (VIEW_CONVERT_EXPR, TREE_TYPE (c->val), val);
878 res = val
879 ? fold_binary_to_constant (c->code, boolean_type_node, val, c->val)
880 : NULL;
882 if (res && integer_zerop (res))
883 continue;
885 clause |= 1 << (i + predicate_first_dynamic_condition);
887 return clause;
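/* Illustrative example (for exposition only): assume a single recorded
   condition c0 = "op0 == 0" and INLINE_P false.  With KNOWN_VALS[0] equal
   to the constant 1 the condition folds to false, so only the not-inlined
   bit is returned: clause == 1 << predicate_not_inlined_condition.  With
   KNOWN_VALS[0] unknown (NULL) the condition cannot be disproved and
   clause == (1 << predicate_not_inlined_condition)
	     | (1 << predicate_first_dynamic_condition).  */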
891 /* Work out what conditions might be true at invocation of E. */
893 static void
894 evaluate_properties_for_edge (struct cgraph_edge *e, bool inline_p,
895 clause_t *clause_ptr,
896 vec<tree> *known_vals_ptr,
897 vec<ipa_polymorphic_call_context>
898 *known_contexts_ptr,
899 vec<ipa_agg_jump_function_p> *known_aggs_ptr)
901 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
902 struct inline_summary *info = inline_summaries->get (callee);
903 vec<tree> known_vals = vNULL;
904 vec<ipa_agg_jump_function_p> known_aggs = vNULL;
906 if (clause_ptr)
907 *clause_ptr = inline_p ? 0 : 1 << predicate_not_inlined_condition;
908 if (known_vals_ptr)
909 known_vals_ptr->create (0);
910 if (known_contexts_ptr)
911 known_contexts_ptr->create (0);
913 if (ipa_node_params_sum
914 && !e->call_stmt_cannot_inline_p
915 && ((clause_ptr && info->conds) || known_vals_ptr || known_contexts_ptr))
917 struct ipa_node_params *parms_info;
918 struct ipa_edge_args *args = IPA_EDGE_REF (e);
919 struct inline_edge_summary *es = inline_edge_summary (e);
920 int i, count = ipa_get_cs_argument_count (args);
922 if (e->caller->global.inlined_to)
923 parms_info = IPA_NODE_REF (e->caller->global.inlined_to);
924 else
925 parms_info = IPA_NODE_REF (e->caller);
927 if (count && (info->conds || known_vals_ptr))
928 known_vals.safe_grow_cleared (count);
929 if (count && (info->conds || known_aggs_ptr))
930 known_aggs.safe_grow_cleared (count);
931 if (count && known_contexts_ptr)
932 known_contexts_ptr->safe_grow_cleared (count);
934 for (i = 0; i < count; i++)
936 struct ipa_jump_func *jf = ipa_get_ith_jump_func (args, i);
937 tree cst = ipa_value_from_jfunc (parms_info, jf);
939 if (!cst && e->call_stmt
940 && i < (int)gimple_call_num_args (e->call_stmt))
942 cst = gimple_call_arg (e->call_stmt, i);
943 if (!is_gimple_min_invariant (cst))
944 cst = NULL;
946 if (cst)
948 gcc_checking_assert (TREE_CODE (cst) != TREE_BINFO);
949 if (known_vals.exists ())
950 known_vals[i] = cst;
952 else if (inline_p && !es->param[i].change_prob)
953 known_vals[i] = error_mark_node;
955 if (known_contexts_ptr)
956 (*known_contexts_ptr)[i] = ipa_context_from_jfunc (parms_info, e,
957 i, jf);
958 /* TODO: When IPA-CP starts propagating and merging aggregate jump
959 functions, use its knowledge of the caller too, just like the
960 scalar case above. */
961 known_aggs[i] = &jf->agg;
964 else if (e->call_stmt && !e->call_stmt_cannot_inline_p
965 && ((clause_ptr && info->conds) || known_vals_ptr))
967 int i, count = (int)gimple_call_num_args (e->call_stmt);
969 if (count && (info->conds || known_vals_ptr))
970 known_vals.safe_grow_cleared (count);
971 for (i = 0; i < count; i++)
973 tree cst = gimple_call_arg (e->call_stmt, i);
974 if (!is_gimple_min_invariant (cst))
975 cst = NULL;
976 if (cst)
977 known_vals[i] = cst;
981 if (clause_ptr)
982 *clause_ptr = evaluate_conditions_for_known_args (callee, inline_p,
983 known_vals, known_aggs);
985 if (known_vals_ptr)
986 *known_vals_ptr = known_vals;
987 else
988 known_vals.release ();
990 if (known_aggs_ptr)
991 *known_aggs_ptr = known_aggs;
992 else
993 known_aggs.release ();
997 /* Allocate the inline summary vector or resize it to cover all cgraph nodes. */
999 static void
1000 inline_summary_alloc (void)
1002 if (!edge_removal_hook_holder)
1003 edge_removal_hook_holder =
1004 symtab->add_edge_removal_hook (&inline_edge_removal_hook, NULL);
1005 if (!edge_duplication_hook_holder)
1006 edge_duplication_hook_holder =
1007 symtab->add_edge_duplication_hook (&inline_edge_duplication_hook, NULL);
1009 if (!inline_summaries)
1010 inline_summaries = (inline_summary_t*) inline_summary_t::create_ggc (symtab);
1012 if (inline_edge_summary_vec.length () <= (unsigned) symtab->edges_max_uid)
1013 inline_edge_summary_vec.safe_grow_cleared (symtab->edges_max_uid + 1);
1016 /* We are called multiple times for a given function; clear
1017 data from the previous run so they are not accumulated. */
1019 static void
1020 reset_inline_edge_summary (struct cgraph_edge *e)
1022 if (e->uid < (int) inline_edge_summary_vec.length ())
1024 struct inline_edge_summary *es = inline_edge_summary (e);
1026 es->call_stmt_size = es->call_stmt_time = 0;
1027 if (es->predicate)
1028 edge_predicate_pool.remove (es->predicate);
1029 es->predicate = NULL;
1030 es->param.release ();
1034 /* We are called multiple times for a given function; clear
1035 data from the previous run so they are not accumulated. */
1037 static void
1038 reset_inline_summary (struct cgraph_node *node,
1039 inline_summary *info)
1041 struct cgraph_edge *e;
1043 info->self_size = info->self_time = 0;
1044 info->estimated_stack_size = 0;
1045 info->estimated_self_stack_size = 0;
1046 info->stack_frame_offset = 0;
1047 info->size = 0;
1048 info->time = 0;
1049 info->growth = 0;
1050 info->scc_no = 0;
1051 if (info->loop_iterations)
1053 edge_predicate_pool.remove (info->loop_iterations);
1054 info->loop_iterations = NULL;
1056 if (info->loop_stride)
1058 edge_predicate_pool.remove (info->loop_stride);
1059 info->loop_stride = NULL;
1061 if (info->array_index)
1063 edge_predicate_pool.remove (info->array_index);
1064 info->array_index = NULL;
1066 vec_free (info->conds);
1067 vec_free (info->entry);
1068 for (e = node->callees; e; e = e->next_callee)
1069 reset_inline_edge_summary (e);
1070 for (e = node->indirect_calls; e; e = e->next_callee)
1071 reset_inline_edge_summary (e);
1074 /* Hook that is called by cgraph.c when a node is removed. */
1076 void
1077 inline_summary_t::remove (cgraph_node *node, inline_summary *info)
1079 reset_inline_summary (node, info);
1082 /* Remap predicate P of the former function to be a predicate of the duplicated function.
1083 POSSIBLE_TRUTHS is the clause of possible truths in the duplicated node,
1084 INFO is the inline summary of the duplicated node. */
1086 static struct predicate
1087 remap_predicate_after_duplication (struct predicate *p,
1088 clause_t possible_truths,
1089 struct inline_summary *info)
1091 struct predicate new_predicate = true_predicate ();
1092 int j;
1093 for (j = 0; p->clause[j]; j++)
1094 if (!(possible_truths & p->clause[j]))
1096 new_predicate = false_predicate ();
1097 break;
1099 else
1100 add_clause (info->conds, &new_predicate,
1101 possible_truths & p->clause[j]);
1102 return new_predicate;
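/* Illustrative example (for exposition only): with POSSIBLE_TRUTHS
   containing the bits of c2 and c4 only, the predicate
   (c2 || c3) && (c4) is remapped to (c2) && (c4), because each clause is
   intersected with POSSIBLE_TRUTHS; if POSSIBLE_TRUTHS did not contain
   c4 at all, the second clause would become empty and the whole
   predicate would be remapped to the false predicate.  */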
1105 /* Same as remap_predicate_after_duplication but handle hint predicate *P.
1106 Additionally take care of allocating a new memory slot for the updated predicate
1107 and set it to NULL when it becomes true or false (and thus uninteresting). */
1110 static void
1111 remap_hint_predicate_after_duplication (struct predicate **p,
1112 clause_t possible_truths,
1113 struct inline_summary *info)
1115 struct predicate new_predicate;
1117 if (!*p)
1118 return;
1120 new_predicate = remap_predicate_after_duplication (*p,
1121 possible_truths, info);
1122 /* We do not want to free previous predicate; it is used by node origin. */
1123 *p = NULL;
1124 set_hint_predicate (p, new_predicate);
1128 /* Hook that is called by cgraph.c when a node is duplicated. */
1129 void
1130 inline_summary_t::duplicate (cgraph_node *src,
1131 cgraph_node *dst,
1132 inline_summary *,
1133 inline_summary *info)
1135 inline_summary_alloc ();
1136 memcpy (info, inline_summaries->get (src), sizeof (inline_summary));
1137 /* TODO: as an optimization, we may avoid copying conditions
1138 that are known to be false or true. */
1139 info->conds = vec_safe_copy (info->conds);
1141 /* When there are any replacements in the function body, see if we can figure
1142 out that something was optimized out. */
1143 if (ipa_node_params_sum && dst->clone.tree_map)
1145 vec<size_time_entry, va_gc> *entry = info->entry;
1146 /* Use SRC parm info since it may not be copied yet. */
1147 struct ipa_node_params *parms_info = IPA_NODE_REF (src);
1148 vec<tree> known_vals = vNULL;
1149 int count = ipa_get_param_count (parms_info);
1150 int i, j;
1151 clause_t possible_truths;
1152 struct predicate true_pred = true_predicate ();
1153 size_time_entry *e;
1154 int optimized_out_size = 0;
1155 bool inlined_to_p = false;
1156 struct cgraph_edge *edge, *next;
1158 info->entry = 0;
1159 known_vals.safe_grow_cleared (count);
1160 for (i = 0; i < count; i++)
1162 struct ipa_replace_map *r;
1164 for (j = 0; vec_safe_iterate (dst->clone.tree_map, j, &r); j++)
1166 if (((!r->old_tree && r->parm_num == i)
1167 || (r->old_tree && r->old_tree == ipa_get_param (parms_info, i)))
1168 && r->replace_p && !r->ref_p)
1170 known_vals[i] = r->new_tree;
1171 break;
1175 possible_truths = evaluate_conditions_for_known_args (dst, false,
1176 known_vals,
1177 vNULL);
1178 known_vals.release ();
1180 account_size_time (info, 0, 0, &true_pred);
1182 /* Remap size_time vectors.
1183 Simplify the predicate by pruning out alternatives that are known
1184 to be false.
1185 TODO: as an optimization, we can also eliminate conditions known
1186 to be true. */
1187 for (i = 0; vec_safe_iterate (entry, i, &e); i++)
1189 struct predicate new_predicate;
1190 new_predicate = remap_predicate_after_duplication (&e->predicate,
1191 possible_truths,
1192 info);
1193 if (false_predicate_p (&new_predicate))
1194 optimized_out_size += e->size;
1195 else
1196 account_size_time (info, e->size, e->time, &new_predicate);
1199 /* Remap edge predicates with the same simplification as above.
1200 Also copy constantness arrays. */
1201 for (edge = dst->callees; edge; edge = next)
1203 struct predicate new_predicate;
1204 struct inline_edge_summary *es = inline_edge_summary (edge);
1205 next = edge->next_callee;
1207 if (!edge->inline_failed)
1208 inlined_to_p = true;
1209 if (!es->predicate)
1210 continue;
1211 new_predicate = remap_predicate_after_duplication (es->predicate,
1212 possible_truths,
1213 info);
1214 if (false_predicate_p (&new_predicate)
1215 && !false_predicate_p (es->predicate))
1216 optimized_out_size += es->call_stmt_size * INLINE_SIZE_SCALE;
1217 edge_set_predicate (edge, &new_predicate);
1220 /* Remap indirect edge predicates with the same simplification as above.
1221 Also copy constantness arrays. */
1222 for (edge = dst->indirect_calls; edge; edge = next)
1224 struct predicate new_predicate;
1225 struct inline_edge_summary *es = inline_edge_summary (edge);
1226 next = edge->next_callee;
1228 gcc_checking_assert (edge->inline_failed);
1229 if (!es->predicate)
1230 continue;
1231 new_predicate = remap_predicate_after_duplication (es->predicate,
1232 possible_truths,
1233 info);
1234 if (false_predicate_p (&new_predicate)
1235 && !false_predicate_p (es->predicate))
1236 optimized_out_size += es->call_stmt_size * INLINE_SIZE_SCALE;
1237 edge_set_predicate (edge, &new_predicate);
1239 remap_hint_predicate_after_duplication (&info->loop_iterations,
1240 possible_truths, info);
1241 remap_hint_predicate_after_duplication (&info->loop_stride,
1242 possible_truths, info);
1243 remap_hint_predicate_after_duplication (&info->array_index,
1244 possible_truths, info);
1246 /* If the inliner or someone after the inliner ever starts producing
1247 non-trivial clones, we will get into trouble with the lack of information
1248 about updating self sizes, because the size vectors already contain the
1249 sizes of the callees. */
1250 gcc_assert (!inlined_to_p || !optimized_out_size);
1252 else
1254 info->entry = vec_safe_copy (info->entry);
1255 if (info->loop_iterations)
1257 predicate p = *info->loop_iterations;
1258 info->loop_iterations = NULL;
1259 set_hint_predicate (&info->loop_iterations, p);
1261 if (info->loop_stride)
1263 predicate p = *info->loop_stride;
1264 info->loop_stride = NULL;
1265 set_hint_predicate (&info->loop_stride, p);
1267 if (info->array_index)
1269 predicate p = *info->array_index;
1270 info->array_index = NULL;
1271 set_hint_predicate (&info->array_index, p);
1274 if (!dst->global.inlined_to)
1275 inline_update_overall_summary (dst);
1279 /* Hook that is called by cgraph.c when a node is duplicated. */
1281 static void
1282 inline_edge_duplication_hook (struct cgraph_edge *src,
1283 struct cgraph_edge *dst,
1284 ATTRIBUTE_UNUSED void *data)
1286 struct inline_edge_summary *info;
1287 struct inline_edge_summary *srcinfo;
1288 inline_summary_alloc ();
1289 info = inline_edge_summary (dst);
1290 srcinfo = inline_edge_summary (src);
1291 memcpy (info, srcinfo, sizeof (struct inline_edge_summary));
1292 info->predicate = NULL;
1293 edge_set_predicate (dst, srcinfo->predicate);
1294 info->param = srcinfo->param.copy ();
1295 if (!dst->indirect_unknown_callee && src->indirect_unknown_callee)
1297 info->call_stmt_size -= (eni_size_weights.indirect_call_cost
1298 - eni_size_weights.call_cost);
1299 info->call_stmt_time -= (eni_time_weights.indirect_call_cost
1300 - eni_time_weights.call_cost);
1305 /* Keep edge cache consistent across edge removal. */
1307 static void
1308 inline_edge_removal_hook (struct cgraph_edge *edge,
1309 void *data ATTRIBUTE_UNUSED)
1311 if (edge_growth_cache.exists ())
1312 reset_edge_growth_cache (edge);
1313 reset_inline_edge_summary (edge);
1317 /* Initialize growth caches. */
1319 void
1320 initialize_growth_caches (void)
1322 if (symtab->edges_max_uid)
1323 edge_growth_cache.safe_grow_cleared (symtab->edges_max_uid);
1327 /* Free growth caches. */
1329 void
1330 free_growth_caches (void)
1332 edge_growth_cache.release ();
1336 /* Dump edge summaries associated with NODE and recursively to all clones.
1337 Indent by INDENT. */
1339 static void
1340 dump_inline_edge_summary (FILE *f, int indent, struct cgraph_node *node,
1341 struct inline_summary *info)
1343 struct cgraph_edge *edge;
1344 for (edge = node->callees; edge; edge = edge->next_callee)
1346 struct inline_edge_summary *es = inline_edge_summary (edge);
1347 struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
1348 int i;
1350 fprintf (f,
1351 "%*s%s/%i %s\n%*s loop depth:%2i freq:%4i size:%2i"
1352 " time: %2i callee size:%2i stack:%2i",
1353 indent, "", callee->name (), callee->order,
1354 !edge->inline_failed
1355 ? "inlined" : cgraph_inline_failed_string (edge-> inline_failed),
1356 indent, "", es->loop_depth, edge->frequency,
1357 es->call_stmt_size, es->call_stmt_time,
1358 (int) inline_summaries->get (callee)->size / INLINE_SIZE_SCALE,
1359 (int) inline_summaries->get (callee)->estimated_stack_size);
1361 if (es->predicate)
1363 fprintf (f, " predicate: ");
1364 dump_predicate (f, info->conds, es->predicate);
1366 else
1367 fprintf (f, "\n");
1368 if (es->param.exists ())
1369 for (i = 0; i < (int) es->param.length (); i++)
1371 int prob = es->param[i].change_prob;
1373 if (!prob)
1374 fprintf (f, "%*s op%i is compile time invariant\n",
1375 indent + 2, "", i);
1376 else if (prob != REG_BR_PROB_BASE)
1377 fprintf (f, "%*s op%i change %f%% of time\n", indent + 2, "", i,
1378 prob * 100.0 / REG_BR_PROB_BASE);
1380 if (!edge->inline_failed)
1382 fprintf (f, "%*sStack frame offset %i, callee self size %i,"
1383 " callee size %i\n",
1384 indent + 2, "",
1385 (int) inline_summaries->get (callee)->stack_frame_offset,
1386 (int) inline_summaries->get (callee)->estimated_self_stack_size,
1387 (int) inline_summaries->get (callee)->estimated_stack_size);
1388 dump_inline_edge_summary (f, indent + 2, callee, info);
1391 for (edge = node->indirect_calls; edge; edge = edge->next_callee)
1393 struct inline_edge_summary *es = inline_edge_summary (edge);
1394 fprintf (f, "%*sindirect call loop depth:%2i freq:%4i size:%2i"
1395 " time: %2i",
1396 indent, "",
1397 es->loop_depth,
1398 edge->frequency, es->call_stmt_size, es->call_stmt_time);
1399 if (es->predicate)
1401 fprintf (f, "predicate: ");
1402 dump_predicate (f, info->conds, es->predicate);
1404 else
1405 fprintf (f, "\n");
1410 void
1411 dump_inline_summary (FILE *f, struct cgraph_node *node)
1413 if (node->definition)
1415 struct inline_summary *s = inline_summaries->get (node);
1416 size_time_entry *e;
1417 int i;
1418 fprintf (f, "Inline summary for %s/%i", node->name (),
1419 node->order);
1420 if (DECL_DISREGARD_INLINE_LIMITS (node->decl))
1421 fprintf (f, " always_inline");
1422 if (s->inlinable)
1423 fprintf (f, " inlinable");
1424 if (s->contains_cilk_spawn)
1425 fprintf (f, " contains_cilk_spawn");
1426 fprintf (f, "\n self time: %i\n", s->self_time);
1427 fprintf (f, " global time: %i\n", s->time);
1428 fprintf (f, " self size: %i\n", s->self_size);
1429 fprintf (f, " global size: %i\n", s->size);
1430 fprintf (f, " min size: %i\n", s->min_size);
1431 fprintf (f, " self stack: %i\n",
1432 (int) s->estimated_self_stack_size);
1433 fprintf (f, " global stack: %i\n", (int) s->estimated_stack_size);
1434 if (s->growth)
1435 fprintf (f, " estimated growth:%i\n", (int) s->growth);
1436 if (s->scc_no)
1437 fprintf (f, " In SCC: %i\n", (int) s->scc_no);
1438 for (i = 0; vec_safe_iterate (s->entry, i, &e); i++)
1440 fprintf (f, " size:%f, time:%f, predicate:",
1441 (double) e->size / INLINE_SIZE_SCALE,
1442 (double) e->time / INLINE_TIME_SCALE);
1443 dump_predicate (f, s->conds, &e->predicate);
1445 if (s->loop_iterations)
1447 fprintf (f, " loop iterations:");
1448 dump_predicate (f, s->conds, s->loop_iterations);
1450 if (s->loop_stride)
1452 fprintf (f, " loop stride:");
1453 dump_predicate (f, s->conds, s->loop_stride);
1455 if (s->array_index)
1457 fprintf (f, " array index:");
1458 dump_predicate (f, s->conds, s->array_index);
1460 fprintf (f, " calls:\n");
1461 dump_inline_edge_summary (f, 4, node, s);
1462 fprintf (f, "\n");
1466 DEBUG_FUNCTION void
1467 debug_inline_summary (struct cgraph_node *node)
1469 dump_inline_summary (stderr, node);
1472 void
1473 dump_inline_summaries (FILE *f)
1475 struct cgraph_node *node;
1477 FOR_EACH_DEFINED_FUNCTION (node)
1478 if (!node->global.inlined_to)
1479 dump_inline_summary (f, node);
1482 /* Give initial reasons why inlining would fail on EDGE. This gets either
1483 nullified or usually overwritten by more precise reasons later. */
1485 void
1486 initialize_inline_failed (struct cgraph_edge *e)
1488 struct cgraph_node *callee = e->callee;
1490 if (e->indirect_unknown_callee)
1491 e->inline_failed = CIF_INDIRECT_UNKNOWN_CALL;
1492 else if (!callee->definition)
1493 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
1494 else if (callee->local.redefined_extern_inline)
1495 e->inline_failed = CIF_REDEFINED_EXTERN_INLINE;
1496 else if (e->call_stmt_cannot_inline_p)
1497 e->inline_failed = CIF_MISMATCHED_ARGUMENTS;
1498 else if (cfun && fn_contains_cilk_spawn_p (cfun))
1499 /* We can't inline if the function is spawning a function. */
1500 e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
1501 else
1502 e->inline_failed = CIF_FUNCTION_NOT_CONSIDERED;
1505 /* Callback of walk_aliased_vdefs. Flags that it has been invoked to the
1506 boolean variable pointed to by DATA. */
1508 static bool
1509 mark_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef ATTRIBUTE_UNUSED,
1510 void *data)
1512 bool *b = (bool *) data;
1513 *b = true;
1514 return true;
1517 /* If OP refers to the value of a function parameter, return the corresponding
1518 parameter. */
1520 static tree
1521 unmodified_parm_1 (gimple *stmt, tree op)
1523 /* SSA_NAME referring to parm default def? */
1524 if (TREE_CODE (op) == SSA_NAME
1525 && SSA_NAME_IS_DEFAULT_DEF (op)
1526 && TREE_CODE (SSA_NAME_VAR (op)) == PARM_DECL)
1527 return SSA_NAME_VAR (op);
1528 /* Non-SSA parm reference? */
1529 if (TREE_CODE (op) == PARM_DECL)
1531 bool modified = false;
1533 ao_ref refd;
1534 ao_ref_init (&refd, op);
1535 walk_aliased_vdefs (&refd, gimple_vuse (stmt), mark_modified, &modified,
1536 NULL);
1537 if (!modified)
1538 return op;
1540 return NULL_TREE;
1543 /* If OP refers to the value of a function parameter, return the corresponding
1544 parameter. Also traverse chains of SSA register assignments. */
1546 static tree
1547 unmodified_parm (gimple *stmt, tree op)
1549 tree res = unmodified_parm_1 (stmt, op);
1550 if (res)
1551 return res;
1553 if (TREE_CODE (op) == SSA_NAME
1554 && !SSA_NAME_IS_DEFAULT_DEF (op)
1555 && gimple_assign_single_p (SSA_NAME_DEF_STMT (op)))
1556 return unmodified_parm (SSA_NAME_DEF_STMT (op),
1557 gimple_assign_rhs1 (SSA_NAME_DEF_STMT (op)));
1558 return NULL_TREE;
1561 /* If OP refers to a value of a function parameter or value loaded from an
1562 aggregate passed to a parameter (either by value or reference), return TRUE
1563 and store the number of the parameter to *INDEX_P and information whether
1564 and how it has been loaded from an aggregate into *AGGPOS. INFO describes
1565 the function parameters, STMT is the statement in which OP is used or
1566 loaded. */
1568 static bool
1569 unmodified_parm_or_parm_agg_item (struct ipa_func_body_info *fbi,
1570 gimple *stmt, tree op, int *index_p,
1571 struct agg_position_info *aggpos)
1573 tree res = unmodified_parm_1 (stmt, op);
1575 gcc_checking_assert (aggpos);
1576 if (res)
1578 *index_p = ipa_get_param_decl_index (fbi->info, res);
1579 if (*index_p < 0)
1580 return false;
1581 aggpos->agg_contents = false;
1582 aggpos->by_ref = false;
1583 return true;
1586 if (TREE_CODE (op) == SSA_NAME)
1588 if (SSA_NAME_IS_DEFAULT_DEF (op)
1589 || !gimple_assign_single_p (SSA_NAME_DEF_STMT (op)))
1590 return false;
1591 stmt = SSA_NAME_DEF_STMT (op);
1592 op = gimple_assign_rhs1 (stmt);
1593 if (!REFERENCE_CLASS_P (op))
1594 return unmodified_parm_or_parm_agg_item (fbi, stmt, op, index_p,
1595 aggpos);
1598 aggpos->agg_contents = true;
1599 return ipa_load_from_parm_agg (fbi, fbi->info->descriptors,
1600 stmt, op, index_p, &aggpos->offset,
1601 NULL, &aggpos->by_ref);
1604 /* See if a statement might disappear after inlining.
1605 0 - means not eliminated
1606 1 - the statement goes away about half of the time
1607 2 - for sure it is eliminated.
1608 We are not terribly sophisticated, basically looking for simple abstraction
1609 penalty wrappers. */
1611 static int
1612 eliminated_by_inlining_prob (gimple *stmt)
1614 enum gimple_code code = gimple_code (stmt);
1615 enum tree_code rhs_code;
1617 if (!optimize)
1618 return 0;
1620 switch (code)
1622 case GIMPLE_RETURN:
1623 return 2;
1624 case GIMPLE_ASSIGN:
1625 if (gimple_num_ops (stmt) != 2)
1626 return 0;
1628 rhs_code = gimple_assign_rhs_code (stmt);
1630 /* Casts of parameters, loads from parameters passed by reference
1631 and stores to return value or parameters are often free after
1632 inlining due to SRA and further combining.
1633 Assume that half of the statements go away. */
1634 if (CONVERT_EXPR_CODE_P (rhs_code)
1635 || rhs_code == VIEW_CONVERT_EXPR
1636 || rhs_code == ADDR_EXPR
1637 || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
1639 tree rhs = gimple_assign_rhs1 (stmt);
1640 tree lhs = gimple_assign_lhs (stmt);
1641 tree inner_rhs = get_base_address (rhs);
1642 tree inner_lhs = get_base_address (lhs);
1643 bool rhs_free = false;
1644 bool lhs_free = false;
1646 if (!inner_rhs)
1647 inner_rhs = rhs;
1648 if (!inner_lhs)
1649 inner_lhs = lhs;
1651 /* Reads of parameter are expected to be free. */
1652 if (unmodified_parm (stmt, inner_rhs))
1653 rhs_free = true;
1654 /* Match expressions of form &this->field. Those will most likely
1655 combine with something upstream after inlining. */
1656 else if (TREE_CODE (inner_rhs) == ADDR_EXPR)
1658 tree op = get_base_address (TREE_OPERAND (inner_rhs, 0));
1659 if (TREE_CODE (op) == PARM_DECL)
1660 rhs_free = true;
1661 else if (TREE_CODE (op) == MEM_REF
1662 && unmodified_parm (stmt, TREE_OPERAND (op, 0)))
1663 rhs_free = true;
1666 /* When a parameter is not an SSA register because its address is taken
1667 and it is just copied into one, the statement will be completely
1668 free after inlining (we will copy propagate backward). */
1669 if (rhs_free && is_gimple_reg (lhs))
1670 return 2;
1672 /* Reads of parameters passed by reference
1673 are expected to be free (i.e. optimized out after inlining). */
1674 if (TREE_CODE (inner_rhs) == MEM_REF
1675 && unmodified_parm (stmt, TREE_OPERAND (inner_rhs, 0)))
1676 rhs_free = true;
1678 /* Copying parameter passed by reference into gimple register is
1679 probably also going to copy propagate, but we can't be quite
1680 sure. */
1681 if (rhs_free && is_gimple_reg (lhs))
1682 lhs_free = true;
1684 /* Writes to parameters, parameters passed by value and return value
1685 (either directly or passed via invisible reference) are free.
1687 TODO: We ought to handle testcase like
1688 struct a {int a,b;};
1689 struct a
1690 returnstruct (void)
1692 struct a a ={1,2};
1693 return a;
1696 This translates into:
1698 returnstruct ()
1700 int a$b;
1701 int a$a;
1702 struct a a;
1703 struct a D.2739;
1705 <bb 2>:
1706 D.2739.a = 1;
1707 D.2739.b = 2;
1708 return D.2739;
1711 For that we would need to copy the ipa-split logic detecting writes
1712 to the return value. */
1713 if (TREE_CODE (inner_lhs) == PARM_DECL
1714 || TREE_CODE (inner_lhs) == RESULT_DECL
1715 || (TREE_CODE (inner_lhs) == MEM_REF
1716 && (unmodified_parm (stmt, TREE_OPERAND (inner_lhs, 0))
1717 || (TREE_CODE (TREE_OPERAND (inner_lhs, 0)) == SSA_NAME
1718 && SSA_NAME_VAR (TREE_OPERAND (inner_lhs, 0))
1719 && TREE_CODE (SSA_NAME_VAR (TREE_OPERAND
1720 (inner_lhs,
1721 0))) == RESULT_DECL))))
1722 lhs_free = true;
1723 if (lhs_free
1724 && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
1725 rhs_free = true;
1726 if (lhs_free && rhs_free)
1727 return 1;
1729 return 0;
1730 default:
1731 return 0;
1736 /* If BB ends with a conditional that we can turn into predicates, attach the corresponding
1737 predicates to the CFG edges. */
1739 static void
1740 set_cond_stmt_execution_predicate (struct ipa_func_body_info *fbi,
1741 struct inline_summary *summary,
1742 basic_block bb)
1744 gimple *last;
1745 tree op;
1746 int index;
1747 struct agg_position_info aggpos;
1748 enum tree_code code, inverted_code;
1749 edge e;
1750 edge_iterator ei;
1751 gimple *set_stmt;
1752 tree op2;
1754 last = last_stmt (bb);
1755 if (!last || gimple_code (last) != GIMPLE_COND)
1756 return;
1757 if (!is_gimple_ip_invariant (gimple_cond_rhs (last)))
1758 return;
1759 op = gimple_cond_lhs (last);
1760 /* TODO: handle conditionals like
1761 var = op0 < 4;
1762 if (var != 0). */
1763 if (unmodified_parm_or_parm_agg_item (fbi, last, op, &index, &aggpos))
1765 code = gimple_cond_code (last);
1766 inverted_code = invert_tree_comparison (code, HONOR_NANS (op));
1768 FOR_EACH_EDGE (e, ei, bb->succs)
1770 enum tree_code this_code = (e->flags & EDGE_TRUE_VALUE
1771 ? code : inverted_code);
1772 /* invert_tree_comparison will return ERROR_MARK on FP
1773 comparisons that are not EQ/NE instead of returning a proper
1774 unordered one. Be sure it is not confused with NON_CONSTANT. */
1775 if (this_code != ERROR_MARK)
1777 struct predicate p = add_condition
1778 (summary, index, &aggpos, this_code,
1779 unshare_expr_without_location (gimple_cond_rhs (last)));
1780 e->aux = edge_predicate_pool.allocate ();
1781 *(struct predicate *) e->aux = p;
1786 if (TREE_CODE (op) != SSA_NAME)
1787 return;
1788 /* Special case
1789 if (builtin_constant_p (op))
1790 constant_code
1791 else
1792 nonconstant_code.
1793 Here we can predicate nonconstant_code. We can't
1794 really handle constant_code since we have no predicate
1795 for this and also the constant code is not known to be
1796 optimized away when the inliner doesn't see that the operand is constant.
1797 Other optimizers might think otherwise. */
1798 if (gimple_cond_code (last) != NE_EXPR
1799 || !integer_zerop (gimple_cond_rhs (last)))
1800 return;
1801 set_stmt = SSA_NAME_DEF_STMT (op);
1802 if (!gimple_call_builtin_p (set_stmt, BUILT_IN_CONSTANT_P)
1803 || gimple_call_num_args (set_stmt) != 1)
1804 return;
1805 op2 = gimple_call_arg (set_stmt, 0);
1806 if (!unmodified_parm_or_parm_agg_item (fbi, set_stmt, op2, &index, &aggpos))
1807 return;
1808 FOR_EACH_EDGE (e, ei, bb->succs) if (e->flags & EDGE_FALSE_VALUE)
1810 struct predicate p = add_condition (summary, index, &aggpos,
1811 IS_NOT_CONSTANT, NULL_TREE);
1812 e->aux = edge_predicate_pool.allocate ();
1813 *(struct predicate *) e->aux = p;
1818 /* If BB ends with a switch that we can turn into predicates, attach the corresponding
1819 predicates to the CFG edges. */
1821 static void
1822 set_switch_stmt_execution_predicate (struct ipa_func_body_info *fbi,
1823 struct inline_summary *summary,
1824 basic_block bb)
1826 gimple *lastg;
1827 tree op;
1828 int index;
1829 struct agg_position_info aggpos;
1830 edge e;
1831 edge_iterator ei;
1832 size_t n;
1833 size_t case_idx;
1835 lastg = last_stmt (bb);
1836 if (!lastg || gimple_code (lastg) != GIMPLE_SWITCH)
1837 return;
1838 gswitch *last = as_a <gswitch *> (lastg);
1839 op = gimple_switch_index (last);
1840 if (!unmodified_parm_or_parm_agg_item (fbi, last, op, &index, &aggpos))
1841 return;
1843 FOR_EACH_EDGE (e, ei, bb->succs)
1845 e->aux = edge_predicate_pool.allocate ();
1846 *(struct predicate *) e->aux = false_predicate ();
1848 n = gimple_switch_num_labels (last);
1849 for (case_idx = 0; case_idx < n; ++case_idx)
1851 tree cl = gimple_switch_label (last, case_idx);
1852 tree min, max;
1853 struct predicate p;
1855 e = find_edge (bb, label_to_block (CASE_LABEL (cl)));
1856 min = CASE_LOW (cl);
1857 max = CASE_HIGH (cl);
1859 /* For the default case we might want to construct a predicate that none
1860 of the cases is met, but that is a bit hard to do without having negations
1861 of the conditionals handy. */
1862 if (!min && !max)
1863 p = true_predicate ();
1864 else if (!max)
1865 p = add_condition (summary, index, &aggpos, EQ_EXPR,
1866 unshare_expr_without_location (min));
1867 else
1869 struct predicate p1, p2;
1870 p1 = add_condition (summary, index, &aggpos, GE_EXPR,
1871 unshare_expr_without_location (min));
1872 p2 = add_condition (summary, index, &aggpos, LE_EXPR,
1873 unshare_expr_without_location (max));
1874 p = and_predicates (summary->conds, &p1, &p2);
1876 *(struct predicate *) e->aux
1877 = or_predicates (summary->conds, &p, (struct predicate *) e->aux);
1882 /* For each BB in NODE attach to its AUX pointer the predicate under
1883 which it is executable. */
1885 static void
1886 compute_bb_predicates (struct ipa_func_body_info *fbi,
1887 struct cgraph_node *node,
1888 struct inline_summary *summary)
1890 struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
1891 bool done = false;
1892 basic_block bb;
1894 FOR_EACH_BB_FN (bb, my_function)
1896 set_cond_stmt_execution_predicate (fbi, summary, bb);
1897 set_switch_stmt_execution_predicate (fbi, summary, bb);
1900 /* Entry block is always executable. */
1901 ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
1902 = edge_predicate_pool.allocate ();
1903 *(struct predicate *) ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
1904 = true_predicate ();
1906 /* A simple dataflow propagation of predicates forward in the CFG.
1907 TODO: work in reverse postorder. */
1908 while (!done)
1910 done = true;
1911 FOR_EACH_BB_FN (bb, my_function)
1913 struct predicate p = false_predicate ();
1914 edge e;
1915 edge_iterator ei;
1916 FOR_EACH_EDGE (e, ei, bb->preds)
1918 if (e->src->aux)
1920 struct predicate this_bb_predicate
1921 = *(struct predicate *) e->src->aux;
1922 if (e->aux)
1923 this_bb_predicate
1924 = and_predicates (summary->conds, &this_bb_predicate,
1925 (struct predicate *) e->aux);
1926 p = or_predicates (summary->conds, &p, &this_bb_predicate);
1927 if (true_predicate_p (&p))
1928 break;
1931 if (false_predicate_p (&p))
1932 gcc_assert (!bb->aux);
1933 else
1935 if (!bb->aux)
1937 done = false;
1938 bb->aux = edge_predicate_pool.allocate ();
1939 *((struct predicate *) bb->aux) = p;
1941 else if (!predicates_equal_p (&p, (struct predicate *) bb->aux))
1943 /* This OR operation is needed to ensure monotone data flow
1944 in case we hit the limit on the number of clauses and the
1945 and/or operations above give approximate answers. */
1946 p = or_predicates (summary->conds, &p, (struct predicate *)bb->aux);
1947 if (!predicates_equal_p (&p, (struct predicate *) bb->aux))
1949 done = false;
1950 *((struct predicate *) bb->aux) = p;
1959 /* We keep info about constantness of SSA names. */
1961 typedef struct predicate predicate_t;
1962 /* Return the predicate specifying when STMT might have a result that is not
1963 a compile time constant. */
1965 static struct predicate
1966 will_be_nonconstant_expr_predicate (struct ipa_node_params *info,
1967 struct inline_summary *summary,
1968 tree expr,
1969 vec<predicate_t> nonconstant_names)
1971 tree parm;
1972 int index;
1974 while (UNARY_CLASS_P (expr))
1975 expr = TREE_OPERAND (expr, 0);
1977 parm = unmodified_parm (NULL, expr);
1978 if (parm && (index = ipa_get_param_decl_index (info, parm)) >= 0)
1979 return add_condition (summary, index, NULL, CHANGED, NULL_TREE);
1980 if (is_gimple_min_invariant (expr))
1981 return false_predicate ();
1982 if (TREE_CODE (expr) == SSA_NAME)
1983 return nonconstant_names[SSA_NAME_VERSION (expr)];
1984 if (BINARY_CLASS_P (expr) || COMPARISON_CLASS_P (expr))
1986 struct predicate p1 = will_be_nonconstant_expr_predicate
1987 (info, summary, TREE_OPERAND (expr, 0),
1988 nonconstant_names);
1989 struct predicate p2;
1990 if (true_predicate_p (&p1))
1991 return p1;
1992 p2 = will_be_nonconstant_expr_predicate (info, summary,
1993 TREE_OPERAND (expr, 1),
1994 nonconstant_names);
1995 return or_predicates (summary->conds, &p1, &p2);
1997 else if (TREE_CODE (expr) == COND_EXPR)
1999 struct predicate p1 = will_be_nonconstant_expr_predicate
2000 (info, summary, TREE_OPERAND (expr, 0),
2001 nonconstant_names);
2002 struct predicate p2;
2003 if (true_predicate_p (&p1))
2004 return p1;
2005 p2 = will_be_nonconstant_expr_predicate (info, summary,
2006 TREE_OPERAND (expr, 1),
2007 nonconstant_names);
2008 if (true_predicate_p (&p2))
2009 return p2;
2010 p1 = or_predicates (summary->conds, &p1, &p2);
2011 p2 = will_be_nonconstant_expr_predicate (info, summary,
2012 TREE_OPERAND (expr, 2),
2013 nonconstant_names);
2014 return or_predicates (summary->conds, &p1, &p2);
2016 else
2018 debug_tree (expr);
2019 gcc_unreachable ();
2021 return false_predicate ();
2025 /* Return predicate specifying when the STMT might have a result that is not
2026 a compile time constant. */
2028 static struct predicate
2029 will_be_nonconstant_predicate (struct ipa_func_body_info *fbi,
2030 struct inline_summary *summary,
2031 gimple *stmt,
2032 vec<predicate_t> nonconstant_names)
2034 struct predicate p = true_predicate ();
2035 ssa_op_iter iter;
2036 tree use;
2037 struct predicate op_non_const;
2038 bool is_load;
2039 int base_index;
2040 struct agg_position_info aggpos;
2042 /* See what statements might be optimized away
2043 when their arguments are constant. */
2044 if (gimple_code (stmt) != GIMPLE_ASSIGN
2045 && gimple_code (stmt) != GIMPLE_COND
2046 && gimple_code (stmt) != GIMPLE_SWITCH
2047 && (gimple_code (stmt) != GIMPLE_CALL
2048 || !(gimple_call_flags (stmt) & ECF_CONST)))
2049 return p;
2051 /* Stores will stay anyway. */
2052 if (gimple_store_p (stmt))
2053 return p;
2055 is_load = gimple_assign_load_p (stmt);
2057 /* Loads can be optimized when the value is known. */
2058 if (is_load)
2060 tree op;
2061 gcc_assert (gimple_assign_single_p (stmt));
2062 op = gimple_assign_rhs1 (stmt);
2063 if (!unmodified_parm_or_parm_agg_item (fbi, stmt, op, &base_index,
2064 &aggpos))
2065 return p;
2067 else
2068 base_index = -1;
2070 /* See if we understand all operands before we start
2071 adding conditionals. */
2072 FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
2074 tree parm = unmodified_parm (stmt, use);
2075 /* For arguments we can build a condition. */
2076 if (parm && ipa_get_param_decl_index (fbi->info, parm) >= 0)
2077 continue;
2078 if (TREE_CODE (use) != SSA_NAME)
2079 return p;
2080 /* If we know when the operand is constant,
2081 we can still say something useful. */
2082 if (!true_predicate_p (&nonconstant_names[SSA_NAME_VERSION (use)]))
2083 continue;
2084 return p;
2087 if (is_load)
2088 op_non_const =
2089 add_condition (summary, base_index, &aggpos, CHANGED, NULL);
2090 else
2091 op_non_const = false_predicate ();
2092 FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
2094 tree parm = unmodified_parm (stmt, use);
2095 int index;
2097 if (parm && (index = ipa_get_param_decl_index (fbi->info, parm)) >= 0)
2099 if (index != base_index)
2100 p = add_condition (summary, index, NULL, CHANGED, NULL_TREE);
2101 else
2102 continue;
2104 else
2105 p = nonconstant_names[SSA_NAME_VERSION (use)];
2106 op_non_const = or_predicates (summary->conds, &p, &op_non_const);
2108 if ((gimple_code (stmt) == GIMPLE_ASSIGN || gimple_code (stmt) == GIMPLE_CALL)
2109 && gimple_op (stmt, 0)
2110 && TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
2111 nonconstant_names[SSA_NAME_VERSION (gimple_op (stmt, 0))]
2112 = op_non_const;
2113 return op_non_const;
2116 struct record_modified_bb_info
2118 bitmap bb_set;
2119 gimple *stmt;
2122 /* Callback of walk_aliased_vdefs. Records basic blocks where the value may be
2123 set except for info->stmt. */
2125 static bool
2126 record_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef, void *data)
2128 struct record_modified_bb_info *info =
2129 (struct record_modified_bb_info *) data;
2130 if (SSA_NAME_DEF_STMT (vdef) == info->stmt)
2131 return false;
2132 bitmap_set_bit (info->bb_set,
2133 SSA_NAME_IS_DEFAULT_DEF (vdef)
2134 ? ENTRY_BLOCK_PTR_FOR_FN (cfun)->index
2135 : gimple_bb (SSA_NAME_DEF_STMT (vdef))->index);
2136 return false;
2139 /* Return probability (based on REG_BR_PROB_BASE) that the I-th parameter of STMT
2140 will have changed since the last invocation of STMT.
2142 Value 0 is reserved for compile time invariants.
2143 For common parameters it is REG_BR_PROB_BASE. For loop invariants it
2144 ought to be REG_BR_PROB_BASE / estimated_iters. */
2146 static int
2147 param_change_prob (gimple *stmt, int i)
2149 tree op = gimple_call_arg (stmt, i);
2150 basic_block bb = gimple_bb (stmt);
2151 tree base;
2153 /* Global invariants never change. */
2154 if (is_gimple_min_invariant (op))
2155 return 0;
2156 /* We would have to do non-trivial analysis to really work out what
2157 the probability of the value changing is (i.e. when the init statement
2158 is in a sibling loop of the call).
2160 We make a conservative estimate: when the call is executed N times more often
2161 than the statement defining the value, we take the frequency 1/N. */
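/* A worked example (assuming the usual REG_BR_PROB_BASE of 10000): if the
   defining statement's block has frequency 100 and the call's block has
   frequency 1000, the call runs roughly ten times per definition, so we
   return GCOV_COMPUTE_SCALE (100, 1000) == 1000, i.e. a change probability
   of about 10%.  */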
2162 if (TREE_CODE (op) == SSA_NAME)
2164 int init_freq;
2166 if (!bb->frequency)
2167 return REG_BR_PROB_BASE;
2169 if (SSA_NAME_IS_DEFAULT_DEF (op))
2170 init_freq = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
2171 else
2172 init_freq = gimple_bb (SSA_NAME_DEF_STMT (op))->frequency;
2174 if (!init_freq)
2175 init_freq = 1;
2176 if (init_freq < bb->frequency)
2177 return MAX (GCOV_COMPUTE_SCALE (init_freq, bb->frequency), 1);
2178 else
2179 return REG_BR_PROB_BASE;
2182 base = get_base_address (op);
2183 if (base)
2185 ao_ref refd;
2186 int max;
2187 struct record_modified_bb_info info;
2188 bitmap_iterator bi;
2189 unsigned index;
2190 tree init = ctor_for_folding (base);
2192 if (init != error_mark_node)
2193 return 0;
2194 if (!bb->frequency)
2195 return REG_BR_PROB_BASE;
2196 ao_ref_init (&refd, op);
2197 info.stmt = stmt;
2198 info.bb_set = BITMAP_ALLOC (NULL);
2199 walk_aliased_vdefs (&refd, gimple_vuse (stmt), record_modified, &info,
2200 NULL);
2201 if (bitmap_bit_p (info.bb_set, bb->index))
2203 BITMAP_FREE (info.bb_set);
2204 return REG_BR_PROB_BASE;
2207 /* Assume that all memory is initialized at entry.
2208 TODO: Can we easily determine if the value is always defined
2209 and thus skip the entry block? */
2210 if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency)
2211 max = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
2212 else
2213 max = 1;
2215 EXECUTE_IF_SET_IN_BITMAP (info.bb_set, 0, index, bi)
2216 max = MIN (max, BASIC_BLOCK_FOR_FN (cfun, index)->frequency);
2218 BITMAP_FREE (info.bb_set);
2219 if (max < bb->frequency)
2220 return MAX (GCOV_COMPUTE_SCALE (max, bb->frequency), 1);
2221 else
2222 return REG_BR_PROB_BASE;
2224 return REG_BR_PROB_BASE;
2227 /* Find whether a basic block BB is the final block of a (half) diamond CFG
2228 sub-graph and if the predicate the condition depends on is known. If so,
2229 return true and store the predicate in *P. */
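/* An illustrative half-diamond (a sketch, not a real dump):

     bb2:  if (a_1 > 7) goto bb3; else goto bb4;
     bb3:  ...
     bb4:  x_2 = PHI <5 (bb2), 9 (bb3)>

   Here the PHI result is known whenever a_1 is known, so *P becomes the
   predicate under which a_1 is non-constant.  */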
2231 static bool
2232 phi_result_unknown_predicate (struct ipa_node_params *info,
2233 inline_summary *summary, basic_block bb,
2234 struct predicate *p,
2235 vec<predicate_t> nonconstant_names)
2237 edge e;
2238 edge_iterator ei;
2239 basic_block first_bb = NULL;
2240 gimple *stmt;
2242 if (single_pred_p (bb))
2244 *p = false_predicate ();
2245 return true;
2248 FOR_EACH_EDGE (e, ei, bb->preds)
2250 if (single_succ_p (e->src))
2252 if (!single_pred_p (e->src))
2253 return false;
2254 if (!first_bb)
2255 first_bb = single_pred (e->src);
2256 else if (single_pred (e->src) != first_bb)
2257 return false;
2259 else
2261 if (!first_bb)
2262 first_bb = e->src;
2263 else if (e->src != first_bb)
2264 return false;
2268 if (!first_bb)
2269 return false;
2271 stmt = last_stmt (first_bb);
2272 if (!stmt
2273 || gimple_code (stmt) != GIMPLE_COND
2274 || !is_gimple_ip_invariant (gimple_cond_rhs (stmt)))
2275 return false;
2277 *p = will_be_nonconstant_expr_predicate (info, summary,
2278 gimple_cond_lhs (stmt),
2279 nonconstant_names);
2280 if (true_predicate_p (p))
2281 return false;
2282 else
2283 return true;
2286 /* Given a PHI statement in a function described by inline properties SUMMARY
2287 and *P being the predicate describing whether the selected PHI argument is
2288 known, store a predicate for the result of the PHI statement into
2289 NONCONSTANT_NAMES, if possible. */
2291 static void
2292 predicate_for_phi_result (struct inline_summary *summary, gphi *phi,
2293 struct predicate *p,
2294 vec<predicate_t> nonconstant_names)
2296 unsigned i;
2298 for (i = 0; i < gimple_phi_num_args (phi); i++)
2300 tree arg = gimple_phi_arg (phi, i)->def;
2301 if (!is_gimple_min_invariant (arg))
2303 gcc_assert (TREE_CODE (arg) == SSA_NAME);
2304 *p = or_predicates (summary->conds, p,
2305 &nonconstant_names[SSA_NAME_VERSION (arg)]);
2306 if (true_predicate_p (p))
2307 return;
2311 if (dump_file && (dump_flags & TDF_DETAILS))
2313 fprintf (dump_file, "\t\tphi predicate: ");
2314 dump_predicate (dump_file, summary->conds, p);
2316 nonconstant_names[SSA_NAME_VERSION (gimple_phi_result (phi))] = *p;
2319 /* Return predicate specifying when array index in access OP becomes non-constant. */
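/* For instance, for an access like a[i_1][j_2] the resulting predicate is
   the OR of the non-constant predicates of i_1 and j_2; an access with
   only constant indices yields the false predicate.  */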
2321 static struct predicate
2322 array_index_predicate (inline_summary *info,
2323 vec< predicate_t> nonconstant_names, tree op)
2325 struct predicate p = false_predicate ();
2326 while (handled_component_p (op))
2328 if (TREE_CODE (op) == ARRAY_REF || TREE_CODE (op) == ARRAY_RANGE_REF)
2330 if (TREE_CODE (TREE_OPERAND (op, 1)) == SSA_NAME)
2331 p = or_predicates (info->conds, &p,
2332 &nonconstant_names[SSA_NAME_VERSION
2333 (TREE_OPERAND (op, 1))]);
2335 op = TREE_OPERAND (op, 0);
2337 return p;
2340 /* For a typical usage of __builtin_expect (a <= b, 1), we
2341 may introduce an extra relation stmt:
2342 With the builtin, we have
2343 t1 = a <= b;
2344 t2 = (long int) t1;
2345 t3 = __builtin_expect (t2, 1);
2346 if (t3 != 0)
2347 goto ...
2348 Without the builtin, we have
2349 if (a<=b)
2350 goto...
2351 This affects the size/time estimation and may have
2352 an impact on the earlier inlining.
2353 Here we find this pattern and fix it up later. */
2355 static gimple *
2356 find_foldable_builtin_expect (basic_block bb)
2358 gimple_stmt_iterator bsi;
2360 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
2362 gimple *stmt = gsi_stmt (bsi);
2363 if (gimple_call_builtin_p (stmt, BUILT_IN_EXPECT)
2364 || (is_gimple_call (stmt)
2365 && gimple_call_internal_p (stmt)
2366 && gimple_call_internal_fn (stmt) == IFN_BUILTIN_EXPECT))
2368 tree var = gimple_call_lhs (stmt);
2369 tree arg = gimple_call_arg (stmt, 0);
2370 use_operand_p use_p;
2371 gimple *use_stmt;
2372 bool match = false;
2373 bool done = false;
2375 if (!var || !arg)
2376 continue;
2377 gcc_assert (TREE_CODE (var) == SSA_NAME);
2379 while (TREE_CODE (arg) == SSA_NAME)
2381 gimple *stmt_tmp = SSA_NAME_DEF_STMT (arg);
2382 if (!is_gimple_assign (stmt_tmp))
2383 break;
2384 switch (gimple_assign_rhs_code (stmt_tmp))
2386 case LT_EXPR:
2387 case LE_EXPR:
2388 case GT_EXPR:
2389 case GE_EXPR:
2390 case EQ_EXPR:
2391 case NE_EXPR:
2392 match = true;
2393 done = true;
2394 break;
2395 CASE_CONVERT:
2396 break;
2397 default:
2398 done = true;
2399 break;
2401 if (done)
2402 break;
2403 arg = gimple_assign_rhs1 (stmt_tmp);
2406 if (match && single_imm_use (var, &use_p, &use_stmt)
2407 && gimple_code (use_stmt) == GIMPLE_COND)
2408 return use_stmt;
2411 return NULL;
2414 /* Return true when the basic block contains only clobbers followed by RESX.
2415 Such BBs are kept around to make removal of dead stores possible in the
2416 presence of EH and will be optimized out by optimize_clobbers later in the
2417 game.
2419 NEED_EH is used to recurse in case the clobber has non-EH predecessors
2420 that can be clobber-only, too. When it is false, the RESX is not necessary
2421 at the end of the basic block. */
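/* An example of such a block, as it would appear in a gimple dump
   (illustrative only):

     <bb 13>:
     foo ={v} {CLOBBER};
     bar ={v} {CLOBBER};
     resx 2

   Blocks of this shape exist only to keep EH information around until
   optimize_clobbers gets rid of them.  */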
2423 static bool
2424 clobber_only_eh_bb_p (basic_block bb, bool need_eh = true)
2426 gimple_stmt_iterator gsi = gsi_last_bb (bb);
2427 edge_iterator ei;
2428 edge e;
2430 if (need_eh)
2432 if (gsi_end_p (gsi))
2433 return false;
2434 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_RESX)
2435 return false;
2436 gsi_prev (&gsi);
2438 else if (!single_succ_p (bb))
2439 return false;
2441 for (; !gsi_end_p (gsi); gsi_prev (&gsi))
2443 gimple *stmt = gsi_stmt (gsi);
2444 if (is_gimple_debug (stmt))
2445 continue;
2446 if (gimple_clobber_p (stmt))
2447 continue;
2448 if (gimple_code (stmt) == GIMPLE_LABEL)
2449 break;
2450 return false;
2453 /* See if all predecessors are either throws or clobber-only BBs. */
2454 FOR_EACH_EDGE (e, ei, bb->preds)
2455 if (!(e->flags & EDGE_EH)
2456 && !clobber_only_eh_bb_p (e->src, false))
2457 return false;
2459 return true;
2462 /* Compute function body size parameters for NODE.
2463 When EARLY is true, we compute only simple summaries without
2464 non-trivial predicates to drive the early inliner. */
2466 static void
2467 estimate_function_body_sizes (struct cgraph_node *node, bool early)
2469 gcov_type time = 0;
2470 /* Estimate static overhead for function prologue/epilogue and alignment. */
2471 int size = 2;
2472 /* Benefits are scaled by the probability of elimination, which is in the
2473 range <0,2>. */
2474 basic_block bb;
2475 struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
2476 int freq;
2477 struct inline_summary *info = inline_summaries->get (node);
2478 struct predicate bb_predicate;
2479 struct ipa_func_body_info fbi;
2480 vec<predicate_t> nonconstant_names = vNULL;
2481 int nblocks, n;
2482 int *order;
2483 predicate array_index = true_predicate ();
2484 gimple *fix_builtin_expect_stmt;
2486 gcc_assert (my_function && my_function->cfg);
2487 gcc_assert (cfun == my_function);
2489 memset(&fbi, 0, sizeof(fbi));
2490 info->conds = NULL;
2491 info->entry = NULL;
2493 /* When optimizing and analyzing for the IPA inliner, initialize the loop
2494 optimizer so we can produce proper inline hints.
2496 When optimizing and analyzing for the early inliner, initialize node params
2497 so we can produce correct BB predicates. */
2499 if (opt_for_fn (node->decl, optimize))
2501 calculate_dominance_info (CDI_DOMINATORS);
2502 if (!early)
2503 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
2504 else
2506 ipa_check_create_node_params ();
2507 ipa_initialize_node_params (node);
2510 if (ipa_node_params_sum)
2512 fbi.node = node;
2513 fbi.info = IPA_NODE_REF (node);
2514 fbi.bb_infos = vNULL;
2515 fbi.bb_infos.safe_grow_cleared (last_basic_block_for_fn (cfun));
2516 fbi.param_count = count_formal_params(node->decl);
2517 nonconstant_names.safe_grow_cleared
2518 (SSANAMES (my_function)->length ());
2522 if (dump_file)
2523 fprintf (dump_file, "\nAnalyzing function body size: %s\n",
2524 node->name ());
2526 /* When we run into the maximal number of entries, we assign everything to the
2527 constant truth case. Be sure to have it in the list. */
2528 bb_predicate = true_predicate ();
2529 account_size_time (info, 0, 0, &bb_predicate);
2531 bb_predicate = not_inlined_predicate ();
2532 account_size_time (info, 2 * INLINE_SIZE_SCALE, 0, &bb_predicate);
2534 if (fbi.info)
2535 compute_bb_predicates (&fbi, node, info);
2536 order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
2537 nblocks = pre_and_rev_post_order_compute (NULL, order, false);
2538 for (n = 0; n < nblocks; n++)
2540 bb = BASIC_BLOCK_FOR_FN (cfun, order[n]);
2541 freq = compute_call_stmt_bb_frequency (node->decl, bb);
2542 if (clobber_only_eh_bb_p (bb))
2544 if (dump_file && (dump_flags & TDF_DETAILS))
2545 fprintf (dump_file, "\n Ignoring BB %i;"
2546 " it will be optimized away by cleanup_clobbers\n",
2547 bb->index);
2548 continue;
2551 /* TODO: Obviously predicates can be propagated down across CFG. */
2552 if (fbi.info)
2554 if (bb->aux)
2555 bb_predicate = *(struct predicate *) bb->aux;
2556 else
2557 bb_predicate = false_predicate ();
2559 else
2560 bb_predicate = true_predicate ();
2562 if (dump_file && (dump_flags & TDF_DETAILS))
2564 fprintf (dump_file, "\n BB %i predicate:", bb->index);
2565 dump_predicate (dump_file, info->conds, &bb_predicate);
2568 if (fbi.info && nonconstant_names.exists ())
2570 struct predicate phi_predicate;
2571 bool first_phi = true;
2573 for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi);
2574 gsi_next (&bsi))
2576 if (first_phi
2577 && !phi_result_unknown_predicate (fbi.info, info, bb,
2578 &phi_predicate,
2579 nonconstant_names))
2580 break;
2581 first_phi = false;
2582 if (dump_file && (dump_flags & TDF_DETAILS))
2584 fprintf (dump_file, " ");
2585 print_gimple_stmt (dump_file, gsi_stmt (bsi), 0, 0);
2587 predicate_for_phi_result (info, bsi.phi (), &phi_predicate,
2588 nonconstant_names);
2592 fix_builtin_expect_stmt = find_foldable_builtin_expect (bb);
2594 for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi);
2595 gsi_next (&bsi))
2597 gimple *stmt = gsi_stmt (bsi);
2598 int this_size = estimate_num_insns (stmt, &eni_size_weights);
2599 int this_time = estimate_num_insns (stmt, &eni_time_weights);
2600 int prob;
2601 struct predicate will_be_nonconstant;
2603 /* This relation stmt should be folded after we remove
2604 the builtin_expect call. Adjust the cost here. */
2605 if (stmt == fix_builtin_expect_stmt)
2607 this_size--;
2608 this_time--;
2611 if (dump_file && (dump_flags & TDF_DETAILS))
2613 fprintf (dump_file, " ");
2614 print_gimple_stmt (dump_file, stmt, 0, 0);
2615 fprintf (dump_file, "\t\tfreq:%3.2f size:%3i time:%3i\n",
2616 ((double) freq) / CGRAPH_FREQ_BASE, this_size,
2617 this_time);
2620 if (gimple_assign_load_p (stmt) && nonconstant_names.exists ())
2622 struct predicate this_array_index;
2623 this_array_index =
2624 array_index_predicate (info, nonconstant_names,
2625 gimple_assign_rhs1 (stmt));
2626 if (!false_predicate_p (&this_array_index))
2627 array_index =
2628 and_predicates (info->conds, &array_index,
2629 &this_array_index);
2631 if (gimple_store_p (stmt) && nonconstant_names.exists ())
2633 struct predicate this_array_index;
2634 this_array_index =
2635 array_index_predicate (info, nonconstant_names,
2636 gimple_get_lhs (stmt));
2637 if (!false_predicate_p (&this_array_index))
2638 array_index =
2639 and_predicates (info->conds, &array_index,
2640 &this_array_index);
2644 if (is_gimple_call (stmt)
2645 && !gimple_call_internal_p (stmt))
2647 struct cgraph_edge *edge = node->get_edge (stmt);
2648 struct inline_edge_summary *es = inline_edge_summary (edge);
2650 /* Special case: results of BUILT_IN_CONSTANT_P will always be
2651 resolved as constant. However, we don't want to optimize
2652 out the cgraph edges. */
2653 if (nonconstant_names.exists ()
2654 && gimple_call_builtin_p (stmt, BUILT_IN_CONSTANT_P)
2655 && gimple_call_lhs (stmt)
2656 && TREE_CODE (gimple_call_lhs (stmt)) == SSA_NAME)
2658 struct predicate false_p = false_predicate ();
2659 nonconstant_names[SSA_NAME_VERSION (gimple_call_lhs (stmt))]
2660 = false_p;
2662 if (ipa_node_params_sum)
2664 int count = gimple_call_num_args (stmt);
2665 int i;
2667 if (count)
2668 es->param.safe_grow_cleared (count);
2669 for (i = 0; i < count; i++)
2671 int prob = param_change_prob (stmt, i);
2672 gcc_assert (prob >= 0 && prob <= REG_BR_PROB_BASE);
2673 es->param[i].change_prob = prob;
2677 es->call_stmt_size = this_size;
2678 es->call_stmt_time = this_time;
2679 es->loop_depth = bb_loop_depth (bb);
2680 edge_set_predicate (edge, &bb_predicate);
2683 /* TODO: When a conditional jump or switch is known to be constant, but
2684 we did not translate it into the predicates, we really can account
2685 for just the maximum of the possible paths. */
2686 if (fbi.info)
2687 will_be_nonconstant
2688 = will_be_nonconstant_predicate (&fbi, info,
2689 stmt, nonconstant_names);
2690 if (this_time || this_size)
2692 struct predicate p;
2694 this_time *= freq;
2696 prob = eliminated_by_inlining_prob (stmt);
2697 if (prob == 1 && dump_file && (dump_flags & TDF_DETAILS))
2698 fprintf (dump_file,
2699 "\t\t50%% will be eliminated by inlining\n");
2700 if (prob == 2 && dump_file && (dump_flags & TDF_DETAILS))
2701 fprintf (dump_file, "\t\tWill be eliminated by inlining\n");
2703 if (fbi.info)
2704 p = and_predicates (info->conds, &bb_predicate,
2705 &will_be_nonconstant);
2706 else
2707 p = true_predicate ();
2709 if (!false_predicate_p (&p)
2710 || (is_gimple_call (stmt)
2711 && !false_predicate_p (&bb_predicate)))
2713 time += this_time;
2714 size += this_size;
2715 if (time > MAX_TIME * INLINE_TIME_SCALE)
2716 time = MAX_TIME * INLINE_TIME_SCALE;
2719 /* We account everything but the calls. Calls have their own
2720 size/time info attached to cgraph edges. This is necessary
2721 in order to make the cost disappear after inlining. */
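/* PROB is in the range <0,2>, i.e. in units of 50%.  For example, with
   prob == 1 half of the cost is accounted under the "not inlined"
   predicate (so it goes away once the call is inlined) and half under
   the plain predicate P; with prob == 2 the entire cost is attributed
   to the not-inlined case.  */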
2722 if (!is_gimple_call (stmt))
2724 if (prob)
2726 struct predicate ip = not_inlined_predicate ();
2727 ip = and_predicates (info->conds, &ip, &p);
2728 account_size_time (info, this_size * prob,
2729 this_time * prob, &ip);
2731 if (prob != 2)
2732 account_size_time (info, this_size * (2 - prob),
2733 this_time * (2 - prob), &p);
2736 gcc_assert (time >= 0);
2737 gcc_assert (size >= 0);
2741 set_hint_predicate (&inline_summaries->get (node)->array_index, array_index);
2742 time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
2743 if (time > MAX_TIME)
2744 time = MAX_TIME;
2745 free (order);
2747 if (nonconstant_names.exists () && !early)
2749 struct loop *loop;
2750 predicate loop_iterations = true_predicate ();
2751 predicate loop_stride = true_predicate ();
2753 if (dump_file && (dump_flags & TDF_DETAILS))
2754 flow_loops_dump (dump_file, NULL, 0);
2755 scev_initialize ();
2756 FOR_EACH_LOOP (loop, 0)
2758 vec<edge> exits;
2759 edge ex;
2760 unsigned int j;
2761 struct tree_niter_desc niter_desc;
2762 bb_predicate = *(struct predicate *) loop->header->aux;
2764 exits = get_loop_exit_edges (loop);
2765 FOR_EACH_VEC_ELT (exits, j, ex)
2766 if (number_of_iterations_exit (loop, ex, &niter_desc, false)
2767 && !is_gimple_min_invariant (niter_desc.niter))
2769 predicate will_be_nonconstant
2770 = will_be_nonconstant_expr_predicate (fbi.info, info,
2771 niter_desc.niter,
2772 nonconstant_names);
2773 if (!true_predicate_p (&will_be_nonconstant))
2774 will_be_nonconstant = and_predicates (info->conds,
2775 &bb_predicate,
2776 &will_be_nonconstant);
2777 if (!true_predicate_p (&will_be_nonconstant)
2778 && !false_predicate_p (&will_be_nonconstant))
2779 /* This is slightly imprecise. We may want to represent each
2780 loop with an independent predicate. */
2781 loop_iterations =
2782 and_predicates (info->conds, &loop_iterations,
2783 &will_be_nonconstant);
2785 exits.release ();
2788 /* To avoid quadratic behavior we analyze stride predicates only
2789 with respect to the containing loop. Thus we simply iterate
2790 over all defs in the outermost loop body. */
2791 for (loop = loops_for_fn (cfun)->tree_root->inner;
2792 loop != NULL; loop = loop->next)
2794 basic_block *body = get_loop_body (loop);
2795 for (unsigned i = 0; i < loop->num_nodes; i++)
2797 gimple_stmt_iterator gsi;
2798 bb_predicate = *(struct predicate *) body[i]->aux;
2799 for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi);
2800 gsi_next (&gsi))
2802 gimple *stmt = gsi_stmt (gsi);
2804 if (!is_gimple_assign (stmt))
2805 continue;
2807 tree def = gimple_assign_lhs (stmt);
2808 if (TREE_CODE (def) != SSA_NAME)
2809 continue;
2811 affine_iv iv;
2812 if (!simple_iv (loop_containing_stmt (stmt),
2813 loop_containing_stmt (stmt),
2814 def, &iv, true)
2815 || is_gimple_min_invariant (iv.step))
2816 continue;
2818 predicate will_be_nonconstant
2819 = will_be_nonconstant_expr_predicate (fbi.info, info,
2820 iv.step,
2821 nonconstant_names);
2822 if (!true_predicate_p (&will_be_nonconstant))
2823 will_be_nonconstant
2824 = and_predicates (info->conds, &bb_predicate,
2825 &will_be_nonconstant);
2826 if (!true_predicate_p (&will_be_nonconstant)
2827 && !false_predicate_p (&will_be_nonconstant))
2828 /* This is slightly imprecise. We may want to represent
2829 each loop with an independent predicate. */
2830 loop_stride = and_predicates (info->conds, &loop_stride,
2831 &will_be_nonconstant);
2834 free (body);
2836 set_hint_predicate (&inline_summaries->get (node)->loop_iterations,
2837 loop_iterations);
2838 set_hint_predicate (&inline_summaries->get (node)->loop_stride,
2839 loop_stride);
2840 scev_finalize ();
2842 FOR_ALL_BB_FN (bb, my_function)
2844 edge e;
2845 edge_iterator ei;
2847 if (bb->aux)
2848 edge_predicate_pool.remove ((predicate *)bb->aux);
2849 bb->aux = NULL;
2850 FOR_EACH_EDGE (e, ei, bb->succs)
2852 if (e->aux)
2853 edge_predicate_pool.remove ((predicate *) e->aux);
2854 e->aux = NULL;
2857 inline_summaries->get (node)->self_time = time;
2858 inline_summaries->get (node)->self_size = size;
2859 nonconstant_names.release ();
2860 ipa_release_body_info (&fbi);
2861 if (opt_for_fn (node->decl, optimize))
2863 if (!early)
2864 loop_optimizer_finalize ();
2865 else if (!ipa_edge_args_vector)
2866 ipa_free_all_node_params ();
2867 free_dominance_info (CDI_DOMINATORS);
2869 if (dump_file)
2871 fprintf (dump_file, "\n");
2872 dump_inline_summary (dump_file, node);
2877 /* Compute parameters of functions used by the inliner.
2878 EARLY is true when we compute parameters for the early inliner. */
2880 void
2881 compute_inline_parameters (struct cgraph_node *node, bool early)
2883 HOST_WIDE_INT self_stack_size;
2884 struct cgraph_edge *e;
2885 struct inline_summary *info;
2887 gcc_assert (!node->global.inlined_to);
2889 inline_summary_alloc ();
2891 info = inline_summaries->get (node);
2892 reset_inline_summary (node, info);
2894 /* FIXME: Thunks are inlinable, but tree-inline doesn't know how to do that.
2895 Once this happens, we will need to predict call
2896 statement size more carefully. */
2897 if (node->thunk.thunk_p)
2899 struct inline_edge_summary *es = inline_edge_summary (node->callees);
2900 struct predicate t = true_predicate ();
2902 info->inlinable = 0;
2903 node->callees->call_stmt_cannot_inline_p = true;
2904 node->local.can_change_signature = false;
2905 es->call_stmt_time = 1;
2906 es->call_stmt_size = 1;
2907 account_size_time (info, 0, 0, &t);
2908 return;
2911 /* Even is_gimple_min_invariant relies on current_function_decl. */
2912 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
2914 /* Estimate the stack size for the function if we're optimizing. */
2915 self_stack_size = optimize ? estimated_stack_frame_size (node) : 0;
2916 info->estimated_self_stack_size = self_stack_size;
2917 info->estimated_stack_size = self_stack_size;
2918 info->stack_frame_offset = 0;
2920 /* Can this function be inlined at all? */
2921 if (!opt_for_fn (node->decl, optimize)
2922 && !lookup_attribute ("always_inline",
2923 DECL_ATTRIBUTES (node->decl)))
2924 info->inlinable = false;
2925 else
2926 info->inlinable = tree_inlinable_function_p (node->decl);
2928 info->contains_cilk_spawn = fn_contains_cilk_spawn_p (cfun);
2930 /* Type attributes can use parameter indices to describe them. */
2931 if (TYPE_ATTRIBUTES (TREE_TYPE (node->decl)))
2932 node->local.can_change_signature = false;
2933 else
2935 /* Otherwise, inlinable functions can always change their signature. */
2936 if (info->inlinable)
2937 node->local.can_change_signature = true;
2938 else
2940 /* Functions calling builtin_apply cannot change their signature. */
2941 for (e = node->callees; e; e = e->next_callee)
2943 tree cdecl = e->callee->decl;
2944 if (DECL_BUILT_IN (cdecl)
2945 && DECL_BUILT_IN_CLASS (cdecl) == BUILT_IN_NORMAL
2946 && (DECL_FUNCTION_CODE (cdecl) == BUILT_IN_APPLY_ARGS
2947 || DECL_FUNCTION_CODE (cdecl) == BUILT_IN_VA_START))
2948 break;
2950 node->local.can_change_signature = !e;
2953 estimate_function_body_sizes (node, early);
2955 for (e = node->callees; e; e = e->next_callee)
2956 if (e->callee->comdat_local_p ())
2957 break;
2958 node->calls_comdat_local = (e != NULL);
2960 /* Inlining characteristics are maintained by cgraph_mark_inline. */
2961 info->time = info->self_time;
2962 info->size = info->self_size;
2963 info->stack_frame_offset = 0;
2964 info->estimated_stack_size = info->estimated_self_stack_size;
2965 if (flag_checking)
2967 inline_update_overall_summary (node);
2968 gcc_assert (info->time == info->self_time
2969 && info->size == info->self_size);
2972 pop_cfun ();
2976 /* Compute parameters of functions used by inliner using
2977 current_function_decl. */
2979 static unsigned int
2980 compute_inline_parameters_for_current (void)
2982 compute_inline_parameters (cgraph_node::get (current_function_decl), true);
2983 return 0;
2986 namespace {
2988 const pass_data pass_data_inline_parameters =
2990 GIMPLE_PASS, /* type */
2991 "inline_param", /* name */
2992 OPTGROUP_INLINE, /* optinfo_flags */
2993 TV_INLINE_PARAMETERS, /* tv_id */
2994 0, /* properties_required */
2995 0, /* properties_provided */
2996 0, /* properties_destroyed */
2997 0, /* todo_flags_start */
2998 0, /* todo_flags_finish */
3001 class pass_inline_parameters : public gimple_opt_pass
3003 public:
3004 pass_inline_parameters (gcc::context *ctxt)
3005 : gimple_opt_pass (pass_data_inline_parameters, ctxt)
3008 /* opt_pass methods: */
3009 opt_pass * clone () { return new pass_inline_parameters (m_ctxt); }
3010 virtual unsigned int execute (function *)
3012 return compute_inline_parameters_for_current ();
3015 }; // class pass_inline_parameters
3017 } // anon namespace
3019 gimple_opt_pass *
3020 make_pass_inline_parameters (gcc::context *ctxt)
3022 return new pass_inline_parameters (ctxt);
3026 /* Estimate the benefit of devirtualizing indirect edge IE, given KNOWN_VALS,
3027 KNOWN_CONTEXTS and KNOWN_AGGS. */
3029 static bool
3030 estimate_edge_devirt_benefit (struct cgraph_edge *ie,
3031 int *size, int *time,
3032 vec<tree> known_vals,
3033 vec<ipa_polymorphic_call_context> known_contexts,
3034 vec<ipa_agg_jump_function_p> known_aggs)
3036 tree target;
3037 struct cgraph_node *callee;
3038 struct inline_summary *isummary;
3039 enum availability avail;
3040 bool speculative;
3042 if (!known_vals.exists () && !known_contexts.exists ())
3043 return false;
3044 if (!opt_for_fn (ie->caller->decl, flag_indirect_inlining))
3045 return false;
3047 target = ipa_get_indirect_edge_target (ie, known_vals, known_contexts,
3048 known_aggs, &speculative);
3049 if (!target || speculative)
3050 return false;
3052 /* Account for difference in cost between indirect and direct calls. */
3053 *size -= (eni_size_weights.indirect_call_cost - eni_size_weights.call_cost);
3054 *time -= (eni_time_weights.indirect_call_cost - eni_time_weights.call_cost);
3055 gcc_checking_assert (*time >= 0);
3056 gcc_checking_assert (*size >= 0);
3058 callee = cgraph_node::get (target);
3059 if (!callee || !callee->definition)
3060 return false;
3061 callee = callee->function_symbol (&avail);
3062 if (avail < AVAIL_AVAILABLE)
3063 return false;
3064 isummary = inline_summaries->get (callee);
3065 return isummary->inlinable;
3068 /* Increase SIZE, MIN_SIZE (if non-NULL) and TIME for size and time needed to
3069 handle edge E with probability PROB.
3070 Set HINTS if edge may be devirtualized.
3071 KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS describe context of the call
3072 site. */
3074 static inline void
3075 estimate_edge_size_and_time (struct cgraph_edge *e, int *size, int *min_size,
3076 int *time,
3077 int prob,
3078 vec<tree> known_vals,
3079 vec<ipa_polymorphic_call_context> known_contexts,
3080 vec<ipa_agg_jump_function_p> known_aggs,
3081 inline_hints *hints)
3083 struct inline_edge_summary *es = inline_edge_summary (e);
3084 int call_size = es->call_stmt_size;
3085 int call_time = es->call_stmt_time;
3086 int cur_size;
3087 if (!e->callee
3088 && estimate_edge_devirt_benefit (e, &call_size, &call_time,
3089 known_vals, known_contexts, known_aggs)
3090 && hints && e->maybe_hot_p ())
3091 *hints |= INLINE_HINT_indirect_call;
3092 cur_size = call_size * INLINE_SIZE_SCALE;
3093 *size += cur_size;
3094 if (min_size)
3095 *min_size += cur_size;
3096 *time += apply_probability ((gcov_type) call_time, prob)
3097 * e->frequency * (INLINE_TIME_SCALE / CGRAPH_FREQ_BASE);
3098 if (*time > MAX_TIME * INLINE_TIME_SCALE)
3099 *time = MAX_TIME * INLINE_TIME_SCALE;
3104 /* Increase SIZE, MIN_SIZE and TIME for size and time needed to handle all
3105 calls in NODE. POSSIBLE_TRUTHS, KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
3106 describe context of the call site. */
3108 static void
3109 estimate_calls_size_and_time (struct cgraph_node *node, int *size,
3110 int *min_size, int *time,
3111 inline_hints *hints,
3112 clause_t possible_truths,
3113 vec<tree> known_vals,
3114 vec<ipa_polymorphic_call_context> known_contexts,
3115 vec<ipa_agg_jump_function_p> known_aggs)
3117 struct cgraph_edge *e;
3118 for (e = node->callees; e; e = e->next_callee)
3120 if (inline_edge_summary_vec.length () <= (unsigned) e->uid)
3121 continue;
3123 struct inline_edge_summary *es = inline_edge_summary (e);
3125 /* Do not care about zero sized builtins. */
3126 if (e->inline_failed && !es->call_stmt_size)
3128 gcc_checking_assert (!es->call_stmt_time);
3129 continue;
3131 if (!es->predicate
3132 || evaluate_predicate (es->predicate, possible_truths))
3134 if (e->inline_failed)
3136 /* Predicates of calls shall not use NOT_CHANGED codes,
3137 so we do not need to compute probabilities. */
3138 estimate_edge_size_and_time (e, size,
3139 es->predicate ? NULL : min_size,
3140 time, REG_BR_PROB_BASE,
3141 known_vals, known_contexts,
3142 known_aggs, hints);
3144 else
3145 estimate_calls_size_and_time (e->callee, size, min_size, time,
3146 hints,
3147 possible_truths,
3148 known_vals, known_contexts,
3149 known_aggs);
3152 for (e = node->indirect_calls; e; e = e->next_callee)
3154 if (inline_edge_summary_vec.length () <= (unsigned) e->uid)
3155 continue;
3157 struct inline_edge_summary *es = inline_edge_summary (e);
3158 if (!es->predicate
3159 || evaluate_predicate (es->predicate, possible_truths))
3160 estimate_edge_size_and_time (e, size,
3161 es->predicate ? NULL : min_size,
3162 time, REG_BR_PROB_BASE,
3163 known_vals, known_contexts, known_aggs,
3164 hints);
3169 /* Estimate size and time needed to execute NODE assuming
3170 POSSIBLE_TRUTHS clause, and KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
3171 information about NODE's arguments. If non-NULL use also probability
3172 information present in INLINE_PARAM_SUMMARY vector.
3173 Additionally determine hints implied by the context. Finally compute
3174 the minimal size needed for the call that is independent of the call context and
3175 can be used for fast estimates. Return the values in RET_SIZE,
3176 RET_MIN_SIZE, RET_TIME and RET_HINTS. */
3178 static void
3179 estimate_node_size_and_time (struct cgraph_node *node,
3180 clause_t possible_truths,
3181 vec<tree> known_vals,
3182 vec<ipa_polymorphic_call_context> known_contexts,
3183 vec<ipa_agg_jump_function_p> known_aggs,
3184 int *ret_size, int *ret_min_size, int *ret_time,
3185 inline_hints *ret_hints,
3186 vec<inline_param_summary>
3187 inline_param_summary)
3189 struct inline_summary *info = inline_summaries->get (node);
3190 size_time_entry *e;
3191 int size = 0;
3192 int time = 0;
3193 int min_size = 0;
3194 inline_hints hints = 0;
3195 int i;
3197 if (dump_file && (dump_flags & TDF_DETAILS))
3199 bool found = false;
3200 fprintf (dump_file, " Estimating body: %s/%i\n"
3201 " Known to be false: ", node->name (),
3202 node->order);
3204 for (i = predicate_not_inlined_condition;
3205 i < (predicate_first_dynamic_condition
3206 + (int) vec_safe_length (info->conds)); i++)
3207 if (!(possible_truths & (1 << i)))
3209 if (found)
3210 fprintf (dump_file, ", ");
3211 found = true;
3212 dump_condition (dump_file, info->conds, i);
3216 for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
3217 if (evaluate_predicate (&e->predicate, possible_truths))
3219 size += e->size;
3220 gcc_checking_assert (e->time >= 0);
3221 gcc_checking_assert (time >= 0);
3222 if (!inline_param_summary.exists ())
3223 time += e->time;
3224 else
3226 int prob = predicate_probability (info->conds,
3227 &e->predicate,
3228 possible_truths,
3229 inline_param_summary);
3230 gcc_checking_assert (prob >= 0);
3231 gcc_checking_assert (prob <= REG_BR_PROB_BASE);
3232 time += apply_probability ((gcov_type) e->time, prob);
3234 if (time > MAX_TIME * INLINE_TIME_SCALE)
3235 time = MAX_TIME * INLINE_TIME_SCALE;
3236 gcc_checking_assert (time >= 0);
3239 gcc_checking_assert (true_predicate_p (&(*info->entry)[0].predicate));
3240 min_size = (*info->entry)[0].size;
3241 gcc_checking_assert (size >= 0);
3242 gcc_checking_assert (time >= 0);
3244 if (info->loop_iterations
3245 && !evaluate_predicate (info->loop_iterations, possible_truths))
3246 hints |= INLINE_HINT_loop_iterations;
3247 if (info->loop_stride
3248 && !evaluate_predicate (info->loop_stride, possible_truths))
3249 hints |= INLINE_HINT_loop_stride;
3250 if (info->array_index
3251 && !evaluate_predicate (info->array_index, possible_truths))
3252 hints |= INLINE_HINT_array_index;
3253 if (info->scc_no)
3254 hints |= INLINE_HINT_in_scc;
3255 if (DECL_DECLARED_INLINE_P (node->decl))
3256 hints |= INLINE_HINT_declared_inline;
3258 estimate_calls_size_and_time (node, &size, &min_size, &time, &hints, possible_truths,
3259 known_vals, known_contexts, known_aggs);
3260 gcc_checking_assert (size >= 0);
3261 gcc_checking_assert (time >= 0);
3262 time = RDIV (time, INLINE_TIME_SCALE);
3263 size = RDIV (size, INLINE_SIZE_SCALE);
3264 min_size = RDIV (min_size, INLINE_SIZE_SCALE);
3266 if (dump_file && (dump_flags & TDF_DETAILS))
3267 fprintf (dump_file, "\n size:%i time:%i\n", (int) size, (int) time);
3268 if (ret_time)
3269 *ret_time = time;
3270 if (ret_size)
3271 *ret_size = size;
3272 if (ret_min_size)
3273 *ret_min_size = min_size;
3274 if (ret_hints)
3275 *ret_hints = hints;
3276 return;
3280 /* Estimate size and time needed to execute callee of EDGE assuming that
3281 parameters known to be constant at caller of EDGE are propagated.
3282 KNOWN_VALS and KNOWN_CONTEXTS are vectors of assumed known constant values
3283 and types for parameters. */
3285 void
3286 estimate_ipcp_clone_size_and_time (struct cgraph_node *node,
3287 vec<tree> known_vals,
3288 vec<ipa_polymorphic_call_context>
3289 known_contexts,
3290 vec<ipa_agg_jump_function_p> known_aggs,
3291 int *ret_size, int *ret_time,
3292 inline_hints *hints)
3294 clause_t clause;
3296 clause = evaluate_conditions_for_known_args (node, false, known_vals,
3297 known_aggs);
3298 estimate_node_size_and_time (node, clause, known_vals, known_contexts,
3299 known_aggs, ret_size, NULL, ret_time, hints, vNULL);
3302 /* Translate all conditions from callee representation into caller
3303 representation and symbolically evaluate predicate P into new predicate.
3305 INFO is the inline_summary of the function we are adding the predicate to,
3306 CALLEE_INFO is the summary of the function predicate P is from. OPERAND_MAP
3307 is an array mapping callee formal IDs to caller formal IDs. POSSIBLE_TRUTHS
3308 is a clause of all callee conditions that may be true in the caller context.
3309 TOPLEV_PREDICATE is the predicate under which the callee is executed.
3310 OFFSET_MAP is an array of offsets that need to be added to conditions; a negative offset means that
3311 conditions relying on values passed by reference have to be discarded
3312 because they might not be preserved (and should be considered offset zero
3313 for other purposes). */
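/* A small example of the remapping (hypothetical numbers): if the callee
   predicate contains the condition "param 2 == 0" and OPERAND_MAP[2] == 5
   with OFFSET_MAP[2] == 0, the condition becomes "caller param 5 == 0".
   If OPERAND_MAP[2] == -1 (the argument is not a simple pass-through),
   the condition degrades to the true predicate, i.e. we assume nothing
   about it.  */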
3315 static struct predicate
3316 remap_predicate (struct inline_summary *info,
3317 struct inline_summary *callee_info,
3318 struct predicate *p,
3319 vec<int> operand_map,
3320 vec<int> offset_map,
3321 clause_t possible_truths, struct predicate *toplev_predicate)
3323 int i;
3324 struct predicate out = true_predicate ();
3326 /* True predicate is easy. */
3327 if (true_predicate_p (p))
3328 return *toplev_predicate;
3329 for (i = 0; p->clause[i]; i++)
3331 clause_t clause = p->clause[i];
3332 int cond;
3333 struct predicate clause_predicate = false_predicate ();
3335 gcc_assert (i < MAX_CLAUSES);
3337 for (cond = 0; cond < NUM_CONDITIONS; cond++)
3338 /* Do we have a condition we can't disprove? */
3339 if (clause & possible_truths & (1 << cond))
3341 struct predicate cond_predicate;
3342 /* Work out if the condition can translate to predicate in the
3343 inlined function. */
3344 if (cond >= predicate_first_dynamic_condition)
3346 struct condition *c;
3348 c = &(*callee_info->conds)[cond
3350 predicate_first_dynamic_condition];
3351 /* See if we can remap condition operand to caller's operand.
3352 Otherwise give up. */
3353 if (!operand_map.exists ()
3354 || (int) operand_map.length () <= c->operand_num
3355 || operand_map[c->operand_num] == -1
3356 /* TODO: For non-aggregate conditions, adding an offset is
3357 basically an arithmetic jump function processing which
3358 we should support in future. */
3359 || ((!c->agg_contents || !c->by_ref)
3360 && offset_map[c->operand_num] > 0)
3361 || (c->agg_contents && c->by_ref
3362 && offset_map[c->operand_num] < 0))
3363 cond_predicate = true_predicate ();
3364 else
3366 struct agg_position_info ap;
3367 HOST_WIDE_INT offset_delta = offset_map[c->operand_num];
3368 if (offset_delta < 0)
3370 gcc_checking_assert (!c->agg_contents || !c->by_ref);
3371 offset_delta = 0;
3373 gcc_assert (!c->agg_contents
3374 || c->by_ref || offset_delta == 0);
3375 ap.offset = c->offset + offset_delta;
3376 ap.agg_contents = c->agg_contents;
3377 ap.by_ref = c->by_ref;
3378 cond_predicate = add_condition (info,
3379 operand_map[c->operand_num],
3380 &ap, c->code, c->val);
3383 /* Fixed conditions remain the same; construct a single
3384 condition predicate. */
3385 else
3387 cond_predicate.clause[0] = 1 << cond;
3388 cond_predicate.clause[1] = 0;
3390 clause_predicate = or_predicates (info->conds, &clause_predicate,
3391 &cond_predicate);
3393 out = and_predicates (info->conds, &out, &clause_predicate);
3395 return and_predicates (info->conds, &out, toplev_predicate);
3399 /* Update summary information of inline clones after inlining.
3400 Compute peak stack usage. */
3402 static void
3403 inline_update_callee_summaries (struct cgraph_node *node, int depth)
3405 struct cgraph_edge *e;
3406 struct inline_summary *callee_info = inline_summaries->get (node);
3407 struct inline_summary *caller_info = inline_summaries->get (node->callers->caller);
3408 HOST_WIDE_INT peak;
3410 callee_info->stack_frame_offset
3411 = caller_info->stack_frame_offset
3412 + caller_info->estimated_self_stack_size;
3413 peak = callee_info->stack_frame_offset
3414 + callee_info->estimated_self_stack_size;
3415 if (inline_summaries->get (node->global.inlined_to)->estimated_stack_size < peak)
3416 inline_summaries->get (node->global.inlined_to)->estimated_stack_size = peak;
3417 ipa_propagate_frequency (node);
3418 for (e = node->callees; e; e = e->next_callee)
3420 if (!e->inline_failed)
3421 inline_update_callee_summaries (e->callee, depth);
3422 inline_edge_summary (e)->loop_depth += depth;
3424 for (e = node->indirect_calls; e; e = e->next_callee)
3425 inline_edge_summary (e)->loop_depth += depth;
3428 /* Update change_prob of EDGE after INLINED_EDGE has been inlined.
3429 When function A is inlined into B, A calls C with a parameter that
3430 changes with probability PROB1, and that parameter is known to be a
3431 passthrough of an argument of B that changes with probability PROB2,
3432 the probability of change is now PROB1*PROB2. */
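/* A worked example (probabilities are scaled by REG_BR_PROB_BASE, normally
   10000): PROB1 == 5000 (50%) and PROB2 == 2000 (20%) combine to
   5000 * 2000 / 10000 == 1000, i.e. 10%.  When rounding would yield 0 from
   two nonzero inputs, the code below keeps 1 so the parameter is not
   treated as a compile time invariant.  */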
3434 static void
3435 remap_edge_change_prob (struct cgraph_edge *inlined_edge,
3436 struct cgraph_edge *edge)
3438 if (ipa_node_params_sum)
3440 int i;
3441 struct ipa_edge_args *args = IPA_EDGE_REF (edge);
3442 struct inline_edge_summary *es = inline_edge_summary (edge);
3443 struct inline_edge_summary *inlined_es
3444 = inline_edge_summary (inlined_edge);
3446 for (i = 0; i < ipa_get_cs_argument_count (args); i++)
3448 struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i);
3449 if (jfunc->type == IPA_JF_PASS_THROUGH
3450 && (ipa_get_jf_pass_through_formal_id (jfunc)
3451 < (int) inlined_es->param.length ()))
3453 int jf_formal_id = ipa_get_jf_pass_through_formal_id (jfunc);
3454 int prob1 = es->param[i].change_prob;
3455 int prob2 = inlined_es->param[jf_formal_id].change_prob;
3456 int prob = combine_probabilities (prob1, prob2);
3458 if (prob1 && prob2 && !prob)
3459 prob = 1;
3461 es->param[i].change_prob = prob;
3467 /* Update edge summaries of NODE after INLINED_EDGE has been inlined.
3469 Remap predicates of callees of NODE. Rest of arguments match
3470 remap_predicate.
3472 Also update change probabilities. */
3474 static void
3475 remap_edge_summaries (struct cgraph_edge *inlined_edge,
3476 struct cgraph_node *node,
3477 struct inline_summary *info,
3478 struct inline_summary *callee_info,
3479 vec<int> operand_map,
3480 vec<int> offset_map,
3481 clause_t possible_truths,
3482 struct predicate *toplev_predicate)
3484 struct cgraph_edge *e, *next;
3485 for (e = node->callees; e; e = next)
3487 struct inline_edge_summary *es = inline_edge_summary (e);
3488 struct predicate p;
3489 next = e->next_callee;
3491 if (e->inline_failed)
3493 remap_edge_change_prob (inlined_edge, e);
3495 if (es->predicate)
3497 p = remap_predicate (info, callee_info,
3498 es->predicate, operand_map, offset_map,
3499 possible_truths, toplev_predicate);
3500 edge_set_predicate (e, &p);
3502 else
3503 edge_set_predicate (e, toplev_predicate);
3505 else
3506 remap_edge_summaries (inlined_edge, e->callee, info, callee_info,
3507 operand_map, offset_map, possible_truths,
3508 toplev_predicate);
3510 for (e = node->indirect_calls; e; e = next)
3512 struct inline_edge_summary *es = inline_edge_summary (e);
3513 struct predicate p;
3514 next = e->next_callee;
3516 remap_edge_change_prob (inlined_edge, e);
3517 if (es->predicate)
3519 p = remap_predicate (info, callee_info,
3520 es->predicate, operand_map, offset_map,
3521 possible_truths, toplev_predicate);
3522 edge_set_predicate (e, &p);
3524 else
3525 edge_set_predicate (e, toplev_predicate);
3529 /* Same as remap_predicate, but set result into hint *HINT. */
3531 static void
3532 remap_hint_predicate (struct inline_summary *info,
3533 struct inline_summary *callee_info,
3534 struct predicate **hint,
3535 vec<int> operand_map,
3536 vec<int> offset_map,
3537 clause_t possible_truths,
3538 struct predicate *toplev_predicate)
3540 predicate p;
3542 if (!*hint)
3543 return;
3544 p = remap_predicate (info, callee_info,
3545 *hint,
3546 operand_map, offset_map,
3547 possible_truths, toplev_predicate);
3548 if (!false_predicate_p (&p) && !true_predicate_p (&p))
3550 if (!*hint)
3551 set_hint_predicate (hint, p);
3552 else
3553 **hint = and_predicates (info->conds, *hint, &p);
3557 /* We inlined EDGE. Update summary of the function we inlined into. */
3559 void
3560 inline_merge_summary (struct cgraph_edge *edge)
3562 struct inline_summary *callee_info = inline_summaries->get (edge->callee);
3563 struct cgraph_node *to = (edge->caller->global.inlined_to
3564 ? edge->caller->global.inlined_to : edge->caller);
3565 struct inline_summary *info = inline_summaries->get (to);
3566 clause_t clause = 0; /* not_inline is known to be false. */
3567 size_time_entry *e;
3568 vec<int> operand_map = vNULL;
3569 vec<int> offset_map = vNULL;
3570 int i;
3571 struct predicate toplev_predicate;
3572 struct predicate true_p = true_predicate ();
3573 struct inline_edge_summary *es = inline_edge_summary (edge);
3575 if (es->predicate)
3576 toplev_predicate = *es->predicate;
3577 else
3578 toplev_predicate = true_predicate ();
3580 if (callee_info->conds)
3581 evaluate_properties_for_edge (edge, true, &clause, NULL, NULL, NULL);
3582 if (ipa_node_params_sum && callee_info->conds)
3584 struct ipa_edge_args *args = IPA_EDGE_REF (edge);
3585 int count = ipa_get_cs_argument_count (args);
3586 int i;
3588 if (count)
3590 operand_map.safe_grow_cleared (count);
3591 offset_map.safe_grow_cleared (count);
3593 for (i = 0; i < count; i++)
3595 struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i);
3596 int map = -1;
3598 /* TODO: handle non-NOPs when merging. */
3599 if (jfunc->type == IPA_JF_PASS_THROUGH)
3601 if (ipa_get_jf_pass_through_operation (jfunc) == NOP_EXPR)
3602 map = ipa_get_jf_pass_through_formal_id (jfunc);
3603 if (!ipa_get_jf_pass_through_agg_preserved (jfunc))
3604 offset_map[i] = -1;
3606 else if (jfunc->type == IPA_JF_ANCESTOR)
3608 HOST_WIDE_INT offset = ipa_get_jf_ancestor_offset (jfunc);
3609 if (offset >= 0 && offset < INT_MAX)
3611 map = ipa_get_jf_ancestor_formal_id (jfunc);
3612 if (!ipa_get_jf_ancestor_agg_preserved (jfunc))
3613 offset = -1;
3614 offset_map[i] = offset;
3617 operand_map[i] = map;
3618 gcc_assert (map < ipa_get_param_count (IPA_NODE_REF (to)));
3621 for (i = 0; vec_safe_iterate (callee_info->entry, i, &e); i++)
3623 struct predicate p = remap_predicate (info, callee_info,
3624 &e->predicate, operand_map,
3625 offset_map, clause,
3626 &toplev_predicate);
3627 if (!false_predicate_p (&p))
3629 gcov_type add_time = ((gcov_type) e->time * edge->frequency
3630 + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
3631 int prob = predicate_probability (callee_info->conds,
3632 &e->predicate,
3633 clause, es->param);
3634 add_time = apply_probability ((gcov_type) add_time, prob);
3635 if (add_time > MAX_TIME * INLINE_TIME_SCALE)
3636 add_time = MAX_TIME * INLINE_TIME_SCALE;
3637 if (prob != REG_BR_PROB_BASE
3638 && dump_file && (dump_flags & TDF_DETAILS))
3640 fprintf (dump_file, "\t\tScaling time by probability:%f\n",
3641 (double) prob / REG_BR_PROB_BASE);
3643 account_size_time (info, e->size, add_time, &p);
3646 remap_edge_summaries (edge, edge->callee, info, callee_info, operand_map,
3647 offset_map, clause, &toplev_predicate);
3648 remap_hint_predicate (info, callee_info,
3649 &callee_info->loop_iterations,
3650 operand_map, offset_map, clause, &toplev_predicate);
3651 remap_hint_predicate (info, callee_info,
3652 &callee_info->loop_stride,
3653 operand_map, offset_map, clause, &toplev_predicate);
3654 remap_hint_predicate (info, callee_info,
3655 &callee_info->array_index,
3656 operand_map, offset_map, clause, &toplev_predicate);
3658 inline_update_callee_summaries (edge->callee,
3659 inline_edge_summary (edge)->loop_depth);
3661 /* We do not maintain predicates of inlined edges, free it. */
3662 edge_set_predicate (edge, &true_p);
3663 /* Similarly remove param summaries. */
3664 es->param.release ();
3665 operand_map.release ();
3666 offset_map.release ();
3669 /* For performance reasons inline_merge_summary does not update the overall
3670 size and time. Recompute them here. */
3672 void
3673 inline_update_overall_summary (struct cgraph_node *node)
3675 struct inline_summary *info = inline_summaries->get (node);
3676 size_time_entry *e;
3677 int i;
3679 info->size = 0;
3680 info->time = 0;
3681 for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
3683 info->size += e->size, info->time += e->time;
3684 if (info->time > MAX_TIME * INLINE_TIME_SCALE)
3685 info->time = MAX_TIME * INLINE_TIME_SCALE;
3687 estimate_calls_size_and_time (node, &info->size, &info->min_size,
3688 &info->time, NULL,
3689 ~(clause_t) (1 << predicate_false_condition),
3690 vNULL, vNULL, vNULL);
3691 info->time = (info->time + INLINE_TIME_SCALE / 2) / INLINE_TIME_SCALE;
3692 info->size = (info->size + INLINE_SIZE_SCALE / 2) / INLINE_SIZE_SCALE;
3695 /* Return hints derived from EDGE. */
3697 simple_edge_hints (struct cgraph_edge *edge)
3699 int hints = 0;
3700 struct cgraph_node *to = (edge->caller->global.inlined_to
3701 ? edge->caller->global.inlined_to : edge->caller);
3702 struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
3703 if (inline_summaries->get (to)->scc_no
3704 && inline_summaries->get (to)->scc_no
3705 == inline_summaries->get (callee)->scc_no
3706 && !edge->recursive_p ())
3707 hints |= INLINE_HINT_same_scc;
3709 if (callee->lto_file_data && edge->caller->lto_file_data
3710 && edge->caller->lto_file_data != callee->lto_file_data
3711 && !callee->merged_comdat && !callee->icf_merged)
3712 hints |= INLINE_HINT_cross_module;
3714 return hints;
3717 /* Estimate the time cost for the caller when inlining EDGE.
3718 Only to be called via estimate_edge_time, which handles the
3719 caching mechanism.
3721 When caching, also update the cache entry. Compute both time and
3722 size, since we always need both metrics eventually. */
3725 do_estimate_edge_time (struct cgraph_edge *edge)
3727 int time;
3728 int size;
3729 inline_hints hints;
3730 struct cgraph_node *callee;
3731 clause_t clause;
3732 vec<tree> known_vals;
3733 vec<ipa_polymorphic_call_context> known_contexts;
3734 vec<ipa_agg_jump_function_p> known_aggs;
3735 struct inline_edge_summary *es = inline_edge_summary (edge);
3736 int min_size;
3738 callee = edge->callee->ultimate_alias_target ();
3740 gcc_checking_assert (edge->inline_failed);
3741 evaluate_properties_for_edge (edge, true,
3742 &clause, &known_vals, &known_contexts,
3743 &known_aggs);
3744 estimate_node_size_and_time (callee, clause, known_vals, known_contexts,
3745 known_aggs, &size, &min_size, &time, &hints, es->param);
3747 /* When we have profile feedback, we can quite safely identify hot
3748 edges and for those we disable size limits. Don't do that when the
3749 probability that the caller will call the callee is low, however, since it
3750 may hurt optimization of the caller's hot path. */
3751 if (edge->count && edge->maybe_hot_p ()
3752 && (edge->count * 2
3753 > (edge->caller->global.inlined_to
3754 ? edge->caller->global.inlined_to->count : edge->caller->count)))
3755 hints |= INLINE_HINT_known_hot;
3757 known_vals.release ();
3758 known_contexts.release ();
3759 known_aggs.release ();
3760 gcc_checking_assert (size >= 0);
3761 gcc_checking_assert (time >= 0);
3763 /* When caching, update the cache entry. */
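/* Cached values are stored biased by one so that a zero entry always means
   "not cached yet"; do_estimate_edge_size and do_estimate_edge_hints below
   subtract the bias again when reading the cache.  */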
3764 if (edge_growth_cache.exists ())
3766 inline_summaries->get (edge->callee)->min_size = min_size;
3767 if ((int) edge_growth_cache.length () <= edge->uid)
3768 edge_growth_cache.safe_grow_cleared (symtab->edges_max_uid);
3769 edge_growth_cache[edge->uid].time = time + (time >= 0);
3771 edge_growth_cache[edge->uid].size = size + (size >= 0);
3772 hints |= simple_edge_hints (edge);
3773 edge_growth_cache[edge->uid].hints = hints + 1;
3775 return time;
3779 /* Return estimated callee growth after inlining EDGE.
3780 Only to be called via estimate_edge_size. */
3783 do_estimate_edge_size (struct cgraph_edge *edge)
3785 int size;
3786 struct cgraph_node *callee;
3787 clause_t clause;
3788 vec<tree> known_vals;
3789 vec<ipa_polymorphic_call_context> known_contexts;
3790 vec<ipa_agg_jump_function_p> known_aggs;
3792 /* When we do caching, use do_estimate_edge_time to populate the entry. */
3794 if (edge_growth_cache.exists ())
3796 do_estimate_edge_time (edge);
3797 size = edge_growth_cache[edge->uid].size;
3798 gcc_checking_assert (size);
3799 return size - (size > 0);
3802 callee = edge->callee->ultimate_alias_target ();
3804 /* Early inliner runs without caching, go ahead and do the dirty work. */
3805 gcc_checking_assert (edge->inline_failed);
3806 evaluate_properties_for_edge (edge, true,
3807 &clause, &known_vals, &known_contexts,
3808 &known_aggs);
3809 estimate_node_size_and_time (callee, clause, known_vals, known_contexts,
3810 known_aggs, &size, NULL, NULL, NULL, vNULL);
3811 known_vals.release ();
3812 known_contexts.release ();
3813 known_aggs.release ();
3814 return size;
3818 /* Estimate the hints for the caller when inlining EDGE.
3819 Only to be called via estimate_edge_hints. */
3821 inline_hints
3822 do_estimate_edge_hints (struct cgraph_edge *edge)
3824 inline_hints hints;
3825 struct cgraph_node *callee;
3826 clause_t clause;
3827 vec<tree> known_vals;
3828 vec<ipa_polymorphic_call_context> known_contexts;
3829 vec<ipa_agg_jump_function_p> known_aggs;
3831 /* When we do caching, use do_estimate_edge_time to populate the entry. */
3833 if (edge_growth_cache.exists ())
3835 do_estimate_edge_time (edge);
3836 hints = edge_growth_cache[edge->uid].hints;
3837 gcc_checking_assert (hints);
3838 return hints - 1;
3841 callee = edge->callee->ultimate_alias_target ();
3843 /* Early inliner runs without caching, go ahead and do the dirty work. */
3844 gcc_checking_assert (edge->inline_failed);
3845 evaluate_properties_for_edge (edge, true,
3846 &clause, &known_vals, &known_contexts,
3847 &known_aggs);
3848 estimate_node_size_and_time (callee, clause, known_vals, known_contexts,
3849 known_aggs, NULL, NULL, NULL, &hints, vNULL);
3850 known_vals.release ();
3851 known_contexts.release ();
3852 known_aggs.release ();
3853 hints |= simple_edge_hints (edge);
3854 return hints;
3858 /* Estimate self time of the function NODE after inlining EDGE. */
3861 estimate_time_after_inlining (struct cgraph_node *node,
3862 struct cgraph_edge *edge)
3864 struct inline_edge_summary *es = inline_edge_summary (edge);
3865 if (!es->predicate || !false_predicate_p (es->predicate))
3866 {
3867 gcov_type time =
3868 inline_summaries->get (node)->time + estimate_edge_time (edge);
3869 if (time < 0)
3870 time = 0;
3871 if (time > MAX_TIME)
3872 time = MAX_TIME;
3873 return time;
3874 }
3875 return inline_summaries->get (node)->time;
3879 /* Estimate the size of NODE after inlining EDGE which should be an
3880 edge to either NODE or a call inlined into NODE. */
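/* If the call is already known to be optimized away in this context (its
   predicate evaluates to false), inlining it cannot change the size, so
   the summary size is returned unchanged. */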
3882 int
3883 estimate_size_after_inlining (struct cgraph_node *node,
3884 struct cgraph_edge *edge)
3885 {
3886 struct inline_edge_summary *es = inline_edge_summary (edge);
3887 if (!es->predicate || !false_predicate_p (es->predicate))
3888 {
3889 int size = inline_summaries->get (node)->size + estimate_edge_growth (edge);
3890 gcc_assert (size >= 0);
3891 return size;
3892 }
3893 return inline_summaries->get (node)->size;
3897 struct growth_data
3899 struct cgraph_node *node;
3900 bool self_recursive;
3901 bool uninlinable;
3902 int growth;
3906 /* Worker for do_estimate_growth. Collect growth for all callers. */
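/* Returning false keeps call_for_symbol_and_aliases walking the remaining
   aliases; returning true would stop the walk early. */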
3908 static bool
3909 do_estimate_growth_1 (struct cgraph_node *node, void *data)
3911 struct cgraph_edge *e;
3912 struct growth_data *d = (struct growth_data *) data;
3914 for (e = node->callers; e; e = e->next_caller)
3915 {
3916 gcc_checking_assert (e->inline_failed);
3918 if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
3919 {
3920 d->uninlinable = true;
3921 continue;
3922 }
3924 if (e->recursive_p ())
3925 {
3926 d->self_recursive = true;
3927 continue;
3928 }
3929 d->growth += estimate_edge_growth (e);
3930 }
3931 return false;
3932 }
3935 /* Estimate the growth caused by inlining NODE into all callees. */
3937 int
3938 estimate_growth (struct cgraph_node *node)
3939 {
3940 struct growth_data d = { node, false, false, 0 };
3941 struct inline_summary *info = inline_summaries->get (node);
3943 node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true);
3945 /* For self recursive functions the growth estimation really should be
3946 infinity. We don't want to return very large values because the growth
3947 plays various roles in badness computation fractions. Be sure to not
3948 return zero or negative growths. */
3949 if (d.self_recursive)
3950 d.growth = d.growth < info->size ? info->size : d.growth;
3951 else if (DECL_EXTERNAL (node->decl) || d.uninlinable)
3952 ;
3953 else
3954 {
3955 if (node->will_be_removed_from_program_if_no_direct_calls_p ())
3956 d.growth -= info->size;
3957 /* COMDAT functions are very often not shared across multiple units
3958 since they come from various template instantiations.
3959 Take this into account. */
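/* The expression below subtracts the part of the body expected to
   disappear: size scaled by (100 - PARAM_COMDAT_SHARING_PROBABILITY) / 100,
   with + 50 added before the division so the result is rounded to the
   nearest integer instead of truncated. */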
3960 else if (DECL_COMDAT (node->decl)
3961 && node->can_remove_if_no_direct_calls_p ())
3962 d.growth -= (info->size
3963 * (100 - PARAM_VALUE (PARAM_COMDAT_SHARING_PROBABILITY))
3964 + 50) / 100;
3965 }
3967 return d.growth;
3968 }
3970 /* Verify if there are fewer than MAX_CALLERS. */
3972 static bool
3973 check_callers (cgraph_node *node, int *max_callers)
3975 ipa_ref *ref;
3977 if (!node->can_remove_if_no_direct_calls_and_refs_p ())
3978 return true;
3980 for (cgraph_edge *e = node->callers; e; e = e->next_caller)
3981 {
3982 (*max_callers)--;
3983 if (!*max_callers
3984 || cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
3985 return true;
3986 }
3988 FOR_EACH_ALIAS (node, ref)
3989 if (check_callers (dyn_cast <cgraph_node *> (ref->referring), max_callers))
3990 return true;
3992 return false;
3996 /* Make a cheap estimation of whether the growth of NODE is likely
3997 positive, knowing EDGE_GROWTH of one particular edge.
3998 We assume that most of the other edges will have similar growth
3999 and skip the computation if there are too many callers. */
4001 bool
4002 growth_likely_positive (struct cgraph_node *node,
4003 int edge_growth)
4005 int max_callers;
4006 struct cgraph_edge *e;
4007 gcc_checking_assert (edge_growth > 0);
4009 /* First quickly check if NODE is removable at all. */
4010 if (DECL_EXTERNAL (node->decl))
4011 return true;
4012 if (!node->can_remove_if_no_direct_calls_and_refs_p ()
4013 || node->address_taken)
4014 return true;
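/* Bound how many callers are worth inspecting. Once roughly
   size * 4 / edge_growth callers exist, their combined growth dwarfs
   whatever is saved by removing the offline copy, so the answer is almost
   certainly positive; the factor of 4 looks like a safety margin for
   callers that grow less than EDGE_GROWTH. */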
4016 max_callers = inline_summaries->get (node)->size * 4 / edge_growth + 2;
4018 for (e = node->callers; e; e = e->next_caller)
4019 {
4020 max_callers--;
4021 if (!max_callers
4022 || cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
4023 return true;
4024 }
4026 ipa_ref *ref;
4027 FOR_EACH_ALIAS (node, ref)
4028 if (check_callers (dyn_cast <cgraph_node *> (ref->referring), &max_callers))
4029 return true;
4031 /* Unlike for functions called once, we play unsafe with
4032 COMDATs. We can allow that since we know the functions
4033 in consideration are small (and thus the risk is small) and
4034 moreover the growth estimate already accounts for the fact that
4035 COMDAT functions may or may not disappear when eliminated from
4036 the current unit. With good probability, making the aggressive
4037 choice in all units is going to make the overall program
4038 smaller. */
4039 if (DECL_COMDAT (node->decl))
4040 {
4041 if (!node->can_remove_if_no_direct_calls_p ())
4042 return true;
4043 }
4044 else if (!node->will_be_removed_from_program_if_no_direct_calls_p ())
4045 return true;
4047 return estimate_growth (node) > 0;
4051 /* This function performs intraprocedural analysis in NODE that is required to
4052 inline indirect calls. */
4054 static void
4055 inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
4057 ipa_analyze_node (node);
4058 if (dump_file && (dump_flags & TDF_DETAILS))
4059 {
4060 ipa_print_node_params (dump_file, node);
4061 ipa_print_node_jump_functions (dump_file, node);
4062 }
4063 }
4066 /* Note function body size. */
4068 void
4069 inline_analyze_function (struct cgraph_node *node)
4071 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
4073 if (dump_file)
4074 fprintf (dump_file, "\nAnalyzing function: %s/%u\n",
4075 node->name (), node->order);
4076 if (opt_for_fn (node->decl, optimize) && !node->thunk.thunk_p)
4077 inline_indirect_intraprocedural_analysis (node);
4078 compute_inline_parameters (node, false);
4079 if (!optimize)
4080 {
4081 struct cgraph_edge *e;
4082 for (e = node->callees; e; e = e->next_callee)
4083 {
4084 if (e->inline_failed == CIF_FUNCTION_NOT_CONSIDERED)
4085 e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
4086 e->call_stmt_cannot_inline_p = true;
4087 }
4088 for (e = node->indirect_calls; e; e = e->next_callee)
4089 {
4090 if (e->inline_failed == CIF_FUNCTION_NOT_CONSIDERED)
4091 e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
4092 e->call_stmt_cannot_inline_p = true;
4093 }
4094 }
4096 pop_cfun ();
4097 }
4100 /* Called when a new function is inserted into the callgraph late. */
4102 void
4103 inline_summary_t::insert (struct cgraph_node *node, inline_summary *)
4105 inline_analyze_function (node);
4108 /* Note function body size. */
4110 void
4111 inline_generate_summary (void)
4113 struct cgraph_node *node;
4115 FOR_EACH_DEFINED_FUNCTION (node)
4116 if (DECL_STRUCT_FUNCTION (node->decl))
4117 node->local.versionable = tree_versionable_function_p (node->decl);
4119 /* When not optimizing, do not bother to analyze. Inlining is still done
4120 because edge redirection needs to happen there. */
4121 if (!optimize && !flag_generate_lto && !flag_generate_offload && !flag_wpa)
4122 return;
4124 if (!inline_summaries)
4125 inline_summaries = (inline_summary_t*) inline_summary_t::create_ggc (symtab);
4127 inline_summaries->enable_insertion_hook ();
4129 ipa_register_cgraph_hooks ();
4130 inline_free_summary ();
4132 FOR_EACH_DEFINED_FUNCTION (node)
4133 if (!node->alias)
4134 inline_analyze_function (node);
4138 /* Read predicate from IB. */
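/* On the wire a predicate is a sequence of non-zero clauses terminated by
   a zero; see write_predicate below for the matching writer. */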
4140 static struct predicate
4141 read_predicate (struct lto_input_block *ib)
4142 {
4143 struct predicate out;
4144 clause_t clause;
4145 int k = 0;
4147 do
4148 {
4149 gcc_assert (k <= MAX_CLAUSES);
4150 clause = out.clause[k++] = streamer_read_uhwi (ib);
4151 }
4152 while (clause);
4154 /* Zero-initialize the remaining clauses in OUT. */
4155 while (k <= MAX_CLAUSES)
4156 out.clause[k++] = 0;
4158 return out;
4162 /* Read the inline summary for edge E from IB. */
4164 static void
4165 read_inline_edge_summary (struct lto_input_block *ib, struct cgraph_edge *e)
4166 {
4167 struct inline_edge_summary *es = inline_edge_summary (e);
4168 struct predicate p;
4169 int length, i;
4171 es->call_stmt_size = streamer_read_uhwi (ib);
4172 es->call_stmt_time = streamer_read_uhwi (ib);
4173 es->loop_depth = streamer_read_uhwi (ib);
4174 p = read_predicate (ib);
4175 edge_set_predicate (e, &p);
4176 length = streamer_read_uhwi (ib);
4177 if (length)
4178 {
4179 es->param.safe_grow_cleared (length);
4180 for (i = 0; i < length; i++)
4181 es->param[i].change_prob = streamer_read_uhwi (ib);
4182 }
4183 }
4186 /* Stream in inline summaries from the section. */
4188 static void
4189 inline_read_section (struct lto_file_decl_data *file_data, const char *data,
4190 size_t len)
4192 const struct lto_function_header *header =
4193 (const struct lto_function_header *) data;
4194 const int cfg_offset = sizeof (struct lto_function_header);
4195 const int main_offset = cfg_offset + header->cfg_size;
4196 const int string_offset = main_offset + header->main_size;
4197 struct data_in *data_in;
4198 unsigned int i, count2, j;
4199 unsigned int f_count;
4201 lto_input_block ib ((const char *) data + main_offset, header->main_size,
4202 file_data->mode_table);
4204 data_in =
4205 lto_data_in_create (file_data, (const char *) data + string_offset,
4206 header->string_size, vNULL);
4207 f_count = streamer_read_uhwi (&ib);
4208 for (i = 0; i < f_count; i++)
4210 unsigned int index;
4211 struct cgraph_node *node;
4212 struct inline_summary *info;
4213 lto_symtab_encoder_t encoder;
4214 struct bitpack_d bp;
4215 struct cgraph_edge *e;
4216 predicate p;
4218 index = streamer_read_uhwi (&ib);
4219 encoder = file_data->symtab_node_encoder;
4220 node = dyn_cast<cgraph_node *> (lto_symtab_encoder_deref (encoder,
4221 index));
4222 info = inline_summaries->get (node);
4224 info->estimated_stack_size
4225 = info->estimated_self_stack_size = streamer_read_uhwi (&ib);
4226 info->size = info->self_size = streamer_read_uhwi (&ib);
4227 info->time = info->self_time = streamer_read_uhwi (&ib);
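/* The size/time just read serve as both the "self" and the global values;
   the global copies are expected to be updated later as inlining decisions
   are applied to the overall summary. */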
4229 bp = streamer_read_bitpack (&ib);
4230 info->inlinable = bp_unpack_value (&bp, 1);
4231 info->contains_cilk_spawn = bp_unpack_value (&bp, 1);
4233 count2 = streamer_read_uhwi (&ib);
4234 gcc_assert (!info->conds);
4235 for (j = 0; j < count2; j++)
4237 struct condition c;
4238 c.operand_num = streamer_read_uhwi (&ib);
4239 c.code = (enum tree_code) streamer_read_uhwi (&ib);
4240 c.val = stream_read_tree (&ib, data_in);
4241 bp = streamer_read_bitpack (&ib);
4242 c.agg_contents = bp_unpack_value (&bp, 1);
4243 c.by_ref = bp_unpack_value (&bp, 1);
4244 if (c.agg_contents)
4245 c.offset = streamer_read_uhwi (&ib);
4246 vec_safe_push (info->conds, c);
4248 count2 = streamer_read_uhwi (&ib);
4249 gcc_assert (!info->entry);
4250 for (j = 0; j < count2; j++)
4252 struct size_time_entry e;
4254 e.size = streamer_read_uhwi (&ib);
4255 e.time = streamer_read_uhwi (&ib);
4256 e.predicate = read_predicate (&ib);
4258 vec_safe_push (info->entry, e);
4261 p = read_predicate (&ib);
4262 set_hint_predicate (&info->loop_iterations, p);
4263 p = read_predicate (&ib);
4264 set_hint_predicate (&info->loop_stride, p);
4265 p = read_predicate (&ib);
4266 set_hint_predicate (&info->array_index, p);
4267 for (e = node->callees; e; e = e->next_callee)
4268 read_inline_edge_summary (&ib, e);
4269 for (e = node->indirect_calls; e; e = e->next_callee)
4270 read_inline_edge_summary (&ib, e);
4273 lto_free_section_data (file_data, LTO_section_inline_summary, NULL, data,
4274 len);
4275 lto_data_in_delete (data_in);
4279 /* Read inline summary. Jump functions are shared among ipa-cp
4280 and the inliner, so when ipa-cp is active, we don't need to read
4281 them again here. */
4283 void
4284 inline_read_summary (void)
4286 struct lto_file_decl_data **file_data_vec = lto_get_file_decl_data ();
4287 struct lto_file_decl_data *file_data;
4288 unsigned int j = 0;
4290 inline_summary_alloc ();
4292 while ((file_data = file_data_vec[j++]))
4293 {
4294 size_t len;
4295 const char *data = lto_get_section_data (file_data,
4296 LTO_section_inline_summary,
4297 NULL, &len);
4298 if (data)
4299 inline_read_section (file_data, data, len);
4300 else
4301 /* Fatal error here. We do not want to support compiling ltrans units
4302 with a different version of the compiler or different flags than
4303 the WPA unit, so this should never happen. */
4304 fatal_error (input_location,
4305 "ipa inline summary is missing in input file");
4306 }
4307 if (optimize)
4308 {
4309 ipa_register_cgraph_hooks ();
4310 if (!flag_ipa_cp)
4311 ipa_prop_read_jump_functions ();
4312 }
4314 gcc_assert (inline_summaries);
4315 inline_summaries->enable_insertion_hook ();
4319 /* Write predicate P to OB. */
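/* A NULL predicate is emitted as just the terminating zero, which is the
   same encoding as the true predicate, so the reader does not distinguish
   the two cases. */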
4321 static void
4322 write_predicate (struct output_block *ob, struct predicate *p)
4324 int j;
4325 if (p)
4326 for (j = 0; p->clause[j]; j++)
4327 {
4328 gcc_assert (j < MAX_CLAUSES);
4329 streamer_write_uhwi (ob, p->clause[j]);
4330 }
4331 streamer_write_uhwi (ob, 0);
4332 }
4335 /* Write inline summary for edge E to OB. */
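/* The field order here must stay in sync with read_inline_edge_summary
   above, which reads the same fields back in the same order. */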
4337 static void
4338 write_inline_edge_summary (struct output_block *ob, struct cgraph_edge *e)
4340 struct inline_edge_summary *es = inline_edge_summary (e);
4341 int i;
4343 streamer_write_uhwi (ob, es->call_stmt_size);
4344 streamer_write_uhwi (ob, es->call_stmt_time);
4345 streamer_write_uhwi (ob, es->loop_depth);
4346 write_predicate (ob, es->predicate);
4347 streamer_write_uhwi (ob, es->param.length ());
4348 for (i = 0; i < (int) es->param.length (); i++)
4349 streamer_write_uhwi (ob, es->param[i].change_prob);
4353 /* Write inline summary for node in SET.
4354 Jump functions are shared among ipa-cp and inliner, so when ipa-cp is
4355 active, we don't need to write them twice. */
4357 void
4358 inline_write_summary (void)
4360 struct cgraph_node *node;
4361 struct output_block *ob = create_output_block (LTO_section_inline_summary);
4362 lto_symtab_encoder_t encoder = ob->decl_state->symtab_node_encoder;
4363 unsigned int count = 0;
4364 int i;
4366 for (i = 0; i < lto_symtab_encoder_size (encoder); i++)
4368 symtab_node *snode = lto_symtab_encoder_deref (encoder, i);
4369 cgraph_node *cnode = dyn_cast <cgraph_node *> (snode);
4370 if (cnode && cnode->definition && !cnode->alias)
4371 count++;
4373 streamer_write_uhwi (ob, count);
4375 for (i = 0; i < lto_symtab_encoder_size (encoder); i++)
4377 symtab_node *snode = lto_symtab_encoder_deref (encoder, i);
4378 cgraph_node *cnode = dyn_cast <cgraph_node *> (snode);
4379 if (cnode && (node = cnode)->definition && !node->alias)
4381 struct inline_summary *info = inline_summaries->get (node);
4382 struct bitpack_d bp;
4383 struct cgraph_edge *edge;
4384 int i;
4385 size_time_entry *e;
4386 struct condition *c;
4388 streamer_write_uhwi (ob,
4389 lto_symtab_encoder_encode (encoder,
4391 node));
4392 streamer_write_hwi (ob, info->estimated_self_stack_size);
4393 streamer_write_hwi (ob, info->self_size);
4394 streamer_write_hwi (ob, info->self_time);
4395 bp = bitpack_create (ob->main_stream);
4396 bp_pack_value (&bp, info->inlinable, 1);
4397 bp_pack_value (&bp, info->contains_cilk_spawn, 1);
4398 streamer_write_bitpack (&bp);
4399 streamer_write_uhwi (ob, vec_safe_length (info->conds));
4400 for (i = 0; vec_safe_iterate (info->conds, i, &c); i++)
4401 {
4402 streamer_write_uhwi (ob, c->operand_num);
4403 streamer_write_uhwi (ob, c->code);
4404 stream_write_tree (ob, c->val, true);
4405 bp = bitpack_create (ob->main_stream);
4406 bp_pack_value (&bp, c->agg_contents, 1);
4407 bp_pack_value (&bp, c->by_ref, 1);
4408 streamer_write_bitpack (&bp);
4409 if (c->agg_contents)
4410 streamer_write_uhwi (ob, c->offset);
4411 }
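/* Note that the aggregate offset is streamed only when agg_contents is
   set; inline_read_section reads it back under the same condition. */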
4412 streamer_write_uhwi (ob, vec_safe_length (info->entry));
4413 for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
4415 streamer_write_uhwi (ob, e->size);
4416 streamer_write_uhwi (ob, e->time);
4417 write_predicate (ob, &e->predicate);
4419 write_predicate (ob, info->loop_iterations);
4420 write_predicate (ob, info->loop_stride);
4421 write_predicate (ob, info->array_index);
4422 for (edge = node->callees; edge; edge = edge->next_callee)
4423 write_inline_edge_summary (ob, edge);
4424 for (edge = node->indirect_calls; edge; edge = edge->next_callee)
4425 write_inline_edge_summary (ob, edge);
4428 streamer_write_char_stream (ob->main_stream, 0);
4429 produce_asm (ob, NULL);
4430 destroy_output_block (ob);
4432 if (optimize && !flag_ipa_cp)
4433 ipa_prop_write_jump_functions ();
4437 /* Release inline summary. */
4439 void
4440 inline_free_summary (void)
4442 struct cgraph_node *node;
4443 if (edge_removal_hook_holder)
4444 symtab->remove_edge_removal_hook (edge_removal_hook_holder);
4445 edge_removal_hook_holder = NULL;
4446 if (edge_duplication_hook_holder)
4447 symtab->remove_edge_duplication_hook (edge_duplication_hook_holder);
4448 edge_duplication_hook_holder = NULL;
4449 if (!inline_edge_summary_vec.exists ())
4450 return;
4451 FOR_EACH_DEFINED_FUNCTION (node)
4452 if (!node->alias)
4453 reset_inline_summary (node, inline_summaries->get (node));
4454 inline_summaries->release ();
4455 inline_summaries = NULL;
4456 inline_edge_summary_vec.release ();
4457 edge_predicate_pool.release ();