* ipa-inline-analysis.c (reset_inline_summary): Clear fp_expressions
1 /* Inlining decision heuristics.
2 Copyright (C) 2003-2016 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* Analysis used by the inliner and other passes limiting code size growth.
23 We estimate for each function
24 - function body size
25 - average function execution time
26 - inlining size benefit (that is how much of function body size
27 and its call sequence is expected to disappear by inlining)
28 - inlining time benefit
29 - function frame size
30 For each call
31 - call statement size and time
33     inline_summary data structures store the above information locally (i.e.
34 parameters of the function itself) and globally (i.e. parameters of
35 the function created by applying all the inline decisions already
36 present in the callgraph).
38     We provide an accessor to the inline_summary data structure and
39     the basic logic for updating the parameters when inlining is performed.
41 The summaries are context sensitive. Context means
42 1) partial assignment of known constant values of operands
43 2) whether function is inlined into the call or not.
44 It is easy to add more variants. To represent function size and time
45     that depend on context (i.e. are known to be optimized away when
46     the context is known either by inlining or from IP-CP and cloning),
47 we use predicates. Predicates are logical formulas in
48 conjunctive-disjunctive form consisting of clauses. Clauses are bitmaps
49     specifying what conditions must be true. Conditions are simple tests
50     of the form described above.
52     In order to make a predicate (possibly) true, all of its clauses must
53     be (possibly) true. To make a clause (possibly) true, one of the conditions
54     it mentions must be (possibly) true. There are fixed bounds on the
55     number of clauses and conditions and all the manipulation functions
56     are conservative in the positive direction. I.e. we may lose precision
57     by thinking that a predicate may be true even when it is not.
59 estimate_edge_size and estimate_edge_growth can be used to query
60 function size/time in the given context. inline_merge_summary merges
61 properties of caller and callee after inlining.
63 Finally pass_inline_parameters is exported. This is used to drive
64 computation of function parameters used by the early inliner. IPA
65     inliner performs analysis via its analyze_function method. */
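/* Illustrative sketch (added as documentation, not part of the original
   sources): a predicate such as

       (op0 changed || not inlined) && (op1 == 5)

   is stored as a zero-terminated array of clause_t bitmaps, one bit per
   condition, with dynamic conditions starting at
   predicate_first_dynamic_condition:

       p.clause[0] = (1 << predicate_not_inlined_condition)
                     | (1 << (predicate_first_dynamic_condition + 0));
       p.clause[1] = 1 << (predicate_first_dynamic_condition + 1);
       p.clause[2] = 0;

   assuming condition 0 is "op0 changed" and condition 1 is "op1 == 5" in the
   summary's condition vector.  The predicate is possibly true for a given
   clause_t of possible truths iff every clause intersects that bitmap.  */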
67 #include "config.h"
68 #include "system.h"
69 #include "coretypes.h"
70 #include "backend.h"
71 #include "tree.h"
72 #include "gimple.h"
73 #include "alloc-pool.h"
74 #include "tree-pass.h"
75 #include "ssa.h"
76 #include "tree-streamer.h"
77 #include "cgraph.h"
78 #include "diagnostic.h"
79 #include "fold-const.h"
80 #include "print-tree.h"
81 #include "tree-inline.h"
82 #include "gimple-pretty-print.h"
83 #include "params.h"
84 #include "cfganal.h"
85 #include "gimple-iterator.h"
86 #include "tree-cfg.h"
87 #include "tree-ssa-loop-niter.h"
88 #include "tree-ssa-loop.h"
89 #include "symbol-summary.h"
90 #include "ipa-prop.h"
91 #include "ipa-inline.h"
92 #include "cfgloop.h"
93 #include "tree-scalar-evolution.h"
94 #include "ipa-utils.h"
95 #include "cilk.h"
96 #include "cfgexpand.h"
97 #include "gimplify.h"
 99 /* The estimated runtime of a function can easily run into huge numbers with many
100 nested loops. Be sure we can compute time * INLINE_SIZE_SCALE * 2 in an
101 integer. For anything larger we use gcov_type. */
102 #define MAX_TIME 500000
104 /* Number of bits in an integer, but we really want to be stable across different
105 hosts. */
106 #define NUM_CONDITIONS 32
108 enum predicate_conditions
110 predicate_false_condition = 0,
111 predicate_not_inlined_condition = 1,
112 predicate_first_dynamic_condition = 2
115 /* Special condition code we use to represent a test that an operand is a compile
116    time constant.  */
117 #define IS_NOT_CONSTANT ERROR_MARK
118 /* Special condition code we use to represent a test that an operand is not
119    changed across an invocation of the function. When an operand IS_NOT_CONSTANT
120    it is always CHANGED; however, e.g. loop invariants can be NOT_CHANGED for a
121    given percentage of executions even when they are not compile time constants. */
122 #define CHANGED IDENTIFIER_NODE
124 /* Holders of ipa cgraph hooks: */
125 static struct cgraph_2edge_hook_list *edge_duplication_hook_holder;
126 static struct cgraph_edge_hook_list *edge_removal_hook_holder;
127 static void inline_edge_removal_hook (struct cgraph_edge *, void *);
128 static void inline_edge_duplication_hook (struct cgraph_edge *,
129 struct cgraph_edge *, void *);
131 /* VECtor holding inline summaries.
132 In GGC memory because conditions might point to constant trees. */
133 function_summary <inline_summary *> *inline_summaries;
134 vec<inline_edge_summary_t> inline_edge_summary_vec;
136 /* Cached node/edge growths. */
137 vec<edge_growth_cache_entry> edge_growth_cache;
139 /* Edge predicates go here.  */
140 static object_allocator<predicate> edge_predicate_pool ("edge predicates");
142 /* Return the true predicate (tautology).
143    We represent it by an empty list of clauses. */
145 static inline struct predicate
146 true_predicate (void)
148 struct predicate p;
149 p.clause[0] = 0;
150 return p;
154 /* Return predicate testing single condition number COND. */
156 static inline struct predicate
157 single_cond_predicate (int cond)
159 struct predicate p;
160 p.clause[0] = 1 << cond;
161 p.clause[1] = 0;
162 return p;
166 /* Return the false predicate. The first clause requires the false condition. */
168 static inline struct predicate
169 false_predicate (void)
171 return single_cond_predicate (predicate_false_condition);
175 /* Return true if P is (true). */
177 static inline bool
178 true_predicate_p (struct predicate *p)
180 return !p->clause[0];
184 /* Return true if P is (false). */
186 static inline bool
187 false_predicate_p (struct predicate *p)
189 if (p->clause[0] == (1 << predicate_false_condition))
191 gcc_checking_assert (!p->clause[1]
192 && p->clause[0] == 1 << predicate_false_condition);
193 return true;
195 return false;
199 /* Return predicate that is set true when function is not inlined. */
201 static inline struct predicate
202 not_inlined_predicate (void)
204 return single_cond_predicate (predicate_not_inlined_condition);
207 /* Simple description of whether a memory load or a condition refers to a load
208 from an aggregate and if so, how and where from in the aggregate.
209    Individual fields have the same meaning as fields with the same name in
210 struct condition. */
212 struct agg_position_info
214 HOST_WIDE_INT offset;
215 bool agg_contents;
216 bool by_ref;
219 /* Add a condition to the condition list CONDS. AGGPOS describes whether the
220    used operand is loaded from an aggregate and where in the aggregate it is.
221    It can be NULL, which means this is not a load from an aggregate. */
223 static struct predicate
224 add_condition (struct inline_summary *summary, int operand_num,
225 struct agg_position_info *aggpos,
226 enum tree_code code, tree val)
228 int i;
229 struct condition *c;
230 struct condition new_cond;
231 HOST_WIDE_INT offset;
232 bool agg_contents, by_ref;
234 if (aggpos)
236 offset = aggpos->offset;
237 agg_contents = aggpos->agg_contents;
238 by_ref = aggpos->by_ref;
240 else
242 offset = 0;
243 agg_contents = false;
244 by_ref = false;
247 gcc_checking_assert (operand_num >= 0);
248 for (i = 0; vec_safe_iterate (summary->conds, i, &c); i++)
250 if (c->operand_num == operand_num
251 && c->code == code
252 && c->val == val
253 && c->agg_contents == agg_contents
254 && (!agg_contents || (c->offset == offset && c->by_ref == by_ref)))
255 return single_cond_predicate (i + predicate_first_dynamic_condition);
257 /* Too many conditions. Give up and return constant true. */
258 if (i == NUM_CONDITIONS - predicate_first_dynamic_condition)
259 return true_predicate ();
261 new_cond.operand_num = operand_num;
262 new_cond.code = code;
263 new_cond.val = val;
264 new_cond.agg_contents = agg_contents;
265 new_cond.by_ref = by_ref;
266 new_cond.offset = offset;
267 vec_safe_push (summary->conds, new_cond);
268 return single_cond_predicate (i + predicate_first_dynamic_condition);
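/* Illustrative sketch (documentation only, hypothetical values): calling

       add_condition (summary, 1, NULL, EQ_EXPR, val);

   twice with the same operand, code and VAL returns the same
   single-condition predicate both times, because the loop above reuses the
   existing entry in summary->conds instead of pushing a duplicate; only
   genuinely new tests consume one of the NUM_CONDITIONS slots.  */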
272 /* Add clause CLAUSE into the predicate P. */
274 static inline void
275 add_clause (conditions conditions, struct predicate *p, clause_t clause)
277 int i;
278 int i2;
279 int insert_here = -1;
280 int c1, c2;
282 /* True clause. */
283 if (!clause)
284 return;
286 /* False clause makes the whole predicate false. Kill the other variants. */
287 if (clause == (1 << predicate_false_condition))
289 p->clause[0] = (1 << predicate_false_condition);
290 p->clause[1] = 0;
291 return;
293 if (false_predicate_p (p))
294 return;
296 /* No one should be silly enough to add false into nontrivial clauses. */
297 gcc_checking_assert (!(clause & (1 << predicate_false_condition)));
299 /* Look where to insert the clause. At the same time prune out
300 clauses of P that are implied by the new clause and thus
301 redundant. */
302 for (i = 0, i2 = 0; i <= MAX_CLAUSES; i++)
304 p->clause[i2] = p->clause[i];
306 if (!p->clause[i])
307 break;
309 /* If p->clause[i] implies clause, there is nothing to add. */
310 if ((p->clause[i] & clause) == p->clause[i])
312 	  /* We had nothing to add, none of the clauses should've become
313 	     redundant.  */
314 gcc_checking_assert (i == i2);
315 return;
318 if (p->clause[i] < clause && insert_here < 0)
319 insert_here = i2;
321 /* If clause implies p->clause[i], then p->clause[i] becomes redundant.
322 Otherwise the p->clause[i] has to stay. */
323 if ((p->clause[i] & clause) != clause)
324 i2++;
327 /* Look for clauses that are obviously true. I.e.
328 op0 == 5 || op0 != 5. */
329 for (c1 = predicate_first_dynamic_condition; c1 < NUM_CONDITIONS; c1++)
331 condition *cc1;
332 if (!(clause & (1 << c1)))
333 continue;
334 cc1 = &(*conditions)[c1 - predicate_first_dynamic_condition];
335 /* We have no way to represent !CHANGED and !IS_NOT_CONSTANT
336 	 and thus there is no point in looking for them.  */
337 if (cc1->code == CHANGED || cc1->code == IS_NOT_CONSTANT)
338 continue;
339 for (c2 = c1 + 1; c2 < NUM_CONDITIONS; c2++)
340 if (clause & (1 << c2))
342 condition *cc1 =
343 &(*conditions)[c1 - predicate_first_dynamic_condition];
344 condition *cc2 =
345 &(*conditions)[c2 - predicate_first_dynamic_condition];
346 if (cc1->operand_num == cc2->operand_num
347 && cc1->val == cc2->val
348 && cc2->code != IS_NOT_CONSTANT
349 && cc2->code != CHANGED
350 && cc1->code == invert_tree_comparison (cc2->code,
351 HONOR_NANS (cc1->val)))
352 return;
357   /* We ran out of variants.  Be conservative in the positive direction.  */
358 if (i2 == MAX_CLAUSES)
359 return;
360 /* Keep clauses in decreasing order. This makes equivalence testing easy. */
361 p->clause[i2 + 1] = 0;
362 if (insert_here >= 0)
363 for (; i2 > insert_here; i2--)
364 p->clause[i2] = p->clause[i2 - 1];
365 else
366 insert_here = i2;
367 p->clause[insert_here] = clause;
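/* Illustrative sketch (documentation only): because a clause is a
   disjunction, a clause with fewer bits is the stronger one.  Adding the
   clause (A) to the predicate (A|B) therefore drops (A|B) and yields (A),
   while adding (A|B) to (A) is a no-op; in both cases the clause array is
   kept sorted in decreasing order and zero-terminated.  */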
371 /* Return P & P2. */
373 static struct predicate
374 and_predicates (conditions conditions,
375 struct predicate *p, struct predicate *p2)
377 struct predicate out = *p;
378 int i;
380 /* Avoid busy work. */
381 if (false_predicate_p (p2) || true_predicate_p (p))
382 return *p2;
383 if (false_predicate_p (p) || true_predicate_p (p2))
384 return *p;
386 /* See how far predicates match. */
387 for (i = 0; p->clause[i] && p->clause[i] == p2->clause[i]; i++)
389 gcc_checking_assert (i < MAX_CLAUSES);
392   /* Combine the rest of the predicates.  */
393 for (; p2->clause[i]; i++)
395 gcc_checking_assert (i < MAX_CLAUSES);
396 add_clause (conditions, &out, p2->clause[i]);
398 return out;
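/* Illustrative sketch (documentation only): conjunction simply accumulates
   clauses, so (A) & ((B) && (C)) becomes (A) && (B) && (C); the shared
   prefix of the two clause arrays is kept as-is and the remaining clauses
   of P2 are folded in through add_clause, which prunes redundancies.  */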
402 /* Return true if predicates are obviously equal. */
404 static inline bool
405 predicates_equal_p (struct predicate *p, struct predicate *p2)
407 int i;
408 for (i = 0; p->clause[i]; i++)
410 gcc_checking_assert (i < MAX_CLAUSES);
411 gcc_checking_assert (p->clause[i] > p->clause[i + 1]);
412 gcc_checking_assert (!p2->clause[i]
413 || p2->clause[i] > p2->clause[i + 1]);
414 if (p->clause[i] != p2->clause[i])
415 return false;
417 return !p2->clause[i];
421 /* Return P | P2. */
423 static struct predicate
424 or_predicates (conditions conditions,
425 struct predicate *p, struct predicate *p2)
427 struct predicate out = true_predicate ();
428 int i, j;
430 /* Avoid busy work. */
431 if (false_predicate_p (p2) || true_predicate_p (p))
432 return *p;
433 if (false_predicate_p (p) || true_predicate_p (p2))
434 return *p2;
435 if (predicates_equal_p (p, p2))
436 return *p;
438 /* OK, combine the predicates. */
439 for (i = 0; p->clause[i]; i++)
440 for (j = 0; p2->clause[j]; j++)
442 gcc_checking_assert (i < MAX_CLAUSES && j < MAX_CLAUSES);
443 add_clause (conditions, &out, p->clause[i] | p2->clause[j]);
445 return out;
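/* Illustrative sketch (documentation only): disjunction distributes over
   the clauses, so ((A) && (B)) | (C) becomes (A|C) && (B|C).  When the
   MAX_CLAUSES or NUM_CONDITIONS limits are hit precision is lost, but only
   in the conservative direction of "possibly true".  */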
449 /* Having a partial truth assignment in POSSIBLE_TRUTHS, return false
450 if predicate P is known to be false. */
452 static bool
453 evaluate_predicate (struct predicate *p, clause_t possible_truths)
455 int i;
457 /* True remains true. */
458 if (true_predicate_p (p))
459 return true;
461 gcc_assert (!(possible_truths & (1 << predicate_false_condition)));
463   /* See if we can find a clause we can disprove.  */
464 for (i = 0; p->clause[i]; i++)
466 gcc_checking_assert (i < MAX_CLAUSES);
467 if (!(p->clause[i] & possible_truths))
468 return false;
470 return true;
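/* Illustrative sketch (documentation only): with POSSIBLE_TRUTHS containing
   only the bit for "op0 changed", the predicate
   (op0 changed) && (op1 == 5) evaluates to false here, because its second
   clause shares no bit with POSSIBLE_TRUTHS, whereas the single-clause
   predicate (op0 changed) remains possibly true.  */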
473 /* Return the probability in range 0...REG_BR_PROB_BASE that the predicated
474 instruction will be recomputed per invocation of the inlined call. */
476 static int
477 predicate_probability (conditions conds,
478 struct predicate *p, clause_t possible_truths,
479 vec<inline_param_summary> inline_param_summary)
481 int i;
482 int combined_prob = REG_BR_PROB_BASE;
484 /* True remains true. */
485 if (true_predicate_p (p))
486 return REG_BR_PROB_BASE;
488 if (false_predicate_p (p))
489 return 0;
491 gcc_assert (!(possible_truths & (1 << predicate_false_condition)));
493   /* See if we can find a clause we can disprove.  */
494 for (i = 0; p->clause[i]; i++)
496 gcc_checking_assert (i < MAX_CLAUSES);
497 if (!(p->clause[i] & possible_truths))
498 return 0;
499 else
501 int this_prob = 0;
502 int i2;
503 if (!inline_param_summary.exists ())
504 return REG_BR_PROB_BASE;
505 for (i2 = 0; i2 < NUM_CONDITIONS; i2++)
506 if ((p->clause[i] & possible_truths) & (1 << i2))
508 if (i2 >= predicate_first_dynamic_condition)
510 condition *c =
511 &(*conds)[i2 - predicate_first_dynamic_condition];
512 if (c->code == CHANGED
513 && (c->operand_num <
514 (int) inline_param_summary.length ()))
516 int iprob =
517 inline_param_summary[c->operand_num].change_prob;
518 this_prob = MAX (this_prob, iprob);
520 else
521 this_prob = REG_BR_PROB_BASE;
523 else
524 this_prob = REG_BR_PROB_BASE;
526 combined_prob = MIN (this_prob, combined_prob);
527 if (!combined_prob)
528 return 0;
531 return combined_prob;
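/* Illustrative sketch (documentation only, hypothetical numbers): within a
   clause the probability is the maximum change_prob over its possibly-true
   CHANGED conditions on tracked parameters (REG_BR_PROB_BASE for anything
   else), and the result for the whole predicate is the minimum over its
   clauses; a single-clause predicate "op0 changed" whose parameter has
   change_prob == REG_BR_PROB_BASE / 2 therefore yields
   REG_BR_PROB_BASE / 2.  */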
535 /* Dump condition COND.  */
537 static void
538 dump_condition (FILE *f, conditions conditions, int cond)
540 condition *c;
541 if (cond == predicate_false_condition)
542 fprintf (f, "false");
543 else if (cond == predicate_not_inlined_condition)
544 fprintf (f, "not inlined");
545 else
547 c = &(*conditions)[cond - predicate_first_dynamic_condition];
548 fprintf (f, "op%i", c->operand_num);
549 if (c->agg_contents)
550 fprintf (f, "[%soffset: " HOST_WIDE_INT_PRINT_DEC "]",
551 c->by_ref ? "ref " : "", c->offset);
552 if (c->code == IS_NOT_CONSTANT)
554 fprintf (f, " not constant");
555 return;
557 if (c->code == CHANGED)
559 fprintf (f, " changed");
560 return;
562 fprintf (f, " %s ", op_symbol_code (c->code));
563 print_generic_expr (f, c->val, 1);
568 /* Dump clause CLAUSE. */
570 static void
571 dump_clause (FILE *f, conditions conds, clause_t clause)
573 int i;
574 bool found = false;
575 fprintf (f, "(");
576 if (!clause)
577 fprintf (f, "true");
578 for (i = 0; i < NUM_CONDITIONS; i++)
579 if (clause & (1 << i))
581 if (found)
582 fprintf (f, " || ");
583 found = true;
584 dump_condition (f, conds, i);
586 fprintf (f, ")");
590 /* Dump predicate PREDICATE. */
592 static void
593 dump_predicate (FILE *f, conditions conds, struct predicate *pred)
595 int i;
596 if (true_predicate_p (pred))
597 dump_clause (f, conds, 0);
598 else
599 for (i = 0; pred->clause[i]; i++)
601 if (i)
602 fprintf (f, " && ");
603 dump_clause (f, conds, pred->clause[i]);
605 fprintf (f, "\n");
609 /* Dump inline hints. */
610 void
611 dump_inline_hints (FILE *f, inline_hints hints)
613 if (!hints)
614 return;
615 fprintf (f, "inline hints:");
616 if (hints & INLINE_HINT_indirect_call)
618 hints &= ~INLINE_HINT_indirect_call;
619 fprintf (f, " indirect_call");
621 if (hints & INLINE_HINT_loop_iterations)
623 hints &= ~INLINE_HINT_loop_iterations;
624 fprintf (f, " loop_iterations");
626 if (hints & INLINE_HINT_loop_stride)
628 hints &= ~INLINE_HINT_loop_stride;
629 fprintf (f, " loop_stride");
631 if (hints & INLINE_HINT_same_scc)
633 hints &= ~INLINE_HINT_same_scc;
634 fprintf (f, " same_scc");
636 if (hints & INLINE_HINT_in_scc)
638 hints &= ~INLINE_HINT_in_scc;
639 fprintf (f, " in_scc");
641 if (hints & INLINE_HINT_cross_module)
643 hints &= ~INLINE_HINT_cross_module;
644 fprintf (f, " cross_module");
646 if (hints & INLINE_HINT_declared_inline)
648 hints &= ~INLINE_HINT_declared_inline;
649 fprintf (f, " declared_inline");
651 if (hints & INLINE_HINT_array_index)
653 hints &= ~INLINE_HINT_array_index;
654 fprintf (f, " array_index");
656 if (hints & INLINE_HINT_known_hot)
658 hints &= ~INLINE_HINT_known_hot;
659 fprintf (f, " known_hot");
661 gcc_assert (!hints);
665 /* Record SIZE and TIME under condition PRED into the inline summary. */
667 static void
668 account_size_time (struct inline_summary *summary, int size, int time,
669 struct predicate *pred)
671 size_time_entry *e;
672 bool found = false;
673 int i;
675 if (false_predicate_p (pred))
676 return;
678   /* We need to create the initial empty unconditional clause, but otherwise
679      we don't need to account empty times and sizes.  */
680 if (!size && !time && summary->entry)
681 return;
683 /* Watch overflow that might result from insane profiles. */
684 if (time > MAX_TIME * INLINE_TIME_SCALE)
685 time = MAX_TIME * INLINE_TIME_SCALE;
686 gcc_assert (time >= 0);
688 for (i = 0; vec_safe_iterate (summary->entry, i, &e); i++)
689 if (predicates_equal_p (&e->predicate, pred))
691 found = true;
692 break;
694 if (i == 256)
696 i = 0;
697 found = true;
698 e = &(*summary->entry)[0];
699 gcc_assert (!e->predicate.clause[0]);
700 if (dump_file && (dump_flags & TDF_DETAILS))
701 fprintf (dump_file,
702 "\t\tReached limit on number of entries, "
703 "ignoring the predicate.");
705 if (dump_file && (dump_flags & TDF_DETAILS) && (time || size))
707 fprintf (dump_file,
708 "\t\tAccounting size:%3.2f, time:%3.2f on %spredicate:",
709 ((double) size) / INLINE_SIZE_SCALE,
710 ((double) time) / INLINE_TIME_SCALE, found ? "" : "new ");
711 dump_predicate (dump_file, summary->conds, pred);
713 if (!found)
715 struct size_time_entry new_entry;
716 new_entry.size = size;
717 new_entry.time = time;
718 new_entry.predicate = *pred;
719 vec_safe_push (summary->entry, new_entry);
721 else
723 e->size += size;
724 e->time += time;
725 if (e->time > MAX_TIME * INLINE_TIME_SCALE)
726 e->time = MAX_TIME * INLINE_TIME_SCALE;
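/* Illustrative sketch (documentation only): entries are keyed by predicate,
   so accounting size 2 twice under the same predicate results in a single
   size_time_entry with size 4, not two entries; entry 0 always holds the
   unconditional (true-predicate) part of the function.  */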
730 /* We proved E to be unreachable, redirect it to __builtin_unreachable.  */
732 static struct cgraph_edge *
733 redirect_to_unreachable (struct cgraph_edge *e)
735 struct cgraph_node *callee = !e->inline_failed ? e->callee : NULL;
736 struct cgraph_node *target = cgraph_node::get_create
737 (builtin_decl_implicit (BUILT_IN_UNREACHABLE));
739 if (e->speculative)
740 e = e->resolve_speculation (target->decl);
741 else if (!e->callee)
742 e->make_direct (target);
743 else
744 e->redirect_callee (target);
745 struct inline_edge_summary *es = inline_edge_summary (e);
746 e->inline_failed = CIF_UNREACHABLE;
747 e->frequency = 0;
748 e->count = 0;
749 es->call_stmt_size = 0;
750 es->call_stmt_time = 0;
751 if (callee)
752 callee->remove_symbol_and_inline_clones ();
753 return e;
756 /* Set predicate for edge E. */
758 static void
759 edge_set_predicate (struct cgraph_edge *e, struct predicate *predicate)
761 /* If the edge is determined to be never executed, redirect it
762 to BUILTIN_UNREACHABLE to save inliner from inlining into it. */
763 if (predicate && false_predicate_p (predicate)
764 /* When handling speculative edges, we need to do the redirection
765 just once. Do it always on the direct edge, so we do not
766 attempt to resolve speculation while duplicating the edge. */
767 && (!e->speculative || e->callee))
768 e = redirect_to_unreachable (e);
770 struct inline_edge_summary *es = inline_edge_summary (e);
771 if (predicate && !true_predicate_p (predicate))
773 if (!es->predicate)
774 es->predicate = edge_predicate_pool.allocate ();
775 *es->predicate = *predicate;
777 else
779 if (es->predicate)
780 edge_predicate_pool.remove (es->predicate);
781 es->predicate = NULL;
785 /* Set predicate for hint *P. */
787 static void
788 set_hint_predicate (struct predicate **p, struct predicate new_predicate)
790 if (false_predicate_p (&new_predicate) || true_predicate_p (&new_predicate))
792 if (*p)
793 edge_predicate_pool.remove (*p);
794 *p = NULL;
796 else
798 if (!*p)
799 *p = edge_predicate_pool.allocate ();
800 **p = new_predicate;
805 /* KNOWN_VALS is a partial mapping of parameters of NODE to constant values.
806    KNOWN_AGGS is a vector of aggregate jump functions for each parameter.
807    Return the clause of possible truths. When INLINE_P is true, assume that we are
808 inlining.
810 ERROR_MARK means compile time invariant. */
812 static clause_t
813 evaluate_conditions_for_known_args (struct cgraph_node *node,
814 bool inline_p,
815 vec<tree> known_vals,
816 vec<ipa_agg_jump_function_p>
817 known_aggs)
819 clause_t clause = inline_p ? 0 : 1 << predicate_not_inlined_condition;
820 struct inline_summary *info = inline_summaries->get (node);
821 int i;
822 struct condition *c;
824 for (i = 0; vec_safe_iterate (info->conds, i, &c); i++)
826 tree val;
827 tree res;
829       /* We allow the call stmt to have fewer arguments than the callee function
830          (especially for K&R style programs).  So bounds check here (we assume the
831 known_aggs vector, if non-NULL, has the same length as
832 known_vals). */
833 gcc_checking_assert (!known_aggs.exists ()
834 || (known_vals.length () == known_aggs.length ()));
835 if (c->operand_num >= (int) known_vals.length ())
837 clause |= 1 << (i + predicate_first_dynamic_condition);
838 continue;
841 if (c->agg_contents)
843 struct ipa_agg_jump_function *agg;
845 if (c->code == CHANGED
846 && !c->by_ref
847 && (known_vals[c->operand_num] == error_mark_node))
848 continue;
850 if (known_aggs.exists ())
852 agg = known_aggs[c->operand_num];
853 val = ipa_find_agg_cst_for_param (agg, known_vals[c->operand_num],
854 c->offset, c->by_ref);
856 else
857 val = NULL_TREE;
859 else
861 val = known_vals[c->operand_num];
862 if (val == error_mark_node && c->code != CHANGED)
863 val = NULL_TREE;
866 if (!val)
868 clause |= 1 << (i + predicate_first_dynamic_condition);
869 continue;
871 if (c->code == IS_NOT_CONSTANT || c->code == CHANGED)
872 continue;
874 if (operand_equal_p (TYPE_SIZE (TREE_TYPE (c->val)),
875 TYPE_SIZE (TREE_TYPE (val)), 0))
877 val = fold_unary (VIEW_CONVERT_EXPR, TREE_TYPE (c->val), val);
879 res = val
880 ? fold_binary_to_constant (c->code, boolean_type_node, val, c->val)
881 : NULL;
883 if (res && integer_zerop (res))
884 continue;
886 clause |= 1 << (i + predicate_first_dynamic_condition);
888 return clause;
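/* Illustrative sketch (documentation only, hypothetical values): if
   INFO->conds[0] is the test "op0 == 5" and KNOWN_VALS[0] is the constant 7,
   the test folds to false and the corresponding bit stays clear in the
   returned clause; if KNOWN_VALS[0] is unknown, the bit
   (predicate_first_dynamic_condition + 0) is set, i.e. the condition is
   treated as possibly true.  */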
892 /* Work out what conditions might be true at invocation of E. */
894 static void
895 evaluate_properties_for_edge (struct cgraph_edge *e, bool inline_p,
896 clause_t *clause_ptr,
897 vec<tree> *known_vals_ptr,
898 vec<ipa_polymorphic_call_context>
899 *known_contexts_ptr,
900 vec<ipa_agg_jump_function_p> *known_aggs_ptr)
902 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
903 struct inline_summary *info = inline_summaries->get (callee);
904 vec<tree> known_vals = vNULL;
905 vec<ipa_agg_jump_function_p> known_aggs = vNULL;
907 if (clause_ptr)
908 *clause_ptr = inline_p ? 0 : 1 << predicate_not_inlined_condition;
909 if (known_vals_ptr)
910 known_vals_ptr->create (0);
911 if (known_contexts_ptr)
912 known_contexts_ptr->create (0);
914 if (ipa_node_params_sum
915 && !e->call_stmt_cannot_inline_p
916 && ((clause_ptr && info->conds) || known_vals_ptr || known_contexts_ptr))
918 struct ipa_node_params *parms_info;
919 struct ipa_edge_args *args = IPA_EDGE_REF (e);
920 struct inline_edge_summary *es = inline_edge_summary (e);
921 int i, count = ipa_get_cs_argument_count (args);
923 if (e->caller->global.inlined_to)
924 parms_info = IPA_NODE_REF (e->caller->global.inlined_to);
925 else
926 parms_info = IPA_NODE_REF (e->caller);
928 if (count && (info->conds || known_vals_ptr))
929 known_vals.safe_grow_cleared (count);
930 if (count && (info->conds || known_aggs_ptr))
931 known_aggs.safe_grow_cleared (count);
932 if (count && known_contexts_ptr)
933 known_contexts_ptr->safe_grow_cleared (count);
935 for (i = 0; i < count; i++)
937 struct ipa_jump_func *jf = ipa_get_ith_jump_func (args, i);
938 tree cst = ipa_value_from_jfunc (parms_info, jf);
940 if (!cst && e->call_stmt
941 && i < (int)gimple_call_num_args (e->call_stmt))
943 cst = gimple_call_arg (e->call_stmt, i);
944 if (!is_gimple_min_invariant (cst))
945 cst = NULL;
947 if (cst)
949 gcc_checking_assert (TREE_CODE (cst) != TREE_BINFO);
950 if (known_vals.exists ())
951 known_vals[i] = cst;
953 else if (inline_p && !es->param[i].change_prob)
954 known_vals[i] = error_mark_node;
956 if (known_contexts_ptr)
957 (*known_contexts_ptr)[i] = ipa_context_from_jfunc (parms_info, e,
958 i, jf);
959 /* TODO: When IPA-CP starts propagating and merging aggregate jump
960 functions, use its knowledge of the caller too, just like the
961 scalar case above. */
962 known_aggs[i] = &jf->agg;
965 else if (e->call_stmt && !e->call_stmt_cannot_inline_p
966 && ((clause_ptr && info->conds) || known_vals_ptr))
968 int i, count = (int)gimple_call_num_args (e->call_stmt);
970 if (count && (info->conds || known_vals_ptr))
971 known_vals.safe_grow_cleared (count);
972 for (i = 0; i < count; i++)
974 tree cst = gimple_call_arg (e->call_stmt, i);
975 if (!is_gimple_min_invariant (cst))
976 cst = NULL;
977 if (cst)
978 known_vals[i] = cst;
982 if (clause_ptr)
983 *clause_ptr = evaluate_conditions_for_known_args (callee, inline_p,
984 known_vals, known_aggs);
986 if (known_vals_ptr)
987 *known_vals_ptr = known_vals;
988 else
989 known_vals.release ();
991 if (known_aggs_ptr)
992 *known_aggs_ptr = known_aggs;
993 else
994 known_aggs.release ();
998 /* Allocate the inline summary vector or resize it to cover all cgraph nodes. */
1000 static void
1001 inline_summary_alloc (void)
1003 if (!edge_removal_hook_holder)
1004 edge_removal_hook_holder =
1005 symtab->add_edge_removal_hook (&inline_edge_removal_hook, NULL);
1006 if (!edge_duplication_hook_holder)
1007 edge_duplication_hook_holder =
1008 symtab->add_edge_duplication_hook (&inline_edge_duplication_hook, NULL);
1010 if (!inline_summaries)
1011 inline_summaries = (inline_summary_t*) inline_summary_t::create_ggc (symtab);
1013 if (inline_edge_summary_vec.length () <= (unsigned) symtab->edges_max_uid)
1014 inline_edge_summary_vec.safe_grow_cleared (symtab->edges_max_uid + 1);
1017 /* We are called multiple times for a given function; clear
1018    data from the previous run so they are not accumulated.  */
1020 static void
1021 reset_inline_edge_summary (struct cgraph_edge *e)
1023 if (e->uid < (int) inline_edge_summary_vec.length ())
1025 struct inline_edge_summary *es = inline_edge_summary (e);
1027 es->call_stmt_size = es->call_stmt_time = 0;
1028 if (es->predicate)
1029 edge_predicate_pool.remove (es->predicate);
1030 es->predicate = NULL;
1031 es->param.release ();
1035 /* We are called multiple times for a given function; clear
1036    data from the previous run so they are not accumulated.  */
1038 static void
1039 reset_inline_summary (struct cgraph_node *node,
1040 inline_summary *info)
1042 struct cgraph_edge *e;
1044 info->self_size = info->self_time = 0;
1045 info->estimated_stack_size = 0;
1046 info->estimated_self_stack_size = 0;
1047 info->stack_frame_offset = 0;
1048 info->size = 0;
1049 info->time = 0;
1050 info->growth = 0;
1051 info->scc_no = 0;
1052 if (info->loop_iterations)
1054 edge_predicate_pool.remove (info->loop_iterations);
1055 info->loop_iterations = NULL;
1057 if (info->loop_stride)
1059 edge_predicate_pool.remove (info->loop_stride);
1060 info->loop_stride = NULL;
1062 if (info->array_index)
1064 edge_predicate_pool.remove (info->array_index);
1065 info->array_index = NULL;
1067 vec_free (info->conds);
1068 vec_free (info->entry);
1069 for (e = node->callees; e; e = e->next_callee)
1070 reset_inline_edge_summary (e);
1071 for (e = node->indirect_calls; e; e = e->next_callee)
1072 reset_inline_edge_summary (e);
1073 info->fp_expressions = false;
1076 /* Hook that is called by cgraph.c when a node is removed. */
1078 void
1079 inline_summary_t::remove (cgraph_node *node, inline_summary *info)
1081 reset_inline_summary (node, info);
1084 /* Remap predicate P of the former function to be a predicate of the
1085    duplicated function. POSSIBLE_TRUTHS is the clause of possible truths in
1086    the duplicated node, INFO is the inline summary of the duplicated node. */
1088 static struct predicate
1089 remap_predicate_after_duplication (struct predicate *p,
1090 clause_t possible_truths,
1091 struct inline_summary *info)
1093 struct predicate new_predicate = true_predicate ();
1094 int j;
1095 for (j = 0; p->clause[j]; j++)
1096 if (!(possible_truths & p->clause[j]))
1098 new_predicate = false_predicate ();
1099 break;
1101 else
1102 add_clause (info->conds, &new_predicate,
1103 possible_truths & p->clause[j]);
1104 return new_predicate;
1107 /* Same as remap_predicate_after_duplication but handle hint predicate *P.
1108 Additionally care about allocating new memory slot for updated predicate
1109 and set it to NULL when it becomes true or false (and thus uninteresting).
1112 static void
1113 remap_hint_predicate_after_duplication (struct predicate **p,
1114 clause_t possible_truths,
1115 struct inline_summary *info)
1117 struct predicate new_predicate;
1119 if (!*p)
1120 return;
1122 new_predicate = remap_predicate_after_duplication (*p,
1123 possible_truths, info);
1124   /* We do not want to free the previous predicate; it is used by the node's origin.  */
1125 *p = NULL;
1126 set_hint_predicate (p, new_predicate);
1130 /* Hook that is called by cgraph.c when a node is duplicated. */
1131 void
1132 inline_summary_t::duplicate (cgraph_node *src,
1133 cgraph_node *dst,
1134 inline_summary *,
1135 inline_summary *info)
1137 inline_summary_alloc ();
1138 memcpy (info, inline_summaries->get (src), sizeof (inline_summary));
1139 /* TODO: as an optimization, we may avoid copying conditions
1140 that are known to be false or true. */
1141 info->conds = vec_safe_copy (info->conds);
1143 /* When there are any replacements in the function body, see if we can figure
1144 out that something was optimized out. */
1145 if (ipa_node_params_sum && dst->clone.tree_map)
1147 vec<size_time_entry, va_gc> *entry = info->entry;
1148 /* Use SRC parm info since it may not be copied yet. */
1149 struct ipa_node_params *parms_info = IPA_NODE_REF (src);
1150 vec<tree> known_vals = vNULL;
1151 int count = ipa_get_param_count (parms_info);
1152 int i, j;
1153 clause_t possible_truths;
1154 struct predicate true_pred = true_predicate ();
1155 size_time_entry *e;
1156 int optimized_out_size = 0;
1157 bool inlined_to_p = false;
1158 struct cgraph_edge *edge, *next;
1160 info->entry = 0;
1161 known_vals.safe_grow_cleared (count);
1162 for (i = 0; i < count; i++)
1164 struct ipa_replace_map *r;
1166 for (j = 0; vec_safe_iterate (dst->clone.tree_map, j, &r); j++)
1168 if (((!r->old_tree && r->parm_num == i)
1169 || (r->old_tree && r->old_tree == ipa_get_param (parms_info, i)))
1170 && r->replace_p && !r->ref_p)
1172 known_vals[i] = r->new_tree;
1173 break;
1177 possible_truths = evaluate_conditions_for_known_args (dst, false,
1178 known_vals,
1179 vNULL);
1180 known_vals.release ();
1182 account_size_time (info, 0, 0, &true_pred);
1184 /* Remap size_time vectors.
1185 	 Simplify the predicate by pruning out alternatives that are known
1186 	 to be false.
1187 	 TODO: as an optimization, we can also eliminate conditions known
1188 to be true. */
1189 for (i = 0; vec_safe_iterate (entry, i, &e); i++)
1191 struct predicate new_predicate;
1192 new_predicate = remap_predicate_after_duplication (&e->predicate,
1193 possible_truths,
1194 info);
1195 if (false_predicate_p (&new_predicate))
1196 optimized_out_size += e->size;
1197 else
1198 account_size_time (info, e->size, e->time, &new_predicate);
1201 /* Remap edge predicates with the same simplification as above.
1202 Also copy constantness arrays. */
1203 for (edge = dst->callees; edge; edge = next)
1205 struct predicate new_predicate;
1206 struct inline_edge_summary *es = inline_edge_summary (edge);
1207 next = edge->next_callee;
1209 if (!edge->inline_failed)
1210 inlined_to_p = true;
1211 if (!es->predicate)
1212 continue;
1213 new_predicate = remap_predicate_after_duplication (es->predicate,
1214 possible_truths,
1215 info);
1216 if (false_predicate_p (&new_predicate)
1217 && !false_predicate_p (es->predicate))
1218 optimized_out_size += es->call_stmt_size * INLINE_SIZE_SCALE;
1219 edge_set_predicate (edge, &new_predicate);
1222       /* Remap indirect edge predicates with the same simplification as above.
1223 Also copy constantness arrays. */
1224 for (edge = dst->indirect_calls; edge; edge = next)
1226 struct predicate new_predicate;
1227 struct inline_edge_summary *es = inline_edge_summary (edge);
1228 next = edge->next_callee;
1230 gcc_checking_assert (edge->inline_failed);
1231 if (!es->predicate)
1232 continue;
1233 new_predicate = remap_predicate_after_duplication (es->predicate,
1234 possible_truths,
1235 info);
1236 if (false_predicate_p (&new_predicate)
1237 && !false_predicate_p (es->predicate))
1238 optimized_out_size += es->call_stmt_size * INLINE_SIZE_SCALE;
1239 edge_set_predicate (edge, &new_predicate);
1241 remap_hint_predicate_after_duplication (&info->loop_iterations,
1242 possible_truths, info);
1243 remap_hint_predicate_after_duplication (&info->loop_stride,
1244 possible_truths, info);
1245 remap_hint_predicate_after_duplication (&info->array_index,
1246 possible_truths, info);
1248       /* If the inliner or someone after the inliner ever starts producing
1249 	 non-trivial clones, we will get into trouble with the lack of information
1250 	 about updating self sizes, because the size vectors already contain
1251 	 sizes of the callees.  */
1252 gcc_assert (!inlined_to_p || !optimized_out_size);
1254 else
1256 info->entry = vec_safe_copy (info->entry);
1257 if (info->loop_iterations)
1259 predicate p = *info->loop_iterations;
1260 info->loop_iterations = NULL;
1261 set_hint_predicate (&info->loop_iterations, p);
1263 if (info->loop_stride)
1265 predicate p = *info->loop_stride;
1266 info->loop_stride = NULL;
1267 set_hint_predicate (&info->loop_stride, p);
1269 if (info->array_index)
1271 predicate p = *info->array_index;
1272 info->array_index = NULL;
1273 set_hint_predicate (&info->array_index, p);
1276 if (!dst->global.inlined_to)
1277 inline_update_overall_summary (dst);
1281 /* Hook that is called by cgraph.c when a node is duplicated. */
1283 static void
1284 inline_edge_duplication_hook (struct cgraph_edge *src,
1285 struct cgraph_edge *dst,
1286 ATTRIBUTE_UNUSED void *data)
1288 struct inline_edge_summary *info;
1289 struct inline_edge_summary *srcinfo;
1290 inline_summary_alloc ();
1291 info = inline_edge_summary (dst);
1292 srcinfo = inline_edge_summary (src);
1293 memcpy (info, srcinfo, sizeof (struct inline_edge_summary));
1294 info->predicate = NULL;
1295 edge_set_predicate (dst, srcinfo->predicate);
1296 info->param = srcinfo->param.copy ();
1297 if (!dst->indirect_unknown_callee && src->indirect_unknown_callee)
1299 info->call_stmt_size -= (eni_size_weights.indirect_call_cost
1300 - eni_size_weights.call_cost);
1301 info->call_stmt_time -= (eni_time_weights.indirect_call_cost
1302 - eni_time_weights.call_cost);
1307 /* Keep edge cache consistent across edge removal. */
1309 static void
1310 inline_edge_removal_hook (struct cgraph_edge *edge,
1311 void *data ATTRIBUTE_UNUSED)
1313 if (edge_growth_cache.exists ())
1314 reset_edge_growth_cache (edge);
1315 reset_inline_edge_summary (edge);
1319 /* Initialize growth caches. */
1321 void
1322 initialize_growth_caches (void)
1324 if (symtab->edges_max_uid)
1325 edge_growth_cache.safe_grow_cleared (symtab->edges_max_uid);
1329 /* Free growth caches. */
1331 void
1332 free_growth_caches (void)
1334 edge_growth_cache.release ();
1338 /* Dump edge summaries associated to NODE and recursively to all clones.
1339 Indent by INDENT. */
1341 static void
1342 dump_inline_edge_summary (FILE *f, int indent, struct cgraph_node *node,
1343 struct inline_summary *info)
1345 struct cgraph_edge *edge;
1346 for (edge = node->callees; edge; edge = edge->next_callee)
1348 struct inline_edge_summary *es = inline_edge_summary (edge);
1349 struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
1350 int i;
1352 fprintf (f,
1353 "%*s%s/%i %s\n%*s loop depth:%2i freq:%4i size:%2i"
1354 " time: %2i callee size:%2i stack:%2i",
1355 indent, "", callee->name (), callee->order,
1356 !edge->inline_failed
1357 ? "inlined" : cgraph_inline_failed_string (edge-> inline_failed),
1358 indent, "", es->loop_depth, edge->frequency,
1359 es->call_stmt_size, es->call_stmt_time,
1360 (int) inline_summaries->get (callee)->size / INLINE_SIZE_SCALE,
1361 (int) inline_summaries->get (callee)->estimated_stack_size);
1363 if (es->predicate)
1365 fprintf (f, " predicate: ");
1366 dump_predicate (f, info->conds, es->predicate);
1368 else
1369 fprintf (f, "\n");
1370 if (es->param.exists ())
1371 for (i = 0; i < (int) es->param.length (); i++)
1373 int prob = es->param[i].change_prob;
1375 if (!prob)
1376 fprintf (f, "%*s op%i is compile time invariant\n",
1377 indent + 2, "", i);
1378 else if (prob != REG_BR_PROB_BASE)
1379 fprintf (f, "%*s op%i change %f%% of time\n", indent + 2, "", i,
1380 prob * 100.0 / REG_BR_PROB_BASE);
1382 if (!edge->inline_failed)
1384 fprintf (f, "%*sStack frame offset %i, callee self size %i,"
1385 " callee size %i\n",
1386 indent + 2, "",
1387 (int) inline_summaries->get (callee)->stack_frame_offset,
1388 (int) inline_summaries->get (callee)->estimated_self_stack_size,
1389 (int) inline_summaries->get (callee)->estimated_stack_size);
1390 dump_inline_edge_summary (f, indent + 2, callee, info);
1393 for (edge = node->indirect_calls; edge; edge = edge->next_callee)
1395 struct inline_edge_summary *es = inline_edge_summary (edge);
1396 fprintf (f, "%*sindirect call loop depth:%2i freq:%4i size:%2i"
1397 " time: %2i",
1398 indent, "",
1399 es->loop_depth,
1400 edge->frequency, es->call_stmt_size, es->call_stmt_time);
1401 if (es->predicate)
1403 fprintf (f, "predicate: ");
1404 dump_predicate (f, info->conds, es->predicate);
1406 else
1407 fprintf (f, "\n");
1412 void
1413 dump_inline_summary (FILE *f, struct cgraph_node *node)
1415 if (node->definition)
1417 struct inline_summary *s = inline_summaries->get (node);
1418 size_time_entry *e;
1419 int i;
1420 fprintf (f, "Inline summary for %s/%i", node->name (),
1421 node->order);
1422 if (DECL_DISREGARD_INLINE_LIMITS (node->decl))
1423 fprintf (f, " always_inline");
1424 if (s->inlinable)
1425 fprintf (f, " inlinable");
1426 if (s->contains_cilk_spawn)
1427 fprintf (f, " contains_cilk_spawn");
1428 if (s->fp_expressions)
1429 fprintf (f, " fp_expression");
1430 fprintf (f, "\n self time: %i\n", s->self_time);
1431 fprintf (f, " global time: %i\n", s->time);
1432 fprintf (f, " self size: %i\n", s->self_size);
1433 fprintf (f, " global size: %i\n", s->size);
1434 fprintf (f, " min size: %i\n", s->min_size);
1435 fprintf (f, " self stack: %i\n",
1436 (int) s->estimated_self_stack_size);
1437 fprintf (f, " global stack: %i\n", (int) s->estimated_stack_size);
1438 if (s->growth)
1439 fprintf (f, " estimated growth:%i\n", (int) s->growth);
1440 if (s->scc_no)
1441 fprintf (f, " In SCC: %i\n", (int) s->scc_no);
1442 for (i = 0; vec_safe_iterate (s->entry, i, &e); i++)
1444 fprintf (f, " size:%f, time:%f, predicate:",
1445 (double) e->size / INLINE_SIZE_SCALE,
1446 (double) e->time / INLINE_TIME_SCALE);
1447 dump_predicate (f, s->conds, &e->predicate);
1449 if (s->loop_iterations)
1451 fprintf (f, " loop iterations:");
1452 dump_predicate (f, s->conds, s->loop_iterations);
1454 if (s->loop_stride)
1456 fprintf (f, " loop stride:");
1457 dump_predicate (f, s->conds, s->loop_stride);
1459 if (s->array_index)
1461 fprintf (f, " array index:");
1462 dump_predicate (f, s->conds, s->array_index);
1464 fprintf (f, " calls:\n");
1465 dump_inline_edge_summary (f, 4, node, s);
1466 fprintf (f, "\n");
1470 DEBUG_FUNCTION void
1471 debug_inline_summary (struct cgraph_node *node)
1473 dump_inline_summary (stderr, node);
1476 void
1477 dump_inline_summaries (FILE *f)
1479 struct cgraph_node *node;
1481 FOR_EACH_DEFINED_FUNCTION (node)
1482 if (!node->global.inlined_to)
1483 dump_inline_summary (f, node);
1486 /* Give initial reasons why inlining would fail on EDGE. This gets either
1487 nullified or usually overwritten by more precise reasons later. */
1489 void
1490 initialize_inline_failed (struct cgraph_edge *e)
1492 struct cgraph_node *callee = e->callee;
1494 if (e->indirect_unknown_callee)
1495 e->inline_failed = CIF_INDIRECT_UNKNOWN_CALL;
1496 else if (!callee->definition)
1497 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
1498 else if (callee->local.redefined_extern_inline)
1499 e->inline_failed = CIF_REDEFINED_EXTERN_INLINE;
1500 else if (e->call_stmt_cannot_inline_p)
1501 e->inline_failed = CIF_MISMATCHED_ARGUMENTS;
1502 else if (cfun && fn_contains_cilk_spawn_p (cfun))
1503     /* We can't inline if the function is spawning a function.  */
1504 e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
1505 else
1506 e->inline_failed = CIF_FUNCTION_NOT_CONSIDERED;
1509 /* Callback of walk_aliased_vdefs.  Records that it has been invoked by
1510    setting the boolean variable pointed to by DATA to true.  */
1512 static bool
1513 mark_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef ATTRIBUTE_UNUSED,
1514 void *data)
1516 bool *b = (bool *) data;
1517 *b = true;
1518 return true;
1521 /* If OP refers to value of function parameter, return the corresponding
1522 parameter. */
1524 static tree
1525 unmodified_parm_1 (gimple *stmt, tree op)
1527 /* SSA_NAME referring to parm default def? */
1528 if (TREE_CODE (op) == SSA_NAME
1529 && SSA_NAME_IS_DEFAULT_DEF (op)
1530 && TREE_CODE (SSA_NAME_VAR (op)) == PARM_DECL)
1531 return SSA_NAME_VAR (op);
1532 /* Non-SSA parm reference? */
1533 if (TREE_CODE (op) == PARM_DECL)
1535 bool modified = false;
1537 ao_ref refd;
1538 ao_ref_init (&refd, op);
1539 walk_aliased_vdefs (&refd, gimple_vuse (stmt), mark_modified, &modified,
1540 NULL);
1541 if (!modified)
1542 return op;
1544 return NULL_TREE;
1547 /* If OP refers to value of function parameter, return the corresponding
1548 parameter. Also traverse chains of SSA register assignments. */
1550 static tree
1551 unmodified_parm (gimple *stmt, tree op)
1553 tree res = unmodified_parm_1 (stmt, op);
1554 if (res)
1555 return res;
1557 if (TREE_CODE (op) == SSA_NAME
1558 && !SSA_NAME_IS_DEFAULT_DEF (op)
1559 && gimple_assign_single_p (SSA_NAME_DEF_STMT (op)))
1560 return unmodified_parm (SSA_NAME_DEF_STMT (op),
1561 gimple_assign_rhs1 (SSA_NAME_DEF_STMT (op)));
1562 return NULL_TREE;
1565 /* If OP refers to a value of a function parameter or value loaded from an
1566 aggregate passed to a parameter (either by value or reference), return TRUE
1567 and store the number of the parameter to *INDEX_P and information whether
1568 and how it has been loaded from an aggregate into *AGGPOS. INFO describes
1569 the function parameters, STMT is the statement in which OP is used or
1570 loaded. */
1572 static bool
1573 unmodified_parm_or_parm_agg_item (struct ipa_func_body_info *fbi,
1574 gimple *stmt, tree op, int *index_p,
1575 struct agg_position_info *aggpos)
1577 tree res = unmodified_parm_1 (stmt, op);
1579 gcc_checking_assert (aggpos);
1580 if (res)
1582 *index_p = ipa_get_param_decl_index (fbi->info, res);
1583 if (*index_p < 0)
1584 return false;
1585 aggpos->agg_contents = false;
1586 aggpos->by_ref = false;
1587 return true;
1590 if (TREE_CODE (op) == SSA_NAME)
1592 if (SSA_NAME_IS_DEFAULT_DEF (op)
1593 || !gimple_assign_single_p (SSA_NAME_DEF_STMT (op)))
1594 return false;
1595 stmt = SSA_NAME_DEF_STMT (op);
1596 op = gimple_assign_rhs1 (stmt);
1597 if (!REFERENCE_CLASS_P (op))
1598 return unmodified_parm_or_parm_agg_item (fbi, stmt, op, index_p,
1599 aggpos);
1602 aggpos->agg_contents = true;
1603 return ipa_load_from_parm_agg (fbi, fbi->info->descriptors,
1604 stmt, op, index_p, &aggpos->offset,
1605 NULL, &aggpos->by_ref);
1608 /* See if statement might disappear after inlining.
1609 0 - means not eliminated
1610    1 - half of the statements go away
1611 2 - for sure it is eliminated.
1612 We are not terribly sophisticated, basically looking for simple abstraction
1613 penalty wrappers. */
1615 static int
1616 eliminated_by_inlining_prob (gimple *stmt)
1618 enum gimple_code code = gimple_code (stmt);
1619 enum tree_code rhs_code;
1621 if (!optimize)
1622 return 0;
1624 switch (code)
1626 case GIMPLE_RETURN:
1627 return 2;
1628 case GIMPLE_ASSIGN:
1629 if (gimple_num_ops (stmt) != 2)
1630 return 0;
1632 rhs_code = gimple_assign_rhs_code (stmt);
1634 /* Casts of parameters, loads from parameters passed by reference
1635 	 and stores to the return value or parameters are often free after
1636 	 inlining due to SRA and further combining.
1637 	 Assume that half of the statements go away.  */
1638 if (CONVERT_EXPR_CODE_P (rhs_code)
1639 || rhs_code == VIEW_CONVERT_EXPR
1640 || rhs_code == ADDR_EXPR
1641 || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
1643 tree rhs = gimple_assign_rhs1 (stmt);
1644 tree lhs = gimple_assign_lhs (stmt);
1645 tree inner_rhs = get_base_address (rhs);
1646 tree inner_lhs = get_base_address (lhs);
1647 bool rhs_free = false;
1648 bool lhs_free = false;
1650 if (!inner_rhs)
1651 inner_rhs = rhs;
1652 if (!inner_lhs)
1653 inner_lhs = lhs;
1655 	/* Reads of parameters are expected to be free.  */
1656 if (unmodified_parm (stmt, inner_rhs))
1657 rhs_free = true;
1658 	/* Match expressions of the form &this->field.  Those will most likely
1659 combine with something upstream after inlining. */
1660 else if (TREE_CODE (inner_rhs) == ADDR_EXPR)
1662 tree op = get_base_address (TREE_OPERAND (inner_rhs, 0));
1663 if (TREE_CODE (op) == PARM_DECL)
1664 rhs_free = true;
1665 else if (TREE_CODE (op) == MEM_REF
1666 && unmodified_parm (stmt, TREE_OPERAND (op, 0)))
1667 rhs_free = true;
1670 	/* When a parameter is not an SSA register because its address is taken
1671 and it is just copied into one, the statement will be completely
1672 free after inlining (we will copy propagate backward). */
1673 if (rhs_free && is_gimple_reg (lhs))
1674 return 2;
1676 /* Reads of parameters passed by reference
1677 	   are expected to be free (i.e. optimized out after inlining).  */
1678 if (TREE_CODE (inner_rhs) == MEM_REF
1679 && unmodified_parm (stmt, TREE_OPERAND (inner_rhs, 0)))
1680 rhs_free = true;
1682 	/* Copying a parameter passed by reference into a gimple register is
1683 probably also going to copy propagate, but we can't be quite
1684 sure. */
1685 if (rhs_free && is_gimple_reg (lhs))
1686 lhs_free = true;
1688 	/* Writes to parameters, parameters passed by value and the return value
1689 	   (either directly or passed via an invisible reference) are free.
1691 TODO: We ought to handle testcase like
1692 struct a {int a,b;};
1693 struct a
1694 retrurnsturct (void)
1696 struct a a ={1,2};
1697 return a;
1700 	   This translates into:
1702 retrurnsturct ()
1704 int a$b;
1705 int a$a;
1706 struct a a;
1707 struct a D.2739;
1709 <bb 2>:
1710 D.2739.a = 1;
1711 D.2739.b = 2;
1712 return D.2739;
1715 	   For that we would need to copy the ipa-split logic detecting writes
1716 	   to the return value.  */
1717 if (TREE_CODE (inner_lhs) == PARM_DECL
1718 || TREE_CODE (inner_lhs) == RESULT_DECL
1719 || (TREE_CODE (inner_lhs) == MEM_REF
1720 && (unmodified_parm (stmt, TREE_OPERAND (inner_lhs, 0))
1721 || (TREE_CODE (TREE_OPERAND (inner_lhs, 0)) == SSA_NAME
1722 && SSA_NAME_VAR (TREE_OPERAND (inner_lhs, 0))
1723 && TREE_CODE (SSA_NAME_VAR (TREE_OPERAND
1724 (inner_lhs,
1725 0))) == RESULT_DECL))))
1726 lhs_free = true;
1727 if (lhs_free
1728 && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
1729 rhs_free = true;
1730 if (lhs_free && rhs_free)
1731 return 1;
1733 return 0;
1734 default:
1735 return 0;
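/* Illustrative sketch (documentation only, hypothetical GIMPLE): a cast of a
   parameter such as "i_1 = (int) param_2(D);" returns 2 (expected to be
   fully propagated away after inlining), a load through a parameter passed
   by reference such as "x_3 = *param_2(D);" returns 1 (about half of such
   statements combine away), and ordinary arithmetic returns 0.  */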
1740 /* If BB ends with a conditional that we can turn into predicates, attach
1741    the corresponding predicates to the CFG edges.  */
1743 static void
1744 set_cond_stmt_execution_predicate (struct ipa_func_body_info *fbi,
1745 struct inline_summary *summary,
1746 basic_block bb)
1748 gimple *last;
1749 tree op;
1750 int index;
1751 struct agg_position_info aggpos;
1752 enum tree_code code, inverted_code;
1753 edge e;
1754 edge_iterator ei;
1755 gimple *set_stmt;
1756 tree op2;
1758 last = last_stmt (bb);
1759 if (!last || gimple_code (last) != GIMPLE_COND)
1760 return;
1761 if (!is_gimple_ip_invariant (gimple_cond_rhs (last)))
1762 return;
1763 op = gimple_cond_lhs (last);
1764 /* TODO: handle conditionals like
1765 var = op0 < 4;
1766 if (var != 0). */
1767 if (unmodified_parm_or_parm_agg_item (fbi, last, op, &index, &aggpos))
1769 code = gimple_cond_code (last);
1770 inverted_code = invert_tree_comparison (code, HONOR_NANS (op));
1772 FOR_EACH_EDGE (e, ei, bb->succs)
1774 enum tree_code this_code = (e->flags & EDGE_TRUE_VALUE
1775 ? code : inverted_code);
1776 /* invert_tree_comparison will return ERROR_MARK on FP
1777 	     comparisons that are not EQ/NE instead of returning the proper
1778 unordered one. Be sure it is not confused with NON_CONSTANT. */
1779 if (this_code != ERROR_MARK)
1781 struct predicate p = add_condition
1782 (summary, index, &aggpos, this_code,
1783 unshare_expr_without_location (gimple_cond_rhs (last)));
1784 e->aux = edge_predicate_pool.allocate ();
1785 *(struct predicate *) e->aux = p;
1790 if (TREE_CODE (op) != SSA_NAME)
1791 return;
1792 /* Special case
1793 if (builtin_constant_p (op))
1794 constant_code
1795 else
1796 nonconstant_code.
1797 Here we can predicate nonconstant_code. We can't
1798 really handle constant_code since we have no predicate
1799 for this and also the constant code is not known to be
1800      optimized away when the inliner doesn't see that the operand is constant.
1801 Other optimizers might think otherwise. */
1802 if (gimple_cond_code (last) != NE_EXPR
1803 || !integer_zerop (gimple_cond_rhs (last)))
1804 return;
1805 set_stmt = SSA_NAME_DEF_STMT (op);
1806 if (!gimple_call_builtin_p (set_stmt, BUILT_IN_CONSTANT_P)
1807 || gimple_call_num_args (set_stmt) != 1)
1808 return;
1809 op2 = gimple_call_arg (set_stmt, 0);
1810 if (!unmodified_parm_or_parm_agg_item (fbi, set_stmt, op2, &index, &aggpos))
1811 return;
1812 FOR_EACH_EDGE (e, ei, bb->succs) if (e->flags & EDGE_FALSE_VALUE)
1814 struct predicate p = add_condition (summary, index, &aggpos,
1815 IS_NOT_CONSTANT, NULL_TREE);
1816 e->aux = edge_predicate_pool.allocate ();
1817 *(struct predicate *) e->aux = p;
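/* Illustrative sketch (documentation only, hypothetical GIMPLE): for a block
   ending in "if (op0 < 4)" where op0 is an unmodified parameter, the
   EDGE_TRUE_VALUE successor edge gets the predicate "op0 < 4" and the
   EDGE_FALSE_VALUE edge gets the inverted "op0 >= 4"; for the
   __builtin_constant_p pattern above only the false edge gets an
   IS_NOT_CONSTANT predicate.  */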
1822 /* If BB ends with a switch that we can turn into predicates, attach
1823    the corresponding predicates to the CFG edges.  */
1825 static void
1826 set_switch_stmt_execution_predicate (struct ipa_func_body_info *fbi,
1827 struct inline_summary *summary,
1828 basic_block bb)
1830 gimple *lastg;
1831 tree op;
1832 int index;
1833 struct agg_position_info aggpos;
1834 edge e;
1835 edge_iterator ei;
1836 size_t n;
1837 size_t case_idx;
1839 lastg = last_stmt (bb);
1840 if (!lastg || gimple_code (lastg) != GIMPLE_SWITCH)
1841 return;
1842 gswitch *last = as_a <gswitch *> (lastg);
1843 op = gimple_switch_index (last);
1844 if (!unmodified_parm_or_parm_agg_item (fbi, last, op, &index, &aggpos))
1845 return;
1847 FOR_EACH_EDGE (e, ei, bb->succs)
1849 e->aux = edge_predicate_pool.allocate ();
1850 *(struct predicate *) e->aux = false_predicate ();
1852 n = gimple_switch_num_labels (last);
1853 for (case_idx = 0; case_idx < n; ++case_idx)
1855 tree cl = gimple_switch_label (last, case_idx);
1856 tree min, max;
1857 struct predicate p;
1859 e = find_edge (bb, label_to_block (CASE_LABEL (cl)));
1860 min = CASE_LOW (cl);
1861 max = CASE_HIGH (cl);
1863       /* For the default case we might want to construct a predicate that none
1864 	 of the cases is met, but it is a bit hard to do without having negations
1865 	 of conditionals handy.  */
1866 if (!min && !max)
1867 p = true_predicate ();
1868 else if (!max)
1869 p = add_condition (summary, index, &aggpos, EQ_EXPR,
1870 unshare_expr_without_location (min));
1871 else
1873 struct predicate p1, p2;
1874 p1 = add_condition (summary, index, &aggpos, GE_EXPR,
1875 unshare_expr_without_location (min));
1876 p2 = add_condition (summary, index, &aggpos, LE_EXPR,
1877 unshare_expr_without_location (max));
1878 p = and_predicates (summary->conds, &p1, &p2);
1880 *(struct predicate *) e->aux
1881 = or_predicates (summary->conds, &p, (struct predicate *) e->aux);
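/* Illustrative sketch (documentation only, hypothetical GIMPLE): for
   "switch (op0)" with "case 3:" and "case 5 ... 7:", the edge of the first
   label gets the predicate "op0 == 3", the edge of the range gets
   "op0 >= 5 && op0 <= 7", the default edge gets the conservative true
   predicate, and edges shared by several labels accumulate their
   predicates via or_predicates.  */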
1886 /* For each BB in NODE attach to its AUX pointer predicate under
1887 which it is executable. */
1889 static void
1890 compute_bb_predicates (struct ipa_func_body_info *fbi,
1891 struct cgraph_node *node,
1892 struct inline_summary *summary)
1894 struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
1895 bool done = false;
1896 basic_block bb;
1898 FOR_EACH_BB_FN (bb, my_function)
1900 set_cond_stmt_execution_predicate (fbi, summary, bb);
1901 set_switch_stmt_execution_predicate (fbi, summary, bb);
1904 /* Entry block is always executable. */
1905 ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
1906 = edge_predicate_pool.allocate ();
1907 *(struct predicate *) ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
1908 = true_predicate ();
1910 /* A simple dataflow propagation of predicates forward in the CFG.
1911 TODO: work in reverse postorder. */
1912 while (!done)
1914 done = true;
1915 FOR_EACH_BB_FN (bb, my_function)
1917 struct predicate p = false_predicate ();
1918 edge e;
1919 edge_iterator ei;
1920 FOR_EACH_EDGE (e, ei, bb->preds)
1922 if (e->src->aux)
1924 struct predicate this_bb_predicate
1925 = *(struct predicate *) e->src->aux;
1926 if (e->aux)
1927 this_bb_predicate
1928 = and_predicates (summary->conds, &this_bb_predicate,
1929 (struct predicate *) e->aux);
1930 p = or_predicates (summary->conds, &p, &this_bb_predicate);
1931 if (true_predicate_p (&p))
1932 break;
1935 if (false_predicate_p (&p))
1936 gcc_assert (!bb->aux);
1937 else
1939 if (!bb->aux)
1941 done = false;
1942 bb->aux = edge_predicate_pool.allocate ();
1943 *((struct predicate *) bb->aux) = p;
1945 else if (!predicates_equal_p (&p, (struct predicate *) bb->aux))
1947 	      /* This OR operation is needed to ensure monotonic data flow
1948 		 in the case we hit the limit on the number of clauses and the
1949 and/or operations above give approximate answers. */
1950 p = or_predicates (summary->conds, &p, (struct predicate *)bb->aux);
1951 if (!predicates_equal_p (&p, (struct predicate *) bb->aux))
1953 done = false;
1954 *((struct predicate *) bb->aux) = p;
1963 /* We keep info about constantness of SSA names. */
1965 typedef struct predicate predicate_t;
1966 /* Return a predicate specifying when the value of expression EXPR might not
1967    be a compile time constant.  */
1969 static struct predicate
1970 will_be_nonconstant_expr_predicate (struct ipa_node_params *info,
1971 struct inline_summary *summary,
1972 tree expr,
1973 vec<predicate_t> nonconstant_names)
1975 tree parm;
1976 int index;
1978 while (UNARY_CLASS_P (expr))
1979 expr = TREE_OPERAND (expr, 0);
1981 parm = unmodified_parm (NULL, expr);
1982 if (parm && (index = ipa_get_param_decl_index (info, parm)) >= 0)
1983 return add_condition (summary, index, NULL, CHANGED, NULL_TREE);
1984 if (is_gimple_min_invariant (expr))
1985 return false_predicate ();
1986 if (TREE_CODE (expr) == SSA_NAME)
1987 return nonconstant_names[SSA_NAME_VERSION (expr)];
1988 if (BINARY_CLASS_P (expr) || COMPARISON_CLASS_P (expr))
1990 struct predicate p1 = will_be_nonconstant_expr_predicate
1991 (info, summary, TREE_OPERAND (expr, 0),
1992 nonconstant_names);
1993 struct predicate p2;
1994 if (true_predicate_p (&p1))
1995 return p1;
1996 p2 = will_be_nonconstant_expr_predicate (info, summary,
1997 TREE_OPERAND (expr, 1),
1998 nonconstant_names);
1999 return or_predicates (summary->conds, &p1, &p2);
2001 else if (TREE_CODE (expr) == COND_EXPR)
2003 struct predicate p1 = will_be_nonconstant_expr_predicate
2004 (info, summary, TREE_OPERAND (expr, 0),
2005 nonconstant_names);
2006 struct predicate p2;
2007 if (true_predicate_p (&p1))
2008 return p1;
2009 p2 = will_be_nonconstant_expr_predicate (info, summary,
2010 TREE_OPERAND (expr, 1),
2011 nonconstant_names);
2012 if (true_predicate_p (&p2))
2013 return p2;
2014 p1 = or_predicates (summary->conds, &p1, &p2);
2015 p2 = will_be_nonconstant_expr_predicate (info, summary,
2016 TREE_OPERAND (expr, 2),
2017 nonconstant_names);
2018 return or_predicates (summary->conds, &p1, &p2);
2020 else
2022 debug_tree (expr);
2023 gcc_unreachable ();
2025 return false_predicate ();
2029 /* Return a predicate specifying when STMT might have a result that is
2030 not a compile time constant. */
2032 static struct predicate
2033 will_be_nonconstant_predicate (struct ipa_func_body_info *fbi,
2034 struct inline_summary *summary,
2035 gimple *stmt,
2036 vec<predicate_t> nonconstant_names)
2038 struct predicate p = true_predicate ();
2039 ssa_op_iter iter;
2040 tree use;
2041 struct predicate op_non_const;
2042 bool is_load;
2043 int base_index;
2044 struct agg_position_info aggpos;
2046 /* What statements might be optimized away
2047 when their arguments are constant. */
2048 if (gimple_code (stmt) != GIMPLE_ASSIGN
2049 && gimple_code (stmt) != GIMPLE_COND
2050 && gimple_code (stmt) != GIMPLE_SWITCH
2051 && (gimple_code (stmt) != GIMPLE_CALL
2052 || !(gimple_call_flags (stmt) & ECF_CONST)))
2053 return p;
2055 /* Stores will stay anyway. */
2056 if (gimple_store_p (stmt))
2057 return p;
2059 is_load = gimple_assign_load_p (stmt);
2061 /* Loads can be optimized when the value is known. */
2062 if (is_load)
2064 tree op;
2065 gcc_assert (gimple_assign_single_p (stmt));
2066 op = gimple_assign_rhs1 (stmt);
2067 if (!unmodified_parm_or_parm_agg_item (fbi, stmt, op, &base_index,
2068 &aggpos))
2069 return p;
2071 else
2072 base_index = -1;
2074 /* See if we understand all operands before we start
2075 adding conditionals. */
2076 FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
2078 tree parm = unmodified_parm (stmt, use);
2079 /* For arguments we can build a condition. */
2080 if (parm && ipa_get_param_decl_index (fbi->info, parm) >= 0)
2081 continue;
2082 if (TREE_CODE (use) != SSA_NAME)
2083 return p;
2084 /* If we know when the operand is constant,
2085 we can still say something useful. */
2086 if (!true_predicate_p (&nonconstant_names[SSA_NAME_VERSION (use)]))
2087 continue;
2088 return p;
2091 if (is_load)
2092 op_non_const =
2093 add_condition (summary, base_index, &aggpos, CHANGED, NULL);
2094 else
2095 op_non_const = false_predicate ();
2096 FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
2098 tree parm = unmodified_parm (stmt, use);
2099 int index;
2101 if (parm && (index = ipa_get_param_decl_index (fbi->info, parm)) >= 0)
2103 if (index != base_index)
2104 p = add_condition (summary, index, NULL, CHANGED, NULL_TREE);
2105 else
2106 continue;
2108 else
2109 p = nonconstant_names[SSA_NAME_VERSION (use)];
2110 op_non_const = or_predicates (summary->conds, &p, &op_non_const);
2112 if ((gimple_code (stmt) == GIMPLE_ASSIGN || gimple_code (stmt) == GIMPLE_CALL)
2113 && gimple_op (stmt, 0)
2114 && TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
2115 nonconstant_names[SSA_NAME_VERSION (gimple_op (stmt, 0))]
2116 = op_non_const;
2117 return op_non_const;
2120 struct record_modified_bb_info
2122 bitmap bb_set;
2123 gimple *stmt;
2126 /* Callback of walk_aliased_vdefs. Records basic blocks where the value may be
2127 set except for info->stmt. */
2129 static bool
2130 record_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef, void *data)
2132 struct record_modified_bb_info *info =
2133 (struct record_modified_bb_info *) data;
2134 if (SSA_NAME_DEF_STMT (vdef) == info->stmt)
2135 return false;
2136 bitmap_set_bit (info->bb_set,
2137 SSA_NAME_IS_DEFAULT_DEF (vdef)
2138 ? ENTRY_BLOCK_PTR_FOR_FN (cfun)->index
2139 : gimple_bb (SSA_NAME_DEF_STMT (vdef))->index);
2140 return false;
2143 /* Return the probability (based on REG_BR_PROB_BASE) that the I-th
2144 parameter of STMT will change since the last invocation of STMT.
2146 Value 0 is reserved for compile time invariants.
2147 For common parameters it is REG_BR_PROB_BASE. For loop invariants it
2148 ought to be REG_BR_PROB_BASE / estimated_iters. */
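/* Worked example (editorial, hypothetical numbers): if the statement
   defining the argument sits in a block with frequency 100 while the call
   sits in a block with frequency 1000, the argument is assumed to change
   on roughly every tenth call, so the function returns about
   GCOV_COMPUTE_SCALE (100, 1000), i.e. REG_BR_PROB_BASE / 10.  */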
2150 static int
2151 param_change_prob (gimple *stmt, int i)
2153 tree op = gimple_call_arg (stmt, i);
2154 basic_block bb = gimple_bb (stmt);
2155 tree base;
2157 /* Global invariants never change. */
2158 if (is_gimple_min_invariant (op))
2159 return 0;
2160 /* We would have to do non-trivial analysis to really work out what
2161 the probability of the value changing is (i.e. when the init statement
2162 is in a sibling loop of the call).
2164 We make a conservative estimate: when the call is executed N times more
2165 often than the statement defining the value, we take the frequency 1/N. */
2166 if (TREE_CODE (op) == SSA_NAME)
2168 int init_freq;
2170 if (!bb->frequency)
2171 return REG_BR_PROB_BASE;
2173 if (SSA_NAME_IS_DEFAULT_DEF (op))
2174 init_freq = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
2175 else
2176 init_freq = gimple_bb (SSA_NAME_DEF_STMT (op))->frequency;
2178 if (!init_freq)
2179 init_freq = 1;
2180 if (init_freq < bb->frequency)
2181 return MAX (GCOV_COMPUTE_SCALE (init_freq, bb->frequency), 1);
2182 else
2183 return REG_BR_PROB_BASE;
2186 base = get_base_address (op);
2187 if (base)
2189 ao_ref refd;
2190 int max;
2191 struct record_modified_bb_info info;
2192 bitmap_iterator bi;
2193 unsigned index;
2194 tree init = ctor_for_folding (base);
2196 if (init != error_mark_node)
2197 return 0;
2198 if (!bb->frequency)
2199 return REG_BR_PROB_BASE;
2200 ao_ref_init (&refd, op);
2201 info.stmt = stmt;
2202 info.bb_set = BITMAP_ALLOC (NULL);
2203 walk_aliased_vdefs (&refd, gimple_vuse (stmt), record_modified, &info,
2204 NULL);
2205 if (bitmap_bit_p (info.bb_set, bb->index))
2207 BITMAP_FREE (info.bb_set);
2208 return REG_BR_PROB_BASE;
2211 /* Assume that all memory is initialized at entry.
2212 TODO: Can we easily determine if the value is always defined
2213 and thus skip the entry block? */
2214 if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency)
2215 max = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
2216 else
2217 max = 1;
2219 EXECUTE_IF_SET_IN_BITMAP (info.bb_set, 0, index, bi)
2220 max = MIN (max, BASIC_BLOCK_FOR_FN (cfun, index)->frequency);
2222 BITMAP_FREE (info.bb_set);
2223 if (max < bb->frequency)
2224 return MAX (GCOV_COMPUTE_SCALE (max, bb->frequency), 1);
2225 else
2226 return REG_BR_PROB_BASE;
2228 return REG_BR_PROB_BASE;
2231 /* Find whether a basic block BB is the final block of a (half) diamond CFG
2232 sub-graph and whether the predicate the condition depends on is known.
2233 If so, return true and store the predicate to *P. */
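/* Illustration (editorial sketch of a hypothetical CFG):

       first_bb:  if (param0 < 4)
                    /         \
               then_bb     (fallthru)
                    \         /
                        BB          <- block holding the PHI

   here the PHI result in BB depends only on the condition of first_bb, so
   *P becomes the predicate under which the value of "param0 < 4" is not
   known to be a compile time constant.  */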
2235 static bool
2236 phi_result_unknown_predicate (struct ipa_node_params *info,
2237 inline_summary *summary, basic_block bb,
2238 struct predicate *p,
2239 vec<predicate_t> nonconstant_names)
2241 edge e;
2242 edge_iterator ei;
2243 basic_block first_bb = NULL;
2244 gimple *stmt;
2246 if (single_pred_p (bb))
2248 *p = false_predicate ();
2249 return true;
2252 FOR_EACH_EDGE (e, ei, bb->preds)
2254 if (single_succ_p (e->src))
2256 if (!single_pred_p (e->src))
2257 return false;
2258 if (!first_bb)
2259 first_bb = single_pred (e->src);
2260 else if (single_pred (e->src) != first_bb)
2261 return false;
2263 else
2265 if (!first_bb)
2266 first_bb = e->src;
2267 else if (e->src != first_bb)
2268 return false;
2272 if (!first_bb)
2273 return false;
2275 stmt = last_stmt (first_bb);
2276 if (!stmt
2277 || gimple_code (stmt) != GIMPLE_COND
2278 || !is_gimple_ip_invariant (gimple_cond_rhs (stmt)))
2279 return false;
2281 *p = will_be_nonconstant_expr_predicate (info, summary,
2282 gimple_cond_lhs (stmt),
2283 nonconstant_names);
2284 if (true_predicate_p (p))
2285 return false;
2286 else
2287 return true;
2290 /* Given a PHI statement in a function described by inline properties SUMMARY
2291 and *P being the predicate describing whether the selected PHI argument is
2292 known, store a predicate for the result of the PHI statement into
2293 NONCONSTANT_NAMES, if possible. */
2295 static void
2296 predicate_for_phi_result (struct inline_summary *summary, gphi *phi,
2297 struct predicate *p,
2298 vec<predicate_t> nonconstant_names)
2300 unsigned i;
2302 for (i = 0; i < gimple_phi_num_args (phi); i++)
2304 tree arg = gimple_phi_arg (phi, i)->def;
2305 if (!is_gimple_min_invariant (arg))
2307 gcc_assert (TREE_CODE (arg) == SSA_NAME);
2308 *p = or_predicates (summary->conds, p,
2309 &nonconstant_names[SSA_NAME_VERSION (arg)]);
2310 if (true_predicate_p (p))
2311 return;
2315 if (dump_file && (dump_flags & TDF_DETAILS))
2317 fprintf (dump_file, "\t\tphi predicate: ");
2318 dump_predicate (dump_file, summary->conds, p);
2320 nonconstant_names[SSA_NAME_VERSION (gimple_phi_result (phi))] = *p;
2323 /* Return a predicate specifying when an array index in access OP becomes non-constant. */
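/* For example (editorial illustration, hypothetical SSA names): for OP of
   the form a.b[i_5][j_7] the result is or_predicates (P (i_5), P (j_7)),
   i.e. the access has a known index only when both index SSA names are
   known to be compile time constants.  */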
2325 static struct predicate
2326 array_index_predicate (inline_summary *info,
2327 vec< predicate_t> nonconstant_names, tree op)
2329 struct predicate p = false_predicate ();
2330 while (handled_component_p (op))
2332 if (TREE_CODE (op) == ARRAY_REF || TREE_CODE (op) == ARRAY_RANGE_REF)
2334 if (TREE_CODE (TREE_OPERAND (op, 1)) == SSA_NAME)
2335 p = or_predicates (info->conds, &p,
2336 &nonconstant_names[SSA_NAME_VERSION
2337 (TREE_OPERAND (op, 1))]);
2339 op = TREE_OPERAND (op, 0);
2341 return p;
2344 /* For a typical usage of __builtin_expect (a<b, 1), we
2345 may introduce an extra relation stmt:
2346 With the builtin, we have
2347 t1 = a <= b;
2348 t2 = (long int) t1;
2349 t3 = __builtin_expect (t2, 1);
2350 if (t3 != 0)
2351 goto ...
2352 Without the builtin, we have
2353 if (a<=b)
2354 goto...
2355 This affects the size/time estimation and may have
2356 an impact on the earlier inlining.
2357 Here find this pattern and fix it up later. */
2359 static gimple *
2360 find_foldable_builtin_expect (basic_block bb)
2362 gimple_stmt_iterator bsi;
2364 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
2366 gimple *stmt = gsi_stmt (bsi);
2367 if (gimple_call_builtin_p (stmt, BUILT_IN_EXPECT)
2368 || (is_gimple_call (stmt)
2369 && gimple_call_internal_p (stmt)
2370 && gimple_call_internal_fn (stmt) == IFN_BUILTIN_EXPECT))
2372 tree var = gimple_call_lhs (stmt);
2373 tree arg = gimple_call_arg (stmt, 0);
2374 use_operand_p use_p;
2375 gimple *use_stmt;
2376 bool match = false;
2377 bool done = false;
2379 if (!var || !arg)
2380 continue;
2381 gcc_assert (TREE_CODE (var) == SSA_NAME);
2383 while (TREE_CODE (arg) == SSA_NAME)
2385 gimple *stmt_tmp = SSA_NAME_DEF_STMT (arg);
2386 if (!is_gimple_assign (stmt_tmp))
2387 break;
2388 switch (gimple_assign_rhs_code (stmt_tmp))
2390 case LT_EXPR:
2391 case LE_EXPR:
2392 case GT_EXPR:
2393 case GE_EXPR:
2394 case EQ_EXPR:
2395 case NE_EXPR:
2396 match = true;
2397 done = true;
2398 break;
2399 CASE_CONVERT:
2400 break;
2401 default:
2402 done = true;
2403 break;
2405 if (done)
2406 break;
2407 arg = gimple_assign_rhs1 (stmt_tmp);
2410 if (match && single_imm_use (var, &use_p, &use_stmt)
2411 && gimple_code (use_stmt) == GIMPLE_COND)
2412 return use_stmt;
2415 return NULL;
2418 /* Return true when the basic block contains only clobbers followed by RESX.
2419 Such BBs are kept around to make removal of dead stores possible in the
2420 presence of EH and will be optimized out by optimize_clobbers later in the
2421 game.
2423 NEED_EH is used to recurse in case the clobber has non-EH predecessors
2424 that can be clobber-only, too. When it is false, the RESX is not necessary
2425 at the end of the basic block. */
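/* A typical instance (editorial illustration): an EH landing pad of the
   form

     tmp ={v} {CLOBBER};
     resx 1;

   is recognized here and is not accounted in the size/time estimates,
   since optimize_clobbers removes it later anyway.  */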
2427 static bool
2428 clobber_only_eh_bb_p (basic_block bb, bool need_eh = true)
2430 gimple_stmt_iterator gsi = gsi_last_bb (bb);
2431 edge_iterator ei;
2432 edge e;
2434 if (need_eh)
2436 if (gsi_end_p (gsi))
2437 return false;
2438 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_RESX)
2439 return false;
2440 gsi_prev (&gsi);
2442 else if (!single_succ_p (bb))
2443 return false;
2445 for (; !gsi_end_p (gsi); gsi_prev (&gsi))
2447 gimple *stmt = gsi_stmt (gsi);
2448 if (is_gimple_debug (stmt))
2449 continue;
2450 if (gimple_clobber_p (stmt))
2451 continue;
2452 if (gimple_code (stmt) == GIMPLE_LABEL)
2453 break;
2454 return false;
2457 /* See if all predecessors are either throws or clobber-only BBs. */
2458 FOR_EACH_EDGE (e, ei, bb->preds)
2459 if (!(e->flags & EDGE_EH)
2460 && !clobber_only_eh_bb_p (e->src, false))
2461 return false;
2463 return true;
2466 /* Return true if STMT computes a floating point expression that may be
2467 affected by -ffast-math and similar flags. */
2469 static bool
2470 fp_expression_p (gimple *stmt)
2472 ssa_op_iter i;
2473 tree op;
2475 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF|SSA_OP_USE)
2476 if (FLOAT_TYPE_P (TREE_TYPE (op)))
2477 return true;
2478 return false;
2481 /* Compute function body size parameters for NODE.
2482 When EARLY is true, we compute only simple summaries without
2483 non-trivial predicates to drive the early inliner. */
2485 static void
2486 estimate_function_body_sizes (struct cgraph_node *node, bool early)
2488 gcov_type time = 0;
2489 /* Estimate static overhead for function prologue/epilogue and alignment. */
2490 int size = 2;
2491 /* Benefits are scaled by probability of elimination that is in range
2492 <0,2>. */
2493 basic_block bb;
2494 struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
2495 int freq;
2496 struct inline_summary *info = inline_summaries->get (node);
2497 struct predicate bb_predicate;
2498 struct ipa_func_body_info fbi;
2499 vec<predicate_t> nonconstant_names = vNULL;
2500 int nblocks, n;
2501 int *order;
2502 predicate array_index = true_predicate ();
2503 gimple *fix_builtin_expect_stmt;
2505 gcc_assert (my_function && my_function->cfg);
2506 gcc_assert (cfun == my_function);
2508 memset(&fbi, 0, sizeof(fbi));
2509 info->conds = NULL;
2510 info->entry = NULL;
2512 /* When optimizing and analyzing for the IPA inliner, initialize the loop
2513 optimizer so we can produce proper inline hints.
2515 When optimizing and analyzing for the early inliner, initialize node
2516 params so we can produce correct BB predicates. */
2518 if (opt_for_fn (node->decl, optimize))
2520 calculate_dominance_info (CDI_DOMINATORS);
2521 if (!early)
2522 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
2523 else
2525 ipa_check_create_node_params ();
2526 ipa_initialize_node_params (node);
2529 if (ipa_node_params_sum)
2531 fbi.node = node;
2532 fbi.info = IPA_NODE_REF (node);
2533 fbi.bb_infos = vNULL;
2534 fbi.bb_infos.safe_grow_cleared (last_basic_block_for_fn (cfun));
2535 fbi.param_count = count_formal_params(node->decl);
2536 nonconstant_names.safe_grow_cleared
2537 (SSANAMES (my_function)->length ());
2541 if (dump_file)
2542 fprintf (dump_file, "\nAnalyzing function body size: %s\n",
2543 node->name ());
2545 /* When we run into the maximal number of entries, we assign everything to
2546 the constant truth case. Be sure to have it in the list. */
2547 bb_predicate = true_predicate ();
2548 account_size_time (info, 0, 0, &bb_predicate);
2550 bb_predicate = not_inlined_predicate ();
2551 account_size_time (info, 2 * INLINE_SIZE_SCALE, 0, &bb_predicate);
2553 if (fbi.info)
2554 compute_bb_predicates (&fbi, node, info);
2555 order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
2556 nblocks = pre_and_rev_post_order_compute (NULL, order, false);
2557 for (n = 0; n < nblocks; n++)
2559 bb = BASIC_BLOCK_FOR_FN (cfun, order[n]);
2560 freq = compute_call_stmt_bb_frequency (node->decl, bb);
2561 if (clobber_only_eh_bb_p (bb))
2563 if (dump_file && (dump_flags & TDF_DETAILS))
2564 fprintf (dump_file, "\n Ignoring BB %i;"
2565 " it will be optimized away by cleanup_clobbers\n",
2566 bb->index);
2567 continue;
2570 /* TODO: Obviously predicates can be propagated down across CFG. */
2571 if (fbi.info)
2573 if (bb->aux)
2574 bb_predicate = *(struct predicate *) bb->aux;
2575 else
2576 bb_predicate = false_predicate ();
2578 else
2579 bb_predicate = true_predicate ();
2581 if (dump_file && (dump_flags & TDF_DETAILS))
2583 fprintf (dump_file, "\n BB %i predicate:", bb->index);
2584 dump_predicate (dump_file, info->conds, &bb_predicate);
2587 if (fbi.info && nonconstant_names.exists ())
2589 struct predicate phi_predicate;
2590 bool first_phi = true;
2592 for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi);
2593 gsi_next (&bsi))
2595 if (first_phi
2596 && !phi_result_unknown_predicate (fbi.info, info, bb,
2597 &phi_predicate,
2598 nonconstant_names))
2599 break;
2600 first_phi = false;
2601 if (dump_file && (dump_flags & TDF_DETAILS))
2603 fprintf (dump_file, " ");
2604 print_gimple_stmt (dump_file, gsi_stmt (bsi), 0, 0);
2606 predicate_for_phi_result (info, bsi.phi (), &phi_predicate,
2607 nonconstant_names);
2611 fix_builtin_expect_stmt = find_foldable_builtin_expect (bb);
2613 for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi);
2614 gsi_next (&bsi))
2616 gimple *stmt = gsi_stmt (bsi);
2617 int this_size = estimate_num_insns (stmt, &eni_size_weights);
2618 int this_time = estimate_num_insns (stmt, &eni_time_weights);
2619 int prob;
2620 struct predicate will_be_nonconstant;
2622 /* This relation stmt will be folded after we remove the
2623 builtin_expect call. Adjust the cost here. */
2624 if (stmt == fix_builtin_expect_stmt)
2626 this_size--;
2627 this_time--;
2630 if (dump_file && (dump_flags & TDF_DETAILS))
2632 fprintf (dump_file, " ");
2633 print_gimple_stmt (dump_file, stmt, 0, 0);
2634 fprintf (dump_file, "\t\tfreq:%3.2f size:%3i time:%3i\n",
2635 ((double) freq) / CGRAPH_FREQ_BASE, this_size,
2636 this_time);
2639 if (gimple_assign_load_p (stmt) && nonconstant_names.exists ())
2641 struct predicate this_array_index;
2642 this_array_index =
2643 array_index_predicate (info, nonconstant_names,
2644 gimple_assign_rhs1 (stmt));
2645 if (!false_predicate_p (&this_array_index))
2646 array_index =
2647 and_predicates (info->conds, &array_index,
2648 &this_array_index);
2650 if (gimple_store_p (stmt) && nonconstant_names.exists ())
2652 struct predicate this_array_index;
2653 this_array_index =
2654 array_index_predicate (info, nonconstant_names,
2655 gimple_get_lhs (stmt));
2656 if (!false_predicate_p (&this_array_index))
2657 array_index =
2658 and_predicates (info->conds, &array_index,
2659 &this_array_index);
2663 if (is_gimple_call (stmt)
2664 && !gimple_call_internal_p (stmt))
2666 struct cgraph_edge *edge = node->get_edge (stmt);
2667 struct inline_edge_summary *es = inline_edge_summary (edge);
2669 /* Special case: results of BUILT_IN_CONSTANT_P will always be
2670 resolved as constant. However, we don't want to optimize
2671 out the cgraph edges. */
2672 if (nonconstant_names.exists ()
2673 && gimple_call_builtin_p (stmt, BUILT_IN_CONSTANT_P)
2674 && gimple_call_lhs (stmt)
2675 && TREE_CODE (gimple_call_lhs (stmt)) == SSA_NAME)
2677 struct predicate false_p = false_predicate ();
2678 nonconstant_names[SSA_NAME_VERSION (gimple_call_lhs (stmt))]
2679 = false_p;
2681 if (ipa_node_params_sum)
2683 int count = gimple_call_num_args (stmt);
2684 int i;
2686 if (count)
2687 es->param.safe_grow_cleared (count);
2688 for (i = 0; i < count; i++)
2690 int prob = param_change_prob (stmt, i);
2691 gcc_assert (prob >= 0 && prob <= REG_BR_PROB_BASE);
2692 es->param[i].change_prob = prob;
2696 es->call_stmt_size = this_size;
2697 es->call_stmt_time = this_time;
2698 es->loop_depth = bb_loop_depth (bb);
2699 edge_set_predicate (edge, &bb_predicate);
2702 /* TODO: When a conditional jump or switch is known to be constant, but
2703 we did not translate it into the predicates, we really can account
2704 just the maximum of the possible paths. */
2705 if (fbi.info)
2706 will_be_nonconstant
2707 = will_be_nonconstant_predicate (&fbi, info,
2708 stmt, nonconstant_names);
2709 if (this_time || this_size)
2711 struct predicate p;
2713 this_time *= freq;
2715 prob = eliminated_by_inlining_prob (stmt);
2716 if (prob == 1 && dump_file && (dump_flags & TDF_DETAILS))
2717 fprintf (dump_file,
2718 "\t\t50%% will be eliminated by inlining\n");
2719 if (prob == 2 && dump_file && (dump_flags & TDF_DETAILS))
2720 fprintf (dump_file, "\t\tWill be eliminated by inlining\n");
2722 if (fbi.info)
2723 p = and_predicates (info->conds, &bb_predicate,
2724 &will_be_nonconstant);
2725 else
2726 p = true_predicate ();
2728 if (!false_predicate_p (&p)
2729 || (is_gimple_call (stmt)
2730 && !false_predicate_p (&bb_predicate)))
2732 time += this_time;
2733 size += this_size;
2734 if (time > MAX_TIME * INLINE_TIME_SCALE)
2735 time = MAX_TIME * INLINE_TIME_SCALE;
2738 /* We account everything but the calls. Calls have their own
2739 size/time info attached to cgraph edges. This is necessary
2740 in order to make the cost disappear after inlining. */
2741 if (!is_gimple_call (stmt))
2743 if (prob)
2745 struct predicate ip = not_inlined_predicate ();
2746 ip = and_predicates (info->conds, &ip, &p);
2747 account_size_time (info, this_size * prob,
2748 this_time * prob, &ip);
2750 if (prob != 2)
2751 account_size_time (info, this_size * (2 - prob),
2752 this_time * (2 - prob), &p);
2755 if (!info->fp_expressions && fp_expression_p (stmt))
2757 info->fp_expressions = true;
2758 if (dump_file)
2759 fprintf (dump_file, " fp_expression set\n");
2762 gcc_assert (time >= 0);
2763 gcc_assert (size >= 0);
2767 set_hint_predicate (&inline_summaries->get (node)->array_index, array_index);
2768 time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
2769 if (time > MAX_TIME)
2770 time = MAX_TIME;
2771 free (order);
2773 if (nonconstant_names.exists () && !early)
2775 struct loop *loop;
2776 predicate loop_iterations = true_predicate ();
2777 predicate loop_stride = true_predicate ();
2779 if (dump_file && (dump_flags & TDF_DETAILS))
2780 flow_loops_dump (dump_file, NULL, 0);
2781 scev_initialize ();
2782 FOR_EACH_LOOP (loop, 0)
2784 vec<edge> exits;
2785 edge ex;
2786 unsigned int j;
2787 struct tree_niter_desc niter_desc;
2788 bb_predicate = *(struct predicate *) loop->header->aux;
2790 exits = get_loop_exit_edges (loop);
2791 FOR_EACH_VEC_ELT (exits, j, ex)
2792 if (number_of_iterations_exit (loop, ex, &niter_desc, false)
2793 && !is_gimple_min_invariant (niter_desc.niter))
2795 predicate will_be_nonconstant
2796 = will_be_nonconstant_expr_predicate (fbi.info, info,
2797 niter_desc.niter,
2798 nonconstant_names);
2799 if (!true_predicate_p (&will_be_nonconstant))
2800 will_be_nonconstant = and_predicates (info->conds,
2801 &bb_predicate,
2802 &will_be_nonconstant);
2803 if (!true_predicate_p (&will_be_nonconstant)
2804 && !false_predicate_p (&will_be_nonconstant))
2805 /* This is slightly imprecise. We may want to represent each
2806 loop with an independent predicate. */
2807 loop_iterations =
2808 and_predicates (info->conds, &loop_iterations,
2809 &will_be_nonconstant);
2811 exits.release ();
2814 /* To avoid quadratic behavior we analyze stride predicates only
2815 with respect to the containing loop. Thus we simply iterate
2816 over all defs in the outermost loop body. */
2817 for (loop = loops_for_fn (cfun)->tree_root->inner;
2818 loop != NULL; loop = loop->next)
2820 basic_block *body = get_loop_body (loop);
2821 for (unsigned i = 0; i < loop->num_nodes; i++)
2823 gimple_stmt_iterator gsi;
2824 bb_predicate = *(struct predicate *) body[i]->aux;
2825 for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi);
2826 gsi_next (&gsi))
2828 gimple *stmt = gsi_stmt (gsi);
2830 if (!is_gimple_assign (stmt))
2831 continue;
2833 tree def = gimple_assign_lhs (stmt);
2834 if (TREE_CODE (def) != SSA_NAME)
2835 continue;
2837 affine_iv iv;
2838 if (!simple_iv (loop_containing_stmt (stmt),
2839 loop_containing_stmt (stmt),
2840 def, &iv, true)
2841 || is_gimple_min_invariant (iv.step))
2842 continue;
2844 predicate will_be_nonconstant
2845 = will_be_nonconstant_expr_predicate (fbi.info, info,
2846 iv.step,
2847 nonconstant_names);
2848 if (!true_predicate_p (&will_be_nonconstant))
2849 will_be_nonconstant
2850 = and_predicates (info->conds, &bb_predicate,
2851 &will_be_nonconstant);
2852 if (!true_predicate_p (&will_be_nonconstant)
2853 && !false_predicate_p (&will_be_nonconstant))
2854 /* This is slightly imprecise. We may want to represent
2855 each loop with an independent predicate. */
2856 loop_stride = and_predicates (info->conds, &loop_stride,
2857 &will_be_nonconstant);
2860 free (body);
2862 set_hint_predicate (&inline_summaries->get (node)->loop_iterations,
2863 loop_iterations);
2864 set_hint_predicate (&inline_summaries->get (node)->loop_stride,
2865 loop_stride);
2866 scev_finalize ();
2868 FOR_ALL_BB_FN (bb, my_function)
2870 edge e;
2871 edge_iterator ei;
2873 if (bb->aux)
2874 edge_predicate_pool.remove ((predicate *)bb->aux);
2875 bb->aux = NULL;
2876 FOR_EACH_EDGE (e, ei, bb->succs)
2878 if (e->aux)
2879 edge_predicate_pool.remove ((predicate *) e->aux);
2880 e->aux = NULL;
2883 inline_summaries->get (node)->self_time = time;
2884 inline_summaries->get (node)->self_size = size;
2885 nonconstant_names.release ();
2886 ipa_release_body_info (&fbi);
2887 if (opt_for_fn (node->decl, optimize))
2889 if (!early)
2890 loop_optimizer_finalize ();
2891 else if (!ipa_edge_args_vector)
2892 ipa_free_all_node_params ();
2893 free_dominance_info (CDI_DOMINATORS);
2895 if (dump_file)
2897 fprintf (dump_file, "\n");
2898 dump_inline_summary (dump_file, node);
2903 /* Compute parameters of functions used by the inliner.
2904 EARLY is true when we compute parameters for the early inliner. */
2906 void
2907 compute_inline_parameters (struct cgraph_node *node, bool early)
2909 HOST_WIDE_INT self_stack_size;
2910 struct cgraph_edge *e;
2911 struct inline_summary *info;
2913 gcc_assert (!node->global.inlined_to);
2915 inline_summary_alloc ();
2917 info = inline_summaries->get (node);
2918 reset_inline_summary (node, info);
2920 /* FIXME: Thunks are inlinable, but tree-inline doesn't know how to do that.
2921 Once this happens, we will need to more carefully predict call
2922 statement size. */
2923 if (node->thunk.thunk_p)
2925 struct inline_edge_summary *es = inline_edge_summary (node->callees);
2926 struct predicate t = true_predicate ();
2928 info->inlinable = 0;
2929 node->callees->call_stmt_cannot_inline_p = true;
2930 node->local.can_change_signature = false;
2931 es->call_stmt_time = 1;
2932 es->call_stmt_size = 1;
2933 account_size_time (info, 0, 0, &t);
2934 return;
2937 /* Even is_gimple_min_invariant relies on current_function_decl. */
2938 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
2940 /* Estimate the stack size for the function if we're optimizing. */
2941 self_stack_size = optimize ? estimated_stack_frame_size (node) : 0;
2942 info->estimated_self_stack_size = self_stack_size;
2943 info->estimated_stack_size = self_stack_size;
2944 info->stack_frame_offset = 0;
2946 /* Can this function be inlined at all? */
2947 if (!opt_for_fn (node->decl, optimize)
2948 && !lookup_attribute ("always_inline",
2949 DECL_ATTRIBUTES (node->decl)))
2950 info->inlinable = false;
2951 else
2952 info->inlinable = tree_inlinable_function_p (node->decl);
2954 info->contains_cilk_spawn = fn_contains_cilk_spawn_p (cfun);
2956 /* Type attributes can use parameter indices to describe them. */
2957 if (TYPE_ATTRIBUTES (TREE_TYPE (node->decl)))
2958 node->local.can_change_signature = false;
2959 else
2961 /* Otherwise, inlinable functions always can change signature. */
2962 if (info->inlinable)
2963 node->local.can_change_signature = true;
2964 else
2966 /* Functions calling builtin_apply can not change signature. */
2967 for (e = node->callees; e; e = e->next_callee)
2969 tree cdecl = e->callee->decl;
2970 if (DECL_BUILT_IN (cdecl)
2971 && DECL_BUILT_IN_CLASS (cdecl) == BUILT_IN_NORMAL
2972 && (DECL_FUNCTION_CODE (cdecl) == BUILT_IN_APPLY_ARGS
2973 || DECL_FUNCTION_CODE (cdecl) == BUILT_IN_VA_START))
2974 break;
2976 node->local.can_change_signature = !e;
2979 estimate_function_body_sizes (node, early);
2981 for (e = node->callees; e; e = e->next_callee)
2982 if (e->callee->comdat_local_p ())
2983 break;
2984 node->calls_comdat_local = (e != NULL);
2986 /* Inlining characteristics are maintained by the cgraph_mark_inline. */
2987 info->time = info->self_time;
2988 info->size = info->self_size;
2989 info->stack_frame_offset = 0;
2990 info->estimated_stack_size = info->estimated_self_stack_size;
2991 if (flag_checking)
2993 inline_update_overall_summary (node);
2994 gcc_assert (info->time == info->self_time
2995 && info->size == info->self_size);
2998 pop_cfun ();
3002 /* Compute parameters of functions used by inliner using
3003 current_function_decl. */
3005 static unsigned int
3006 compute_inline_parameters_for_current (void)
3008 compute_inline_parameters (cgraph_node::get (current_function_decl), true);
3009 return 0;
3012 namespace {
3014 const pass_data pass_data_inline_parameters =
3016 GIMPLE_PASS, /* type */
3017 "inline_param", /* name */
3018 OPTGROUP_INLINE, /* optinfo_flags */
3019 TV_INLINE_PARAMETERS, /* tv_id */
3020 0, /* properties_required */
3021 0, /* properties_provided */
3022 0, /* properties_destroyed */
3023 0, /* todo_flags_start */
3024 0, /* todo_flags_finish */
3027 class pass_inline_parameters : public gimple_opt_pass
3029 public:
3030 pass_inline_parameters (gcc::context *ctxt)
3031 : gimple_opt_pass (pass_data_inline_parameters, ctxt)
3034 /* opt_pass methods: */
3035 opt_pass * clone () { return new pass_inline_parameters (m_ctxt); }
3036 virtual unsigned int execute (function *)
3038 return compute_inline_parameters_for_current ();
3041 }; // class pass_inline_parameters
3043 } // anon namespace
3045 gimple_opt_pass *
3046 make_pass_inline_parameters (gcc::context *ctxt)
3048 return new pass_inline_parameters (ctxt);
3052 /* Estimate the benefit of devirtualizing indirect edge IE, provided
3053 KNOWN_VALS, KNOWN_CONTEXTS and KNOWN_AGGS. */
3055 static bool
3056 estimate_edge_devirt_benefit (struct cgraph_edge *ie,
3057 int *size, int *time,
3058 vec<tree> known_vals,
3059 vec<ipa_polymorphic_call_context> known_contexts,
3060 vec<ipa_agg_jump_function_p> known_aggs)
3062 tree target;
3063 struct cgraph_node *callee;
3064 struct inline_summary *isummary;
3065 enum availability avail;
3066 bool speculative;
3068 if (!known_vals.exists () && !known_contexts.exists ())
3069 return false;
3070 if (!opt_for_fn (ie->caller->decl, flag_indirect_inlining))
3071 return false;
3073 target = ipa_get_indirect_edge_target (ie, known_vals, known_contexts,
3074 known_aggs, &speculative);
3075 if (!target || speculative)
3076 return false;
3078 /* Account for difference in cost between indirect and direct calls. */
3079 *size -= (eni_size_weights.indirect_call_cost - eni_size_weights.call_cost);
3080 *time -= (eni_time_weights.indirect_call_cost - eni_time_weights.call_cost);
3081 gcc_checking_assert (*time >= 0);
3082 gcc_checking_assert (*size >= 0);
3084 callee = cgraph_node::get (target);
3085 if (!callee || !callee->definition)
3086 return false;
3087 callee = callee->function_symbol (&avail);
3088 if (avail < AVAIL_AVAILABLE)
3089 return false;
3090 isummary = inline_summaries->get (callee);
3091 return isummary->inlinable;
3094 /* Increase SIZE, MIN_SIZE (if non-NULL) and TIME by the size and time
3095 needed to handle edge E with probability PROB.
3096 Set HINTS if the edge may be devirtualized.
3097 KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS describe the context of the
3098 call site. */
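/* Rough illustration of the accounting below (editorial, hypothetical
   numbers): a call with call_stmt_size 3, call_stmt_time 10, probability
   REG_BR_PROB_BASE / 2 and edge frequency CGRAPH_FREQ_BASE contributes
   3 * INLINE_SIZE_SCALE to *size and about 5 * INLINE_TIME_SCALE to
   *time, with *time capped at MAX_TIME * INLINE_TIME_SCALE.  */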
3100 static inline void
3101 estimate_edge_size_and_time (struct cgraph_edge *e, int *size, int *min_size,
3102 int *time,
3103 int prob,
3104 vec<tree> known_vals,
3105 vec<ipa_polymorphic_call_context> known_contexts,
3106 vec<ipa_agg_jump_function_p> known_aggs,
3107 inline_hints *hints)
3109 struct inline_edge_summary *es = inline_edge_summary (e);
3110 int call_size = es->call_stmt_size;
3111 int call_time = es->call_stmt_time;
3112 int cur_size;
3113 if (!e->callee
3114 && estimate_edge_devirt_benefit (e, &call_size, &call_time,
3115 known_vals, known_contexts, known_aggs)
3116 && hints && e->maybe_hot_p ())
3117 *hints |= INLINE_HINT_indirect_call;
3118 cur_size = call_size * INLINE_SIZE_SCALE;
3119 *size += cur_size;
3120 if (min_size)
3121 *min_size += cur_size;
3122 *time += apply_probability ((gcov_type) call_time, prob)
3123 * e->frequency * (INLINE_TIME_SCALE / CGRAPH_FREQ_BASE);
3124 if (*time > MAX_TIME * INLINE_TIME_SCALE)
3125 *time = MAX_TIME * INLINE_TIME_SCALE;
3130 /* Increase SIZE, MIN_SIZE and TIME by the size and time needed to handle
3131 all calls in NODE. POSSIBLE_TRUTHS, KNOWN_VALS, KNOWN_AGGS and
3132 KNOWN_CONTEXTS describe the context of the call site. */
3134 static void
3135 estimate_calls_size_and_time (struct cgraph_node *node, int *size,
3136 int *min_size, int *time,
3137 inline_hints *hints,
3138 clause_t possible_truths,
3139 vec<tree> known_vals,
3140 vec<ipa_polymorphic_call_context> known_contexts,
3141 vec<ipa_agg_jump_function_p> known_aggs)
3143 struct cgraph_edge *e;
3144 for (e = node->callees; e; e = e->next_callee)
3146 if (inline_edge_summary_vec.length () <= (unsigned) e->uid)
3147 continue;
3149 struct inline_edge_summary *es = inline_edge_summary (e);
3151 /* Do not care about zero sized builtins. */
3152 if (e->inline_failed && !es->call_stmt_size)
3154 gcc_checking_assert (!es->call_stmt_time);
3155 continue;
3157 if (!es->predicate
3158 || evaluate_predicate (es->predicate, possible_truths))
3160 if (e->inline_failed)
3162 /* Predicates of calls shall not use NOT_CHANGED codes,
3163 so we do not need to compute probabilities. */
3164 estimate_edge_size_and_time (e, size,
3165 es->predicate ? NULL : min_size,
3166 time, REG_BR_PROB_BASE,
3167 known_vals, known_contexts,
3168 known_aggs, hints);
3170 else
3171 estimate_calls_size_and_time (e->callee, size, min_size, time,
3172 hints,
3173 possible_truths,
3174 known_vals, known_contexts,
3175 known_aggs);
3178 for (e = node->indirect_calls; e; e = e->next_callee)
3180 if (inline_edge_summary_vec.length () <= (unsigned) e->uid)
3181 continue;
3183 struct inline_edge_summary *es = inline_edge_summary (e);
3184 if (!es->predicate
3185 || evaluate_predicate (es->predicate, possible_truths))
3186 estimate_edge_size_and_time (e, size,
3187 es->predicate ? NULL : min_size,
3188 time, REG_BR_PROB_BASE,
3189 known_vals, known_contexts, known_aggs,
3190 hints);
3195 /* Estimate the size and time needed to execute NODE assuming the
3196 POSSIBLE_TRUTHS clause and the KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
3197 information about NODE's arguments. If non-NULL, also use the probability
3198 information present in the INLINE_PARAM_SUMMARY vector.
3199 Additionally determine hints implied by the context. Finally compute the
3200 minimal size needed for the call that is independent of the call context
3201 and can be used for fast estimates. Return the values in RET_SIZE,
3202 RET_MIN_SIZE, RET_TIME and RET_HINTS. */
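/* Small illustration of the size/time accumulation below (editorial,
   hypothetical entries): with the entries

     size 4,  time 10, predicate true
     size 6,  time 20, predicate "param0 changed"

   and POSSIBLE_TRUTHS proving the second predicate false, the estimate is
   size 4 and time 10; when nothing is known it is size 10 and time 30
   (both before the final division by INLINE_SIZE_SCALE and
   INLINE_TIME_SCALE).  */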
3204 static void
3205 estimate_node_size_and_time (struct cgraph_node *node,
3206 clause_t possible_truths,
3207 vec<tree> known_vals,
3208 vec<ipa_polymorphic_call_context> known_contexts,
3209 vec<ipa_agg_jump_function_p> known_aggs,
3210 int *ret_size, int *ret_min_size, int *ret_time,
3211 inline_hints *ret_hints,
3212 vec<inline_param_summary>
3213 inline_param_summary)
3215 struct inline_summary *info = inline_summaries->get (node);
3216 size_time_entry *e;
3217 int size = 0;
3218 int time = 0;
3219 int min_size = 0;
3220 inline_hints hints = 0;
3221 int i;
3223 if (dump_file && (dump_flags & TDF_DETAILS))
3225 bool found = false;
3226 fprintf (dump_file, " Estimating body: %s/%i\n"
3227 " Known to be false: ", node->name (),
3228 node->order);
3230 for (i = predicate_not_inlined_condition;
3231 i < (predicate_first_dynamic_condition
3232 + (int) vec_safe_length (info->conds)); i++)
3233 if (!(possible_truths & (1 << i)))
3235 if (found)
3236 fprintf (dump_file, ", ");
3237 found = true;
3238 dump_condition (dump_file, info->conds, i);
3242 for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
3243 if (evaluate_predicate (&e->predicate, possible_truths))
3245 size += e->size;
3246 gcc_checking_assert (e->time >= 0);
3247 gcc_checking_assert (time >= 0);
3248 if (!inline_param_summary.exists ())
3249 time += e->time;
3250 else
3252 int prob = predicate_probability (info->conds,
3253 &e->predicate,
3254 possible_truths,
3255 inline_param_summary);
3256 gcc_checking_assert (prob >= 0);
3257 gcc_checking_assert (prob <= REG_BR_PROB_BASE);
3258 time += apply_probability ((gcov_type) e->time, prob);
3260 if (time > MAX_TIME * INLINE_TIME_SCALE)
3261 time = MAX_TIME * INLINE_TIME_SCALE;
3262 gcc_checking_assert (time >= 0);
3265 gcc_checking_assert (true_predicate_p (&(*info->entry)[0].predicate));
3266 min_size = (*info->entry)[0].size;
3267 gcc_checking_assert (size >= 0);
3268 gcc_checking_assert (time >= 0);
3270 if (info->loop_iterations
3271 && !evaluate_predicate (info->loop_iterations, possible_truths))
3272 hints |= INLINE_HINT_loop_iterations;
3273 if (info->loop_stride
3274 && !evaluate_predicate (info->loop_stride, possible_truths))
3275 hints |= INLINE_HINT_loop_stride;
3276 if (info->array_index
3277 && !evaluate_predicate (info->array_index, possible_truths))
3278 hints |= INLINE_HINT_array_index;
3279 if (info->scc_no)
3280 hints |= INLINE_HINT_in_scc;
3281 if (DECL_DECLARED_INLINE_P (node->decl))
3282 hints |= INLINE_HINT_declared_inline;
3284 estimate_calls_size_and_time (node, &size, &min_size, &time, &hints, possible_truths,
3285 known_vals, known_contexts, known_aggs);
3286 gcc_checking_assert (size >= 0);
3287 gcc_checking_assert (time >= 0);
3288 time = RDIV (time, INLINE_TIME_SCALE);
3289 size = RDIV (size, INLINE_SIZE_SCALE);
3290 min_size = RDIV (min_size, INLINE_SIZE_SCALE);
3292 if (dump_file && (dump_flags & TDF_DETAILS))
3293 fprintf (dump_file, "\n size:%i time:%i\n", (int) size, (int) time);
3294 if (ret_time)
3295 *ret_time = time;
3296 if (ret_size)
3297 *ret_size = size;
3298 if (ret_min_size)
3299 *ret_min_size = min_size;
3300 if (ret_hints)
3301 *ret_hints = hints;
3302 return;
3306 /* Estimate size and time needed to execute callee of EDGE assuming that
3307 parameters known to be constant at caller of EDGE are propagated.
3308 KNOWN_VALS and KNOWN_CONTEXTS are vectors of assumed known constant values
3309 and types for parameters. */
3311 void
3312 estimate_ipcp_clone_size_and_time (struct cgraph_node *node,
3313 vec<tree> known_vals,
3314 vec<ipa_polymorphic_call_context>
3315 known_contexts,
3316 vec<ipa_agg_jump_function_p> known_aggs,
3317 int *ret_size, int *ret_time,
3318 inline_hints *hints)
3320 clause_t clause;
3322 clause = evaluate_conditions_for_known_args (node, false, known_vals,
3323 known_aggs);
3324 estimate_node_size_and_time (node, clause, known_vals, known_contexts,
3325 known_aggs, ret_size, NULL, ret_time, hints, vNULL);
3328 /* Translate all conditions from callee representation into caller
3329 representation and symbolically evaluate predicate P into a new predicate.
3331 INFO is the inline_summary of the function we are adding the predicate to,
3332 CALLEE_INFO is the summary of the function predicate P is from.
3333 OPERAND_MAP is an array mapping callee formal IDs to caller formal IDs.
3334 POSSIBLE_TRUTHS is the clause of all callee conditions that may be true in
3335 the caller context. TOPLEV_PREDICATE is the predicate under which the
3336 callee is executed. OFFSET_MAP is an array of offsets that need to be
3337 added to the conditions; a negative offset means that conditions relying
3338 on values passed by reference have to be discarded because they might not
3339 be preserved (and should be considered offset zero for other purposes). */
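/* Example of the remapping (editorial illustration): if the callee
   condition tests "parameter 1 changed" and OPERAND_MAP[1] == 3, the
   remapped caller condition tests "parameter 3 changed"; when
   OPERAND_MAP[1] == -1 (no usable jump function) the condition degrades
   to the true predicate, i.e. nothing is assumed about it.  */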
3341 static struct predicate
3342 remap_predicate (struct inline_summary *info,
3343 struct inline_summary *callee_info,
3344 struct predicate *p,
3345 vec<int> operand_map,
3346 vec<int> offset_map,
3347 clause_t possible_truths, struct predicate *toplev_predicate)
3349 int i;
3350 struct predicate out = true_predicate ();
3352 /* True predicate is easy. */
3353 if (true_predicate_p (p))
3354 return *toplev_predicate;
3355 for (i = 0; p->clause[i]; i++)
3357 clause_t clause = p->clause[i];
3358 int cond;
3359 struct predicate clause_predicate = false_predicate ();
3361 gcc_assert (i < MAX_CLAUSES);
3363 for (cond = 0; cond < NUM_CONDITIONS; cond++)
3364 /* Do we have condition we can't disprove? */
3365 if (clause & possible_truths & (1 << cond))
3367 struct predicate cond_predicate;
3368 /* Work out if the condition can translate to predicate in the
3369 inlined function. */
3370 if (cond >= predicate_first_dynamic_condition)
3372 struct condition *c;
3374 c = &(*callee_info->conds)[cond
3376 predicate_first_dynamic_condition];
3377 /* See if we can remap condition operand to caller's operand.
3378 Otherwise give up. */
3379 if (!operand_map.exists ()
3380 || (int) operand_map.length () <= c->operand_num
3381 || operand_map[c->operand_num] == -1
3382 /* TODO: For non-aggregate conditions, adding an offset is
3383 basically an arithmetic jump function processing which
3384 we should support in future. */
3385 || ((!c->agg_contents || !c->by_ref)
3386 && offset_map[c->operand_num] > 0)
3387 || (c->agg_contents && c->by_ref
3388 && offset_map[c->operand_num] < 0))
3389 cond_predicate = true_predicate ();
3390 else
3392 struct agg_position_info ap;
3393 HOST_WIDE_INT offset_delta = offset_map[c->operand_num];
3394 if (offset_delta < 0)
3396 gcc_checking_assert (!c->agg_contents || !c->by_ref);
3397 offset_delta = 0;
3399 gcc_assert (!c->agg_contents
3400 || c->by_ref || offset_delta == 0);
3401 ap.offset = c->offset + offset_delta;
3402 ap.agg_contents = c->agg_contents;
3403 ap.by_ref = c->by_ref;
3404 cond_predicate = add_condition (info,
3405 operand_map[c->operand_num],
3406 &ap, c->code, c->val);
3409 /* Fixed conditions remain the same; construct a single
3410 condition predicate. */
3411 else
3413 cond_predicate.clause[0] = 1 << cond;
3414 cond_predicate.clause[1] = 0;
3416 clause_predicate = or_predicates (info->conds, &clause_predicate,
3417 &cond_predicate);
3419 out = and_predicates (info->conds, &out, &clause_predicate);
3421 return and_predicates (info->conds, &out, toplev_predicate);
3425 /* Update summary information of inline clones after inlining.
3426 Compute peak stack usage. */
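/* Sketch of the stack accounting below (editorial, hypothetical sizes):
   if the caller's frame is 48 bytes at offset 0 and the inlined callee
   needs 32 bytes, the callee's frame is placed at offset 48 and the peak
   estimated stack size of the whole inline clone becomes 80 bytes.  */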
3428 static void
3429 inline_update_callee_summaries (struct cgraph_node *node, int depth)
3431 struct cgraph_edge *e;
3432 struct inline_summary *callee_info = inline_summaries->get (node);
3433 struct inline_summary *caller_info = inline_summaries->get (node->callers->caller);
3434 HOST_WIDE_INT peak;
3436 callee_info->stack_frame_offset
3437 = caller_info->stack_frame_offset
3438 + caller_info->estimated_self_stack_size;
3439 peak = callee_info->stack_frame_offset
3440 + callee_info->estimated_self_stack_size;
3441 if (inline_summaries->get (node->global.inlined_to)->estimated_stack_size < peak)
3442 inline_summaries->get (node->global.inlined_to)->estimated_stack_size = peak;
3443 ipa_propagate_frequency (node);
3444 for (e = node->callees; e; e = e->next_callee)
3446 if (!e->inline_failed)
3447 inline_update_callee_summaries (e->callee, depth);
3448 inline_edge_summary (e)->loop_depth += depth;
3450 for (e = node->indirect_calls; e; e = e->next_callee)
3451 inline_edge_summary (e)->loop_depth += depth;
3454 /* Update change_prob of EDGE after INLINED_EDGE has been inlined.
3455 When function A is inlined in B and A calls C with a parameter that
3456 changes with probability PROB1, and that parameter is known to be a
3457 passthrough of an argument of B that changes with probability PROB2,
3458 the probability of change is now PROB1*PROB2. */
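/* Worked example (editorial, hypothetical probabilities): with
   PROB1 == REG_BR_PROB_BASE / 2 and PROB2 == REG_BR_PROB_BASE / 4 the
   combined change probability is REG_BR_PROB_BASE / 8; when both inputs
   are nonzero the result is clamped to at least 1 so the parameter is
   never mistaken for a compile time invariant.  */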
3460 static void
3461 remap_edge_change_prob (struct cgraph_edge *inlined_edge,
3462 struct cgraph_edge *edge)
3464 if (ipa_node_params_sum)
3466 int i;
3467 struct ipa_edge_args *args = IPA_EDGE_REF (edge);
3468 struct inline_edge_summary *es = inline_edge_summary (edge);
3469 struct inline_edge_summary *inlined_es
3470 = inline_edge_summary (inlined_edge);
3472 for (i = 0; i < ipa_get_cs_argument_count (args); i++)
3474 struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i);
3475 if (jfunc->type == IPA_JF_PASS_THROUGH
3476 && (ipa_get_jf_pass_through_formal_id (jfunc)
3477 < (int) inlined_es->param.length ()))
3479 int jf_formal_id = ipa_get_jf_pass_through_formal_id (jfunc);
3480 int prob1 = es->param[i].change_prob;
3481 int prob2 = inlined_es->param[jf_formal_id].change_prob;
3482 int prob = combine_probabilities (prob1, prob2);
3484 if (prob1 && prob2 && !prob)
3485 prob = 1;
3487 es->param[i].change_prob = prob;
3493 /* Update edge summaries of NODE after INLINED_EDGE has been inlined.
3495 Remap predicates of callees of NODE. Rest of arguments match
3496 remap_predicate.
3498 Also update change probabilities. */
3500 static void
3501 remap_edge_summaries (struct cgraph_edge *inlined_edge,
3502 struct cgraph_node *node,
3503 struct inline_summary *info,
3504 struct inline_summary *callee_info,
3505 vec<int> operand_map,
3506 vec<int> offset_map,
3507 clause_t possible_truths,
3508 struct predicate *toplev_predicate)
3510 struct cgraph_edge *e, *next;
3511 for (e = node->callees; e; e = next)
3513 struct inline_edge_summary *es = inline_edge_summary (e);
3514 struct predicate p;
3515 next = e->next_callee;
3517 if (e->inline_failed)
3519 remap_edge_change_prob (inlined_edge, e);
3521 if (es->predicate)
3523 p = remap_predicate (info, callee_info,
3524 es->predicate, operand_map, offset_map,
3525 possible_truths, toplev_predicate);
3526 edge_set_predicate (e, &p);
3528 else
3529 edge_set_predicate (e, toplev_predicate);
3531 else
3532 remap_edge_summaries (inlined_edge, e->callee, info, callee_info,
3533 operand_map, offset_map, possible_truths,
3534 toplev_predicate);
3536 for (e = node->indirect_calls; e; e = next)
3538 struct inline_edge_summary *es = inline_edge_summary (e);
3539 struct predicate p;
3540 next = e->next_callee;
3542 remap_edge_change_prob (inlined_edge, e);
3543 if (es->predicate)
3545 p = remap_predicate (info, callee_info,
3546 es->predicate, operand_map, offset_map,
3547 possible_truths, toplev_predicate);
3548 edge_set_predicate (e, &p);
3550 else
3551 edge_set_predicate (e, toplev_predicate);
3555 /* Same as remap_predicate, but set result into hint *HINT. */
3557 static void
3558 remap_hint_predicate (struct inline_summary *info,
3559 struct inline_summary *callee_info,
3560 struct predicate **hint,
3561 vec<int> operand_map,
3562 vec<int> offset_map,
3563 clause_t possible_truths,
3564 struct predicate *toplev_predicate)
3566 predicate p;
3568 if (!*hint)
3569 return;
3570 p = remap_predicate (info, callee_info,
3571 *hint,
3572 operand_map, offset_map,
3573 possible_truths, toplev_predicate);
3574 if (!false_predicate_p (&p) && !true_predicate_p (&p))
3576 if (!*hint)
3577 set_hint_predicate (hint, p);
3578 else
3579 **hint = and_predicates (info->conds, *hint, &p);
3583 /* We inlined EDGE. Update summary of the function we inlined into. */
3585 void
3586 inline_merge_summary (struct cgraph_edge *edge)
3588 struct inline_summary *callee_info = inline_summaries->get (edge->callee);
3589 struct cgraph_node *to = (edge->caller->global.inlined_to
3590 ? edge->caller->global.inlined_to : edge->caller);
3591 struct inline_summary *info = inline_summaries->get (to);
3592 clause_t clause = 0; /* not_inline is known to be false. */
3593 size_time_entry *e;
3594 vec<int> operand_map = vNULL;
3595 vec<int> offset_map = vNULL;
3596 int i;
3597 struct predicate toplev_predicate;
3598 struct predicate true_p = true_predicate ();
3599 struct inline_edge_summary *es = inline_edge_summary (edge);
3601 if (es->predicate)
3602 toplev_predicate = *es->predicate;
3603 else
3604 toplev_predicate = true_predicate ();
3606 info->fp_expressions |= callee_info->fp_expressions;
3608 if (callee_info->conds)
3609 evaluate_properties_for_edge (edge, true, &clause, NULL, NULL, NULL);
3610 if (ipa_node_params_sum && callee_info->conds)
3612 struct ipa_edge_args *args = IPA_EDGE_REF (edge);
3613 int count = ipa_get_cs_argument_count (args);
3614 int i;
3616 if (count)
3618 operand_map.safe_grow_cleared (count);
3619 offset_map.safe_grow_cleared (count);
3621 for (i = 0; i < count; i++)
3623 struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i);
3624 int map = -1;
3626 /* TODO: handle non-NOPs when merging. */
3627 if (jfunc->type == IPA_JF_PASS_THROUGH)
3629 if (ipa_get_jf_pass_through_operation (jfunc) == NOP_EXPR)
3630 map = ipa_get_jf_pass_through_formal_id (jfunc);
3631 if (!ipa_get_jf_pass_through_agg_preserved (jfunc))
3632 offset_map[i] = -1;
3634 else if (jfunc->type == IPA_JF_ANCESTOR)
3636 HOST_WIDE_INT offset = ipa_get_jf_ancestor_offset (jfunc);
3637 if (offset >= 0 && offset < INT_MAX)
3639 map = ipa_get_jf_ancestor_formal_id (jfunc);
3640 if (!ipa_get_jf_ancestor_agg_preserved (jfunc))
3641 offset = -1;
3642 offset_map[i] = offset;
3645 operand_map[i] = map;
3646 gcc_assert (map < ipa_get_param_count (IPA_NODE_REF (to)));
3649 for (i = 0; vec_safe_iterate (callee_info->entry, i, &e); i++)
3651 struct predicate p = remap_predicate (info, callee_info,
3652 &e->predicate, operand_map,
3653 offset_map, clause,
3654 &toplev_predicate);
3655 if (!false_predicate_p (&p))
3657 gcov_type add_time = ((gcov_type) e->time * edge->frequency
3658 + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
3659 int prob = predicate_probability (callee_info->conds,
3660 &e->predicate,
3661 clause, es->param);
3662 add_time = apply_probability ((gcov_type) add_time, prob);
3663 if (add_time > MAX_TIME * INLINE_TIME_SCALE)
3664 add_time = MAX_TIME * INLINE_TIME_SCALE;
3665 if (prob != REG_BR_PROB_BASE
3666 && dump_file && (dump_flags & TDF_DETAILS))
3668 fprintf (dump_file, "\t\tScaling time by probability:%f\n",
3669 (double) prob / REG_BR_PROB_BASE);
3671 account_size_time (info, e->size, add_time, &p);
3674 remap_edge_summaries (edge, edge->callee, info, callee_info, operand_map,
3675 offset_map, clause, &toplev_predicate);
3676 remap_hint_predicate (info, callee_info,
3677 &callee_info->loop_iterations,
3678 operand_map, offset_map, clause, &toplev_predicate);
3679 remap_hint_predicate (info, callee_info,
3680 &callee_info->loop_stride,
3681 operand_map, offset_map, clause, &toplev_predicate);
3682 remap_hint_predicate (info, callee_info,
3683 &callee_info->array_index,
3684 operand_map, offset_map, clause, &toplev_predicate);
3686 inline_update_callee_summaries (edge->callee,
3687 inline_edge_summary (edge)->loop_depth);
3689 /* We do not maintain predicates of inlined edges, free them. */
3690 edge_set_predicate (edge, &true_p);
3691 /* Similarly remove param summaries. */
3692 es->param.release ();
3693 operand_map.release ();
3694 offset_map.release ();
3697 /* For performance reasons inline_merge_summary does not update the overall
3698 size and time. Recompute them here. */
3700 void
3701 inline_update_overall_summary (struct cgraph_node *node)
3703 struct inline_summary *info = inline_summaries->get (node);
3704 size_time_entry *e;
3705 int i;
3707 info->size = 0;
3708 info->time = 0;
3709 for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
3711 info->size += e->size, info->time += e->time;
3712 if (info->time > MAX_TIME * INLINE_TIME_SCALE)
3713 info->time = MAX_TIME * INLINE_TIME_SCALE;
3715 estimate_calls_size_and_time (node, &info->size, &info->min_size,
3716 &info->time, NULL,
3717 ~(clause_t) (1 << predicate_false_condition),
3718 vNULL, vNULL, vNULL);
3719 info->time = (info->time + INLINE_TIME_SCALE / 2) / INLINE_TIME_SCALE;
3720 info->size = (info->size + INLINE_SIZE_SCALE / 2) / INLINE_SIZE_SCALE;
3723 /* Return hints derived from EDGE. */
3725 simple_edge_hints (struct cgraph_edge *edge)
3727 int hints = 0;
3728 struct cgraph_node *to = (edge->caller->global.inlined_to
3729 ? edge->caller->global.inlined_to : edge->caller);
3730 struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
3731 if (inline_summaries->get (to)->scc_no
3732 && inline_summaries->get (to)->scc_no
3733 == inline_summaries->get (callee)->scc_no
3734 && !edge->recursive_p ())
3735 hints |= INLINE_HINT_same_scc;
3737 if (callee->lto_file_data && edge->caller->lto_file_data
3738 && edge->caller->lto_file_data != callee->lto_file_data
3739 && !callee->merged_comdat && !callee->icf_merged)
3740 hints |= INLINE_HINT_cross_module;
3742 return hints;
3745 /* Estimate the time cost for the caller when inlining EDGE.
3746 Only to be called via estimate_edge_time, which handles the
3747 caching mechanism.
3749 When caching, also update the cache entry. Compute both time and
3750 size, since we always need both metrics eventually. */
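/* Note on the cache encoding used below (editorial): cached values are
   stored with a bias of one, so that a zero entry means "not computed
   yet"; time and size are stored with +1 added and hints are stored as
   hints + 1, and the readers subtract the bias again before returning.  */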
3753 do_estimate_edge_time (struct cgraph_edge *edge)
3755 int time;
3756 int size;
3757 inline_hints hints;
3758 struct cgraph_node *callee;
3759 clause_t clause;
3760 vec<tree> known_vals;
3761 vec<ipa_polymorphic_call_context> known_contexts;
3762 vec<ipa_agg_jump_function_p> known_aggs;
3763 struct inline_edge_summary *es = inline_edge_summary (edge);
3764 int min_size;
3766 callee = edge->callee->ultimate_alias_target ();
3768 gcc_checking_assert (edge->inline_failed);
3769 evaluate_properties_for_edge (edge, true,
3770 &clause, &known_vals, &known_contexts,
3771 &known_aggs);
3772 estimate_node_size_and_time (callee, clause, known_vals, known_contexts,
3773 known_aggs, &size, &min_size, &time, &hints, es->param);
3775 /* When we have profile feedback, we can quite safely identify hot
3776 edges and for those we disable size limits. Don't do that when the
3777 probability that the caller will call the callee is low, however, since
3778 it may hurt optimization of the caller's hot path. */
3779 if (edge->count && edge->maybe_hot_p ()
3780 && (edge->count * 2
3781 > (edge->caller->global.inlined_to
3782 ? edge->caller->global.inlined_to->count : edge->caller->count)))
3783 hints |= INLINE_HINT_known_hot;
3785 known_vals.release ();
3786 known_contexts.release ();
3787 known_aggs.release ();
3788 gcc_checking_assert (size >= 0);
3789 gcc_checking_assert (time >= 0);
3791 /* When caching, update the cache entry. */
3792 if (edge_growth_cache.exists ())
3794 inline_summaries->get (edge->callee)->min_size = min_size;
3795 if ((int) edge_growth_cache.length () <= edge->uid)
3796 edge_growth_cache.safe_grow_cleared (symtab->edges_max_uid);
3797 edge_growth_cache[edge->uid].time = time + (time >= 0);
3799 edge_growth_cache[edge->uid].size = size + (size >= 0);
3800 hints |= simple_edge_hints (edge);
3801 edge_growth_cache[edge->uid].hints = hints + 1;
3803 return time;
3807 /* Return estimated callee growth after inlining EDGE.
3808 Only to be called via estimate_edge_size. */
3811 do_estimate_edge_size (struct cgraph_edge *edge)
3813 int size;
3814 struct cgraph_node *callee;
3815 clause_t clause;
3816 vec<tree> known_vals;
3817 vec<ipa_polymorphic_call_context> known_contexts;
3818 vec<ipa_agg_jump_function_p> known_aggs;
3820 /* When we do caching, use do_estimate_edge_time to populate the entry. */
3822 if (edge_growth_cache.exists ())
3824 do_estimate_edge_time (edge);
3825 size = edge_growth_cache[edge->uid].size;
3826 gcc_checking_assert (size);
3827 return size - (size > 0);
3830 callee = edge->callee->ultimate_alias_target ();
3832 /* Early inliner runs without caching, go ahead and do the dirty work. */
3833 gcc_checking_assert (edge->inline_failed);
3834 evaluate_properties_for_edge (edge, true,
3835 &clause, &known_vals, &known_contexts,
3836 &known_aggs);
3837 estimate_node_size_and_time (callee, clause, known_vals, known_contexts,
3838 known_aggs, &size, NULL, NULL, NULL, vNULL);
3839 known_vals.release ();
3840 known_contexts.release ();
3841 known_aggs.release ();
3842 return size;
3846 /* Estimate the hints for the caller when inlining EDGE.
3847 Only to be called via estimate_edge_hints. */
3849 inline_hints
3850 do_estimate_edge_hints (struct cgraph_edge *edge)
3852 inline_hints hints;
3853 struct cgraph_node *callee;
3854 clause_t clause;
3855 vec<tree> known_vals;
3856 vec<ipa_polymorphic_call_context> known_contexts;
3857 vec<ipa_agg_jump_function_p> known_aggs;
3859 /* When we do caching, use do_estimate_edge_time to populate the entry. */
3861 if (edge_growth_cache.exists ())
3863 do_estimate_edge_time (edge);
3864 hints = edge_growth_cache[edge->uid].hints;
3865 gcc_checking_assert (hints);
3866 return hints - 1;
3869 callee = edge->callee->ultimate_alias_target ();
3871 /* Early inliner runs without caching, go ahead and do the dirty work. */
3872 gcc_checking_assert (edge->inline_failed);
3873 evaluate_properties_for_edge (edge, true,
3874 &clause, &known_vals, &known_contexts,
3875 &known_aggs);
3876 estimate_node_size_and_time (callee, clause, known_vals, known_contexts,
3877 known_aggs, NULL, NULL, NULL, &hints, vNULL);
3878 known_vals.release ();
3879 known_contexts.release ();
3880 known_aggs.release ();
3881 hints |= simple_edge_hints (edge);
3882 return hints;

/* Estimate self time of the function NODE after inlining EDGE.  */

int
estimate_time_after_inlining (struct cgraph_node *node,
                              struct cgraph_edge *edge)
{
  struct inline_edge_summary *es = inline_edge_summary (edge);
  if (!es->predicate || !false_predicate_p (es->predicate))
    {
      gcov_type time =
        inline_summaries->get (node)->time + estimate_edge_time (edge);
      if (time < 0)
        time = 0;
      if (time > MAX_TIME)
        time = MAX_TIME;
      return time;
    }
  return inline_summaries->get (node)->time;
}

/* Estimate the size of NODE after inlining EDGE which should be an
   edge to either NODE or a call inlined into NODE.  */

int
estimate_size_after_inlining (struct cgraph_node *node,
                              struct cgraph_edge *edge)
{
  struct inline_edge_summary *es = inline_edge_summary (edge);
  if (!es->predicate || !false_predicate_p (es->predicate))
    {
      int size
        = inline_summaries->get (node)->size + estimate_edge_growth (edge);
      gcc_assert (size >= 0);
      return size;
    }
  return inline_summaries->get (node)->size;
}

struct growth_data
{
  struct cgraph_node *node;
  bool self_recursive;
  bool uninlinable;
  int growth;
};

/* Worker for do_estimate_growth.  Collect growth for all callers.  */

static bool
do_estimate_growth_1 (struct cgraph_node *node, void *data)
{
  struct cgraph_edge *e;
  struct growth_data *d = (struct growth_data *) data;

  for (e = node->callers; e; e = e->next_caller)
    {
      gcc_checking_assert (e->inline_failed);

      if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
        {
          d->uninlinable = true;
          continue;
        }

      if (e->recursive_p ())
        {
          d->self_recursive = true;
          continue;
        }
      d->growth += estimate_edge_growth (e);
    }
  return false;
}

/* Estimate the growth caused by inlining NODE into all of its callers.  */

int
estimate_growth (struct cgraph_node *node)
{
  struct growth_data d = { node, false, false, 0 };
  struct inline_summary *info = inline_summaries->get (node);

  node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true);

  /* For self recursive functions the growth estimation really should be
     infinity.  We don't want to return very large values because the growth
     plays various roles in badness computation fractions.  Be sure to not
     return zero or negative growths.  */
  if (d.self_recursive)
    d.growth = d.growth < info->size ? info->size : d.growth;
  else if (DECL_EXTERNAL (node->decl) || d.uninlinable)
    ;
  else
    {
      if (node->will_be_removed_from_program_if_no_direct_calls_p ())
        d.growth -= info->size;
      /* COMDAT functions are very often not shared across multiple units
         since they come from various template instantiations.
         Take this into account.  */
      else if (DECL_COMDAT (node->decl)
               && node->can_remove_if_no_direct_calls_p ())
        d.growth -= (info->size
                     * (100 - PARAM_VALUE (PARAM_COMDAT_SHARING_PROBABILITY))
                     + 50) / 100;
    }

  return d.growth;
}
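
/* Worked example (illustrative only, not used by GCC; the helper name is
   hypothetical): assuming a sharing probability of 20 for
   PARAM_COMDAT_SHARING_PROBABILITY, a COMDAT body of size 100 is credited
   back as (100 * (100 - 20) + 50) / 100 = 80 units, i.e. we assume roughly
   an 80% chance that the local copy really disappears once all direct calls
   are inlined.  */

static inline int
example_comdat_size_credit (int size, int sharing_probability)
{
  /* The "+ 50" rounds to the nearest unit instead of truncating.  */
  return (size * (100 - sharing_probability) + 50) / 100;
}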

/* Verify if there are fewer than MAX_CALLERS callers; return true when the
   budget is exhausted, a caller can never be inlined, or NODE cannot be
   removed.  */

static bool
check_callers (cgraph_node *node, int *max_callers)
{
  ipa_ref *ref;

  if (!node->can_remove_if_no_direct_calls_and_refs_p ())
    return true;

  for (cgraph_edge *e = node->callers; e; e = e->next_caller)
    {
      (*max_callers)--;
      if (!*max_callers
          || cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
        return true;
    }

  FOR_EACH_ALIAS (node, ref)
    if (check_callers (dyn_cast <cgraph_node *> (ref->referring), max_callers))
      return true;

  return false;
}

/* Make a cheap estimation of whether the growth of NODE is likely positive,
   knowing EDGE_GROWTH of one particular edge.
   We assume that most of the other edges will have similar growth
   and skip the exact computation if there are too many callers.  */

bool
growth_likely_positive (struct cgraph_node *node,
                        int edge_growth)
{
  int max_callers;
  struct cgraph_edge *e;
  gcc_checking_assert (edge_growth > 0);

  /* First quickly check if NODE is removable at all.  */
  if (DECL_EXTERNAL (node->decl))
    return true;
  if (!node->can_remove_if_no_direct_calls_and_refs_p ()
      || node->address_taken)
    return true;

  max_callers = inline_summaries->get (node)->size * 4 / edge_growth + 2;

  for (e = node->callers; e; e = e->next_caller)
    {
      max_callers--;
      if (!max_callers
          || cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
        return true;
    }

  ipa_ref *ref;
  FOR_EACH_ALIAS (node, ref)
    if (check_callers (dyn_cast <cgraph_node *> (ref->referring), &max_callers))
      return true;

  /* Unlike for functions called once, we play unsafe with
     COMDATs.  We can allow that since we know the functions
     under consideration are small (and thus the risk is small) and
     moreover the growth estimate already accounts for the fact that COMDAT
     functions may or may not disappear when eliminated from the
     current unit.  With good probability, making the aggressive
     choice in all units is going to make the overall program
     smaller.  */
  if (DECL_COMDAT (node->decl))
    {
      if (!node->can_remove_if_no_direct_calls_p ())
        return true;
    }
  else if (!node->will_be_removed_from_program_if_no_direct_calls_p ())
    return true;

  return estimate_growth (node) > 0;
}
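
/* Illustrative sketch (hypothetical helper, not used by GCC): the caller
   budget computed above is size * 4 / edge_growth + 2, i.e. we only walk as
   many callers as could plausibly be paid for by removing the function body.
   For a node of size 40 and an edge growth of 10 the budget is
   40 * 4 / 10 + 2 = 18; once more callers than that are seen, positive
   growth is assumed without calling estimate_growth at all.  */

static inline int
example_caller_budget (int node_size, int edge_growth)
{
  return node_size * 4 / edge_growth + 2;
}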

/* This function performs intraprocedural analysis in NODE that is required to
   inline indirect calls.  */

static void
inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
{
  ipa_analyze_node (node);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      ipa_print_node_params (dump_file, node);
      ipa_print_node_jump_functions (dump_file, node);
    }
}

/* Note function body size.  */

void
inline_analyze_function (struct cgraph_node *node)
{
  push_cfun (DECL_STRUCT_FUNCTION (node->decl));

  if (dump_file)
    fprintf (dump_file, "\nAnalyzing function: %s/%u\n",
             node->name (), node->order);
  if (opt_for_fn (node->decl, optimize) && !node->thunk.thunk_p)
    inline_indirect_intraprocedural_analysis (node);
  compute_inline_parameters (node, false);
  if (!optimize)
    {
      struct cgraph_edge *e;
      for (e = node->callees; e; e = e->next_callee)
        {
          if (e->inline_failed == CIF_FUNCTION_NOT_CONSIDERED)
            e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
          e->call_stmt_cannot_inline_p = true;
        }
      for (e = node->indirect_calls; e; e = e->next_callee)
        {
          if (e->inline_failed == CIF_FUNCTION_NOT_CONSIDERED)
            e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
          e->call_stmt_cannot_inline_p = true;
        }
    }

  pop_cfun ();
}

/* Called when new function is inserted to callgraph late.  */

void
inline_summary_t::insert (struct cgraph_node *node, inline_summary *)
{
  inline_analyze_function (node);
}

/* Note function body sizes; entry point computing the inline summaries
   of all defined functions.  */

void
inline_generate_summary (void)
{
  struct cgraph_node *node;

  FOR_EACH_DEFINED_FUNCTION (node)
    if (DECL_STRUCT_FUNCTION (node->decl))
      node->local.versionable = tree_versionable_function_p (node->decl);

  /* When not optimizing, do not bother to analyze.  Inlining is still done
     because edge redirection needs to happen there.  */
  if (!optimize && !flag_generate_lto && !flag_generate_offload && !flag_wpa)
    return;

  if (!inline_summaries)
    inline_summaries = (inline_summary_t*) inline_summary_t::create_ggc (symtab);

  inline_summaries->enable_insertion_hook ();

  ipa_register_cgraph_hooks ();
  inline_free_summary ();

  FOR_EACH_DEFINED_FUNCTION (node)
    if (!node->alias)
      inline_analyze_function (node);
}

/* Read predicate from IB.  */

static struct predicate
read_predicate (struct lto_input_block *ib)
{
  struct predicate out;
  clause_t clause;
  int k = 0;

  do
    {
      gcc_assert (k <= MAX_CLAUSES);
      clause = out.clause[k++] = streamer_read_uhwi (ib);
    }
  while (clause);

  /* Zero-initialize the remaining clauses in OUT.  */
  while (k <= MAX_CLAUSES)
    out.clause[k++] = 0;

  return out;
}
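
/* Illustrative sketch of the on-disk predicate encoding (hypothetical helper
   and bound, plain arrays instead of the LTO streamer): the non-zero clauses
   are stored in order and a single zero word terminates the list, which is
   exactly what read_predicate above decodes and write_predicate further
   below emits.  Returns the number of words consumed.  */

#define EXAMPLE_MAX_CLAUSES 8

static inline int
example_decode_clauses (const unsigned int *stream,
                        unsigned int clauses[EXAMPLE_MAX_CLAUSES + 1])
{
  int k = 0;
  unsigned int clause;

  do
    clause = clauses[k++] = *stream++;
  while (clause && k <= EXAMPLE_MAX_CLAUSES);

  int consumed = k;

  /* Zero-fill the unused tail so predicates compare bitwise equal.  */
  while (k <= EXAMPLE_MAX_CLAUSES)
    clauses[k++] = 0;
  return consumed;
}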

/* Read inline summary for edge E from IB.  */

static void
read_inline_edge_summary (struct lto_input_block *ib, struct cgraph_edge *e)
{
  struct inline_edge_summary *es = inline_edge_summary (e);
  struct predicate p;
  int length, i;

  es->call_stmt_size = streamer_read_uhwi (ib);
  es->call_stmt_time = streamer_read_uhwi (ib);
  es->loop_depth = streamer_read_uhwi (ib);
  p = read_predicate (ib);
  edge_set_predicate (e, &p);
  length = streamer_read_uhwi (ib);
  if (length)
    {
      es->param.safe_grow_cleared (length);
      for (i = 0; i < length; i++)
        es->param[i].change_prob = streamer_read_uhwi (ib);
    }
}

/* Stream in inline summaries from the section.  */

static void
inline_read_section (struct lto_file_decl_data *file_data, const char *data,
                     size_t len)
{
  const struct lto_function_header *header =
    (const struct lto_function_header *) data;
  const int cfg_offset = sizeof (struct lto_function_header);
  const int main_offset = cfg_offset + header->cfg_size;
  const int string_offset = main_offset + header->main_size;
  struct data_in *data_in;
  unsigned int i, count2, j;
  unsigned int f_count;

  lto_input_block ib ((const char *) data + main_offset, header->main_size,
                      file_data->mode_table);

  data_in =
    lto_data_in_create (file_data, (const char *) data + string_offset,
                        header->string_size, vNULL);
  f_count = streamer_read_uhwi (&ib);
  for (i = 0; i < f_count; i++)
    {
      unsigned int index;
      struct cgraph_node *node;
      struct inline_summary *info;
      lto_symtab_encoder_t encoder;
      struct bitpack_d bp;
      struct cgraph_edge *e;
      predicate p;

      index = streamer_read_uhwi (&ib);
      encoder = file_data->symtab_node_encoder;
      node = dyn_cast<cgraph_node *> (lto_symtab_encoder_deref (encoder,
                                                                index));
      info = inline_summaries->get (node);

      info->estimated_stack_size
        = info->estimated_self_stack_size = streamer_read_uhwi (&ib);
      info->size = info->self_size = streamer_read_uhwi (&ib);
      info->time = info->self_time = streamer_read_uhwi (&ib);

      bp = streamer_read_bitpack (&ib);
      info->inlinable = bp_unpack_value (&bp, 1);
      info->contains_cilk_spawn = bp_unpack_value (&bp, 1);
      info->fp_expressions = bp_unpack_value (&bp, 1);

      count2 = streamer_read_uhwi (&ib);
      gcc_assert (!info->conds);
      for (j = 0; j < count2; j++)
        {
          struct condition c;
          c.operand_num = streamer_read_uhwi (&ib);
          c.code = (enum tree_code) streamer_read_uhwi (&ib);
          c.val = stream_read_tree (&ib, data_in);
          bp = streamer_read_bitpack (&ib);
          c.agg_contents = bp_unpack_value (&bp, 1);
          c.by_ref = bp_unpack_value (&bp, 1);
          if (c.agg_contents)
            c.offset = streamer_read_uhwi (&ib);
          vec_safe_push (info->conds, c);
        }
      count2 = streamer_read_uhwi (&ib);
      gcc_assert (!info->entry);
      for (j = 0; j < count2; j++)
        {
          struct size_time_entry e;

          e.size = streamer_read_uhwi (&ib);
          e.time = streamer_read_uhwi (&ib);
          e.predicate = read_predicate (&ib);

          vec_safe_push (info->entry, e);
        }

      p = read_predicate (&ib);
      set_hint_predicate (&info->loop_iterations, p);
      p = read_predicate (&ib);
      set_hint_predicate (&info->loop_stride, p);
      p = read_predicate (&ib);
      set_hint_predicate (&info->array_index, p);
      for (e = node->callees; e; e = e->next_callee)
        read_inline_edge_summary (&ib, e);
      for (e = node->indirect_calls; e; e = e->next_callee)
        read_inline_edge_summary (&ib, e);
    }

  lto_free_section_data (file_data, LTO_section_inline_summary, NULL, data,
                         len);
  lto_data_in_delete (data_in);
}
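
/* Illustrative sketch only: how the offsets above are derived from the
   section header.  The struct below is a simplified stand-in for
   lto_function_header, not its real layout; the section consists of the
   header, a CFG part, the main stream and finally the string table.  */

struct example_section_header
{
  int cfg_size;
  int main_size;
  int string_size;
};

static inline void
example_section_offsets (const struct example_section_header *h,
                         int *main_offset, int *string_offset)
{
  int cfg_offset = (int) sizeof (struct example_section_header);
  *main_offset = cfg_offset + h->cfg_size;
  *string_offset = *main_offset + h->main_size;
}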

/* Read inline summary.  Jump functions are shared among ipa-cp
   and inliner, so when ipa-cp is active, we don't need to read them
   twice.  */

void
inline_read_summary (void)
{
  struct lto_file_decl_data **file_data_vec = lto_get_file_decl_data ();
  struct lto_file_decl_data *file_data;
  unsigned int j = 0;

  inline_summary_alloc ();

  while ((file_data = file_data_vec[j++]))
    {
      size_t len;
      const char *data = lto_get_section_data (file_data,
                                               LTO_section_inline_summary,
                                               NULL, &len);
      if (data)
        inline_read_section (file_data, data, len);
      else
        /* Fatal error here.  We do not want to support compiling ltrans units
           with different version of compiler or different flags than the WPA
           unit, so this should never happen.  */
        fatal_error (input_location,
                     "ipa inline summary is missing in input file");
    }
  if (optimize)
    {
      ipa_register_cgraph_hooks ();
      if (!flag_ipa_cp)
        ipa_prop_read_jump_functions ();
    }

  gcc_assert (inline_summaries);
  inline_summaries->enable_insertion_hook ();
}

/* Write predicate P to OB.  */

static void
write_predicate (struct output_block *ob, struct predicate *p)
{
  int j;
  if (p)
    for (j = 0; p->clause[j]; j++)
      {
        gcc_assert (j < MAX_CLAUSES);
        streamer_write_uhwi (ob, p->clause[j]);
      }
  streamer_write_uhwi (ob, 0);
}

/* Write inline summary for edge E to OB.  */

static void
write_inline_edge_summary (struct output_block *ob, struct cgraph_edge *e)
{
  struct inline_edge_summary *es = inline_edge_summary (e);
  int i;

  streamer_write_uhwi (ob, es->call_stmt_size);
  streamer_write_uhwi (ob, es->call_stmt_time);
  streamer_write_uhwi (ob, es->loop_depth);
  write_predicate (ob, es->predicate);
  streamer_write_uhwi (ob, es->param.length ());
  for (i = 0; i < (int) es->param.length (); i++)
    streamer_write_uhwi (ob, es->param[i].change_prob);
}

/* Write inline summaries of all functions in the symbol table encoder.
   Jump functions are shared among ipa-cp and inliner, so when ipa-cp is
   active, we don't need to write them twice.  */

void
inline_write_summary (void)
{
  struct cgraph_node *node;
  struct output_block *ob = create_output_block (LTO_section_inline_summary);
  lto_symtab_encoder_t encoder = ob->decl_state->symtab_node_encoder;
  unsigned int count = 0;
  int i;

  for (i = 0; i < lto_symtab_encoder_size (encoder); i++)
    {
      symtab_node *snode = lto_symtab_encoder_deref (encoder, i);
      cgraph_node *cnode = dyn_cast <cgraph_node *> (snode);
      if (cnode && cnode->definition && !cnode->alias)
        count++;
    }
  streamer_write_uhwi (ob, count);

  for (i = 0; i < lto_symtab_encoder_size (encoder); i++)
    {
      symtab_node *snode = lto_symtab_encoder_deref (encoder, i);
      cgraph_node *cnode = dyn_cast <cgraph_node *> (snode);
      if (cnode && (node = cnode)->definition && !node->alias)
        {
          struct inline_summary *info = inline_summaries->get (node);
          struct bitpack_d bp;
          struct cgraph_edge *edge;
          int i;
          size_time_entry *e;
          struct condition *c;

          streamer_write_uhwi (ob,
                               lto_symtab_encoder_encode (encoder,
                                                          node));
          streamer_write_hwi (ob, info->estimated_self_stack_size);
          streamer_write_hwi (ob, info->self_size);
          streamer_write_hwi (ob, info->self_time);
          bp = bitpack_create (ob->main_stream);
          bp_pack_value (&bp, info->inlinable, 1);
          bp_pack_value (&bp, info->contains_cilk_spawn, 1);
          bp_pack_value (&bp, info->fp_expressions, 1);
          streamer_write_bitpack (&bp);
          streamer_write_uhwi (ob, vec_safe_length (info->conds));
          for (i = 0; vec_safe_iterate (info->conds, i, &c); i++)
            {
              streamer_write_uhwi (ob, c->operand_num);
              streamer_write_uhwi (ob, c->code);
              stream_write_tree (ob, c->val, true);
              bp = bitpack_create (ob->main_stream);
              bp_pack_value (&bp, c->agg_contents, 1);
              bp_pack_value (&bp, c->by_ref, 1);
              streamer_write_bitpack (&bp);
              if (c->agg_contents)
                streamer_write_uhwi (ob, c->offset);
            }
          streamer_write_uhwi (ob, vec_safe_length (info->entry));
          for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
            {
              streamer_write_uhwi (ob, e->size);
              streamer_write_uhwi (ob, e->time);
              write_predicate (ob, &e->predicate);
            }
          write_predicate (ob, info->loop_iterations);
          write_predicate (ob, info->loop_stride);
          write_predicate (ob, info->array_index);
          for (edge = node->callees; edge; edge = edge->next_callee)
            write_inline_edge_summary (ob, edge);
          for (edge = node->indirect_calls; edge; edge = edge->next_callee)
            write_inline_edge_summary (ob, edge);
        }
    }
  streamer_write_char_stream (ob->main_stream, 0);
  produce_asm (ob, NULL);
  destroy_output_block (ob);

  if (optimize && !flag_ipa_cp)
    ipa_prop_write_jump_functions ();
}
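
/* Illustrative sketch only (hypothetical helpers, not the bitpack_d API):
   the three one-bit flags written above -- inlinable, contains_cilk_spawn
   and fp_expressions -- share a single packed word, and inline_read_section
   unpacks them in the same order they were packed.  The exact bit layout is
   an implementation detail of bitpack_d; the sketch only shows the idea.  */

static inline unsigned int
example_pack_flags (unsigned int inlinable, unsigned int contains_cilk_spawn,
                    unsigned int fp_expressions)
{
  /* Pack each flag into its own bit, first value in the lowest bit.  */
  return (inlinable & 1)
         | ((contains_cilk_spawn & 1) << 1)
         | ((fp_expressions & 1) << 2);
}

static inline unsigned int
example_unpack_flag (unsigned int word, int bit)
{
  return (word >> bit) & 1;
}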

/* Release inline summary.  */

void
inline_free_summary (void)
{
  struct cgraph_node *node;
  if (edge_removal_hook_holder)
    symtab->remove_edge_removal_hook (edge_removal_hook_holder);
  edge_removal_hook_holder = NULL;
  if (edge_duplication_hook_holder)
    symtab->remove_edge_duplication_hook (edge_duplication_hook_holder);
  edge_duplication_hook_holder = NULL;
  if (!inline_edge_summary_vec.exists ())
    return;
  FOR_EACH_DEFINED_FUNCTION (node)
    if (!node->alias)
      reset_inline_summary (node, inline_summaries->get (node));
  inline_summaries->release ();
  inline_summaries = NULL;
  inline_edge_summary_vec.release ();
  edge_predicate_pool.release ();
}