contrib/gcc-5.0/gcc/ipa-inline-analysis.c
1 /* Inlining decision heuristics.
2 Copyright (C) 2003-2015 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* Analysis used by the inliner and other passes limiting code size growth.
23 We estimate for each function
24 - function body size
25 - average function execution time
26 - inlining size benefit (that is how much of function body size
27 and its call sequence is expected to disappear by inlining)
28 - inlining time benefit
29 - function frame size
30 For each call
31 - call statement size and time
33 inline_summary data structures store the above information locally (i.e.
34 parameters of the function itself) and globally (i.e. parameters of
35 the function created by applying all the inline decisions already
36 present in the callgraph).
38 We provide an accessor to the inline_summary data structure and
39 basic logic updating the parameters when inlining is performed.
41 The summaries are context sensitive. Context means
42 1) partial assignment of known constant values of operands
43 2) whether function is inlined into the call or not.
44 It is easy to add more variants. To represent function size and time
45 that depend on context (i.e. are known to be optimized away when the
46 context is known either by inlining or from IP-CP and cloning),
47 we use predicates. Predicates are logical formulas in
48 conjunctive-disjunctive form consisting of clauses. Clauses are bitmaps
49 specifying what conditions must be true. Conditions are simple test
50 of the form described above.
52 In order to make predicate (possibly) true, all of its clauses must
53 be (possibly) true. To make clause (possibly) true, one of conditions
54 it mentions must be (possibly) true. There are fixed bounds on
55 number of clauses and conditions and all the manipulation functions
56 are conservative in the positive direction. I.e. we may lose precision
57 by thinking that a predicate may be true even when it is not.
59 estimate_edge_size and estimate_edge_growth can be used to query
60 function size/time in the given context. inline_merge_summary merges
61 properties of caller and callee after inlining.
63 Finally pass_inline_parameters is exported. This is used to drive
64 computation of function parameters used by the early inliner. The IPA
65 inliner performs analysis via its analyze_function method. */
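/* As a small illustration of the encoding described above (a sketch using
   hypothetical condition numbers; the actual numbering and helpers are
   defined further down in this file): assume dynamic condition 2 tests
   "op0 changed" and dynamic condition 3 tests "op1 == 0".  Then the
   predicate

     (op0 changed || op1 == 0) && (not inlined)

   is stored as a zero-terminated array of clause bitmaps, one bit per
   condition, kept in decreasing order:

     clause[0] = (1 << 2) | (1 << 3);   -- op0 changed || op1 == 0
     clause[1] = 1 << 1;                -- predicate_not_inlined_condition
     clause[2] = 0;

   The true predicate is the empty clause list (clause[0] == 0) and the
   false predicate is the single clause 1 << predicate_false_condition.  */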
67 #include "config.h"
68 #include "system.h"
69 #include "coretypes.h"
70 #include "tm.h"
71 #include "hash-set.h"
72 #include "machmode.h"
73 #include "vec.h"
74 #include "double-int.h"
75 #include "input.h"
76 #include "alias.h"
77 #include "symtab.h"
78 #include "wide-int.h"
79 #include "inchash.h"
80 #include "real.h"
81 #include "tree.h"
82 #include "fold-const.h"
83 #include "stor-layout.h"
84 #include "stringpool.h"
85 #include "print-tree.h"
86 #include "tree-inline.h"
87 #include "langhooks.h"
88 #include "flags.h"
89 #include "diagnostic.h"
90 #include "gimple-pretty-print.h"
91 #include "params.h"
92 #include "tree-pass.h"
93 #include "coverage.h"
94 #include "predict.h"
95 #include "hard-reg-set.h"
96 #include "input.h"
97 #include "function.h"
98 #include "dominance.h"
99 #include "cfg.h"
100 #include "cfganal.h"
101 #include "basic-block.h"
102 #include "tree-ssa-alias.h"
103 #include "internal-fn.h"
104 #include "gimple-expr.h"
105 #include "is-a.h"
106 #include "gimple.h"
107 #include "gimple-iterator.h"
108 #include "gimple-ssa.h"
109 #include "tree-cfg.h"
110 #include "tree-phinodes.h"
111 #include "ssa-iterators.h"
112 #include "tree-ssanames.h"
113 #include "tree-ssa-loop-niter.h"
114 #include "tree-ssa-loop.h"
115 #include "hash-map.h"
116 #include "plugin-api.h"
117 #include "ipa-ref.h"
118 #include "cgraph.h"
119 #include "alloc-pool.h"
120 #include "symbol-summary.h"
121 #include "ipa-prop.h"
122 #include "lto-streamer.h"
123 #include "data-streamer.h"
124 #include "tree-streamer.h"
125 #include "ipa-inline.h"
126 #include "cfgloop.h"
127 #include "tree-scalar-evolution.h"
128 #include "ipa-utils.h"
129 #include "cilk.h"
130 #include "cfgexpand.h"
132 /* Estimated runtime of a function can easily run into huge numbers with many
133 nested loops. Be sure we can compute time * INLINE_SIZE_SCALE * 2 in an
134 integer. For anything larger we use gcov_type. */
135 #define MAX_TIME 500000
137 /* Number of bits in integer, but we really want to be stable across different
138 hosts. */
139 #define NUM_CONDITIONS 32
141 enum predicate_conditions
143 predicate_false_condition = 0,
144 predicate_not_inlined_condition = 1,
145 predicate_first_dynamic_condition = 2
148 /* Special condition code we use to represent the test that an operand is a
149 compile time constant. */
150 #define IS_NOT_CONSTANT ERROR_MARK
151 /* Special condition code we use to represent the test that an operand is not
152 changed across an invocation of the function. When an operand IS_NOT_CONSTANT
153 it is always CHANGED; loop invariants, for example, can be NOT_CHANGED for a
154 given percentage of executions even when they are not compile time constants. */
155 #define CHANGED IDENTIFIER_NODE
157 /* Holders of ipa cgraph hooks: */
158 static struct cgraph_2edge_hook_list *edge_duplication_hook_holder;
159 static struct cgraph_edge_hook_list *edge_removal_hook_holder;
160 static void inline_edge_removal_hook (struct cgraph_edge *, void *);
161 static void inline_edge_duplication_hook (struct cgraph_edge *,
162 struct cgraph_edge *, void *);
164 /* VECtor holding inline summaries.
165 In GGC memory because conditions might point to constant trees. */
166 function_summary <inline_summary *> *inline_summaries;
167 vec<inline_edge_summary_t> inline_edge_summary_vec;
169 /* Cached node/edge growths. */
170 vec<edge_growth_cache_entry> edge_growth_cache;
172 /* Edge predicates go here. */
173 static alloc_pool edge_predicate_pool;
175 /* Return true predicate (tautology).
176 We represent it by empty list of clauses. */
178 static inline struct predicate
179 true_predicate (void)
181 struct predicate p;
182 p.clause[0] = 0;
183 return p;
187 /* Return predicate testing single condition number COND. */
189 static inline struct predicate
190 single_cond_predicate (int cond)
192 struct predicate p;
193 p.clause[0] = 1 << cond;
194 p.clause[1] = 0;
195 return p;
199 /* Return the false predicate. The first clause requires the false condition. */
201 static inline struct predicate
202 false_predicate (void)
204 return single_cond_predicate (predicate_false_condition);
208 /* Return true if P is (true). */
210 static inline bool
211 true_predicate_p (struct predicate *p)
213 return !p->clause[0];
217 /* Return true if P is (false). */
219 static inline bool
220 false_predicate_p (struct predicate *p)
222 if (p->clause[0] == (1 << predicate_false_condition))
224 gcc_checking_assert (!p->clause[1]
225 && p->clause[0] == 1 << predicate_false_condition);
226 return true;
228 return false;
232 /* Return predicate that is set true when function is not inlined. */
234 static inline struct predicate
235 not_inlined_predicate (void)
237 return single_cond_predicate (predicate_not_inlined_condition);
240 /* Simple description of whether a memory load or a condition refers to a load
241 from an aggregate and if so, how and where from in the aggregate.
242 Individual fields have the same meaning as fields with the same name in
243 struct condition. */
245 struct agg_position_info
247 HOST_WIDE_INT offset;
248 bool agg_contents;
249 bool by_ref;
252 /* Add condition to condition list SUMMARY. OPERAND_NUM, SIZE, CODE and VAL
253 correspond to fields of condition structure. AGGPOS describes whether the
254 used operand is loaded from an aggregate and where in the aggregate it is.
255 It can be NULL, which means this is not a load from an aggregate. */
257 static struct predicate
258 add_condition (struct inline_summary *summary, int operand_num,
259 HOST_WIDE_INT size, struct agg_position_info *aggpos,
260 enum tree_code code, tree val)
262 int i;
263 struct condition *c;
264 struct condition new_cond;
265 HOST_WIDE_INT offset;
266 bool agg_contents, by_ref;
268 if (aggpos)
270 offset = aggpos->offset;
271 agg_contents = aggpos->agg_contents;
272 by_ref = aggpos->by_ref;
274 else
276 offset = 0;
277 agg_contents = false;
278 by_ref = false;
281 gcc_checking_assert (operand_num >= 0);
282 for (i = 0; vec_safe_iterate (summary->conds, i, &c); i++)
284 if (c->operand_num == operand_num
285 && c->size == size
286 && c->code == code
287 && c->val == val
288 && c->agg_contents == agg_contents
289 && (!agg_contents || (c->offset == offset && c->by_ref == by_ref)))
290 return single_cond_predicate (i + predicate_first_dynamic_condition);
292 /* Too many conditions. Give up and return constant true. */
293 if (i == NUM_CONDITIONS - predicate_first_dynamic_condition)
294 return true_predicate ();
296 new_cond.operand_num = operand_num;
297 new_cond.code = code;
298 new_cond.val = val;
299 new_cond.agg_contents = agg_contents;
300 new_cond.by_ref = by_ref;
301 new_cond.offset = offset;
302 new_cond.size = size;
303 vec_safe_push (summary->conds, new_cond);
304 return single_cond_predicate (i + predicate_first_dynamic_condition);
308 /* Add clause CLAUSE into the predicate P. */
310 static inline void
311 add_clause (conditions conditions, struct predicate *p, clause_t clause)
313 int i;
314 int i2;
315 int insert_here = -1;
316 int c1, c2;
318 /* True clause. */
319 if (!clause)
320 return;
322 /* False clause makes the whole predicate false. Kill the other variants. */
323 if (clause == (1 << predicate_false_condition))
325 p->clause[0] = (1 << predicate_false_condition);
326 p->clause[1] = 0;
327 return;
329 if (false_predicate_p (p))
330 return;
332 /* No one should be silly enough to add false into nontrivial clauses. */
333 gcc_checking_assert (!(clause & (1 << predicate_false_condition)));
335 /* Look where to insert the clause. At the same time prune out
336 clauses of P that are implied by the new clause and thus
337 redundant. */
338 for (i = 0, i2 = 0; i <= MAX_CLAUSES; i++)
340 p->clause[i2] = p->clause[i];
342 if (!p->clause[i])
343 break;
345 /* If p->clause[i] implies clause, there is nothing to add. */
346 if ((p->clause[i] & clause) == p->clause[i])
348 /* We had nothing to add, so none of the clauses should've become
349 redundant. */
350 gcc_checking_assert (i == i2);
351 return;
354 if (p->clause[i] < clause && insert_here < 0)
355 insert_here = i2;
357 /* If clause implies p->clause[i], then p->clause[i] becomes redundant.
358 Otherwise the p->clause[i] has to stay. */
359 if ((p->clause[i] & clause) != clause)
360 i2++;
363 /* Look for clauses that are obviously true. I.e.
364 op0 == 5 || op0 != 5. */
365 for (c1 = predicate_first_dynamic_condition; c1 < NUM_CONDITIONS; c1++)
367 condition *cc1;
368 if (!(clause & (1 << c1)))
369 continue;
370 cc1 = &(*conditions)[c1 - predicate_first_dynamic_condition];
371 /* We have no way to represent !CHANGED and !IS_NOT_CONSTANT
372 and thus there is no point in looking for them. */
373 if (cc1->code == CHANGED || cc1->code == IS_NOT_CONSTANT)
374 continue;
375 for (c2 = c1 + 1; c2 < NUM_CONDITIONS; c2++)
376 if (clause & (1 << c2))
378 condition *cc1 =
379 &(*conditions)[c1 - predicate_first_dynamic_condition];
380 condition *cc2 =
381 &(*conditions)[c2 - predicate_first_dynamic_condition];
382 if (cc1->operand_num == cc2->operand_num
383 && cc1->val == cc2->val
384 && cc2->code != IS_NOT_CONSTANT
385 && cc2->code != CHANGED
386 && cc1->code == invert_tree_comparison (cc2->code,
387 HONOR_NANS (cc1->val)))
388 return;
393 /* We ran out of variants. Be conservative in the positive direction. */
394 if (i2 == MAX_CLAUSES)
395 return;
396 /* Keep clauses in decreasing order. This makes equivalence testing easy. */
397 p->clause[i2 + 1] = 0;
398 if (insert_here >= 0)
399 for (; i2 > insert_here; i2--)
400 p->clause[i2] = p->clause[i2 - 1];
401 else
402 insert_here = i2;
403 p->clause[insert_here] = clause;
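/* A worked example of the pruning above, with hypothetical clause bitmaps:
   if P currently holds the single clause 0x6 (condition 1 or condition 2)
   and CLAUSE is 0x2 (condition 1 alone), the new clause implies the old
   one, so 0x6 is dropped and P ends up as just 0x2.  Adding 0xe
   (conditions 1, 2 or 3) to a P holding only 0x6 is a no-op, because the
   existing clause already implies it and we return early.  */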
407 /* Return P & P2. */
409 static struct predicate
410 and_predicates (conditions conditions,
411 struct predicate *p, struct predicate *p2)
413 struct predicate out = *p;
414 int i;
416 /* Avoid busy work. */
417 if (false_predicate_p (p2) || true_predicate_p (p))
418 return *p2;
419 if (false_predicate_p (p) || true_predicate_p (p2))
420 return *p;
422 /* See how far predicates match. */
423 for (i = 0; p->clause[i] && p->clause[i] == p2->clause[i]; i++)
425 gcc_checking_assert (i < MAX_CLAUSES);
428 /* Combine the rest of the predicates. */
429 for (; p2->clause[i]; i++)
431 gcc_checking_assert (i < MAX_CLAUSES);
432 add_clause (conditions, &out, p2->clause[i]);
434 return out;
438 /* Return true if predicates are obviously equal. */
440 static inline bool
441 predicates_equal_p (struct predicate *p, struct predicate *p2)
443 int i;
444 for (i = 0; p->clause[i]; i++)
446 gcc_checking_assert (i < MAX_CLAUSES);
447 gcc_checking_assert (p->clause[i] > p->clause[i + 1]);
448 gcc_checking_assert (!p2->clause[i]
449 || p2->clause[i] > p2->clause[i + 1]);
450 if (p->clause[i] != p2->clause[i])
451 return false;
453 return !p2->clause[i];
457 /* Return P | P2. */
459 static struct predicate
460 or_predicates (conditions conditions,
461 struct predicate *p, struct predicate *p2)
463 struct predicate out = true_predicate ();
464 int i, j;
466 /* Avoid busy work. */
467 if (false_predicate_p (p2) || true_predicate_p (p))
468 return *p;
469 if (false_predicate_p (p) || true_predicate_p (p2))
470 return *p2;
471 if (predicates_equal_p (p, p2))
472 return *p;
474 /* OK, combine the predicates. */
475 for (i = 0; p->clause[i]; i++)
476 for (j = 0; p2->clause[j]; j++)
478 gcc_checking_assert (i < MAX_CLAUSES && j < MAX_CLAUSES);
479 add_clause (conditions, &out, p->clause[i] | p2->clause[j]);
481 return out;
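#if 0
/* A minimal sketch (kept out of the build) of how the predicate algebra
   above composes, assuming CONDS describes at least two dynamic,
   non-complementary conditions numbered 2 and 3.  AND concatenates the
   clause lists; OR distributes, producing one clause per pair.  */
static void
predicate_algebra_example (conditions conds)
{
  struct predicate a = single_cond_predicate (2);
  struct predicate b = single_cond_predicate (3);

  /* Two clauses, (1 << 3) and (1 << 2), kept in decreasing order.  */
  struct predicate a_and_b = and_predicates (conds, &a, &b);

  /* One clause, (1 << 2) | (1 << 3).  */
  struct predicate a_or_b = or_predicates (conds, &a, &b);

  (void) a_and_b;
  (void) a_or_b;
}
#endif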
485 /* Given a partial truth assignment in POSSIBLE_TRUTHS, return false
486 if predicate P is known to be false. */
488 static bool
489 evaluate_predicate (struct predicate *p, clause_t possible_truths)
491 int i;
493 /* True remains true. */
494 if (true_predicate_p (p))
495 return true;
497 gcc_assert (!(possible_truths & (1 << predicate_false_condition)));
499 /* See if we can find a clause we can disprove. */
500 for (i = 0; p->clause[i]; i++)
502 gcc_checking_assert (i < MAX_CLAUSES);
503 if (!(p->clause[i] & possible_truths))
504 return false;
506 return true;
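/* A worked example with hypothetical bitmaps: for P = {0xc, 0x2}, i.e.
   (condition 2 || condition 3) && condition 1, and POSSIBLE_TRUTHS == 0x6
   (conditions 1 and 2 may be true), every clause intersects POSSIBLE_TRUTHS
   and the predicate may still be true.  With POSSIBLE_TRUTHS == 0x4 the
   clause 0x2 can no longer be satisfied and the function returns false.  */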
509 /* Return the probability in range 0...REG_BR_PROB_BASE that the predicated
510 instruction will be recomputed per invocation of the inlined call. */
512 static int
513 predicate_probability (conditions conds,
514 struct predicate *p, clause_t possible_truths,
515 vec<inline_param_summary> inline_param_summary)
517 int i;
518 int combined_prob = REG_BR_PROB_BASE;
520 /* True remains true. */
521 if (true_predicate_p (p))
522 return REG_BR_PROB_BASE;
524 if (false_predicate_p (p))
525 return 0;
527 gcc_assert (!(possible_truths & (1 << predicate_false_condition)));
529 /* See if we can find a clause we can disprove. */
530 for (i = 0; p->clause[i]; i++)
532 gcc_checking_assert (i < MAX_CLAUSES);
533 if (!(p->clause[i] & possible_truths))
534 return 0;
535 else
537 int this_prob = 0;
538 int i2;
539 if (!inline_param_summary.exists ())
540 return REG_BR_PROB_BASE;
541 for (i2 = 0; i2 < NUM_CONDITIONS; i2++)
542 if ((p->clause[i] & possible_truths) & (1 << i2))
544 if (i2 >= predicate_first_dynamic_condition)
546 condition *c =
547 &(*conds)[i2 - predicate_first_dynamic_condition];
548 if (c->code == CHANGED
549 && (c->operand_num <
550 (int) inline_param_summary.length ()))
552 int iprob =
553 inline_param_summary[c->operand_num].change_prob;
554 this_prob = MAX (this_prob, iprob);
556 else
557 this_prob = REG_BR_PROB_BASE;
559 else
560 this_prob = REG_BR_PROB_BASE;
562 combined_prob = MIN (this_prob, combined_prob);
563 if (!combined_prob)
564 return 0;
567 return combined_prob;
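/* A worked example, assuming hypothetical change probabilities recorded in
   INLINE_PARAM_SUMMARY: a clause "op0 changed || op1 changed" with change
   probabilities of 25% and 50% of REG_BR_PROB_BASE evaluates to the maximum
   over its conditions, 50%.  If the predicate has a second clause
   "op2 changed" with a 10% probability, the combined result is the minimum
   over the clauses, 10%.  */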
571 /* Dump conditional COND. */
573 static void
574 dump_condition (FILE *f, conditions conditions, int cond)
576 condition *c;
577 if (cond == predicate_false_condition)
578 fprintf (f, "false");
579 else if (cond == predicate_not_inlined_condition)
580 fprintf (f, "not inlined");
581 else
583 c = &(*conditions)[cond - predicate_first_dynamic_condition];
584 fprintf (f, "op%i", c->operand_num);
585 if (c->agg_contents)
586 fprintf (f, "[%soffset: " HOST_WIDE_INT_PRINT_DEC "]",
587 c->by_ref ? "ref " : "", c->offset);
588 if (c->code == IS_NOT_CONSTANT)
590 fprintf (f, " not constant");
591 return;
593 if (c->code == CHANGED)
595 fprintf (f, " changed");
596 return;
598 fprintf (f, " %s ", op_symbol_code (c->code));
599 print_generic_expr (f, c->val, 1);
604 /* Dump clause CLAUSE. */
606 static void
607 dump_clause (FILE *f, conditions conds, clause_t clause)
609 int i;
610 bool found = false;
611 fprintf (f, "(");
612 if (!clause)
613 fprintf (f, "true");
614 for (i = 0; i < NUM_CONDITIONS; i++)
615 if (clause & (1 << i))
617 if (found)
618 fprintf (f, " || ");
619 found = true;
620 dump_condition (f, conds, i);
622 fprintf (f, ")");
626 /* Dump predicate PREDICATE. */
628 static void
629 dump_predicate (FILE *f, conditions conds, struct predicate *pred)
631 int i;
632 if (true_predicate_p (pred))
633 dump_clause (f, conds, 0);
634 else
635 for (i = 0; pred->clause[i]; i++)
637 if (i)
638 fprintf (f, " && ");
639 dump_clause (f, conds, pred->clause[i]);
641 fprintf (f, "\n");
645 /* Dump inline hints. */
646 void
647 dump_inline_hints (FILE *f, inline_hints hints)
649 if (!hints)
650 return;
651 fprintf (f, "inline hints:");
652 if (hints & INLINE_HINT_indirect_call)
654 hints &= ~INLINE_HINT_indirect_call;
655 fprintf (f, " indirect_call");
657 if (hints & INLINE_HINT_loop_iterations)
659 hints &= ~INLINE_HINT_loop_iterations;
660 fprintf (f, " loop_iterations");
662 if (hints & INLINE_HINT_loop_stride)
664 hints &= ~INLINE_HINT_loop_stride;
665 fprintf (f, " loop_stride");
667 if (hints & INLINE_HINT_same_scc)
669 hints &= ~INLINE_HINT_same_scc;
670 fprintf (f, " same_scc");
672 if (hints & INLINE_HINT_in_scc)
674 hints &= ~INLINE_HINT_in_scc;
675 fprintf (f, " in_scc");
677 if (hints & INLINE_HINT_cross_module)
679 hints &= ~INLINE_HINT_cross_module;
680 fprintf (f, " cross_module");
682 if (hints & INLINE_HINT_declared_inline)
684 hints &= ~INLINE_HINT_declared_inline;
685 fprintf (f, " declared_inline");
687 if (hints & INLINE_HINT_array_index)
689 hints &= ~INLINE_HINT_array_index;
690 fprintf (f, " array_index");
692 if (hints & INLINE_HINT_known_hot)
694 hints &= ~INLINE_HINT_known_hot;
695 fprintf (f, " known_hot");
697 gcc_assert (!hints);
701 /* Record SIZE and TIME under condition PRED into the inline summary. */
703 static void
704 account_size_time (struct inline_summary *summary, int size, int time,
705 struct predicate *pred)
707 size_time_entry *e;
708 bool found = false;
709 int i;
711 if (false_predicate_p (pred))
712 return;
714 /* We need to create an initial empty unconditional clause, but otherwise
715 we don't need to account empty times and sizes. */
716 if (!size && !time && summary->entry)
717 return;
719 /* Watch overflow that might result from insane profiles. */
720 if (time > MAX_TIME * INLINE_TIME_SCALE)
721 time = MAX_TIME * INLINE_TIME_SCALE;
722 gcc_assert (time >= 0);
724 for (i = 0; vec_safe_iterate (summary->entry, i, &e); i++)
725 if (predicates_equal_p (&e->predicate, pred))
727 found = true;
728 break;
730 if (i == 256)
732 i = 0;
733 found = true;
734 e = &(*summary->entry)[0];
735 gcc_assert (!e->predicate.clause[0]);
736 if (dump_file && (dump_flags & TDF_DETAILS))
737 fprintf (dump_file,
738 "\t\tReached limit on number of entries, "
739 "ignoring the predicate.");
741 if (dump_file && (dump_flags & TDF_DETAILS) && (time || size))
743 fprintf (dump_file,
744 "\t\tAccounting size:%3.2f, time:%3.2f on %spredicate:",
745 ((double) size) / INLINE_SIZE_SCALE,
746 ((double) time) / INLINE_TIME_SCALE, found ? "" : "new ");
747 dump_predicate (dump_file, summary->conds, pred);
749 if (!found)
751 struct size_time_entry new_entry;
752 new_entry.size = size;
753 new_entry.time = time;
754 new_entry.predicate = *pred;
755 vec_safe_push (summary->entry, new_entry);
757 else
759 e->size += size;
760 e->time += time;
761 if (e->time > MAX_TIME * INLINE_TIME_SCALE)
762 e->time = MAX_TIME * INLINE_TIME_SCALE;
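/* For example, account_size_time (summary, 4 * INLINE_SIZE_SCALE,
   2 * INLINE_TIME_SCALE, &p) charges 4 units of size and 2 units of time
   that only count when P may be true; repeated calls with an equivalent
   predicate accumulate into the same size_time_entry, and entry 0 holds
   the unconditional part.  */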
766 /* We proved E to be unreachable, redirect it to __builtin_unreachable. */
768 static struct cgraph_edge *
769 redirect_to_unreachable (struct cgraph_edge *e)
771 struct cgraph_node *callee = !e->inline_failed ? e->callee : NULL;
772 struct cgraph_node *target = cgraph_node::get_create
773 (builtin_decl_implicit (BUILT_IN_UNREACHABLE));
775 if (e->speculative)
776 e = e->resolve_speculation (target->decl);
777 else if (!e->callee)
778 e->make_direct (target);
779 else
780 e->redirect_callee (target);
781 struct inline_edge_summary *es = inline_edge_summary (e);
782 e->inline_failed = CIF_UNREACHABLE;
783 e->frequency = 0;
784 e->count = 0;
785 es->call_stmt_size = 0;
786 es->call_stmt_time = 0;
787 if (callee)
788 callee->remove_symbol_and_inline_clones ();
789 return e;
792 /* Set predicate for edge E. */
794 static void
795 edge_set_predicate (struct cgraph_edge *e, struct predicate *predicate)
797 /* If the edge is determined to be never executed, redirect it
798 to BUILTIN_UNREACHABLE to save the inliner from inlining into it. */
799 if (predicate && false_predicate_p (predicate)
800 /* When handling speculative edges, we need to do the redirection
801 just once. Do it always on the direct edge, so we do not
802 attempt to resolve speculation while duplicating the edge. */
803 && (!e->speculative || e->callee))
804 e = redirect_to_unreachable (e);
806 struct inline_edge_summary *es = inline_edge_summary (e);
807 if (predicate && !true_predicate_p (predicate))
809 if (!es->predicate)
810 es->predicate = (struct predicate *) pool_alloc (edge_predicate_pool);
811 *es->predicate = *predicate;
813 else
815 if (es->predicate)
816 pool_free (edge_predicate_pool, es->predicate);
817 es->predicate = NULL;
821 /* Set predicate for hint *P. */
823 static void
824 set_hint_predicate (struct predicate **p, struct predicate new_predicate)
826 if (false_predicate_p (&new_predicate) || true_predicate_p (&new_predicate))
828 if (*p)
829 pool_free (edge_predicate_pool, *p);
830 *p = NULL;
832 else
834 if (!*p)
835 *p = (struct predicate *) pool_alloc (edge_predicate_pool);
836 **p = new_predicate;
841 /* KNOWN_VALS is a partial mapping of parameters of NODE to constant values.
842 KNOWN_AGGS is a vector of aggregate jump functions for each parameter.
843 Return the clause of possible truths. When INLINE_P is true, assume that we
844 are inlining.
846 ERROR_MARK means compile time invariant. */
848 static clause_t
849 evaluate_conditions_for_known_args (struct cgraph_node *node,
850 bool inline_p,
851 vec<tree> known_vals,
852 vec<ipa_agg_jump_function_p>
853 known_aggs)
855 clause_t clause = inline_p ? 0 : 1 << predicate_not_inlined_condition;
856 struct inline_summary *info = inline_summaries->get (node);
857 int i;
858 struct condition *c;
860 for (i = 0; vec_safe_iterate (info->conds, i, &c); i++)
862 tree val;
863 tree res;
865 /* We allow call stmt to have fewer arguments than the callee function
866 (especially for K&R style programs). So bound check here (we assume
867 known_aggs vector, if non-NULL, has the same length as
868 known_vals). */
869 gcc_checking_assert (!known_aggs.exists ()
870 || (known_vals.length () == known_aggs.length ()));
871 if (c->operand_num >= (int) known_vals.length ())
873 clause |= 1 << (i + predicate_first_dynamic_condition);
874 continue;
877 if (c->agg_contents)
879 struct ipa_agg_jump_function *agg;
881 if (c->code == CHANGED
882 && !c->by_ref
883 && (known_vals[c->operand_num] == error_mark_node))
884 continue;
886 if (known_aggs.exists ())
888 agg = known_aggs[c->operand_num];
889 val = ipa_find_agg_cst_for_param (agg, c->offset, c->by_ref);
891 else
892 val = NULL_TREE;
894 else
896 val = known_vals[c->operand_num];
897 if (val == error_mark_node && c->code != CHANGED)
898 val = NULL_TREE;
901 if (!val)
903 clause |= 1 << (i + predicate_first_dynamic_condition);
904 continue;
906 if (c->code == CHANGED)
907 continue;
909 if (tree_to_shwi (TYPE_SIZE (TREE_TYPE (val))) != c->size)
911 clause |= 1 << (i + predicate_first_dynamic_condition);
912 continue;
914 if (c->code == IS_NOT_CONSTANT)
915 continue;
917 val = fold_unary (VIEW_CONVERT_EXPR, TREE_TYPE (c->val), val);
918 res = val
919 ? fold_binary_to_constant (c->code, boolean_type_node, val, c->val)
920 : NULL;
922 if (res && integer_zerop (res))
923 continue;
925 clause |= 1 << (i + predicate_first_dynamic_condition);
927 return clause;
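/* A sketch of the result: if the summary has a single condition "op0 == 0"
   and KNOWN_VALS records the constant 1 for operand 0 (with a matching
   size), the fold above evaluates 1 == 0 to false, so the condition's bit
   stays clear and the returned clause carries only
   predicate_not_inlined_condition when we are not inlining.  If nothing is
   known about operand 0, bit (0 + predicate_first_dynamic_condition) is set
   because the condition may be true.  */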
931 /* Work out what conditions might be true at invocation of E. */
933 static void
934 evaluate_properties_for_edge (struct cgraph_edge *e, bool inline_p,
935 clause_t *clause_ptr,
936 vec<tree> *known_vals_ptr,
937 vec<ipa_polymorphic_call_context>
938 *known_contexts_ptr,
939 vec<ipa_agg_jump_function_p> *known_aggs_ptr)
941 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
942 struct inline_summary *info = inline_summaries->get (callee);
943 vec<tree> known_vals = vNULL;
944 vec<ipa_agg_jump_function_p> known_aggs = vNULL;
946 if (clause_ptr)
947 *clause_ptr = inline_p ? 0 : 1 << predicate_not_inlined_condition;
948 if (known_vals_ptr)
949 known_vals_ptr->create (0);
950 if (known_contexts_ptr)
951 known_contexts_ptr->create (0);
953 if (ipa_node_params_sum
954 && !e->call_stmt_cannot_inline_p
955 && ((clause_ptr && info->conds) || known_vals_ptr || known_contexts_ptr))
957 struct ipa_node_params *parms_info;
958 struct ipa_edge_args *args = IPA_EDGE_REF (e);
959 struct inline_edge_summary *es = inline_edge_summary (e);
960 int i, count = ipa_get_cs_argument_count (args);
962 if (e->caller->global.inlined_to)
963 parms_info = IPA_NODE_REF (e->caller->global.inlined_to);
964 else
965 parms_info = IPA_NODE_REF (e->caller);
967 if (count && (info->conds || known_vals_ptr))
968 known_vals.safe_grow_cleared (count);
969 if (count && (info->conds || known_aggs_ptr))
970 known_aggs.safe_grow_cleared (count);
971 if (count && known_contexts_ptr)
972 known_contexts_ptr->safe_grow_cleared (count);
974 for (i = 0; i < count; i++)
976 struct ipa_jump_func *jf = ipa_get_ith_jump_func (args, i);
977 tree cst = ipa_value_from_jfunc (parms_info, jf);
979 if (!cst && e->call_stmt
980 && i < (int)gimple_call_num_args (e->call_stmt))
982 cst = gimple_call_arg (e->call_stmt, i);
983 if (!is_gimple_min_invariant (cst))
984 cst = NULL;
986 if (cst)
988 gcc_checking_assert (TREE_CODE (cst) != TREE_BINFO);
989 if (known_vals.exists ())
990 known_vals[i] = cst;
992 else if (inline_p && !es->param[i].change_prob)
993 known_vals[i] = error_mark_node;
995 if (known_contexts_ptr)
996 (*known_contexts_ptr)[i] = ipa_context_from_jfunc (parms_info, e,
997 i, jf);
998 /* TODO: When IPA-CP starts propagating and merging aggregate jump
999 functions, use its knowledge of the caller too, just like the
1000 scalar case above. */
1001 known_aggs[i] = &jf->agg;
1004 else if (e->call_stmt && !e->call_stmt_cannot_inline_p
1005 && ((clause_ptr && info->conds) || known_vals_ptr))
1007 int i, count = (int)gimple_call_num_args (e->call_stmt);
1009 if (count && (info->conds || known_vals_ptr))
1010 known_vals.safe_grow_cleared (count);
1011 for (i = 0; i < count; i++)
1013 tree cst = gimple_call_arg (e->call_stmt, i);
1014 if (!is_gimple_min_invariant (cst))
1015 cst = NULL;
1016 if (cst)
1017 known_vals[i] = cst;
1021 if (clause_ptr)
1022 *clause_ptr = evaluate_conditions_for_known_args (callee, inline_p,
1023 known_vals, known_aggs);
1025 if (known_vals_ptr)
1026 *known_vals_ptr = known_vals;
1027 else
1028 known_vals.release ();
1030 if (known_aggs_ptr)
1031 *known_aggs_ptr = known_aggs;
1032 else
1033 known_aggs.release ();
1037 /* Allocate the inline summary vector or resize it to cover all cgraph nodes. */
1039 static void
1040 inline_summary_alloc (void)
1042 if (!edge_removal_hook_holder)
1043 edge_removal_hook_holder =
1044 symtab->add_edge_removal_hook (&inline_edge_removal_hook, NULL);
1045 if (!edge_duplication_hook_holder)
1046 edge_duplication_hook_holder =
1047 symtab->add_edge_duplication_hook (&inline_edge_duplication_hook, NULL);
1049 if (!inline_summaries)
1050 inline_summaries = (inline_summary_t*) inline_summary_t::create_ggc (symtab);
1052 if (inline_edge_summary_vec.length () <= (unsigned) symtab->edges_max_uid)
1053 inline_edge_summary_vec.safe_grow_cleared (symtab->edges_max_uid + 1);
1054 if (!edge_predicate_pool)
1055 edge_predicate_pool = create_alloc_pool ("edge predicates",
1056 sizeof (struct predicate), 10);
1059 /* We are called multiple times for a given function; clear
1060 data from previous runs so they are not accumulated. */
1062 static void
1063 reset_inline_edge_summary (struct cgraph_edge *e)
1065 if (e->uid < (int) inline_edge_summary_vec.length ())
1067 struct inline_edge_summary *es = inline_edge_summary (e);
1069 es->call_stmt_size = es->call_stmt_time = 0;
1070 if (es->predicate)
1071 pool_free (edge_predicate_pool, es->predicate);
1072 es->predicate = NULL;
1073 es->param.release ();
1077 /* We are called multiple times for a given function; clear
1078 data from previous runs so they are not accumulated. */
1080 static void
1081 reset_inline_summary (struct cgraph_node *node,
1082 inline_summary *info)
1084 struct cgraph_edge *e;
1086 info->self_size = info->self_time = 0;
1087 info->estimated_stack_size = 0;
1088 info->estimated_self_stack_size = 0;
1089 info->stack_frame_offset = 0;
1090 info->size = 0;
1091 info->time = 0;
1092 info->growth = 0;
1093 info->scc_no = 0;
1094 if (info->loop_iterations)
1096 pool_free (edge_predicate_pool, info->loop_iterations);
1097 info->loop_iterations = NULL;
1099 if (info->loop_stride)
1101 pool_free (edge_predicate_pool, info->loop_stride);
1102 info->loop_stride = NULL;
1104 if (info->array_index)
1106 pool_free (edge_predicate_pool, info->array_index);
1107 info->array_index = NULL;
1109 vec_free (info->conds);
1110 vec_free (info->entry);
1111 for (e = node->callees; e; e = e->next_callee)
1112 reset_inline_edge_summary (e);
1113 for (e = node->indirect_calls; e; e = e->next_callee)
1114 reset_inline_edge_summary (e);
1117 /* Hook that is called by cgraph.c when a node is removed. */
1119 void
1120 inline_summary_t::remove (cgraph_node *node, inline_summary *info)
1122 reset_inline_summary (node, info);
1125 /* Remap predicate P of former function to be predicate of duplicated function.
1126 POSSIBLE_TRUTHS is clause of possible truths in the duplicated node,
1127 INFO is inline summary of the duplicated node. */
1129 static struct predicate
1130 remap_predicate_after_duplication (struct predicate *p,
1131 clause_t possible_truths,
1132 struct inline_summary *info)
1134 struct predicate new_predicate = true_predicate ();
1135 int j;
1136 for (j = 0; p->clause[j]; j++)
1137 if (!(possible_truths & p->clause[j]))
1139 new_predicate = false_predicate ();
1140 break;
1142 else
1143 add_clause (info->conds, &new_predicate,
1144 possible_truths & p->clause[j]);
1145 return new_predicate;
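/* For instance, with POSSIBLE_TRUTHS == 0x6 (hypothetical bitmaps), a
   clause 0x18 of P has no possible truths left and turns the whole remapped
   predicate into the false predicate; a clause 0xc, on the other hand, is
   narrowed to 0x4 before being re-added to the result.  */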
1148 /* Same as remap_predicate_after_duplication but handle hint predicate *P.
1149 Additionally care about allocating new memory slot for updated predicate
1150 and set it to NULL when it becomes true or false (and thus uninteresting). */
1153 static void
1154 remap_hint_predicate_after_duplication (struct predicate **p,
1155 clause_t possible_truths,
1156 struct inline_summary *info)
1158 struct predicate new_predicate;
1160 if (!*p)
1161 return;
1163 new_predicate = remap_predicate_after_duplication (*p,
1164 possible_truths, info);
1165 /* We do not want to free previous predicate; it is used by node origin. */
1166 *p = NULL;
1167 set_hint_predicate (p, new_predicate);
1171 /* Hook that is called by cgraph.c when a node is duplicated. */
1172 void
1173 inline_summary_t::duplicate (cgraph_node *src,
1174 cgraph_node *dst,
1175 inline_summary *,
1176 inline_summary *info)
1178 inline_summary_alloc ();
1179 memcpy (info, inline_summaries->get (src), sizeof (inline_summary));
1180 /* TODO: as an optimization, we may avoid copying conditions
1181 that are known to be false or true. */
1182 info->conds = vec_safe_copy (info->conds);
1184 /* When there are any replacements in the function body, see if we can figure
1185 out that something was optimized out. */
1186 if (ipa_node_params_sum && dst->clone.tree_map)
1188 vec<size_time_entry, va_gc> *entry = info->entry;
1189 /* Use SRC parm info since it may not be copied yet. */
1190 struct ipa_node_params *parms_info = IPA_NODE_REF (src);
1191 vec<tree> known_vals = vNULL;
1192 int count = ipa_get_param_count (parms_info);
1193 int i, j;
1194 clause_t possible_truths;
1195 struct predicate true_pred = true_predicate ();
1196 size_time_entry *e;
1197 int optimized_out_size = 0;
1198 bool inlined_to_p = false;
1199 struct cgraph_edge *edge, *next;
1201 info->entry = 0;
1202 known_vals.safe_grow_cleared (count);
1203 for (i = 0; i < count; i++)
1205 struct ipa_replace_map *r;
1207 for (j = 0; vec_safe_iterate (dst->clone.tree_map, j, &r); j++)
1209 if (((!r->old_tree && r->parm_num == i)
1210 || (r->old_tree && r->old_tree == ipa_get_param (parms_info, i)))
1211 && r->replace_p && !r->ref_p)
1213 known_vals[i] = r->new_tree;
1214 break;
1218 possible_truths = evaluate_conditions_for_known_args (dst, false,
1219 known_vals,
1220 vNULL);
1221 known_vals.release ();
1223 account_size_time (info, 0, 0, &true_pred);
1225 /* Remap size_time vectors.
1226 Simplify the predicate by pruning out alternatives that are known
1227 to be false.
1228 TODO: as an optimization, we can also eliminate conditions known
1229 to be true. */
1230 for (i = 0; vec_safe_iterate (entry, i, &e); i++)
1232 struct predicate new_predicate;
1233 new_predicate = remap_predicate_after_duplication (&e->predicate,
1234 possible_truths,
1235 info);
1236 if (false_predicate_p (&new_predicate))
1237 optimized_out_size += e->size;
1238 else
1239 account_size_time (info, e->size, e->time, &new_predicate);
1242 /* Remap edge predicates with the same simplification as above.
1243 Also copy constantness arrays. */
1244 for (edge = dst->callees; edge; edge = next)
1246 struct predicate new_predicate;
1247 struct inline_edge_summary *es = inline_edge_summary (edge);
1248 next = edge->next_callee;
1250 if (!edge->inline_failed)
1251 inlined_to_p = true;
1252 if (!es->predicate)
1253 continue;
1254 new_predicate = remap_predicate_after_duplication (es->predicate,
1255 possible_truths,
1256 info);
1257 if (false_predicate_p (&new_predicate)
1258 && !false_predicate_p (es->predicate))
1259 optimized_out_size += es->call_stmt_size * INLINE_SIZE_SCALE;
1260 edge_set_predicate (edge, &new_predicate);
1263 /* Remap indirect edge predicates with the same simplification as above.
1264 Also copy constantness arrays. */
1265 for (edge = dst->indirect_calls; edge; edge = next)
1267 struct predicate new_predicate;
1268 struct inline_edge_summary *es = inline_edge_summary (edge);
1269 next = edge->next_callee;
1271 gcc_checking_assert (edge->inline_failed);
1272 if (!es->predicate)
1273 continue;
1274 new_predicate = remap_predicate_after_duplication (es->predicate,
1275 possible_truths,
1276 info);
1277 if (false_predicate_p (&new_predicate)
1278 && !false_predicate_p (es->predicate))
1279 optimized_out_size += es->call_stmt_size * INLINE_SIZE_SCALE;
1280 edge_set_predicate (edge, &new_predicate);
1282 remap_hint_predicate_after_duplication (&info->loop_iterations,
1283 possible_truths, info);
1284 remap_hint_predicate_after_duplication (&info->loop_stride,
1285 possible_truths, info);
1286 remap_hint_predicate_after_duplication (&info->array_index,
1287 possible_truths, info);
1289 /* If the inliner or someone after the inliner ever starts producing
1290 non-trivial clones, we will get in trouble with the lack of information
1291 about updating self sizes, because the size vectors already contain
1292 sizes of the callees. */
1293 gcc_assert (!inlined_to_p || !optimized_out_size);
1295 else
1297 info->entry = vec_safe_copy (info->entry);
1298 if (info->loop_iterations)
1300 predicate p = *info->loop_iterations;
1301 info->loop_iterations = NULL;
1302 set_hint_predicate (&info->loop_iterations, p);
1304 if (info->loop_stride)
1306 predicate p = *info->loop_stride;
1307 info->loop_stride = NULL;
1308 set_hint_predicate (&info->loop_stride, p);
1310 if (info->array_index)
1312 predicate p = *info->array_index;
1313 info->array_index = NULL;
1314 set_hint_predicate (&info->array_index, p);
1317 if (!dst->global.inlined_to)
1318 inline_update_overall_summary (dst);
1322 /* Hook that is called by cgraph.c when a node is duplicated. */
1324 static void
1325 inline_edge_duplication_hook (struct cgraph_edge *src,
1326 struct cgraph_edge *dst,
1327 ATTRIBUTE_UNUSED void *data)
1329 struct inline_edge_summary *info;
1330 struct inline_edge_summary *srcinfo;
1331 inline_summary_alloc ();
1332 info = inline_edge_summary (dst);
1333 srcinfo = inline_edge_summary (src);
1334 memcpy (info, srcinfo, sizeof (struct inline_edge_summary));
1335 info->predicate = NULL;
1336 edge_set_predicate (dst, srcinfo->predicate);
1337 info->param = srcinfo->param.copy ();
1338 if (!dst->indirect_unknown_callee && src->indirect_unknown_callee)
1340 info->call_stmt_size -= (eni_size_weights.indirect_call_cost
1341 - eni_size_weights.call_cost);
1342 info->call_stmt_time -= (eni_time_weights.indirect_call_cost
1343 - eni_time_weights.call_cost);
1348 /* Keep edge cache consistent across edge removal. */
1350 static void
1351 inline_edge_removal_hook (struct cgraph_edge *edge,
1352 void *data ATTRIBUTE_UNUSED)
1354 if (edge_growth_cache.exists ())
1355 reset_edge_growth_cache (edge);
1356 reset_inline_edge_summary (edge);
1360 /* Initialize growth caches. */
1362 void
1363 initialize_growth_caches (void)
1365 if (symtab->edges_max_uid)
1366 edge_growth_cache.safe_grow_cleared (symtab->edges_max_uid);
1370 /* Free growth caches. */
1372 void
1373 free_growth_caches (void)
1375 edge_growth_cache.release ();
1379 /* Dump edge summaries associated to NODE and recursively to all clones.
1380 Indent by INDENT. */
1382 static void
1383 dump_inline_edge_summary (FILE *f, int indent, struct cgraph_node *node,
1384 struct inline_summary *info)
1386 struct cgraph_edge *edge;
1387 for (edge = node->callees; edge; edge = edge->next_callee)
1389 struct inline_edge_summary *es = inline_edge_summary (edge);
1390 struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
1391 int i;
1393 fprintf (f,
1394 "%*s%s/%i %s\n%*s loop depth:%2i freq:%4i size:%2i"
1395 " time: %2i callee size:%2i stack:%2i",
1396 indent, "", callee->name (), callee->order,
1397 !edge->inline_failed
1398 ? "inlined" : cgraph_inline_failed_string (edge-> inline_failed),
1399 indent, "", es->loop_depth, edge->frequency,
1400 es->call_stmt_size, es->call_stmt_time,
1401 (int) inline_summaries->get (callee)->size / INLINE_SIZE_SCALE,
1402 (int) inline_summaries->get (callee)->estimated_stack_size);
1404 if (es->predicate)
1406 fprintf (f, " predicate: ");
1407 dump_predicate (f, info->conds, es->predicate);
1409 else
1410 fprintf (f, "\n");
1411 if (es->param.exists ())
1412 for (i = 0; i < (int) es->param.length (); i++)
1414 int prob = es->param[i].change_prob;
1416 if (!prob)
1417 fprintf (f, "%*s op%i is compile time invariant\n",
1418 indent + 2, "", i);
1419 else if (prob != REG_BR_PROB_BASE)
1420 fprintf (f, "%*s op%i change %f%% of time\n", indent + 2, "", i,
1421 prob * 100.0 / REG_BR_PROB_BASE);
1423 if (!edge->inline_failed)
1425 fprintf (f, "%*sStack frame offset %i, callee self size %i,"
1426 " callee size %i\n",
1427 indent + 2, "",
1428 (int) inline_summaries->get (callee)->stack_frame_offset,
1429 (int) inline_summaries->get (callee)->estimated_self_stack_size,
1430 (int) inline_summaries->get (callee)->estimated_stack_size);
1431 dump_inline_edge_summary (f, indent + 2, callee, info);
1434 for (edge = node->indirect_calls; edge; edge = edge->next_callee)
1436 struct inline_edge_summary *es = inline_edge_summary (edge);
1437 fprintf (f, "%*sindirect call loop depth:%2i freq:%4i size:%2i"
1438 " time: %2i",
1439 indent, "",
1440 es->loop_depth,
1441 edge->frequency, es->call_stmt_size, es->call_stmt_time);
1442 if (es->predicate)
1444 fprintf (f, "predicate: ");
1445 dump_predicate (f, info->conds, es->predicate);
1447 else
1448 fprintf (f, "\n");
1453 void
1454 dump_inline_summary (FILE *f, struct cgraph_node *node)
1456 if (node->definition)
1458 struct inline_summary *s = inline_summaries->get (node);
1459 size_time_entry *e;
1460 int i;
1461 fprintf (f, "Inline summary for %s/%i", node->name (),
1462 node->order);
1463 if (DECL_DISREGARD_INLINE_LIMITS (node->decl))
1464 fprintf (f, " always_inline");
1465 if (s->inlinable)
1466 fprintf (f, " inlinable");
1467 if (s->contains_cilk_spawn)
1468 fprintf (f, " contains_cilk_spawn");
1469 fprintf (f, "\n self time: %i\n", s->self_time);
1470 fprintf (f, " global time: %i\n", s->time);
1471 fprintf (f, " self size: %i\n", s->self_size);
1472 fprintf (f, " global size: %i\n", s->size);
1473 fprintf (f, " min size: %i\n", s->min_size);
1474 fprintf (f, " self stack: %i\n",
1475 (int) s->estimated_self_stack_size);
1476 fprintf (f, " global stack: %i\n", (int) s->estimated_stack_size);
1477 if (s->growth)
1478 fprintf (f, " estimated growth:%i\n", (int) s->growth);
1479 if (s->scc_no)
1480 fprintf (f, " In SCC: %i\n", (int) s->scc_no);
1481 for (i = 0; vec_safe_iterate (s->entry, i, &e); i++)
1483 fprintf (f, " size:%f, time:%f, predicate:",
1484 (double) e->size / INLINE_SIZE_SCALE,
1485 (double) e->time / INLINE_TIME_SCALE);
1486 dump_predicate (f, s->conds, &e->predicate);
1488 if (s->loop_iterations)
1490 fprintf (f, " loop iterations:");
1491 dump_predicate (f, s->conds, s->loop_iterations);
1493 if (s->loop_stride)
1495 fprintf (f, " loop stride:");
1496 dump_predicate (f, s->conds, s->loop_stride);
1498 if (s->array_index)
1500 fprintf (f, " array index:");
1501 dump_predicate (f, s->conds, s->array_index);
1503 fprintf (f, " calls:\n");
1504 dump_inline_edge_summary (f, 4, node, s);
1505 fprintf (f, "\n");
1509 DEBUG_FUNCTION void
1510 debug_inline_summary (struct cgraph_node *node)
1512 dump_inline_summary (stderr, node);
1515 void
1516 dump_inline_summaries (FILE *f)
1518 struct cgraph_node *node;
1520 FOR_EACH_DEFINED_FUNCTION (node)
1521 if (!node->global.inlined_to)
1522 dump_inline_summary (f, node);
1525 /* Give initial reasons why inlining would fail on EDGE. This gets either
1526 nullified or usually overwritten by more precise reasons later. */
1528 void
1529 initialize_inline_failed (struct cgraph_edge *e)
1531 struct cgraph_node *callee = e->callee;
1533 if (e->indirect_unknown_callee)
1534 e->inline_failed = CIF_INDIRECT_UNKNOWN_CALL;
1535 else if (!callee->definition)
1536 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
1537 else if (callee->local.redefined_extern_inline)
1538 e->inline_failed = CIF_REDEFINED_EXTERN_INLINE;
1539 else if (e->call_stmt_cannot_inline_p)
1540 e->inline_failed = CIF_MISMATCHED_ARGUMENTS;
1541 else if (cfun && fn_contains_cilk_spawn_p (cfun))
1542 /* We can't inline if the function is spawning a function. */
1543 e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
1544 else
1545 e->inline_failed = CIF_FUNCTION_NOT_CONSIDERED;
1548 /* Callback of walk_aliased_vdefs. Flags that it has been invoked to the
1549 boolean variable pointed to by DATA. */
1551 static bool
1552 mark_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef ATTRIBUTE_UNUSED,
1553 void *data)
1555 bool *b = (bool *) data;
1556 *b = true;
1557 return true;
1560 /* If OP refers to value of function parameter, return the corresponding
1561 parameter. If non-NULL, the size of the memory load (or the SSA_NAME of the
1562 PARM_DECL) will be stored to *SIZE_P in that case too. */
1564 static tree
1565 unmodified_parm_1 (gimple stmt, tree op, HOST_WIDE_INT *size_p)
1567 /* SSA_NAME referring to parm default def? */
1568 if (TREE_CODE (op) == SSA_NAME
1569 && SSA_NAME_IS_DEFAULT_DEF (op)
1570 && TREE_CODE (SSA_NAME_VAR (op)) == PARM_DECL)
1572 if (size_p)
1573 *size_p = tree_to_shwi (TYPE_SIZE (TREE_TYPE (op)));
1574 return SSA_NAME_VAR (op);
1576 /* Non-SSA parm reference? */
1577 if (TREE_CODE (op) == PARM_DECL)
1579 bool modified = false;
1581 ao_ref refd;
1582 ao_ref_init (&refd, op);
1583 walk_aliased_vdefs (&refd, gimple_vuse (stmt), mark_modified, &modified,
1584 NULL);
1585 if (!modified)
1587 if (size_p)
1588 *size_p = tree_to_shwi (TYPE_SIZE (TREE_TYPE (op)));
1589 return op;
1592 return NULL_TREE;
1595 /* If OP refers to value of function parameter, return the corresponding
1596 parameter. Also traverse chains of SSA register assignments. If non-NULL,
1597 the size of the memory load (or the SSA_NAME of the PARM_DECL) will be
1598 stored to *SIZE_P in that case too. */
1600 static tree
1601 unmodified_parm (gimple stmt, tree op, HOST_WIDE_INT *size_p)
1603 tree res = unmodified_parm_1 (stmt, op, size_p);
1604 if (res)
1605 return res;
1607 if (TREE_CODE (op) == SSA_NAME
1608 && !SSA_NAME_IS_DEFAULT_DEF (op)
1609 && gimple_assign_single_p (SSA_NAME_DEF_STMT (op)))
1610 return unmodified_parm (SSA_NAME_DEF_STMT (op),
1611 gimple_assign_rhs1 (SSA_NAME_DEF_STMT (op)),
1612 size_p);
1613 return NULL_TREE;
1616 /* If OP refers to a value of a function parameter or value loaded from an
1617 aggregate passed to a parameter (either by value or reference), return TRUE
1618 and store the number of the parameter to *INDEX_P, the access size into
1619 *SIZE_P, and information whether and how it has been loaded from an
1620 aggregate into *AGGPOS. INFO describes the function parameters, STMT is the
1621 statement in which OP is used or loaded. */
1623 static bool
1624 unmodified_parm_or_parm_agg_item (struct ipa_func_body_info *fbi,
1625 gimple stmt, tree op, int *index_p,
1626 HOST_WIDE_INT *size_p,
1627 struct agg_position_info *aggpos)
1629 tree res = unmodified_parm_1 (stmt, op, size_p);
1631 gcc_checking_assert (aggpos);
1632 if (res)
1634 *index_p = ipa_get_param_decl_index (fbi->info, res);
1635 if (*index_p < 0)
1636 return false;
1637 aggpos->agg_contents = false;
1638 aggpos->by_ref = false;
1639 return true;
1642 if (TREE_CODE (op) == SSA_NAME)
1644 if (SSA_NAME_IS_DEFAULT_DEF (op)
1645 || !gimple_assign_single_p (SSA_NAME_DEF_STMT (op)))
1646 return false;
1647 stmt = SSA_NAME_DEF_STMT (op);
1648 op = gimple_assign_rhs1 (stmt);
1649 if (!REFERENCE_CLASS_P (op))
1650 return unmodified_parm_or_parm_agg_item (fbi, stmt, op, index_p, size_p,
1651 aggpos);
1654 aggpos->agg_contents = true;
1655 return ipa_load_from_parm_agg (fbi, fbi->info->descriptors,
1656 stmt, op, index_p, &aggpos->offset,
1657 size_p, &aggpos->by_ref);
1660 /* See if statement might disappear after inlining.
1661 0 - means not eliminated
1662 1 - half of the statements go away
1663 2 - for sure it is eliminated.
1664 We are not terribly sophisticated, basically looking for simple abstraction
1665 penalty wrappers. */
1667 static int
1668 eliminated_by_inlining_prob (gimple stmt)
1670 enum gimple_code code = gimple_code (stmt);
1671 enum tree_code rhs_code;
1673 if (!optimize)
1674 return 0;
1676 switch (code)
1678 case GIMPLE_RETURN:
1679 return 2;
1680 case GIMPLE_ASSIGN:
1681 if (gimple_num_ops (stmt) != 2)
1682 return 0;
1684 rhs_code = gimple_assign_rhs_code (stmt);
1686 /* Casts of parameters, loads from parameters passed by reference
1687 and stores to return value or parameters are often free after
1688 inlining due to SRA and further combining.
1689 Assume that half of the statements go away. */
1690 if (CONVERT_EXPR_CODE_P (rhs_code)
1691 || rhs_code == VIEW_CONVERT_EXPR
1692 || rhs_code == ADDR_EXPR
1693 || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
1695 tree rhs = gimple_assign_rhs1 (stmt);
1696 tree lhs = gimple_assign_lhs (stmt);
1697 tree inner_rhs = get_base_address (rhs);
1698 tree inner_lhs = get_base_address (lhs);
1699 bool rhs_free = false;
1700 bool lhs_free = false;
1702 if (!inner_rhs)
1703 inner_rhs = rhs;
1704 if (!inner_lhs)
1705 inner_lhs = lhs;
1707 /* Reads of parameters are expected to be free. */
1708 if (unmodified_parm (stmt, inner_rhs, NULL))
1709 rhs_free = true;
1710 /* Match expressions of form &this->field. Those will most likely
1711 combine with something upstream after inlining. */
1712 else if (TREE_CODE (inner_rhs) == ADDR_EXPR)
1714 tree op = get_base_address (TREE_OPERAND (inner_rhs, 0));
1715 if (TREE_CODE (op) == PARM_DECL)
1716 rhs_free = true;
1717 else if (TREE_CODE (op) == MEM_REF
1718 && unmodified_parm (stmt, TREE_OPERAND (op, 0), NULL))
1719 rhs_free = true;
1722 /* When parameter is not SSA register because its address is taken
1723 and it is just copied into one, the statement will be completely
1724 free after inlining (we will copy propagate backward). */
1725 if (rhs_free && is_gimple_reg (lhs))
1726 return 2;
1728 /* Reads of parameters passed by reference
1729 are expected to be free (i.e. optimized out after inlining). */
1730 if (TREE_CODE (inner_rhs) == MEM_REF
1731 && unmodified_parm (stmt, TREE_OPERAND (inner_rhs, 0), NULL))
1732 rhs_free = true;
1734 /* Copying parameter passed by reference into gimple register is
1735 probably also going to copy propagate, but we can't be quite
1736 sure. */
1737 if (rhs_free && is_gimple_reg (lhs))
1738 lhs_free = true;
1740 /* Writes to parameters, parameters passed by value and return value
1741 (either directly or passed via invisible reference) are free.
1743 TODO: We ought to handle testcase like
1744 struct a {int a,b;};
1745 struct a
1746 returnstruct (void)
1748 struct a a ={1,2};
1749 return a;
1752 This translates into:
1754 returnstruct ()
1756 int a$b;
1757 int a$a;
1758 struct a a;
1759 struct a D.2739;
1761 <bb 2>:
1762 D.2739.a = 1;
1763 D.2739.b = 2;
1764 return D.2739;
1767 For that we would need to copy the ipa-split logic detecting writes
1768 to the return value. */
1769 if (TREE_CODE (inner_lhs) == PARM_DECL
1770 || TREE_CODE (inner_lhs) == RESULT_DECL
1771 || (TREE_CODE (inner_lhs) == MEM_REF
1772 && (unmodified_parm (stmt, TREE_OPERAND (inner_lhs, 0), NULL)
1773 || (TREE_CODE (TREE_OPERAND (inner_lhs, 0)) == SSA_NAME
1774 && SSA_NAME_VAR (TREE_OPERAND (inner_lhs, 0))
1775 && TREE_CODE (SSA_NAME_VAR (TREE_OPERAND
1776 (inner_lhs,
1777 0))) == RESULT_DECL))))
1778 lhs_free = true;
1779 if (lhs_free
1780 && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
1781 rhs_free = true;
1782 if (lhs_free && rhs_free)
1783 return 1;
1785 return 0;
1786 default:
1787 return 0;
1792 /* If BB ends with a conditional that we can turn into predicates, attach the
1793 corresponding predicates to the CFG edges. */
1795 static void
1796 set_cond_stmt_execution_predicate (struct ipa_func_body_info *fbi,
1797 struct inline_summary *summary,
1798 basic_block bb)
1800 gimple last;
1801 tree op;
1802 HOST_WIDE_INT size;
1803 int index;
1804 struct agg_position_info aggpos;
1805 enum tree_code code, inverted_code;
1806 edge e;
1807 edge_iterator ei;
1808 gimple set_stmt;
1809 tree op2;
1811 last = last_stmt (bb);
1812 if (!last || gimple_code (last) != GIMPLE_COND)
1813 return;
1814 if (!is_gimple_ip_invariant (gimple_cond_rhs (last)))
1815 return;
1816 op = gimple_cond_lhs (last);
1817 /* TODO: handle conditionals like
1818 var = op0 < 4;
1819 if (var != 0). */
1820 if (unmodified_parm_or_parm_agg_item (fbi, last, op, &index, &size, &aggpos))
1822 code = gimple_cond_code (last);
1823 inverted_code = invert_tree_comparison (code, HONOR_NANS (op));
1825 FOR_EACH_EDGE (e, ei, bb->succs)
1827 enum tree_code this_code = (e->flags & EDGE_TRUE_VALUE
1828 ? code : inverted_code);
1829 /* invert_tree_comparison will return ERROR_MARK on FP
1830 comparisons that are not EQ/NE instead of returning the proper
1831 unordered one. Be sure it is not confused with NON_CONSTANT. */
1832 if (this_code != ERROR_MARK)
1834 struct predicate p = add_condition (summary, index, size, &aggpos,
1835 this_code,
1836 gimple_cond_rhs (last));
1837 e->aux = pool_alloc (edge_predicate_pool);
1838 *(struct predicate *) e->aux = p;
1843 if (TREE_CODE (op) != SSA_NAME)
1844 return;
1845 /* Special case
1846 if (builtin_constant_p (op))
1847 constant_code
1848 else
1849 nonconstant_code.
1850 Here we can predicate nonconstant_code. We can't
1851 really handle constant_code since we have no predicate
1852 for this and also the constant code is not known to be
1853 optimized away when the inliner doesn't see the operand is constant.
1854 Other optimizers might think otherwise. */
1855 if (gimple_cond_code (last) != NE_EXPR
1856 || !integer_zerop (gimple_cond_rhs (last)))
1857 return;
1858 set_stmt = SSA_NAME_DEF_STMT (op);
1859 if (!gimple_call_builtin_p (set_stmt, BUILT_IN_CONSTANT_P)
1860 || gimple_call_num_args (set_stmt) != 1)
1861 return;
1862 op2 = gimple_call_arg (set_stmt, 0);
1863 if (!unmodified_parm_or_parm_agg_item (fbi, set_stmt, op2, &index, &size,
1864 &aggpos))
1865 return;
1866 FOR_EACH_EDGE (e, ei, bb->succs) if (e->flags & EDGE_FALSE_VALUE)
1868 struct predicate p = add_condition (summary, index, size, &aggpos,
1869 IS_NOT_CONSTANT, NULL_TREE);
1870 e->aux = pool_alloc (edge_predicate_pool);
1871 *(struct predicate *) e->aux = p;
1876 /* If BB ends with a switch that we can turn into predicates, attach the
1877 corresponding predicates to the CFG edges. */
1879 static void
1880 set_switch_stmt_execution_predicate (struct ipa_func_body_info *fbi,
1881 struct inline_summary *summary,
1882 basic_block bb)
1884 gimple lastg;
1885 tree op;
1886 int index;
1887 HOST_WIDE_INT size;
1888 struct agg_position_info aggpos;
1889 edge e;
1890 edge_iterator ei;
1891 size_t n;
1892 size_t case_idx;
1894 lastg = last_stmt (bb);
1895 if (!lastg || gimple_code (lastg) != GIMPLE_SWITCH)
1896 return;
1897 gswitch *last = as_a <gswitch *> (lastg);
1898 op = gimple_switch_index (last);
1899 if (!unmodified_parm_or_parm_agg_item (fbi, last, op, &index, &size, &aggpos))
1900 return;
1902 FOR_EACH_EDGE (e, ei, bb->succs)
1904 e->aux = pool_alloc (edge_predicate_pool);
1905 *(struct predicate *) e->aux = false_predicate ();
1907 n = gimple_switch_num_labels (last);
1908 for (case_idx = 0; case_idx < n; ++case_idx)
1910 tree cl = gimple_switch_label (last, case_idx);
1911 tree min, max;
1912 struct predicate p;
1914 e = find_edge (bb, label_to_block (CASE_LABEL (cl)));
1915 min = CASE_LOW (cl);
1916 max = CASE_HIGH (cl);
1918 /* For the default case we might want to construct a predicate that none
1919 of the cases is met, but it is a bit hard to do without having negations
1920 of the conditionals handy. */
1921 if (!min && !max)
1922 p = true_predicate ();
1923 else if (!max)
1924 p = add_condition (summary, index, size, &aggpos, EQ_EXPR, min);
1925 else
1927 struct predicate p1, p2;
1928 p1 = add_condition (summary, index, size, &aggpos, GE_EXPR, min);
1929 p2 = add_condition (summary, index, size, &aggpos, LE_EXPR, max);
1930 p = and_predicates (summary->conds, &p1, &p2);
1932 *(struct predicate *) e->aux
1933 = or_predicates (summary->conds, &p, (struct predicate *) e->aux);
1938 /* For each BB in NODE attach to its AUX pointer predicate under
1939 which it is executable. */
1941 static void
1942 compute_bb_predicates (struct ipa_func_body_info *fbi,
1943 struct cgraph_node *node,
1944 struct inline_summary *summary)
1946 struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
1947 bool done = false;
1948 basic_block bb;
1950 FOR_EACH_BB_FN (bb, my_function)
1952 set_cond_stmt_execution_predicate (fbi, summary, bb);
1953 set_switch_stmt_execution_predicate (fbi, summary, bb);
1956 /* Entry block is always executable. */
1957 ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
1958 = pool_alloc (edge_predicate_pool);
1959 *(struct predicate *) ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
1960 = true_predicate ();
1962 /* A simple dataflow propagation of predicates forward in the CFG.
1963 TODO: work in reverse postorder. */
1964 while (!done)
1966 done = true;
1967 FOR_EACH_BB_FN (bb, my_function)
1969 struct predicate p = false_predicate ();
1970 edge e;
1971 edge_iterator ei;
1972 FOR_EACH_EDGE (e, ei, bb->preds)
1974 if (e->src->aux)
1976 struct predicate this_bb_predicate
1977 = *(struct predicate *) e->src->aux;
1978 if (e->aux)
1979 this_bb_predicate
1980 = and_predicates (summary->conds, &this_bb_predicate,
1981 (struct predicate *) e->aux);
1982 p = or_predicates (summary->conds, &p, &this_bb_predicate);
1983 if (true_predicate_p (&p))
1984 break;
1987 if (false_predicate_p (&p))
1988 gcc_assert (!bb->aux);
1989 else
1991 if (!bb->aux)
1993 done = false;
1994 bb->aux = pool_alloc (edge_predicate_pool);
1995 *((struct predicate *) bb->aux) = p;
1997 else if (!predicates_equal_p (&p, (struct predicate *) bb->aux))
1999 /* This OR operation is needed to ensure monotone data flow
2000 in case we hit the limit on the number of clauses and the
2001 and/or operations above give approximate answers. */
2002 p = or_predicates (summary->conds, &p, (struct predicate *)bb->aux);
2003 if (!predicates_equal_p (&p, (struct predicate *) bb->aux))
2005 done = false;
2006 *((struct predicate *) bb->aux) = p;
2015 /* We keep info about constantness of SSA names. */
2017 typedef struct predicate predicate_t;
2018 /* Return the predicate specifying when the expression EXPR might evaluate to
2019 something that is not a compile time constant. */
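/* For instance (an illustrative sketch of the recursion below): for
   EXPR = a + b, where a is parameter number 0 and b is an SSA name, the
   result is the predicate "param 0 changed" ORed with the predicate already
   recorded for b in NONCONSTANT_NAMES.  */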
2021 static struct predicate
2022 will_be_nonconstant_expr_predicate (struct ipa_node_params *info,
2023 struct inline_summary *summary,
2024 tree expr,
2025 vec<predicate_t> nonconstant_names)
2027 tree parm;
2028 int index;
2029 HOST_WIDE_INT size;
2031 while (UNARY_CLASS_P (expr))
2032 expr = TREE_OPERAND (expr, 0);
2034 parm = unmodified_parm (NULL, expr, &size);
2035 if (parm && (index = ipa_get_param_decl_index (info, parm)) >= 0)
2036 return add_condition (summary, index, size, NULL, CHANGED, NULL_TREE);
2037 if (is_gimple_min_invariant (expr))
2038 return false_predicate ();
2039 if (TREE_CODE (expr) == SSA_NAME)
2040 return nonconstant_names[SSA_NAME_VERSION (expr)];
2041 if (BINARY_CLASS_P (expr) || COMPARISON_CLASS_P (expr))
2043 struct predicate p1 = will_be_nonconstant_expr_predicate
2044 (info, summary, TREE_OPERAND (expr, 0),
2045 nonconstant_names);
2046 struct predicate p2;
2047 if (true_predicate_p (&p1))
2048 return p1;
2049 p2 = will_be_nonconstant_expr_predicate (info, summary,
2050 TREE_OPERAND (expr, 1),
2051 nonconstant_names);
2052 return or_predicates (summary->conds, &p1, &p2);
2054 else if (TREE_CODE (expr) == COND_EXPR)
2056 struct predicate p1 = will_be_nonconstant_expr_predicate
2057 (info, summary, TREE_OPERAND (expr, 0),
2058 nonconstant_names);
2059 struct predicate p2;
2060 if (true_predicate_p (&p1))
2061 return p1;
2062 p2 = will_be_nonconstant_expr_predicate (info, summary,
2063 TREE_OPERAND (expr, 1),
2064 nonconstant_names);
2065 if (true_predicate_p (&p2))
2066 return p2;
2067 p1 = or_predicates (summary->conds, &p1, &p2);
2068 p2 = will_be_nonconstant_expr_predicate (info, summary,
2069 TREE_OPERAND (expr, 2),
2070 nonconstant_names);
2071 return or_predicates (summary->conds, &p1, &p2);
2073 else
2075 debug_tree (expr);
2076 gcc_unreachable ();
2078 return false_predicate ();
2082 /* Return the predicate specifying when STMT might have a result that is not
2083 a compile time constant. */
2085 static struct predicate
2086 will_be_nonconstant_predicate (struct ipa_func_body_info *fbi,
2087 struct inline_summary *summary,
2088 gimple stmt,
2089 vec<predicate_t> nonconstant_names)
2091 struct predicate p = true_predicate ();
2092 ssa_op_iter iter;
2093 tree use;
2094 struct predicate op_non_const;
2095 bool is_load;
2096 int base_index;
2097 HOST_WIDE_INT size;
2098 struct agg_position_info aggpos;
2100 /* These are the statements that might be optimized away
2101 when their arguments are constant. */
2102 if (gimple_code (stmt) != GIMPLE_ASSIGN
2103 && gimple_code (stmt) != GIMPLE_COND
2104 && gimple_code (stmt) != GIMPLE_SWITCH
2105 && (gimple_code (stmt) != GIMPLE_CALL
2106 || !(gimple_call_flags (stmt) & ECF_CONST)))
2107 return p;
2109 /* Stores will stay anyway. */
2110 if (gimple_store_p (stmt))
2111 return p;
2113 is_load = gimple_assign_load_p (stmt);
2115 /* Loads can be optimized when the value is known. */
2116 if (is_load)
2118 tree op;
2119 gcc_assert (gimple_assign_single_p (stmt));
2120 op = gimple_assign_rhs1 (stmt);
2121 if (!unmodified_parm_or_parm_agg_item (fbi, stmt, op, &base_index, &size,
2122 &aggpos))
2123 return p;
2125 else
2126 base_index = -1;
2128 /* See if we understand all operands before we start
2129 adding conditionals. */
2130 FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
2132 tree parm = unmodified_parm (stmt, use, NULL);
2133 /* For arguments we can build a condition. */
2134 if (parm && ipa_get_param_decl_index (fbi->info, parm) >= 0)
2135 continue;
2136 if (TREE_CODE (use) != SSA_NAME)
2137 return p;
2138 /* If we know when the operand is constant,
2139 we can still say something useful. */
2140 if (!true_predicate_p (&nonconstant_names[SSA_NAME_VERSION (use)]))
2141 continue;
2142 return p;
2145 if (is_load)
2146 op_non_const =
2147 add_condition (summary, base_index, size, &aggpos, CHANGED, NULL);
2148 else
2149 op_non_const = false_predicate ();
2150 FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
2152 HOST_WIDE_INT size;
2153 tree parm = unmodified_parm (stmt, use, &size);
2154 int index;
2156 if (parm && (index = ipa_get_param_decl_index (fbi->info, parm)) >= 0)
2158 if (index != base_index)
2159 p = add_condition (summary, index, size, NULL, CHANGED, NULL_TREE);
2160 else
2161 continue;
2163 else
2164 p = nonconstant_names[SSA_NAME_VERSION (use)];
2165 op_non_const = or_predicates (summary->conds, &p, &op_non_const);
2167 if ((gimple_code (stmt) == GIMPLE_ASSIGN || gimple_code (stmt) == GIMPLE_CALL)
2168 && gimple_op (stmt, 0)
2169 && TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
2170 nonconstant_names[SSA_NAME_VERSION (gimple_op (stmt, 0))]
2171 = op_non_const;
2172 return op_non_const;
2175 struct record_modified_bb_info
2177 bitmap bb_set;
2178 gimple stmt;
2181 /* Callback of walk_aliased_vdefs. Records basic blocks where the value may be
2182 set except for info->stmt. */
2184 static bool
2185 record_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef, void *data)
2187 struct record_modified_bb_info *info =
2188 (struct record_modified_bb_info *) data;
2189 if (SSA_NAME_DEF_STMT (vdef) == info->stmt)
2190 return false;
2191 bitmap_set_bit (info->bb_set,
2192 SSA_NAME_IS_DEFAULT_DEF (vdef)
2193 ? ENTRY_BLOCK_PTR_FOR_FN (cfun)->index
2194 : gimple_bb (SSA_NAME_DEF_STMT (vdef))->index);
2195 return false;
2198 /* Return the probability (based on REG_BR_PROB_BASE) that the I-th parameter
2199 of STMT will have changed since the last invocation of STMT.
2201 Value 0 is reserved for compile time invariants.
2202 For common parameters it is REG_BR_PROB_BASE. For loop invariants it
2203 ought to be REG_BR_PROB_BASE / estimated_iters. */
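/* For example, if the block defining the value has frequency 100 while the
   block containing the call has frequency 400, the call is executed roughly
   four times per definition, so we return about REG_BR_PROB_BASE / 4.  */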
2205 static int
2206 param_change_prob (gimple stmt, int i)
2208 tree op = gimple_call_arg (stmt, i);
2209 basic_block bb = gimple_bb (stmt);
2210 tree base;
2212 /* Global invariants never change. */
2213 if (is_gimple_min_invariant (op))
2214 return 0;
2215 /* We would have to do non-trivial analysis to really work out what
2216 the probability of the value changing is (i.e. when the init statement
2217 is in a sibling loop of the call).
2219 We make a conservative estimate: when the call is executed N times more
2220 often than the statement defining the value, we take the frequency 1/N. */
2221 if (TREE_CODE (op) == SSA_NAME)
2223 int init_freq;
2225 if (!bb->frequency)
2226 return REG_BR_PROB_BASE;
2228 if (SSA_NAME_IS_DEFAULT_DEF (op))
2229 init_freq = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
2230 else
2231 init_freq = gimple_bb (SSA_NAME_DEF_STMT (op))->frequency;
2233 if (!init_freq)
2234 init_freq = 1;
2235 if (init_freq < bb->frequency)
2236 return MAX (GCOV_COMPUTE_SCALE (init_freq, bb->frequency), 1);
2237 else
2238 return REG_BR_PROB_BASE;
2241 base = get_base_address (op);
2242 if (base)
2244 ao_ref refd;
2245 int max;
2246 struct record_modified_bb_info info;
2247 bitmap_iterator bi;
2248 unsigned index;
2249 tree init = ctor_for_folding (base);
2251 if (init != error_mark_node)
2252 return 0;
2253 if (!bb->frequency)
2254 return REG_BR_PROB_BASE;
2255 ao_ref_init (&refd, op);
2256 info.stmt = stmt;
2257 info.bb_set = BITMAP_ALLOC (NULL);
2258 walk_aliased_vdefs (&refd, gimple_vuse (stmt), record_modified, &info,
2259 NULL);
2260 if (bitmap_bit_p (info.bb_set, bb->index))
2262 BITMAP_FREE (info.bb_set);
2263 return REG_BR_PROB_BASE;
2266 /* Assume that all memory is initialized at entry.
2267 TODO: Can we easily determine if the value is always defined
2268 and thus skip the entry block? */
2269 if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency)
2270 max = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
2271 else
2272 max = 1;
2274 EXECUTE_IF_SET_IN_BITMAP (info.bb_set, 0, index, bi)
2275 max = MIN (max, BASIC_BLOCK_FOR_FN (cfun, index)->frequency);
2277 BITMAP_FREE (info.bb_set);
2278 if (max < bb->frequency)
2279 return MAX (GCOV_COMPUTE_SCALE (max, bb->frequency), 1);
2280 else
2281 return REG_BR_PROB_BASE;
2283 return REG_BR_PROB_BASE;
2286 /* Find whether a basic block BB is the final block of a (half) diamond CFG
2287 sub-graph and whether the predicate the condition depends on is known. If
2288 so, return true and store the predicate in *P. */
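/* Roughly: for a (half) diamond of the form
     if (cond)
       x_1 = ...;
     x_2 = PHI <x_0, x_1>;
   *P becomes the predicate under which COND is not a compile time constant,
   so that predicate_for_phi_result below can reuse it for the PHI result.  */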
2290 static bool
2291 phi_result_unknown_predicate (struct ipa_node_params *info,
2292 inline_summary *summary, basic_block bb,
2293 struct predicate *p,
2294 vec<predicate_t> nonconstant_names)
2296 edge e;
2297 edge_iterator ei;
2298 basic_block first_bb = NULL;
2299 gimple stmt;
2301 if (single_pred_p (bb))
2303 *p = false_predicate ();
2304 return true;
2307 FOR_EACH_EDGE (e, ei, bb->preds)
2309 if (single_succ_p (e->src))
2311 if (!single_pred_p (e->src))
2312 return false;
2313 if (!first_bb)
2314 first_bb = single_pred (e->src);
2315 else if (single_pred (e->src) != first_bb)
2316 return false;
2318 else
2320 if (!first_bb)
2321 first_bb = e->src;
2322 else if (e->src != first_bb)
2323 return false;
2327 if (!first_bb)
2328 return false;
2330 stmt = last_stmt (first_bb);
2331 if (!stmt
2332 || gimple_code (stmt) != GIMPLE_COND
2333 || !is_gimple_ip_invariant (gimple_cond_rhs (stmt)))
2334 return false;
2336 *p = will_be_nonconstant_expr_predicate (info, summary,
2337 gimple_cond_lhs (stmt),
2338 nonconstant_names);
2339 if (true_predicate_p (p))
2340 return false;
2341 else
2342 return true;
2345 /* Given a PHI statement in a function described by inline properties SUMMARY
2346 and *P being the predicate describing whether the selected PHI argument is
2347 known, store a predicate for the result of the PHI statement into
2348 NONCONSTANT_NAMES, if possible. */
2350 static void
2351 predicate_for_phi_result (struct inline_summary *summary, gphi *phi,
2352 struct predicate *p,
2353 vec<predicate_t> nonconstant_names)
2355 unsigned i;
2357 for (i = 0; i < gimple_phi_num_args (phi); i++)
2359 tree arg = gimple_phi_arg (phi, i)->def;
2360 if (!is_gimple_min_invariant (arg))
2362 gcc_assert (TREE_CODE (arg) == SSA_NAME);
2363 *p = or_predicates (summary->conds, p,
2364 &nonconstant_names[SSA_NAME_VERSION (arg)]);
2365 if (true_predicate_p (p))
2366 return;
2370 if (dump_file && (dump_flags & TDF_DETAILS))
2372 fprintf (dump_file, "\t\tphi predicate: ");
2373 dump_predicate (dump_file, summary->conds, p);
2375 nonconstant_names[SSA_NAME_VERSION (gimple_phi_result (phi))] = *p;
2378 /* Return predicate specifying when array index in access OP becomes non-constant. */
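/* E.g. for an access a[i][j] the result is the OR of the nonconstant
   predicates recorded for i and j, whenever those indices are SSA names.  */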
2380 static struct predicate
2381 array_index_predicate (inline_summary *info,
2382 vec< predicate_t> nonconstant_names, tree op)
2384 struct predicate p = false_predicate ();
2385 while (handled_component_p (op))
2387 if (TREE_CODE (op) == ARRAY_REF || TREE_CODE (op) == ARRAY_RANGE_REF)
2389 if (TREE_CODE (TREE_OPERAND (op, 1)) == SSA_NAME)
2390 p = or_predicates (info->conds, &p,
2391 &nonconstant_names[SSA_NAME_VERSION
2392 (TREE_OPERAND (op, 1))]);
2394 op = TREE_OPERAND (op, 0);
2396 return p;
2399 /* For a typical usage of __builtin_expect (a <= b, 1), we
2400 may introduce an extra relation stmt:
2401 With the builtin, we have
2402 t1 = a <= b;
2403 t2 = (long int) t1;
2404 t3 = __builtin_expect (t2, 1);
2405 if (t3 != 0)
2406 goto ...
2407 Without the builtin, we have
2408 if (a<=b)
2409 goto...
2410 This affects the size/time estimation and may have
2411 an impact on the earlier inlining.
2412 Here find this pattern and fix it up later. */
2414 static gimple
2415 find_foldable_builtin_expect (basic_block bb)
2417 gimple_stmt_iterator bsi;
2419 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
2421 gimple stmt = gsi_stmt (bsi);
2422 if (gimple_call_builtin_p (stmt, BUILT_IN_EXPECT)
2423 || (is_gimple_call (stmt)
2424 && gimple_call_internal_p (stmt)
2425 && gimple_call_internal_fn (stmt) == IFN_BUILTIN_EXPECT))
2427 tree var = gimple_call_lhs (stmt);
2428 tree arg = gimple_call_arg (stmt, 0);
2429 use_operand_p use_p;
2430 gimple use_stmt;
2431 bool match = false;
2432 bool done = false;
2434 if (!var || !arg)
2435 continue;
2436 gcc_assert (TREE_CODE (var) == SSA_NAME);
2438 while (TREE_CODE (arg) == SSA_NAME)
2440 gimple stmt_tmp = SSA_NAME_DEF_STMT (arg);
2441 if (!is_gimple_assign (stmt_tmp))
2442 break;
2443 switch (gimple_assign_rhs_code (stmt_tmp))
2445 case LT_EXPR:
2446 case LE_EXPR:
2447 case GT_EXPR:
2448 case GE_EXPR:
2449 case EQ_EXPR:
2450 case NE_EXPR:
2451 match = true;
2452 done = true;
2453 break;
2454 CASE_CONVERT:
2455 break;
2456 default:
2457 done = true;
2458 break;
2460 if (done)
2461 break;
2462 arg = gimple_assign_rhs1 (stmt_tmp);
2465 if (match && single_imm_use (var, &use_p, &use_stmt)
2466 && gimple_code (use_stmt) == GIMPLE_COND)
2467 return use_stmt;
2470 return NULL;
2473 /* Return true when the basic block contains only clobbers followed by RESX.
2474 Such BBs are kept around to make removal of dead stores possible in the
2475 presence of EH and will be optimized out by optimize_clobbers later in the
2476 game.
2478 NEED_EH is used to recurse in case the clobber has non-EH predecessors
2479 that can be clobber-only, too. When it is false, the RESX is not necessary
2480 at the end of the basic block. */
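/* Such a block typically looks like

     some_var ={v} {CLOBBER};
     resx N;

   possibly with further clobbers, debug statements or labels above.  */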
2482 static bool
2483 clobber_only_eh_bb_p (basic_block bb, bool need_eh = true)
2485 gimple_stmt_iterator gsi = gsi_last_bb (bb);
2486 edge_iterator ei;
2487 edge e;
2489 if (need_eh)
2491 if (gsi_end_p (gsi))
2492 return false;
2493 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_RESX)
2494 return false;
2495 gsi_prev (&gsi);
2497 else if (!single_succ_p (bb))
2498 return false;
2500 for (; !gsi_end_p (gsi); gsi_prev (&gsi))
2502 gimple stmt = gsi_stmt (gsi);
2503 if (is_gimple_debug (stmt))
2504 continue;
2505 if (gimple_clobber_p (stmt))
2506 continue;
2507 if (gimple_code (stmt) == GIMPLE_LABEL)
2508 break;
2509 return false;
2512 /* See if all predecessors are either throws or clobber-only BBs. */
2513 FOR_EACH_EDGE (e, ei, bb->preds)
2514 if (!(e->flags & EDGE_EH)
2515 && !clobber_only_eh_bb_p (e->src, false))
2516 return false;
2518 return true;
2521 /* Compute function body size parameters for NODE.
2522 When EARLY is true, we compute only simple summaries without
2523 non-trivial predicates to drive the early inliner. */
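/* In outline (see the code below): for every statement we estimate its size
   and time, scale the time by the block frequency, and record the result via
   account_size_time together with a predicate saying when the statement is
   executed and when it is expected to survive inlining; calls are accounted
   separately on their cgraph edges.  */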
2525 static void
2526 estimate_function_body_sizes (struct cgraph_node *node, bool early)
2528 gcov_type time = 0;
2529 /* Estimate static overhead for function prologue/epilogue and alignment. */
2530 int size = 2;
2531 /* Benefits are scaled by the probability of elimination, which is in the
2532 range <0,2>. */
2533 basic_block bb;
2534 struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
2535 int freq;
2536 struct inline_summary *info = inline_summaries->get (node);
2537 struct predicate bb_predicate;
2538 struct ipa_func_body_info fbi;
2539 vec<predicate_t> nonconstant_names = vNULL;
2540 int nblocks, n;
2541 int *order;
2542 predicate array_index = true_predicate ();
2543 gimple fix_builtin_expect_stmt;
2545 gcc_assert (my_function && my_function->cfg);
2546 gcc_assert (cfun == my_function);
2548 memset(&fbi, 0, sizeof(fbi));
2549 info->conds = NULL;
2550 info->entry = NULL;
2552 /* When optimizing and analyzing for the IPA inliner, initialize the loop
2553 optimizer so we can produce proper inline hints.
2555 When optimizing and analyzing for the early inliner, initialize node params
2556 so we can produce correct BB predicates. */
2558 if (opt_for_fn (node->decl, optimize))
2560 calculate_dominance_info (CDI_DOMINATORS);
2561 if (!early)
2562 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
2563 else
2565 ipa_check_create_node_params ();
2566 ipa_initialize_node_params (node);
2569 if (ipa_node_params_sum)
2571 fbi.node = node;
2572 fbi.info = IPA_NODE_REF (node);
2573 fbi.bb_infos = vNULL;
2574 fbi.bb_infos.safe_grow_cleared (last_basic_block_for_fn (cfun));
2575 fbi.param_count = count_formal_params(node->decl);
2576 nonconstant_names.safe_grow_cleared
2577 (SSANAMES (my_function)->length ());
2581 if (dump_file)
2582 fprintf (dump_file, "\nAnalyzing function body size: %s\n",
2583 node->name ());
2585 /* When we run into the maximal number of entries, we assign everything to
2586 the constant truth case. Be sure to have it in the list. */
2587 bb_predicate = true_predicate ();
2588 account_size_time (info, 0, 0, &bb_predicate);
2590 bb_predicate = not_inlined_predicate ();
2591 account_size_time (info, 2 * INLINE_SIZE_SCALE, 0, &bb_predicate);
2593 if (fbi.info)
2594 compute_bb_predicates (&fbi, node, info);
2595 order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
2596 nblocks = pre_and_rev_post_order_compute (NULL, order, false);
2597 for (n = 0; n < nblocks; n++)
2599 bb = BASIC_BLOCK_FOR_FN (cfun, order[n]);
2600 freq = compute_call_stmt_bb_frequency (node->decl, bb);
2601 if (clobber_only_eh_bb_p (bb))
2603 if (dump_file && (dump_flags & TDF_DETAILS))
2604 fprintf (dump_file, "\n Ignoring BB %i;"
2605 " it will be optimized away by cleanup_clobbers\n",
2606 bb->index);
2607 continue;
2610 /* TODO: Obviously predicates can be propagated down across CFG. */
2611 if (fbi.info)
2613 if (bb->aux)
2614 bb_predicate = *(struct predicate *) bb->aux;
2615 else
2616 bb_predicate = false_predicate ();
2618 else
2619 bb_predicate = true_predicate ();
2621 if (dump_file && (dump_flags & TDF_DETAILS))
2623 fprintf (dump_file, "\n BB %i predicate:", bb->index);
2624 dump_predicate (dump_file, info->conds, &bb_predicate);
2627 if (fbi.info && nonconstant_names.exists ())
2629 struct predicate phi_predicate;
2630 bool first_phi = true;
2632 for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi);
2633 gsi_next (&bsi))
2635 if (first_phi
2636 && !phi_result_unknown_predicate (fbi.info, info, bb,
2637 &phi_predicate,
2638 nonconstant_names))
2639 break;
2640 first_phi = false;
2641 if (dump_file && (dump_flags & TDF_DETAILS))
2643 fprintf (dump_file, " ");
2644 print_gimple_stmt (dump_file, gsi_stmt (bsi), 0, 0);
2646 predicate_for_phi_result (info, bsi.phi (), &phi_predicate,
2647 nonconstant_names);
2651 fix_builtin_expect_stmt = find_foldable_builtin_expect (bb);
2653 for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi);
2654 gsi_next (&bsi))
2656 gimple stmt = gsi_stmt (bsi);
2657 int this_size = estimate_num_insns (stmt, &eni_size_weights);
2658 int this_time = estimate_num_insns (stmt, &eni_time_weights);
2659 int prob;
2660 struct predicate will_be_nonconstant;
2662 /* This relation stmt should be folded after we remove the
2663 builtin_expect call. Adjust the cost here. */
2664 if (stmt == fix_builtin_expect_stmt)
2666 this_size--;
2667 this_time--;
2670 if (dump_file && (dump_flags & TDF_DETAILS))
2672 fprintf (dump_file, " ");
2673 print_gimple_stmt (dump_file, stmt, 0, 0);
2674 fprintf (dump_file, "\t\tfreq:%3.2f size:%3i time:%3i\n",
2675 ((double) freq) / CGRAPH_FREQ_BASE, this_size,
2676 this_time);
2679 if (gimple_assign_load_p (stmt) && nonconstant_names.exists ())
2681 struct predicate this_array_index;
2682 this_array_index =
2683 array_index_predicate (info, nonconstant_names,
2684 gimple_assign_rhs1 (stmt));
2685 if (!false_predicate_p (&this_array_index))
2686 array_index =
2687 and_predicates (info->conds, &array_index,
2688 &this_array_index);
2690 if (gimple_store_p (stmt) && nonconstant_names.exists ())
2692 struct predicate this_array_index;
2693 this_array_index =
2694 array_index_predicate (info, nonconstant_names,
2695 gimple_get_lhs (stmt));
2696 if (!false_predicate_p (&this_array_index))
2697 array_index =
2698 and_predicates (info->conds, &array_index,
2699 &this_array_index);
2703 if (is_gimple_call (stmt)
2704 && !gimple_call_internal_p (stmt))
2706 struct cgraph_edge *edge = node->get_edge (stmt);
2707 struct inline_edge_summary *es = inline_edge_summary (edge);
2709 /* Special case: results of BUILT_IN_CONSTANT_P will always be
2710 resolved as constant. We however don't want to optimize
2711 out the cgraph edges. */
2712 if (nonconstant_names.exists ()
2713 && gimple_call_builtin_p (stmt, BUILT_IN_CONSTANT_P)
2714 && gimple_call_lhs (stmt)
2715 && TREE_CODE (gimple_call_lhs (stmt)) == SSA_NAME)
2717 struct predicate false_p = false_predicate ();
2718 nonconstant_names[SSA_NAME_VERSION (gimple_call_lhs (stmt))]
2719 = false_p;
2721 if (ipa_node_params_sum)
2723 int count = gimple_call_num_args (stmt);
2724 int i;
2726 if (count)
2727 es->param.safe_grow_cleared (count);
2728 for (i = 0; i < count; i++)
2730 int prob = param_change_prob (stmt, i);
2731 gcc_assert (prob >= 0 && prob <= REG_BR_PROB_BASE);
2732 es->param[i].change_prob = prob;
2736 es->call_stmt_size = this_size;
2737 es->call_stmt_time = this_time;
2738 es->loop_depth = bb_loop_depth (bb);
2739 edge_set_predicate (edge, &bb_predicate);
2742 /* TODO: When a conditional jump or switch is known to be constant, but
2743 we did not translate it into the predicates, we really can account for
2744 just the maximum of the possible paths. */
2745 if (fbi.info)
2746 will_be_nonconstant
2747 = will_be_nonconstant_predicate (&fbi, info,
2748 stmt, nonconstant_names);
2749 if (this_time || this_size)
2751 struct predicate p;
2753 this_time *= freq;
2755 prob = eliminated_by_inlining_prob (stmt);
2756 if (prob == 1 && dump_file && (dump_flags & TDF_DETAILS))
2757 fprintf (dump_file,
2758 "\t\t50%% will be eliminated by inlining\n");
2759 if (prob == 2 && dump_file && (dump_flags & TDF_DETAILS))
2760 fprintf (dump_file, "\t\tWill be eliminated by inlining\n");
2762 if (fbi.info)
2763 p = and_predicates (info->conds, &bb_predicate,
2764 &will_be_nonconstant);
2765 else
2766 p = true_predicate ();
2768 if (!false_predicate_p (&p)
2769 || (is_gimple_call (stmt)
2770 && !false_predicate_p (&bb_predicate)))
2772 time += this_time;
2773 size += this_size;
2774 if (time > MAX_TIME * INLINE_TIME_SCALE)
2775 time = MAX_TIME * INLINE_TIME_SCALE;
2778 /* We account everything but the calls. Calls have their own
2779 size/time info attached to cgraph edges. This is necessary
2780 in order to make the cost disappear after inlining. */
2781 if (!is_gimple_call (stmt))
2783 if (prob)
2785 struct predicate ip = not_inlined_predicate ();
2786 ip = and_predicates (info->conds, &ip, &p);
2787 account_size_time (info, this_size * prob,
2788 this_time * prob, &ip);
2790 if (prob != 2)
2791 account_size_time (info, this_size * (2 - prob),
2792 this_time * (2 - prob), &p);
2795 gcc_assert (time >= 0);
2796 gcc_assert (size >= 0);
2800 set_hint_predicate (&inline_summaries->get (node)->array_index, array_index);
2801 time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
2802 if (time > MAX_TIME)
2803 time = MAX_TIME;
2804 free (order);
2806 if (nonconstant_names.exists () && !early)
2808 struct loop *loop;
2809 predicate loop_iterations = true_predicate ();
2810 predicate loop_stride = true_predicate ();
2812 if (dump_file && (dump_flags & TDF_DETAILS))
2813 flow_loops_dump (dump_file, NULL, 0);
2814 scev_initialize ();
2815 FOR_EACH_LOOP (loop, 0)
2817 vec<edge> exits;
2818 edge ex;
2819 unsigned int j;
2820 struct tree_niter_desc niter_desc;
2821 bb_predicate = *(struct predicate *) loop->header->aux;
2823 exits = get_loop_exit_edges (loop);
2824 FOR_EACH_VEC_ELT (exits, j, ex)
2825 if (number_of_iterations_exit (loop, ex, &niter_desc, false)
2826 && !is_gimple_min_invariant (niter_desc.niter))
2828 predicate will_be_nonconstant
2829 = will_be_nonconstant_expr_predicate (fbi.info, info,
2830 niter_desc.niter,
2831 nonconstant_names);
2832 if (!true_predicate_p (&will_be_nonconstant))
2833 will_be_nonconstant = and_predicates (info->conds,
2834 &bb_predicate,
2835 &will_be_nonconstant);
2836 if (!true_predicate_p (&will_be_nonconstant)
2837 && !false_predicate_p (&will_be_nonconstant))
2838 /* This is slightly imprecise. We may want to represent each
2839 loop with an independent predicate. */
2840 loop_iterations =
2841 and_predicates (info->conds, &loop_iterations,
2842 &will_be_nonconstant);
2844 exits.release ();
2846 for (gphi_iterator gsi = gsi_start_phis (loop->header);
2847 !gsi_end_p (gsi); gsi_next (&gsi))
2849 gphi *phi = gsi.phi ();
2850 tree use = gimple_phi_result (phi);
2851 affine_iv iv;
2852 predicate will_be_nonconstant;
2853 if (virtual_operand_p (use)
2854 || !simple_iv (loop, loop, use, &iv, true)
2855 || is_gimple_min_invariant (iv.step))
2856 continue;
2857 will_be_nonconstant
2858 = will_be_nonconstant_expr_predicate (fbi.info, info,
2859 iv.step,
2860 nonconstant_names);
2861 if (!true_predicate_p (&will_be_nonconstant))
2862 will_be_nonconstant = and_predicates (info->conds,
2863 &bb_predicate,
2864 &will_be_nonconstant);
2865 if (!true_predicate_p (&will_be_nonconstant)
2866 && !false_predicate_p (&will_be_nonconstant))
2867 /* This is slightly imprecise. We may want to represent
2868 each loop with an independent predicate. */
2869 loop_stride = and_predicates (info->conds, &loop_stride,
2870 &will_be_nonconstant);
2873 set_hint_predicate (&inline_summaries->get (node)->loop_iterations,
2874 loop_iterations);
2875 set_hint_predicate (&inline_summaries->get (node)->loop_stride, loop_stride);
2876 scev_finalize ();
2878 FOR_ALL_BB_FN (bb, my_function)
2880 edge e;
2881 edge_iterator ei;
2883 if (bb->aux)
2884 pool_free (edge_predicate_pool, bb->aux);
2885 bb->aux = NULL;
2886 FOR_EACH_EDGE (e, ei, bb->succs)
2888 if (e->aux)
2889 pool_free (edge_predicate_pool, e->aux);
2890 e->aux = NULL;
2893 inline_summaries->get (node)->self_time = time;
2894 inline_summaries->get (node)->self_size = size;
2895 nonconstant_names.release ();
2896 if (opt_for_fn (node->decl, optimize))
2898 if (!early)
2899 loop_optimizer_finalize ();
2900 else if (!ipa_edge_args_vector)
2901 ipa_free_all_node_params ();
2902 free_dominance_info (CDI_DOMINATORS);
2904 if (dump_file)
2906 fprintf (dump_file, "\n");
2907 dump_inline_summary (dump_file, node);
2912 /* Compute parameters of functions used by the inliner.
2913 EARLY is true when we compute parameters for the early inliner. */
2915 void
2916 compute_inline_parameters (struct cgraph_node *node, bool early)
2918 HOST_WIDE_INT self_stack_size;
2919 struct cgraph_edge *e;
2920 struct inline_summary *info;
2922 gcc_assert (!node->global.inlined_to);
2924 inline_summary_alloc ();
2926 info = inline_summaries->get (node);
2927 reset_inline_summary (node, info);
2929 /* FIXME: Thunks are inlinable, but tree-inline doesn't know how to do that.
2930 Once this happens, we will need to predict call
2931 statement size more carefully. */
2932 if (node->thunk.thunk_p)
2934 struct inline_edge_summary *es = inline_edge_summary (node->callees);
2935 struct predicate t = true_predicate ();
2937 info->inlinable = 0;
2938 node->callees->call_stmt_cannot_inline_p = true;
2939 node->local.can_change_signature = false;
2940 es->call_stmt_time = 1;
2941 es->call_stmt_size = 1;
2942 account_size_time (info, 0, 0, &t);
2943 return;
2946 /* Even is_gimple_min_invariant relies on current_function_decl. */
2947 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
2949 /* Estimate the stack size for the function if we're optimizing. */
2950 self_stack_size = optimize ? estimated_stack_frame_size (node) : 0;
2951 info->estimated_self_stack_size = self_stack_size;
2952 info->estimated_stack_size = self_stack_size;
2953 info->stack_frame_offset = 0;
2955 /* Can this function be inlined at all? */
2956 if (!opt_for_fn (node->decl, optimize)
2957 && !lookup_attribute ("always_inline",
2958 DECL_ATTRIBUTES (node->decl)))
2959 info->inlinable = false;
2960 else
2961 info->inlinable = tree_inlinable_function_p (node->decl);
2963 info->contains_cilk_spawn = fn_contains_cilk_spawn_p (cfun);
2965 /* Type attributes can use parameter indices to describe them. */
2966 if (TYPE_ATTRIBUTES (TREE_TYPE (node->decl)))
2967 node->local.can_change_signature = false;
2968 else
2970 /* Otherwise, inlinable functions can always change their signature. */
2971 if (info->inlinable)
2972 node->local.can_change_signature = true;
2973 else
2975 /* Functions calling builtin_apply cannot change their signature. */
2976 for (e = node->callees; e; e = e->next_callee)
2978 tree cdecl = e->callee->decl;
2979 if (DECL_BUILT_IN (cdecl)
2980 && DECL_BUILT_IN_CLASS (cdecl) == BUILT_IN_NORMAL
2981 && (DECL_FUNCTION_CODE (cdecl) == BUILT_IN_APPLY_ARGS
2982 || DECL_FUNCTION_CODE (cdecl) == BUILT_IN_VA_START))
2983 break;
2985 node->local.can_change_signature = !e;
2988 estimate_function_body_sizes (node, early);
2990 for (e = node->callees; e; e = e->next_callee)
2991 if (e->callee->comdat_local_p ())
2992 break;
2993 node->calls_comdat_local = (e != NULL);
2995 /* Inlining characteristics are maintained by the cgraph_mark_inline. */
2996 info->time = info->self_time;
2997 info->size = info->self_size;
2998 info->stack_frame_offset = 0;
2999 info->estimated_stack_size = info->estimated_self_stack_size;
3000 #ifdef ENABLE_CHECKING
3001 inline_update_overall_summary (node);
3002 gcc_assert (info->time == info->self_time && info->size == info->self_size);
3003 #endif
3005 pop_cfun ();
3009 /* Compute parameters of functions used by inliner using
3010 current_function_decl. */
3012 static unsigned int
3013 compute_inline_parameters_for_current (void)
3015 compute_inline_parameters (cgraph_node::get (current_function_decl), true);
3016 return 0;
3019 namespace {
3021 const pass_data pass_data_inline_parameters =
3023 GIMPLE_PASS, /* type */
3024 "inline_param", /* name */
3025 OPTGROUP_INLINE, /* optinfo_flags */
3026 TV_INLINE_PARAMETERS, /* tv_id */
3027 0, /* properties_required */
3028 0, /* properties_provided */
3029 0, /* properties_destroyed */
3030 0, /* todo_flags_start */
3031 0, /* todo_flags_finish */
3034 class pass_inline_parameters : public gimple_opt_pass
3036 public:
3037 pass_inline_parameters (gcc::context *ctxt)
3038 : gimple_opt_pass (pass_data_inline_parameters, ctxt)
3041 /* opt_pass methods: */
3042 opt_pass * clone () { return new pass_inline_parameters (m_ctxt); }
3043 virtual unsigned int execute (function *)
3045 return compute_inline_parameters_for_current ();
3048 }; // class pass_inline_parameters
3050 } // anon namespace
3052 gimple_opt_pass *
3053 make_pass_inline_parameters (gcc::context *ctxt)
3055 return new pass_inline_parameters (ctxt);
3059 /* Estimate the benefit of devirtualizing indirect edge IE, given KNOWN_VALS,
3060 KNOWN_CONTEXTS and KNOWN_AGGS. */
3062 static bool
3063 estimate_edge_devirt_benefit (struct cgraph_edge *ie,
3064 int *size, int *time,
3065 vec<tree> known_vals,
3066 vec<ipa_polymorphic_call_context> known_contexts,
3067 vec<ipa_agg_jump_function_p> known_aggs)
3069 tree target;
3070 struct cgraph_node *callee;
3071 struct inline_summary *isummary;
3072 enum availability avail;
3073 bool speculative;
3075 if (!known_vals.exists () && !known_contexts.exists ())
3076 return false;
3077 if (!opt_for_fn (ie->caller->decl, flag_indirect_inlining))
3078 return false;
3080 target = ipa_get_indirect_edge_target (ie, known_vals, known_contexts,
3081 known_aggs, &speculative);
3082 if (!target || speculative)
3083 return false;
3085 /* Account for difference in cost between indirect and direct calls. */
3086 *size -= (eni_size_weights.indirect_call_cost - eni_size_weights.call_cost);
3087 *time -= (eni_time_weights.indirect_call_cost - eni_time_weights.call_cost);
3088 gcc_checking_assert (*time >= 0);
3089 gcc_checking_assert (*size >= 0);
3091 callee = cgraph_node::get (target);
3092 if (!callee || !callee->definition)
3093 return false;
3094 callee = callee->function_symbol (&avail);
3095 if (avail < AVAIL_AVAILABLE)
3096 return false;
3097 isummary = inline_summaries->get (callee);
3098 return isummary->inlinable;
3101 /* Increase SIZE, MIN_SIZE (if non-NULL) and TIME for size and time needed to
3102 handle edge E with probability PROB.
3103 Set HINTS if edge may be devirtualized.
3104 KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS describe context of the call
3105 site. */
3107 static inline void
3108 estimate_edge_size_and_time (struct cgraph_edge *e, int *size, int *min_size,
3109 int *time,
3110 int prob,
3111 vec<tree> known_vals,
3112 vec<ipa_polymorphic_call_context> known_contexts,
3113 vec<ipa_agg_jump_function_p> known_aggs,
3114 inline_hints *hints)
3116 struct inline_edge_summary *es = inline_edge_summary (e);
3117 int call_size = es->call_stmt_size;
3118 int call_time = es->call_stmt_time;
3119 int cur_size;
3120 if (!e->callee
3121 && estimate_edge_devirt_benefit (e, &call_size, &call_time,
3122 known_vals, known_contexts, known_aggs)
3123 && hints && e->maybe_hot_p ())
3124 *hints |= INLINE_HINT_indirect_call;
3125 cur_size = call_size * INLINE_SIZE_SCALE;
3126 *size += cur_size;
3127 if (min_size)
3128 *min_size += cur_size;
3129 *time += apply_probability ((gcov_type) call_time, prob)
3130 * e->frequency * (INLINE_TIME_SCALE / CGRAPH_FREQ_BASE);
3131 if (*time > MAX_TIME * INLINE_TIME_SCALE)
3132 *time = MAX_TIME * INLINE_TIME_SCALE;
3137 /* Increase SIZE, MIN_SIZE and TIME for size and time needed to handle all
3138 calls in NODE. POSSIBLE_TRUTHS, KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
3139 describe context of the call site. */
3141 static void
3142 estimate_calls_size_and_time (struct cgraph_node *node, int *size,
3143 int *min_size, int *time,
3144 inline_hints *hints,
3145 clause_t possible_truths,
3146 vec<tree> known_vals,
3147 vec<ipa_polymorphic_call_context> known_contexts,
3148 vec<ipa_agg_jump_function_p> known_aggs)
3150 struct cgraph_edge *e;
3151 for (e = node->callees; e; e = e->next_callee)
3153 if (inline_edge_summary_vec.length () <= (unsigned) e->uid)
3154 continue;
3156 struct inline_edge_summary *es = inline_edge_summary (e);
3158 /* Do not care about zero sized builtins. */
3159 if (e->inline_failed && !es->call_stmt_size)
3161 gcc_checking_assert (!es->call_stmt_time);
3162 continue;
3164 if (!es->predicate
3165 || evaluate_predicate (es->predicate, possible_truths))
3167 if (e->inline_failed)
3169 /* Predicates of calls shall not use NOT_CHANGED codes,
3170 so we do not need to compute probabilities. */
3171 estimate_edge_size_and_time (e, size,
3172 es->predicate ? NULL : min_size,
3173 time, REG_BR_PROB_BASE,
3174 known_vals, known_contexts,
3175 known_aggs, hints);
3177 else
3178 estimate_calls_size_and_time (e->callee, size, min_size, time,
3179 hints,
3180 possible_truths,
3181 known_vals, known_contexts,
3182 known_aggs);
3185 for (e = node->indirect_calls; e; e = e->next_callee)
3187 if (inline_edge_summary_vec.length () <= (unsigned) e->uid)
3188 continue;
3190 struct inline_edge_summary *es = inline_edge_summary (e);
3191 if (!es->predicate
3192 || evaluate_predicate (es->predicate, possible_truths))
3193 estimate_edge_size_and_time (e, size,
3194 es->predicate ? NULL : min_size,
3195 time, REG_BR_PROB_BASE,
3196 known_vals, known_contexts, known_aggs,
3197 hints);
3202 /* Estimate the size and time needed to execute NODE assuming the
3203 POSSIBLE_TRUTHS clause, and KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
3204 information about NODE's arguments. If non-NULL, also use the probability
3205 information present in the INLINE_PARAM_SUMMARY vector.
3206 Additionally determine the hints implied by the context. Finally compute
3207 the minimal size needed for the call that is independent of the call context
3208 and can be used for fast estimates. Return the values in RET_SIZE,
3209 RET_MIN_SIZE, RET_TIME and RET_HINTS. */
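/* Roughly, the result is the sum of the size/time entries whose predicates
   may still be true under POSSIBLE_TRUTHS, plus the size/time of all calls
   whose predicates are satisfiable, divided by INLINE_SIZE_SCALE and
   INLINE_TIME_SCALE respectively.  */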
3211 static void
3212 estimate_node_size_and_time (struct cgraph_node *node,
3213 clause_t possible_truths,
3214 vec<tree> known_vals,
3215 vec<ipa_polymorphic_call_context> known_contexts,
3216 vec<ipa_agg_jump_function_p> known_aggs,
3217 int *ret_size, int *ret_min_size, int *ret_time,
3218 inline_hints *ret_hints,
3219 vec<inline_param_summary>
3220 inline_param_summary)
3222 struct inline_summary *info = inline_summaries->get (node);
3223 size_time_entry *e;
3224 int size = 0;
3225 int time = 0;
3226 int min_size = 0;
3227 inline_hints hints = 0;
3228 int i;
3230 if (dump_file && (dump_flags & TDF_DETAILS))
3232 bool found = false;
3233 fprintf (dump_file, " Estimating body: %s/%i\n"
3234 " Known to be false: ", node->name (),
3235 node->order);
3237 for (i = predicate_not_inlined_condition;
3238 i < (predicate_first_dynamic_condition
3239 + (int) vec_safe_length (info->conds)); i++)
3240 if (!(possible_truths & (1 << i)))
3242 if (found)
3243 fprintf (dump_file, ", ");
3244 found = true;
3245 dump_condition (dump_file, info->conds, i);
3249 for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
3250 if (evaluate_predicate (&e->predicate, possible_truths))
3252 size += e->size;
3253 gcc_checking_assert (e->time >= 0);
3254 gcc_checking_assert (time >= 0);
3255 if (!inline_param_summary.exists ())
3256 time += e->time;
3257 else
3259 int prob = predicate_probability (info->conds,
3260 &e->predicate,
3261 possible_truths,
3262 inline_param_summary);
3263 gcc_checking_assert (prob >= 0);
3264 gcc_checking_assert (prob <= REG_BR_PROB_BASE);
3265 time += apply_probability ((gcov_type) e->time, prob);
3267 if (time > MAX_TIME * INLINE_TIME_SCALE)
3268 time = MAX_TIME * INLINE_TIME_SCALE;
3269 gcc_checking_assert (time >= 0);
3272 gcc_checking_assert (true_predicate_p (&(*info->entry)[0].predicate));
3273 min_size = (*info->entry)[0].size;
3274 gcc_checking_assert (size >= 0);
3275 gcc_checking_assert (time >= 0);
3277 if (info->loop_iterations
3278 && !evaluate_predicate (info->loop_iterations, possible_truths))
3279 hints |= INLINE_HINT_loop_iterations;
3280 if (info->loop_stride
3281 && !evaluate_predicate (info->loop_stride, possible_truths))
3282 hints |= INLINE_HINT_loop_stride;
3283 if (info->array_index
3284 && !evaluate_predicate (info->array_index, possible_truths))
3285 hints |= INLINE_HINT_array_index;
3286 if (info->scc_no)
3287 hints |= INLINE_HINT_in_scc;
3288 if (DECL_DECLARED_INLINE_P (node->decl))
3289 hints |= INLINE_HINT_declared_inline;
3291 estimate_calls_size_and_time (node, &size, &min_size, &time, &hints, possible_truths,
3292 known_vals, known_contexts, known_aggs);
3293 gcc_checking_assert (size >= 0);
3294 gcc_checking_assert (time >= 0);
3295 time = RDIV (time, INLINE_TIME_SCALE);
3296 size = RDIV (size, INLINE_SIZE_SCALE);
3297 min_size = RDIV (min_size, INLINE_SIZE_SCALE);
3299 if (dump_file && (dump_flags & TDF_DETAILS))
3300 fprintf (dump_file, "\n size:%i time:%i\n", (int) size, (int) time);
3301 if (ret_time)
3302 *ret_time = time;
3303 if (ret_size)
3304 *ret_size = size;
3305 if (ret_min_size)
3306 *ret_min_size = min_size;
3307 if (ret_hints)
3308 *ret_hints = hints;
3309 return;
3313 /* Estimate the size and time needed to execute the callee of EDGE assuming
3314 that parameters known to be constant at the caller of EDGE are propagated.
3315 KNOWN_VALS and KNOWN_CONTEXTS are vectors of assumed known constant values
3316 and types for the parameters. */
3318 void
3319 estimate_ipcp_clone_size_and_time (struct cgraph_node *node,
3320 vec<tree> known_vals,
3321 vec<ipa_polymorphic_call_context>
3322 known_contexts,
3323 vec<ipa_agg_jump_function_p> known_aggs,
3324 int *ret_size, int *ret_time,
3325 inline_hints *hints)
3327 clause_t clause;
3329 clause = evaluate_conditions_for_known_args (node, false, known_vals,
3330 known_aggs);
3331 estimate_node_size_and_time (node, clause, known_vals, known_contexts,
3332 known_aggs, ret_size, NULL, ret_time, hints, vNULL);
3335 /* Translate all conditions from the callee representation into the caller
3336 representation and symbolically evaluate predicate P into a new predicate.
3338 INFO is the inline_summary of the function we are adding the predicate to,
3339 CALLEE_INFO is the summary of the function P is from. OPERAND_MAP is an
3340 array mapping callee formal IDs to caller formal IDs. POSSIBLE_TRUTHS is a
3341 clause of all callee conditions that may be true in the caller context.
3342 TOPLEV_PREDICATE is the predicate under which the callee is executed.
3343 OFFSET_MAP is an array of offsets that need to be added to the conditions;
3344 a negative offset means that conditions relying on values passed by
3345 reference have to be discarded because they might not be preserved (and
3346 should be considered offset zero for other purposes). */
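/* For instance, if a callee condition tests "param 2 == 0" and
   OPERAND_MAP[2] is 5, the remapped condition tests the caller's parameter 5
   against 0; conditions that cannot be remapped are conservatively replaced
   by a true (always possibly satisfied) predicate.  */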
3348 static struct predicate
3349 remap_predicate (struct inline_summary *info,
3350 struct inline_summary *callee_info,
3351 struct predicate *p,
3352 vec<int> operand_map,
3353 vec<int> offset_map,
3354 clause_t possible_truths, struct predicate *toplev_predicate)
3356 int i;
3357 struct predicate out = true_predicate ();
3359 /* True predicate is easy. */
3360 if (true_predicate_p (p))
3361 return *toplev_predicate;
3362 for (i = 0; p->clause[i]; i++)
3364 clause_t clause = p->clause[i];
3365 int cond;
3366 struct predicate clause_predicate = false_predicate ();
3368 gcc_assert (i < MAX_CLAUSES);
3370 for (cond = 0; cond < NUM_CONDITIONS; cond++)
3371 /* Do we have condition we can't disprove? */
3372 if (clause & possible_truths & (1 << cond))
3374 struct predicate cond_predicate;
3375 /* Work out if the condition can translate to predicate in the
3376 inlined function. */
3377 if (cond >= predicate_first_dynamic_condition)
3379 struct condition *c;
3381 c = &(*callee_info->conds)[cond
3383 predicate_first_dynamic_condition];
3384 /* See if we can remap condition operand to caller's operand.
3385 Otherwise give up. */
3386 if (!operand_map.exists ()
3387 || (int) operand_map.length () <= c->operand_num
3388 || operand_map[c->operand_num] == -1
3389 /* TODO: For non-aggregate conditions, adding an offset is
3390 basically an arithmetic jump function processing which
3391 we should support in future. */
3392 || ((!c->agg_contents || !c->by_ref)
3393 && offset_map[c->operand_num] > 0)
3394 || (c->agg_contents && c->by_ref
3395 && offset_map[c->operand_num] < 0))
3396 cond_predicate = true_predicate ();
3397 else
3399 struct agg_position_info ap;
3400 HOST_WIDE_INT offset_delta = offset_map[c->operand_num];
3401 if (offset_delta < 0)
3403 gcc_checking_assert (!c->agg_contents || !c->by_ref);
3404 offset_delta = 0;
3406 gcc_assert (!c->agg_contents
3407 || c->by_ref || offset_delta == 0);
3408 ap.offset = c->offset + offset_delta;
3409 ap.agg_contents = c->agg_contents;
3410 ap.by_ref = c->by_ref;
3411 cond_predicate = add_condition (info,
3412 operand_map[c->operand_num],
3413 c->size, &ap, c->code,
3414 c->val);
3417 /* Fixed conditions remain the same; construct a single
3418 condition predicate. */
3419 else
3421 cond_predicate.clause[0] = 1 << cond;
3422 cond_predicate.clause[1] = 0;
3424 clause_predicate = or_predicates (info->conds, &clause_predicate,
3425 &cond_predicate);
3427 out = and_predicates (info->conds, &out, &clause_predicate);
3429 return and_predicates (info->conds, &out, toplev_predicate);
3433 /* Update summary information of inline clones after inlining.
3434 Compute peak stack usage. */
3436 static void
3437 inline_update_callee_summaries (struct cgraph_node *node, int depth)
3439 struct cgraph_edge *e;
3440 struct inline_summary *callee_info = inline_summaries->get (node);
3441 struct inline_summary *caller_info = inline_summaries->get (node->callers->caller);
3442 HOST_WIDE_INT peak;
3444 callee_info->stack_frame_offset
3445 = caller_info->stack_frame_offset
3446 + caller_info->estimated_self_stack_size;
3447 peak = callee_info->stack_frame_offset
3448 + callee_info->estimated_self_stack_size;
3449 if (inline_summaries->get (node->global.inlined_to)->estimated_stack_size < peak)
3450 inline_summaries->get (node->global.inlined_to)->estimated_stack_size = peak;
3451 ipa_propagate_frequency (node);
3452 for (e = node->callees; e; e = e->next_callee)
3454 if (!e->inline_failed)
3455 inline_update_callee_summaries (e->callee, depth);
3456 inline_edge_summary (e)->loop_depth += depth;
3458 for (e = node->indirect_calls; e; e = e->next_callee)
3459 inline_edge_summary (e)->loop_depth += depth;
3462 /* Update change_prob of EDGE after INLINED_EDGE has been inlined.
3463 When function A is inlined into B and A calls C with a parameter that
3464 changes with probability PROB1, and that parameter is known to be a
3465 passthrough of an argument of B that changes with probability PROB2, the
3466 probability of change is now PROB1*PROB2. */
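/* For example, with REG_BR_PROB_BASE of 10000, PROB1 == 5000 and
   PROB2 == 5000 combine to 2500; when both probabilities are nonzero but
   their product rounds down to zero, we keep 1 so the value is not treated
   as a compile time invariant.  */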
3468 static void
3469 remap_edge_change_prob (struct cgraph_edge *inlined_edge,
3470 struct cgraph_edge *edge)
3472 if (ipa_node_params_sum)
3474 int i;
3475 struct ipa_edge_args *args = IPA_EDGE_REF (edge);
3476 struct inline_edge_summary *es = inline_edge_summary (edge);
3477 struct inline_edge_summary *inlined_es
3478 = inline_edge_summary (inlined_edge);
3480 for (i = 0; i < ipa_get_cs_argument_count (args); i++)
3482 struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i);
3483 if (jfunc->type == IPA_JF_PASS_THROUGH
3484 && (ipa_get_jf_pass_through_formal_id (jfunc)
3485 < (int) inlined_es->param.length ()))
3487 int jf_formal_id = ipa_get_jf_pass_through_formal_id (jfunc);
3488 int prob1 = es->param[i].change_prob;
3489 int prob2 = inlined_es->param[jf_formal_id].change_prob;
3490 int prob = combine_probabilities (prob1, prob2);
3492 if (prob1 && prob2 && !prob)
3493 prob = 1;
3495 es->param[i].change_prob = prob;
3501 /* Update edge summaries of NODE after INLINED_EDGE has been inlined.
3503 Remap predicates of callees of NODE. The rest of the arguments match
3504 remap_predicate.
3506 Also update change probabilities. */
3508 static void
3509 remap_edge_summaries (struct cgraph_edge *inlined_edge,
3510 struct cgraph_node *node,
3511 struct inline_summary *info,
3512 struct inline_summary *callee_info,
3513 vec<int> operand_map,
3514 vec<int> offset_map,
3515 clause_t possible_truths,
3516 struct predicate *toplev_predicate)
3518 struct cgraph_edge *e, *next;
3519 for (e = node->callees; e; e = next)
3521 struct inline_edge_summary *es = inline_edge_summary (e);
3522 struct predicate p;
3523 next = e->next_callee;
3525 if (e->inline_failed)
3527 remap_edge_change_prob (inlined_edge, e);
3529 if (es->predicate)
3531 p = remap_predicate (info, callee_info,
3532 es->predicate, operand_map, offset_map,
3533 possible_truths, toplev_predicate);
3534 edge_set_predicate (e, &p);
3536 else
3537 edge_set_predicate (e, toplev_predicate);
3539 else
3540 remap_edge_summaries (inlined_edge, e->callee, info, callee_info,
3541 operand_map, offset_map, possible_truths,
3542 toplev_predicate);
3544 for (e = node->indirect_calls; e; e = next)
3546 struct inline_edge_summary *es = inline_edge_summary (e);
3547 struct predicate p;
3548 next = e->next_callee;
3550 remap_edge_change_prob (inlined_edge, e);
3551 if (es->predicate)
3553 p = remap_predicate (info, callee_info,
3554 es->predicate, operand_map, offset_map,
3555 possible_truths, toplev_predicate);
3556 edge_set_predicate (e, &p);
3558 else
3559 edge_set_predicate (e, toplev_predicate);
3563 /* Same as remap_predicate, but set result into hint *HINT. */
3565 static void
3566 remap_hint_predicate (struct inline_summary *info,
3567 struct inline_summary *callee_info,
3568 struct predicate **hint,
3569 vec<int> operand_map,
3570 vec<int> offset_map,
3571 clause_t possible_truths,
3572 struct predicate *toplev_predicate)
3574 predicate p;
3576 if (!*hint)
3577 return;
3578 p = remap_predicate (info, callee_info,
3579 *hint,
3580 operand_map, offset_map,
3581 possible_truths, toplev_predicate);
3582 if (!false_predicate_p (&p) && !true_predicate_p (&p))
3584 if (!*hint)
3585 set_hint_predicate (hint, p);
3586 else
3587 **hint = and_predicates (info->conds, *hint, &p);
3591 /* We inlined EDGE. Update summary of the function we inlined into. */
3593 void
3594 inline_merge_summary (struct cgraph_edge *edge)
3596 struct inline_summary *callee_info = inline_summaries->get (edge->callee);
3597 struct cgraph_node *to = (edge->caller->global.inlined_to
3598 ? edge->caller->global.inlined_to : edge->caller);
3599 struct inline_summary *info = inline_summaries->get (to);
3600 clause_t clause = 0; /* not_inline is known to be false. */
3601 size_time_entry *e;
3602 vec<int> operand_map = vNULL;
3603 vec<int> offset_map = vNULL;
3604 int i;
3605 struct predicate toplev_predicate;
3606 struct predicate true_p = true_predicate ();
3607 struct inline_edge_summary *es = inline_edge_summary (edge);
3609 if (es->predicate)
3610 toplev_predicate = *es->predicate;
3611 else
3612 toplev_predicate = true_predicate ();
3614 if (callee_info->conds)
3615 evaluate_properties_for_edge (edge, true, &clause, NULL, NULL, NULL);
3616 if (ipa_node_params_sum && callee_info->conds)
3618 struct ipa_edge_args *args = IPA_EDGE_REF (edge);
3619 int count = ipa_get_cs_argument_count (args);
3620 int i;
3622 if (count)
3624 operand_map.safe_grow_cleared (count);
3625 offset_map.safe_grow_cleared (count);
3627 for (i = 0; i < count; i++)
3629 struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i);
3630 int map = -1;
3632 /* TODO: handle non-NOPs when merging. */
3633 if (jfunc->type == IPA_JF_PASS_THROUGH)
3635 if (ipa_get_jf_pass_through_operation (jfunc) == NOP_EXPR)
3636 map = ipa_get_jf_pass_through_formal_id (jfunc);
3637 if (!ipa_get_jf_pass_through_agg_preserved (jfunc))
3638 offset_map[i] = -1;
3640 else if (jfunc->type == IPA_JF_ANCESTOR)
3642 HOST_WIDE_INT offset = ipa_get_jf_ancestor_offset (jfunc);
3643 if (offset >= 0 && offset < INT_MAX)
3645 map = ipa_get_jf_ancestor_formal_id (jfunc);
3646 if (!ipa_get_jf_ancestor_agg_preserved (jfunc))
3647 offset = -1;
3648 offset_map[i] = offset;
3651 operand_map[i] = map;
3652 gcc_assert (map < ipa_get_param_count (IPA_NODE_REF (to)));
3655 for (i = 0; vec_safe_iterate (callee_info->entry, i, &e); i++)
3657 struct predicate p = remap_predicate (info, callee_info,
3658 &e->predicate, operand_map,
3659 offset_map, clause,
3660 &toplev_predicate);
3661 if (!false_predicate_p (&p))
3663 gcov_type add_time = ((gcov_type) e->time * edge->frequency
3664 + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
3665 int prob = predicate_probability (callee_info->conds,
3666 &e->predicate,
3667 clause, es->param);
3668 add_time = apply_probability ((gcov_type) add_time, prob);
3669 if (add_time > MAX_TIME * INLINE_TIME_SCALE)
3670 add_time = MAX_TIME * INLINE_TIME_SCALE;
3671 if (prob != REG_BR_PROB_BASE
3672 && dump_file && (dump_flags & TDF_DETAILS))
3674 fprintf (dump_file, "\t\tScaling time by probability:%f\n",
3675 (double) prob / REG_BR_PROB_BASE);
3677 account_size_time (info, e->size, add_time, &p);
3680 remap_edge_summaries (edge, edge->callee, info, callee_info, operand_map,
3681 offset_map, clause, &toplev_predicate);
3682 remap_hint_predicate (info, callee_info,
3683 &callee_info->loop_iterations,
3684 operand_map, offset_map, clause, &toplev_predicate);
3685 remap_hint_predicate (info, callee_info,
3686 &callee_info->loop_stride,
3687 operand_map, offset_map, clause, &toplev_predicate);
3688 remap_hint_predicate (info, callee_info,
3689 &callee_info->array_index,
3690 operand_map, offset_map, clause, &toplev_predicate);
3692 inline_update_callee_summaries (edge->callee,
3693 inline_edge_summary (edge)->loop_depth);
3695 /* We do not maintain predicates of inlined edges; free them. */
3696 edge_set_predicate (edge, &true_p);
3697 /* Similarly remove param summaries. */
3698 es->param.release ();
3699 operand_map.release ();
3700 offset_map.release ();
3703 /* For performance reasons inline_merge_summary does not update the overall
3704 size and time. Recompute them here. */
3706 void
3707 inline_update_overall_summary (struct cgraph_node *node)
3709 struct inline_summary *info = inline_summaries->get (node);
3710 size_time_entry *e;
3711 int i;
3713 info->size = 0;
3714 info->time = 0;
3715 for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
3717 info->size += e->size, info->time += e->time;
3718 if (info->time > MAX_TIME * INLINE_TIME_SCALE)
3719 info->time = MAX_TIME * INLINE_TIME_SCALE;
3721 estimate_calls_size_and_time (node, &info->size, &info->min_size,
3722 &info->time, NULL,
3723 ~(clause_t) (1 << predicate_false_condition),
3724 vNULL, vNULL, vNULL);
3725 info->time = (info->time + INLINE_TIME_SCALE / 2) / INLINE_TIME_SCALE;
3726 info->size = (info->size + INLINE_SIZE_SCALE / 2) / INLINE_SIZE_SCALE;
3729 /* Return hints derived from EDGE. */
3731 simple_edge_hints (struct cgraph_edge *edge)
3733 int hints = 0;
3734 struct cgraph_node *to = (edge->caller->global.inlined_to
3735 ? edge->caller->global.inlined_to : edge->caller);
3736 struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
3737 if (inline_summaries->get (to)->scc_no
3738 && inline_summaries->get (to)->scc_no
3739 == inline_summaries->get (callee)->scc_no
3740 && !edge->recursive_p ())
3741 hints |= INLINE_HINT_same_scc;
3743 if (callee->lto_file_data && edge->caller->lto_file_data
3744 && edge->caller->lto_file_data != callee->lto_file_data
3745 && !callee->merged)
3746 hints |= INLINE_HINT_cross_module;
3748 return hints;
3751 /* Estimate the time cost for the caller when inlining EDGE.
3752 Only to be called via estimate_edge_time, which handles the
3753 caching mechanism.
3755 When caching, also update the cache entry. Compute both time and
3756 size, since we always need both metrics eventually. */
3759 do_estimate_edge_time (struct cgraph_edge *edge)
3761 int time;
3762 int size;
3763 inline_hints hints;
3764 struct cgraph_node *callee;
3765 clause_t clause;
3766 vec<tree> known_vals;
3767 vec<ipa_polymorphic_call_context> known_contexts;
3768 vec<ipa_agg_jump_function_p> known_aggs;
3769 struct inline_edge_summary *es = inline_edge_summary (edge);
3770 int min_size;
3772 callee = edge->callee->ultimate_alias_target ();
3774 gcc_checking_assert (edge->inline_failed);
3775 evaluate_properties_for_edge (edge, true,
3776 &clause, &known_vals, &known_contexts,
3777 &known_aggs);
3778 estimate_node_size_and_time (callee, clause, known_vals, known_contexts,
3779 known_aggs, &size, &min_size, &time, &hints, es->param);
3781 /* When we have profile feedback, we can quite safely identify hot
3782 edges and for those we disable size limits. Don't do that when
3783 the probability that the caller will call the callee is low, however, since it
3784 may hurt optimization of the caller's hot path. */
3785 if (edge->count && edge->maybe_hot_p ()
3786 && (edge->count * 2
3787 > (edge->caller->global.inlined_to
3788 ? edge->caller->global.inlined_to->count : edge->caller->count)))
3789 hints |= INLINE_HINT_known_hot;
3791 known_vals.release ();
3792 known_contexts.release ();
3793 known_aggs.release ();
3794 gcc_checking_assert (size >= 0);
3795 gcc_checking_assert (time >= 0);
3797 /* When caching, update the cache entry. */
3798 if (edge_growth_cache.exists ())
3800 inline_summaries->get (edge->callee)->min_size = min_size;
3801 if ((int) edge_growth_cache.length () <= edge->uid)
3802 edge_growth_cache.safe_grow_cleared (symtab->edges_max_uid);
3803 edge_growth_cache[edge->uid].time = time + (time >= 0);
3805 edge_growth_cache[edge->uid].size = size + (size >= 0);
3806 hints |= simple_edge_hints (edge);
3807 edge_growth_cache[edge->uid].hints = hints + 1;
3809 return time;
3813 /* Return estimated callee growth after inlining EDGE.
3814 Only to be called via estimate_edge_size. */
3817 do_estimate_edge_size (struct cgraph_edge *edge)
3819 int size;
3820 struct cgraph_node *callee;
3821 clause_t clause;
3822 vec<tree> known_vals;
3823 vec<ipa_polymorphic_call_context> known_contexts;
3824 vec<ipa_agg_jump_function_p> known_aggs;
3826 /* When we do caching, use do_estimate_edge_time to populate the entry. */
3828 if (edge_growth_cache.exists ())
3830 do_estimate_edge_time (edge);
3831 size = edge_growth_cache[edge->uid].size;
3832 gcc_checking_assert (size);
3833 return size - (size > 0);
3836 callee = edge->callee->ultimate_alias_target ();
3838 /* Early inliner runs without caching, go ahead and do the dirty work. */
3839 gcc_checking_assert (edge->inline_failed);
3840 evaluate_properties_for_edge (edge, true,
3841 &clause, &known_vals, &known_contexts,
3842 &known_aggs);
3843 estimate_node_size_and_time (callee, clause, known_vals, known_contexts,
3844 known_aggs, &size, NULL, NULL, NULL, vNULL);
3845 known_vals.release ();
3846 known_contexts.release ();
3847 known_aggs.release ();
3848 return size;
3852 /* Estimate the inlining hints generated by inlining EDGE into its caller.
3853 Only to be called via estimate_edge_hints.  */
3855 inline_hints
3856 do_estimate_edge_hints (struct cgraph_edge *edge)
3858 inline_hints hints;
3859 struct cgraph_node *callee;
3860 clause_t clause;
3861 vec<tree> known_vals;
3862 vec<ipa_polymorphic_call_context> known_contexts;
3863 vec<ipa_agg_jump_function_p> known_aggs;
3865 /* When we do caching, use do_estimate_edge_time to populate the entry. */
3867 if (edge_growth_cache.exists ())
3869 do_estimate_edge_time (edge);
3870 hints = edge_growth_cache[edge->uid].hints;
3871 gcc_checking_assert (hints);
3872 return hints - 1;
3875 callee = edge->callee->ultimate_alias_target ();
3877 /* The early inliner runs without caching; go ahead and do the dirty work.  */
3878 gcc_checking_assert (edge->inline_failed);
3879 evaluate_properties_for_edge (edge, true,
3880 &clause, &known_vals, &known_contexts,
3881 &known_aggs);
3882 estimate_node_size_and_time (callee, clause, known_vals, known_contexts,
3883 known_aggs, NULL, NULL, NULL, &hints, vNULL);
3884 known_vals.release ();
3885 known_contexts.release ();
3886 known_aggs.release ();
3887 hints |= simple_edge_hints (edge);
3888 return hints;
3892 /* Estimate self time of the function NODE after inlining EDGE. */
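/* If the call is predicted dead (its predicate is false), the node's summary
   time is returned unchanged; otherwise the edge's estimated time is added
   and the result clamped to [0, MAX_TIME].  */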
3894 int
3895 estimate_time_after_inlining (struct cgraph_node *node,
3896 struct cgraph_edge *edge)
3898 struct inline_edge_summary *es = inline_edge_summary (edge);
3899 if (!es->predicate || !false_predicate_p (es->predicate))
3901 gcov_type time =
3902 inline_summaries->get (node)->time + estimate_edge_time (edge);
3903 if (time < 0)
3904 time = 0;
3905 if (time > MAX_TIME)
3906 time = MAX_TIME;
3907 return time;
3909 return inline_summaries->get (node)->time;
3913 /* Estimate the size of NODE after inlining EDGE, which should be an
3914 edge either to NODE or to a call inlined into NODE.  */
3916 int
3917 estimate_size_after_inlining (struct cgraph_node *node,
3918 struct cgraph_edge *edge)
3920 struct inline_edge_summary *es = inline_edge_summary (edge);
3921 if (!es->predicate || !false_predicate_p (es->predicate))
3923 int size = inline_summaries->get (node)->size + estimate_edge_growth (edge);
3924 gcc_assert (size >= 0);
3925 return size;
3927 return inline_summaries->get (node)->size;
3931 struct growth_data
3933 struct cgraph_node *node;
3934 bool self_recursive;
3935 bool uninlinable;
3936 int growth;
3940 /* Worker for estimate_growth.  Collect growth for all callers.  */
3942 static bool
3943 do_estimate_growth_1 (struct cgraph_node *node, void *data)
3945 struct cgraph_edge *e;
3946 struct growth_data *d = (struct growth_data *) data;
3948 for (e = node->callers; e; e = e->next_caller)
3950 gcc_checking_assert (e->inline_failed);
3952 if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
3954 d->uninlinable = true;
3955 continue;
3958 if (e->recursive_p ())
3960 d->self_recursive = true;
3961 continue;
3963 d->growth += estimate_edge_growth (e);
3965 return false;
3969 /* Estimate the growth caused by inlining NODE into all callers.  */
3971 int
3972 estimate_growth (struct cgraph_node *node)
3974 struct growth_data d = { node, false, false, 0 };
3975 struct inline_summary *info = inline_summaries->get (node);
3977 node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true);
3979 /* For self-recursive functions the growth estimate really should be
3980 infinity.  We don't want to return very large values because the growth
3981 enters the badness computation as a fraction.  Be sure not to
3982 return zero or negative growth.  */
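/* E.g. a self-recursive function of size 50 whose callers accumulated a
   growth of 30 reports 50: never less than its own body size.  */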
3983 if (d.self_recursive)
3984 d.growth = d.growth < info->size ? info->size : d.growth;
3985 else if (DECL_EXTERNAL (node->decl) || d.uninlinable)
3986 ;
3987 else
3989 if (node->will_be_removed_from_program_if_no_direct_calls_p ())
3990 d.growth -= info->size;
3991 /* COMDAT functions are very often not shared across multiple units
3992 since they come from various template instantiations.
3993 Take this into account. */
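/* Roughly, the saving is the body size scaled by the probability that this
   unit's copy is the one that actually goes away; e.g. with size 200 and a
   sharing probability of 30%, growth is reduced by (200 * 70 + 50) / 100 = 140.  */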
3994 else if (DECL_COMDAT (node->decl)
3995 && node->can_remove_if_no_direct_calls_p ())
3996 d.growth -= (info->size
3997 * (100 - PARAM_VALUE (PARAM_COMDAT_SHARING_PROBABILITY))
3998 + 50) / 100;
4001 return d.growth;
4004 /* Return true if NODE or its aliases have more callers than *MAX_CALLERS allows, have an uninlinable caller, or cannot be removed from the program.  */
4006 static bool
4007 check_callers (cgraph_node *node, int *max_callers)
4009 ipa_ref *ref;
4011 if (!node->can_remove_if_no_direct_calls_and_refs_p ())
4012 return true;
4014 for (cgraph_edge *e = node->callers; e; e = e->next_caller)
4016 (*max_callers)--;
4017 if (!*max_callers
4018 || cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
4019 return true;
4022 FOR_EACH_ALIAS (node, ref)
4023 if (check_callers (dyn_cast <cgraph_node *> (ref->referring), max_callers))
4024 return true;
4026 return false;
4030 /* Cheaply estimate whether the growth of NODE is likely positive, knowing
4031 EDGE_GROWTH of one particular edge.
4032 We assume that most of the other edges will have similar growth
4033 and skip the full computation if there are too many callers.  */
4035 bool
4036 growth_likely_positive (struct cgraph_node *node,
4037 int edge_growth)
4039 int max_callers;
4040 struct cgraph_edge *e;
4041 gcc_checking_assert (edge_growth > 0);
4043 /* First quickly check if NODE is removable at all. */
4044 if (DECL_EXTERNAL (node->decl))
4045 return true;
4046 if (!node->can_remove_if_no_direct_calls_and_refs_p ()
4047 || node->address_taken)
4048 return true;
4050 max_callers = inline_summaries->get (node)->size * 4 / edge_growth + 2;
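/* Rough budget: if every caller grew by about EDGE_GROWTH, around
   size / edge_growth callers would already offset the saving from removing
   the offline body; the factor of 4 and the + 2 add slack.  */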
4052 for (e = node->callers; e; e = e->next_caller)
4054 max_callers--;
4055 if (!max_callers
4056 || cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
4057 return true;
4060 ipa_ref *ref;
4061 FOR_EACH_ALIAS (node, ref)
4062 if (check_callers (dyn_cast <cgraph_node *> (ref->referring), &max_callers))
4063 return true;
4065 /* Unlike for functions called once, we play it unsafe with
4066 COMDATs.  We can allow that because we know the functions
4067 under consideration are small (and thus the risk is small) and,
4068 moreover, the growth estimate already accounts for the fact that COMDAT
4069 functions may or may not disappear when eliminated from the
4070 current unit.  With good probability, making the aggressive
4071 choice in all units is going to make the overall program
4072 smaller.  */
4073 if (DECL_COMDAT (node->decl))
4075 if (!node->can_remove_if_no_direct_calls_p ())
4076 return true;
4078 else if (!node->will_be_removed_from_program_if_no_direct_calls_p ())
4079 return true;
4081 return estimate_growth (node) > 0;
4085 /* Perform the intraprocedural analysis of NODE that is required to
4086 inline indirect calls.  */
4088 static void
4089 inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
4091 ipa_analyze_node (node);
4092 if (dump_file && (dump_flags & TDF_DETAILS))
4094 ipa_print_node_params (dump_file, node);
4095 ipa_print_node_jump_functions (dump_file, node);
4100 /* Analyze NODE and compute its inline summary (body size, time, predicates).  */
4102 void
4103 inline_analyze_function (struct cgraph_node *node)
4105 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
4107 if (dump_file)
4108 fprintf (dump_file, "\nAnalyzing function: %s/%u\n",
4109 node->name (), node->order);
4110 if (opt_for_fn (node->decl, optimize) && !node->thunk.thunk_p)
4111 inline_indirect_intraprocedural_analysis (node);
4112 compute_inline_parameters (node, false);
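/* Calls inside a function that is not being optimized can never be inlined;
   mark every direct and indirect edge accordingly so the inliner does not
   try.  */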
4113 if (!optimize)
4115 struct cgraph_edge *e;
4116 for (e = node->callees; e; e = e->next_callee)
4118 if (e->inline_failed == CIF_FUNCTION_NOT_CONSIDERED)
4119 e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
4120 e->call_stmt_cannot_inline_p = true;
4122 for (e = node->indirect_calls; e; e = e->next_callee)
4124 if (e->inline_failed == CIF_FUNCTION_NOT_CONSIDERED)
4125 e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
4126 e->call_stmt_cannot_inline_p = true;
4130 pop_cfun ();
4134 /* Called when a new function is inserted into the callgraph late.  */
4136 void
4137 inline_summary_t::insert (struct cgraph_node *node, inline_summary *)
4139 inline_analyze_function (node);
4142 /* Compute inline summaries for all functions in the callgraph.  */
4144 void
4145 inline_generate_summary (void)
4147 struct cgraph_node *node;
4149 /* When not optimizing, do not bother to analyze. Inlining is still done
4150 because edge redirection needs to happen there. */
4151 if (!optimize && !flag_generate_lto && !flag_generate_offload && !flag_wpa)
4152 return;
4154 if (!inline_summaries)
4155 inline_summaries = (inline_summary_t*) inline_summary_t::create_ggc (symtab);
4157 inline_summaries->enable_insertion_hook ();
4159 ipa_register_cgraph_hooks ();
4160 inline_free_summary ();
4162 FOR_EACH_DEFINED_FUNCTION (node)
4163 if (!node->alias)
4164 inline_analyze_function (node);
4168 /* Read predicate from IB. */
4170 static struct predicate
4171 read_predicate (struct lto_input_block *ib)
4173 struct predicate out;
4174 clause_t clause;
4175 int k = 0;
4177 do
4178 {
4179 gcc_assert (k <= MAX_CLAUSES);
4180 clause = out.clause[k++] = streamer_read_uhwi (ib);
4181 }
4182 while (clause);
4184 /* Zero-initialize the remaining clauses in OUT. */
4185 while (k <= MAX_CLAUSES)
4186 out.clause[k++] = 0;
4188 return out;
4192 /* Read the inline summary for edge E from IB.  */
4194 static void
4195 read_inline_edge_summary (struct lto_input_block *ib, struct cgraph_edge *e)
4197 struct inline_edge_summary *es = inline_edge_summary (e);
4198 struct predicate p;
4199 int length, i;
4201 es->call_stmt_size = streamer_read_uhwi (ib);
4202 es->call_stmt_time = streamer_read_uhwi (ib);
4203 es->loop_depth = streamer_read_uhwi (ib);
4204 p = read_predicate (ib);
4205 edge_set_predicate (e, &p);
4206 length = streamer_read_uhwi (ib);
4207 if (length)
4209 es->param.safe_grow_cleared (length);
4210 for (i = 0; i < length; i++)
4211 es->param[i].change_prob = streamer_read_uhwi (ib);
4216 /* Stream in inline summaries from the section. */
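/* Each per-node record, as read below, consists of the symtab encoder index,
   the estimated stack size, self size and self time, a bitpack of flags, the
   vector of conditions, the vector of size/time entries with their
   predicates, the three hint predicates and one edge summary per callee and
   per indirect call.  */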
4218 static void
4219 inline_read_section (struct lto_file_decl_data *file_data, const char *data,
4220 size_t len)
4222 const struct lto_function_header *header =
4223 (const struct lto_function_header *) data;
4224 const int cfg_offset = sizeof (struct lto_function_header);
4225 const int main_offset = cfg_offset + header->cfg_size;
4226 const int string_offset = main_offset + header->main_size;
4227 struct data_in *data_in;
4228 unsigned int i, count2, j;
4229 unsigned int f_count;
4231 lto_input_block ib ((const char *) data + main_offset, header->main_size,
4232 file_data->mode_table);
4234 data_in =
4235 lto_data_in_create (file_data, (const char *) data + string_offset,
4236 header->string_size, vNULL);
4237 f_count = streamer_read_uhwi (&ib);
4238 for (i = 0; i < f_count; i++)
4240 unsigned int index;
4241 struct cgraph_node *node;
4242 struct inline_summary *info;
4243 lto_symtab_encoder_t encoder;
4244 struct bitpack_d bp;
4245 struct cgraph_edge *e;
4246 predicate p;
4248 index = streamer_read_uhwi (&ib);
4249 encoder = file_data->symtab_node_encoder;
4250 node = dyn_cast<cgraph_node *> (lto_symtab_encoder_deref (encoder,
4251 index));
4252 info = inline_summaries->get (node);
4254 info->estimated_stack_size
4255 = info->estimated_self_stack_size = streamer_read_uhwi (&ib);
4256 info->size = info->self_size = streamer_read_uhwi (&ib);
4257 info->time = info->self_time = streamer_read_uhwi (&ib);
4259 bp = streamer_read_bitpack (&ib);
4260 info->inlinable = bp_unpack_value (&bp, 1);
4261 info->contains_cilk_spawn = bp_unpack_value (&bp, 1);
4263 count2 = streamer_read_uhwi (&ib);
4264 gcc_assert (!info->conds);
4265 for (j = 0; j < count2; j++)
4267 struct condition c;
4268 c.operand_num = streamer_read_uhwi (&ib);
4269 c.size = streamer_read_uhwi (&ib);
4270 c.code = (enum tree_code) streamer_read_uhwi (&ib);
4271 c.val = stream_read_tree (&ib, data_in);
4272 bp = streamer_read_bitpack (&ib);
4273 c.agg_contents = bp_unpack_value (&bp, 1);
4274 c.by_ref = bp_unpack_value (&bp, 1);
4275 if (c.agg_contents)
4276 c.offset = streamer_read_uhwi (&ib);
4277 vec_safe_push (info->conds, c);
4279 count2 = streamer_read_uhwi (&ib);
4280 gcc_assert (!info->entry);
4281 for (j = 0; j < count2; j++)
4283 struct size_time_entry e;
4285 e.size = streamer_read_uhwi (&ib);
4286 e.time = streamer_read_uhwi (&ib);
4287 e.predicate = read_predicate (&ib);
4289 vec_safe_push (info->entry, e);
4292 p = read_predicate (&ib);
4293 set_hint_predicate (&info->loop_iterations, p);
4294 p = read_predicate (&ib);
4295 set_hint_predicate (&info->loop_stride, p);
4296 p = read_predicate (&ib);
4297 set_hint_predicate (&info->array_index, p);
4298 for (e = node->callees; e; e = e->next_callee)
4299 read_inline_edge_summary (&ib, e);
4300 for (e = node->indirect_calls; e; e = e->next_callee)
4301 read_inline_edge_summary (&ib, e);
4304 lto_free_section_data (file_data, LTO_section_inline_summary, NULL, data,
4305 len);
4306 lto_data_in_delete (data_in);
4310 /* Read the inline summary.  Jump functions are shared among ipa-cp
4311 and the inliner, so when ipa-cp is active, we don't need to read them
4312 a second time here.  */
4314 void
4315 inline_read_summary (void)
4317 struct lto_file_decl_data **file_data_vec = lto_get_file_decl_data ();
4318 struct lto_file_decl_data *file_data;
4319 unsigned int j = 0;
4321 inline_summary_alloc ();
4323 while ((file_data = file_data_vec[j++]))
4325 size_t len;
4326 const char *data = lto_get_section_data (file_data,
4327 LTO_section_inline_summary,
4328 NULL, &len);
4329 if (data)
4330 inline_read_section (file_data, data, len);
4331 else
4332 /* Fatal error here. We do not want to support compiling ltrans units
4333 with a different version of the compiler or different flags than the
4334 WPA unit, so this should never happen.  */
4335 fatal_error (input_location,
4336 "ipa inline summary is missing in input file");
4338 if (optimize)
4340 ipa_register_cgraph_hooks ();
4341 if (!flag_ipa_cp)
4342 ipa_prop_read_jump_functions ();
4345 gcc_assert (inline_summaries);
4346 inline_summaries->enable_insertion_hook ();
4350 /* Write predicate P to OB. */
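/* The stream format is simply the non-zero clauses of P followed by a zero
   terminator; read_predicate above relies on that terminator.  */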
4352 static void
4353 write_predicate (struct output_block *ob, struct predicate *p)
4355 int j;
4356 if (p)
4357 for (j = 0; p->clause[j]; j++)
4359 gcc_assert (j < MAX_CLAUSES);
4360 streamer_write_uhwi (ob, p->clause[j]);
4362 streamer_write_uhwi (ob, 0);
4366 /* Write inline summary for edge E to OB. */
4368 static void
4369 write_inline_edge_summary (struct output_block *ob, struct cgraph_edge *e)
4371 struct inline_edge_summary *es = inline_edge_summary (e);
4372 int i;
4374 streamer_write_uhwi (ob, es->call_stmt_size);
4375 streamer_write_uhwi (ob, es->call_stmt_time);
4376 streamer_write_uhwi (ob, es->loop_depth);
4377 write_predicate (ob, es->predicate);
4378 streamer_write_uhwi (ob, es->param.length ());
4379 for (i = 0; i < (int) es->param.length (); i++)
4380 streamer_write_uhwi (ob, es->param[i].change_prob);
4384 /* Write inline summaries for all defined, non-alias functions in the encoder.
4385 Jump functions are shared among ipa-cp and the inliner, so when ipa-cp is
4386 active, we don't need to write them twice.  */
4388 void
4389 inline_write_summary (void)
4391 struct cgraph_node *node;
4392 struct output_block *ob = create_output_block (LTO_section_inline_summary);
4393 lto_symtab_encoder_t encoder = ob->decl_state->symtab_node_encoder;
4394 unsigned int count = 0;
4395 int i;
4397 for (i = 0; i < lto_symtab_encoder_size (encoder); i++)
4399 symtab_node *snode = lto_symtab_encoder_deref (encoder, i);
4400 cgraph_node *cnode = dyn_cast <cgraph_node *> (snode);
4401 if (cnode && cnode->definition && !cnode->alias)
4402 count++;
4404 streamer_write_uhwi (ob, count);
4406 for (i = 0; i < lto_symtab_encoder_size (encoder); i++)
4408 symtab_node *snode = lto_symtab_encoder_deref (encoder, i);
4409 cgraph_node *cnode = dyn_cast <cgraph_node *> (snode);
4410 if (cnode && (node = cnode)->definition && !node->alias)
4412 struct inline_summary *info = inline_summaries->get (node);
4413 struct bitpack_d bp;
4414 struct cgraph_edge *edge;
4415 int i;
4416 size_time_entry *e;
4417 struct condition *c;
4419 streamer_write_uhwi (ob,
4420 lto_symtab_encoder_encode (encoder,
4422 node));
4423 streamer_write_hwi (ob, info->estimated_self_stack_size);
4424 streamer_write_hwi (ob, info->self_size);
4425 streamer_write_hwi (ob, info->self_time);
4426 bp = bitpack_create (ob->main_stream);
4427 bp_pack_value (&bp, info->inlinable, 1);
4428 bp_pack_value (&bp, info->contains_cilk_spawn, 1);
4429 streamer_write_bitpack (&bp);
4430 streamer_write_uhwi (ob, vec_safe_length (info->conds));
4431 for (i = 0; vec_safe_iterate (info->conds, i, &c); i++)
4433 streamer_write_uhwi (ob, c->operand_num);
4434 streamer_write_uhwi (ob, c->size);
4435 streamer_write_uhwi (ob, c->code);
4436 stream_write_tree (ob, c->val, true);
4437 bp = bitpack_create (ob->main_stream);
4438 bp_pack_value (&bp, c->agg_contents, 1);
4439 bp_pack_value (&bp, c->by_ref, 1);
4440 streamer_write_bitpack (&bp);
4441 if (c->agg_contents)
4442 streamer_write_uhwi (ob, c->offset);
4444 streamer_write_uhwi (ob, vec_safe_length (info->entry));
4445 for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
4447 streamer_write_uhwi (ob, e->size);
4448 streamer_write_uhwi (ob, e->time);
4449 write_predicate (ob, &e->predicate);
4451 write_predicate (ob, info->loop_iterations);
4452 write_predicate (ob, info->loop_stride);
4453 write_predicate (ob, info->array_index);
4454 for (edge = node->callees; edge; edge = edge->next_callee)
4455 write_inline_edge_summary (ob, edge);
4456 for (edge = node->indirect_calls; edge; edge = edge->next_callee)
4457 write_inline_edge_summary (ob, edge);
4460 streamer_write_char_stream (ob->main_stream, 0);
4461 produce_asm (ob, NULL);
4462 destroy_output_block (ob);
4464 if (optimize && !flag_ipa_cp)
4465 ipa_prop_write_jump_functions ();
4469 /* Release inline summary. */
4471 void
4472 inline_free_summary (void)
4474 struct cgraph_node *node;
4475 if (edge_removal_hook_holder)
4476 symtab->remove_edge_removal_hook (edge_removal_hook_holder);
4477 edge_removal_hook_holder = NULL;
4478 if (edge_duplication_hook_holder)
4479 symtab->remove_edge_duplication_hook (edge_duplication_hook_holder);
4480 edge_duplication_hook_holder = NULL;
4481 if (!inline_edge_summary_vec.exists ())
4482 return;
4483 FOR_EACH_DEFINED_FUNCTION (node)
4484 if (!node->alias)
4485 reset_inline_summary (node, inline_summaries->get (node));
4486 inline_summaries->release ();
4487 inline_summaries = NULL;
4488 inline_edge_summary_vec.release ();
4489 if (edge_predicate_pool)
4490 free_alloc_pool (edge_predicate_pool);
4491 edge_predicate_pool = 0;