/* Extracted from official-gcc.git: gcc/ipa-fnsummary.c
   (blob 504a2d1ce55a32c254cf96ebabe5266e6ce59a44, "Daily bump." revision).  */
/* Function summary pass.
   Copyright (C) 2003-2018 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Analysis of function bodies used by inter-procedural passes

   We estimate for each function
   - function body size and size after specializing into given context
   - average function execution time in a given context
   - function frame size
   For each call
   - call statement size, time and how often the parameters change

   ipa_fn_summary data structures store above information locally (i.e.
   parameters of the function itself) and globally (i.e. parameters of
   the function created by applying all the inline decisions already
   present in the callgraph).

   We provide access to the ipa_fn_summary data structure and
   basic logic updating the parameters when inlining is performed.

   The summaries are context sensitive.  Context means
     1) partial assignment of known constant values of operands
     2) whether function is inlined into the call or not.
   It is easy to add more variants.  To represent function size and time
   that depends on context (i.e. it is known to be optimized away when
   context is known either by inlining or from IP-CP and cloning),
   we use predicates.

   estimate_edge_size_and_time can be used to query
   function size/time in the given context.  ipa_merge_fn_summary_after_inlining merges
   properties of caller and callee after inlining.

   Finally pass_inline_parameters is exported.  This is used to drive
   computation of function parameters used by the early inliner.  IPA
   inliner performs analysis via its analyze_function method.  */
54 #include "config.h"
55 #include "system.h"
56 #include "coretypes.h"
57 #include "backend.h"
58 #include "tree.h"
59 #include "gimple.h"
60 #include "alloc-pool.h"
61 #include "tree-pass.h"
62 #include "ssa.h"
63 #include "tree-streamer.h"
64 #include "cgraph.h"
65 #include "diagnostic.h"
66 #include "fold-const.h"
67 #include "print-tree.h"
68 #include "tree-inline.h"
69 #include "gimple-pretty-print.h"
70 #include "params.h"
71 #include "cfganal.h"
72 #include "gimple-iterator.h"
73 #include "tree-cfg.h"
74 #include "tree-ssa-loop-niter.h"
75 #include "tree-ssa-loop.h"
76 #include "symbol-summary.h"
77 #include "ipa-prop.h"
78 #include "ipa-fnsummary.h"
79 #include "cfgloop.h"
80 #include "tree-scalar-evolution.h"
81 #include "ipa-utils.h"
82 #include "cfgexpand.h"
83 #include "gimplify.h"
84 #include "stringpool.h"
85 #include "attribs.h"
87 /* Summaries. */
88 function_summary <ipa_fn_summary *> *ipa_fn_summaries;
89 call_summary <ipa_call_summary *> *ipa_call_summaries;
91 /* Edge predicates goes here. */
92 static object_allocator<predicate> edge_predicate_pool ("edge predicates");
95 /* Dump IPA hints. */
96 void
97 ipa_dump_hints (FILE *f, ipa_hints hints)
99 if (!hints)
100 return;
101 fprintf (f, "IPA hints:");
102 if (hints & INLINE_HINT_indirect_call)
104 hints &= ~INLINE_HINT_indirect_call;
105 fprintf (f, " indirect_call");
107 if (hints & INLINE_HINT_loop_iterations)
109 hints &= ~INLINE_HINT_loop_iterations;
110 fprintf (f, " loop_iterations");
112 if (hints & INLINE_HINT_loop_stride)
114 hints &= ~INLINE_HINT_loop_stride;
115 fprintf (f, " loop_stride");
117 if (hints & INLINE_HINT_same_scc)
119 hints &= ~INLINE_HINT_same_scc;
120 fprintf (f, " same_scc");
122 if (hints & INLINE_HINT_in_scc)
124 hints &= ~INLINE_HINT_in_scc;
125 fprintf (f, " in_scc");
127 if (hints & INLINE_HINT_cross_module)
129 hints &= ~INLINE_HINT_cross_module;
130 fprintf (f, " cross_module");
132 if (hints & INLINE_HINT_declared_inline)
134 hints &= ~INLINE_HINT_declared_inline;
135 fprintf (f, " declared_inline");
137 if (hints & INLINE_HINT_array_index)
139 hints &= ~INLINE_HINT_array_index;
140 fprintf (f, " array_index");
142 if (hints & INLINE_HINT_known_hot)
144 hints &= ~INLINE_HINT_known_hot;
145 fprintf (f, " known_hot");
147 gcc_assert (!hints);
151 /* Record SIZE and TIME to SUMMARY.
152 The accounted code will be executed when EXEC_PRED is true.
153 When NONCONST_PRED is false the code will evaulate to constant and
154 will get optimized out in specialized clones of the function. */
156 void
157 ipa_fn_summary::account_size_time (int size, sreal time,
158 const predicate &exec_pred,
159 const predicate &nonconst_pred_in)
161 size_time_entry *e;
162 bool found = false;
163 int i;
164 predicate nonconst_pred;
166 if (exec_pred == false)
167 return;
169 nonconst_pred = nonconst_pred_in & exec_pred;
171 if (nonconst_pred == false)
172 return;
174 /* We need to create initial empty unconitional clause, but otherwie
175 we don't need to account empty times and sizes. */
176 if (!size && time == 0 && size_time_table)
177 return;
179 gcc_assert (time >= 0);
181 for (i = 0; vec_safe_iterate (size_time_table, i, &e); i++)
182 if (e->exec_predicate == exec_pred
183 && e->nonconst_predicate == nonconst_pred)
185 found = true;
186 break;
188 if (i == 256)
190 i = 0;
191 found = true;
192 e = &(*size_time_table)[0];
193 if (dump_file && (dump_flags & TDF_DETAILS))
194 fprintf (dump_file,
195 "\t\tReached limit on number of entries, "
196 "ignoring the predicate.");
198 if (dump_file && (dump_flags & TDF_DETAILS) && (time != 0 || size))
200 fprintf (dump_file,
201 "\t\tAccounting size:%3.2f, time:%3.2f on %spredicate exec:",
202 ((double) size) / ipa_fn_summary::size_scale,
203 (time.to_double ()), found ? "" : "new ");
204 exec_pred.dump (dump_file, conds, 0);
205 if (exec_pred != nonconst_pred)
207 fprintf (dump_file, " nonconst:");
208 nonconst_pred.dump (dump_file, conds);
210 else
211 fprintf (dump_file, "\n");
213 if (!found)
215 struct size_time_entry new_entry;
216 new_entry.size = size;
217 new_entry.time = time;
218 new_entry.exec_predicate = exec_pred;
219 new_entry.nonconst_predicate = nonconst_pred;
220 vec_safe_push (size_time_table, new_entry);
222 else
224 e->size += size;
225 e->time += time;
229 /* We proved E to be unreachable, redirect it to __bultin_unreachable. */
231 static struct cgraph_edge *
232 redirect_to_unreachable (struct cgraph_edge *e)
234 struct cgraph_node *callee = !e->inline_failed ? e->callee : NULL;
235 struct cgraph_node *target = cgraph_node::get_create
236 (builtin_decl_implicit (BUILT_IN_UNREACHABLE));
238 if (e->speculative)
239 e = e->resolve_speculation (target->decl);
240 else if (!e->callee)
241 e->make_direct (target);
242 else
243 e->redirect_callee (target);
244 struct ipa_call_summary *es = ipa_call_summaries->get_create (e);
245 e->inline_failed = CIF_UNREACHABLE;
246 e->count = profile_count::zero ();
247 es->call_stmt_size = 0;
248 es->call_stmt_time = 0;
249 if (callee)
250 callee->remove_symbol_and_inline_clones ();
251 return e;
254 /* Set predicate for edge E. */
256 static void
257 edge_set_predicate (struct cgraph_edge *e, predicate *predicate)
259 /* If the edge is determined to be never executed, redirect it
260 to BUILTIN_UNREACHABLE to make it clear to IPA passes the call will
261 be optimized out. */
262 if (predicate && *predicate == false
263 /* When handling speculative edges, we need to do the redirection
264 just once. Do it always on the direct edge, so we do not
265 attempt to resolve speculation while duplicating the edge. */
266 && (!e->speculative || e->callee))
267 e = redirect_to_unreachable (e);
269 struct ipa_call_summary *es = ipa_call_summaries->get_create (e);
270 if (predicate && *predicate != true)
272 if (!es->predicate)
273 es->predicate = edge_predicate_pool.allocate ();
274 *es->predicate = *predicate;
276 else
278 if (es->predicate)
279 edge_predicate_pool.remove (es->predicate);
280 es->predicate = NULL;
284 /* Set predicate for hint *P. */
286 static void
287 set_hint_predicate (predicate **p, predicate new_predicate)
289 if (new_predicate == false || new_predicate == true)
291 if (*p)
292 edge_predicate_pool.remove (*p);
293 *p = NULL;
295 else
297 if (!*p)
298 *p = edge_predicate_pool.allocate ();
299 **p = new_predicate;
304 /* Compute what conditions may or may not hold given invormation about
305 parameters. RET_CLAUSE returns truths that may hold in a specialized copy,
306 whie RET_NONSPEC_CLAUSE returns truths that may hold in an nonspecialized
307 copy when called in a given context. It is a bitmask of conditions. Bit
308 0 means that condition is known to be false, while bit 1 means that condition
309 may or may not be true. These differs - for example NOT_INLINED condition
310 is always false in the second and also builtin_constant_p tests can not use
311 the fact that parameter is indeed a constant.
313 KNOWN_VALS is partial mapping of parameters of NODE to constant values.
314 KNOWN_AGGS is a vector of aggreggate jump functions for each parameter.
315 Return clause of possible truths. When INLINE_P is true, assume that we are
316 inlining.
318 ERROR_MARK means compile time invariant. */
320 static void
321 evaluate_conditions_for_known_args (struct cgraph_node *node,
322 bool inline_p,
323 vec<tree> known_vals,
324 vec<ipa_agg_jump_function_p>
325 known_aggs,
326 clause_t *ret_clause,
327 clause_t *ret_nonspec_clause)
329 clause_t clause = inline_p ? 0 : 1 << predicate::not_inlined_condition;
330 clause_t nonspec_clause = 1 << predicate::not_inlined_condition;
331 struct ipa_fn_summary *info = ipa_fn_summaries->get_create (node);
332 int i;
333 struct condition *c;
335 for (i = 0; vec_safe_iterate (info->conds, i, &c); i++)
337 tree val;
338 tree res;
340 /* We allow call stmt to have fewer arguments than the callee function
341 (especially for K&R style programs). So bound check here (we assume
342 known_aggs vector, if non-NULL, has the same length as
343 known_vals). */
344 gcc_checking_assert (!known_aggs.exists ()
345 || (known_vals.length () == known_aggs.length ()));
346 if (c->operand_num >= (int) known_vals.length ())
348 clause |= 1 << (i + predicate::first_dynamic_condition);
349 nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
350 continue;
353 if (c->agg_contents)
355 struct ipa_agg_jump_function *agg;
357 if (c->code == predicate::changed
358 && !c->by_ref
359 && (known_vals[c->operand_num] == error_mark_node))
360 continue;
362 if (known_aggs.exists ())
364 agg = known_aggs[c->operand_num];
365 val = ipa_find_agg_cst_for_param (agg, known_vals[c->operand_num],
366 c->offset, c->by_ref);
368 else
369 val = NULL_TREE;
371 else
373 val = known_vals[c->operand_num];
374 if (val == error_mark_node && c->code != predicate::changed)
375 val = NULL_TREE;
378 if (!val)
380 clause |= 1 << (i + predicate::first_dynamic_condition);
381 nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
382 continue;
384 if (c->code == predicate::changed)
386 nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
387 continue;
390 if (tree_to_shwi (TYPE_SIZE (TREE_TYPE (val))) != c->size)
392 clause |= 1 << (i + predicate::first_dynamic_condition);
393 nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
394 continue;
396 if (c->code == predicate::is_not_constant)
398 nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
399 continue;
402 val = fold_unary (VIEW_CONVERT_EXPR, TREE_TYPE (c->val), val);
403 res = val
404 ? fold_binary_to_constant (c->code, boolean_type_node, val, c->val)
405 : NULL;
407 if (res && integer_zerop (res))
408 continue;
410 clause |= 1 << (i + predicate::first_dynamic_condition);
411 nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
413 *ret_clause = clause;
414 if (ret_nonspec_clause)
415 *ret_nonspec_clause = nonspec_clause;
419 /* Work out what conditions might be true at invocation of E. */
421 void
422 evaluate_properties_for_edge (struct cgraph_edge *e, bool inline_p,
423 clause_t *clause_ptr,
424 clause_t *nonspec_clause_ptr,
425 vec<tree> *known_vals_ptr,
426 vec<ipa_polymorphic_call_context>
427 *known_contexts_ptr,
428 vec<ipa_agg_jump_function_p> *known_aggs_ptr)
430 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
431 struct ipa_fn_summary *info = ipa_fn_summaries->get_create (callee);
432 vec<tree> known_vals = vNULL;
433 vec<ipa_agg_jump_function_p> known_aggs = vNULL;
435 if (clause_ptr)
436 *clause_ptr = inline_p ? 0 : 1 << predicate::not_inlined_condition;
437 if (known_vals_ptr)
438 known_vals_ptr->create (0);
439 if (known_contexts_ptr)
440 known_contexts_ptr->create (0);
442 if (ipa_node_params_sum
443 && !e->call_stmt_cannot_inline_p
444 && ((clause_ptr && info->conds) || known_vals_ptr || known_contexts_ptr))
446 struct ipa_node_params *caller_parms_info, *callee_pi;
447 struct ipa_edge_args *args = IPA_EDGE_REF (e);
448 struct ipa_call_summary *es = ipa_call_summaries->get_create (e);
449 int i, count = ipa_get_cs_argument_count (args);
451 if (e->caller->global.inlined_to)
452 caller_parms_info = IPA_NODE_REF (e->caller->global.inlined_to);
453 else
454 caller_parms_info = IPA_NODE_REF (e->caller);
455 callee_pi = IPA_NODE_REF (e->callee);
457 if (count && (info->conds || known_vals_ptr))
458 known_vals.safe_grow_cleared (count);
459 if (count && (info->conds || known_aggs_ptr))
460 known_aggs.safe_grow_cleared (count);
461 if (count && known_contexts_ptr)
462 known_contexts_ptr->safe_grow_cleared (count);
464 for (i = 0; i < count; i++)
466 struct ipa_jump_func *jf = ipa_get_ith_jump_func (args, i);
467 tree cst = ipa_value_from_jfunc (caller_parms_info, jf,
468 ipa_get_type (callee_pi, i));
470 if (!cst && e->call_stmt
471 && i < (int)gimple_call_num_args (e->call_stmt))
473 cst = gimple_call_arg (e->call_stmt, i);
474 if (!is_gimple_min_invariant (cst))
475 cst = NULL;
477 if (cst)
479 gcc_checking_assert (TREE_CODE (cst) != TREE_BINFO);
480 if (known_vals.exists ())
481 known_vals[i] = cst;
483 else if (inline_p && !es->param[i].change_prob)
484 known_vals[i] = error_mark_node;
486 if (known_contexts_ptr)
487 (*known_contexts_ptr)[i]
488 = ipa_context_from_jfunc (caller_parms_info, e, i, jf);
489 /* TODO: When IPA-CP starts propagating and merging aggregate jump
490 functions, use its knowledge of the caller too, just like the
491 scalar case above. */
492 known_aggs[i] = &jf->agg;
495 else if (e->call_stmt && !e->call_stmt_cannot_inline_p
496 && ((clause_ptr && info->conds) || known_vals_ptr))
498 int i, count = (int)gimple_call_num_args (e->call_stmt);
500 if (count && (info->conds || known_vals_ptr))
501 known_vals.safe_grow_cleared (count);
502 for (i = 0; i < count; i++)
504 tree cst = gimple_call_arg (e->call_stmt, i);
505 if (!is_gimple_min_invariant (cst))
506 cst = NULL;
507 if (cst)
508 known_vals[i] = cst;
512 evaluate_conditions_for_known_args (callee, inline_p,
513 known_vals, known_aggs, clause_ptr,
514 nonspec_clause_ptr);
516 if (known_vals_ptr)
517 *known_vals_ptr = known_vals;
518 else
519 known_vals.release ();
521 if (known_aggs_ptr)
522 *known_aggs_ptr = known_aggs;
523 else
524 known_aggs.release ();
528 /* Allocate the function summary. */
530 static void
531 ipa_fn_summary_alloc (void)
533 gcc_checking_assert (!ipa_fn_summaries);
534 ipa_fn_summaries = ipa_fn_summary_t::create_ggc (symtab);
535 ipa_call_summaries = new ipa_call_summary_t (symtab, false);
538 /* We are called multiple time for given function; clear
539 data from previous run so they are not cumulated. */
541 void
542 ipa_call_summary::reset ()
544 call_stmt_size = call_stmt_time = 0;
545 is_return_callee_uncaptured = false;
546 if (predicate)
547 edge_predicate_pool.remove (predicate);
548 predicate = NULL;
549 param.release ();
552 /* We are called multiple time for given function; clear
553 data from previous run so they are not cumulated. */
555 void
556 ipa_fn_summary::reset (struct cgraph_node *node)
558 struct cgraph_edge *e;
560 self_size = 0;
561 estimated_stack_size = 0;
562 estimated_self_stack_size = 0;
563 stack_frame_offset = 0;
564 size = 0;
565 time = 0;
566 growth = 0;
567 scc_no = 0;
568 if (loop_iterations)
570 edge_predicate_pool.remove (loop_iterations);
571 loop_iterations = NULL;
573 if (loop_stride)
575 edge_predicate_pool.remove (loop_stride);
576 loop_stride = NULL;
578 if (array_index)
580 edge_predicate_pool.remove (array_index);
581 array_index = NULL;
583 vec_free (conds);
584 vec_free (size_time_table);
585 for (e = node->callees; e; e = e->next_callee)
586 ipa_call_summaries->get_create (e)->reset ();
587 for (e = node->indirect_calls; e; e = e->next_callee)
588 ipa_call_summaries->get_create (e)->reset ();
589 fp_expressions = false;
592 /* Hook that is called by cgraph.c when a node is removed. */
594 void
595 ipa_fn_summary_t::remove (cgraph_node *node, ipa_fn_summary *info)
597 info->reset (node);
600 /* Same as remap_predicate_after_duplication but handle hint predicate *P.
601 Additionally care about allocating new memory slot for updated predicate
602 and set it to NULL when it becomes true or false (and thus uninteresting).
605 static void
606 remap_hint_predicate_after_duplication (predicate **p,
607 clause_t possible_truths)
609 predicate new_predicate;
611 if (!*p)
612 return;
614 new_predicate = (*p)->remap_after_duplication (possible_truths);
615 /* We do not want to free previous predicate; it is used by node origin. */
616 *p = NULL;
617 set_hint_predicate (p, new_predicate);
621 /* Hook that is called by cgraph.c when a node is duplicated. */
622 void
623 ipa_fn_summary_t::duplicate (cgraph_node *src,
624 cgraph_node *dst,
625 ipa_fn_summary *,
626 ipa_fn_summary *info)
628 memcpy (info, ipa_fn_summaries->get_create (src), sizeof (ipa_fn_summary));
629 /* TODO: as an optimization, we may avoid copying conditions
630 that are known to be false or true. */
631 info->conds = vec_safe_copy (info->conds);
633 /* When there are any replacements in the function body, see if we can figure
634 out that something was optimized out. */
635 if (ipa_node_params_sum && dst->clone.tree_map)
637 vec<size_time_entry, va_gc> *entry = info->size_time_table;
638 /* Use SRC parm info since it may not be copied yet. */
639 struct ipa_node_params *parms_info = IPA_NODE_REF (src);
640 vec<tree> known_vals = vNULL;
641 int count = ipa_get_param_count (parms_info);
642 int i, j;
643 clause_t possible_truths;
644 predicate true_pred = true;
645 size_time_entry *e;
646 int optimized_out_size = 0;
647 bool inlined_to_p = false;
648 struct cgraph_edge *edge, *next;
650 info->size_time_table = 0;
651 known_vals.safe_grow_cleared (count);
652 for (i = 0; i < count; i++)
654 struct ipa_replace_map *r;
656 for (j = 0; vec_safe_iterate (dst->clone.tree_map, j, &r); j++)
658 if (((!r->old_tree && r->parm_num == i)
659 || (r->old_tree && r->old_tree == ipa_get_param (parms_info, i)))
660 && r->replace_p && !r->ref_p)
662 known_vals[i] = r->new_tree;
663 break;
667 evaluate_conditions_for_known_args (dst, false,
668 known_vals,
669 vNULL,
670 &possible_truths,
671 /* We are going to specialize,
672 so ignore nonspec truths. */
673 NULL);
674 known_vals.release ();
676 info->account_size_time (0, 0, true_pred, true_pred);
678 /* Remap size_time vectors.
679 Simplify the predicate by prunning out alternatives that are known
680 to be false.
681 TODO: as on optimization, we can also eliminate conditions known
682 to be true. */
683 for (i = 0; vec_safe_iterate (entry, i, &e); i++)
685 predicate new_exec_pred;
686 predicate new_nonconst_pred;
687 new_exec_pred = e->exec_predicate.remap_after_duplication
688 (possible_truths);
689 new_nonconst_pred = e->nonconst_predicate.remap_after_duplication
690 (possible_truths);
691 if (new_exec_pred == false || new_nonconst_pred == false)
692 optimized_out_size += e->size;
693 else
694 info->account_size_time (e->size, e->time, new_exec_pred,
695 new_nonconst_pred);
698 /* Remap edge predicates with the same simplification as above.
699 Also copy constantness arrays. */
700 for (edge = dst->callees; edge; edge = next)
702 predicate new_predicate;
703 struct ipa_call_summary *es = ipa_call_summaries->get_create (edge);
704 next = edge->next_callee;
706 if (!edge->inline_failed)
707 inlined_to_p = true;
708 if (!es->predicate)
709 continue;
710 new_predicate = es->predicate->remap_after_duplication
711 (possible_truths);
712 if (new_predicate == false && *es->predicate != false)
713 optimized_out_size += es->call_stmt_size * ipa_fn_summary::size_scale;
714 edge_set_predicate (edge, &new_predicate);
717 /* Remap indirect edge predicates with the same simplificaiton as above.
718 Also copy constantness arrays. */
719 for (edge = dst->indirect_calls; edge; edge = next)
721 predicate new_predicate;
722 struct ipa_call_summary *es = ipa_call_summaries->get_create (edge);
723 next = edge->next_callee;
725 gcc_checking_assert (edge->inline_failed);
726 if (!es->predicate)
727 continue;
728 new_predicate = es->predicate->remap_after_duplication
729 (possible_truths);
730 if (new_predicate == false && *es->predicate != false)
731 optimized_out_size += es->call_stmt_size * ipa_fn_summary::size_scale;
732 edge_set_predicate (edge, &new_predicate);
734 remap_hint_predicate_after_duplication (&info->loop_iterations,
735 possible_truths);
736 remap_hint_predicate_after_duplication (&info->loop_stride,
737 possible_truths);
738 remap_hint_predicate_after_duplication (&info->array_index,
739 possible_truths);
741 /* If inliner or someone after inliner will ever start producing
742 non-trivial clones, we will get trouble with lack of information
743 about updating self sizes, because size vectors already contains
744 sizes of the calees. */
745 gcc_assert (!inlined_to_p || !optimized_out_size);
747 else
749 info->size_time_table = vec_safe_copy (info->size_time_table);
750 if (info->loop_iterations)
752 predicate p = *info->loop_iterations;
753 info->loop_iterations = NULL;
754 set_hint_predicate (&info->loop_iterations, p);
756 if (info->loop_stride)
758 predicate p = *info->loop_stride;
759 info->loop_stride = NULL;
760 set_hint_predicate (&info->loop_stride, p);
762 if (info->array_index)
764 predicate p = *info->array_index;
765 info->array_index = NULL;
766 set_hint_predicate (&info->array_index, p);
769 if (!dst->global.inlined_to)
770 ipa_update_overall_fn_summary (dst);
774 /* Hook that is called by cgraph.c when a node is duplicated. */
776 void
777 ipa_call_summary_t::duplicate (struct cgraph_edge *src,
778 struct cgraph_edge *dst,
779 struct ipa_call_summary *srcinfo,
780 struct ipa_call_summary *info)
782 *info = *srcinfo;
783 info->predicate = NULL;
784 edge_set_predicate (dst, srcinfo->predicate);
785 info->param = srcinfo->param.copy ();
786 if (!dst->indirect_unknown_callee && src->indirect_unknown_callee)
788 info->call_stmt_size -= (eni_size_weights.indirect_call_cost
789 - eni_size_weights.call_cost);
790 info->call_stmt_time -= (eni_time_weights.indirect_call_cost
791 - eni_time_weights.call_cost);
796 /* Keep edge cache consistent across edge removal. */
798 void
799 ipa_call_summary_t::remove (struct cgraph_edge *,
800 struct ipa_call_summary *sum)
802 sum->reset ();
806 /* Dump edge summaries associated to NODE and recursively to all clones.
807 Indent by INDENT. */
809 static void
810 dump_ipa_call_summary (FILE *f, int indent, struct cgraph_node *node,
811 struct ipa_fn_summary *info)
813 struct cgraph_edge *edge;
814 for (edge = node->callees; edge; edge = edge->next_callee)
816 struct ipa_call_summary *es = ipa_call_summaries->get_create (edge);
817 struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
818 int i;
820 fprintf (f,
821 "%*s%s/%i %s\n%*s loop depth:%2i freq:%4.2f size:%2i"
822 " time: %2i callee size:%2i stack:%2i",
823 indent, "", callee->name (), callee->order,
824 !edge->inline_failed
825 ? "inlined" : cgraph_inline_failed_string (edge-> inline_failed),
826 indent, "", es->loop_depth, edge->sreal_frequency ().to_double (),
827 es->call_stmt_size, es->call_stmt_time,
828 (int) (ipa_fn_summaries->get_create (callee)->size
829 / ipa_fn_summary::size_scale),
830 (int) ipa_fn_summaries->get_create (callee)->estimated_stack_size);
832 if (es->predicate)
834 fprintf (f, " predicate: ");
835 es->predicate->dump (f, info->conds);
837 else
838 fprintf (f, "\n");
839 if (es->param.exists ())
840 for (i = 0; i < (int) es->param.length (); i++)
842 int prob = es->param[i].change_prob;
844 if (!prob)
845 fprintf (f, "%*s op%i is compile time invariant\n",
846 indent + 2, "", i);
847 else if (prob != REG_BR_PROB_BASE)
848 fprintf (f, "%*s op%i change %f%% of time\n", indent + 2, "", i,
849 prob * 100.0 / REG_BR_PROB_BASE);
851 if (!edge->inline_failed)
853 ipa_fn_summary *s = ipa_fn_summaries->get (callee);
854 fprintf (f, "%*sStack frame offset %i, callee self size %i,"
855 " callee size %i\n",
856 indent + 2, "",
857 (int) s->stack_frame_offset,
858 (int) s->estimated_self_stack_size,
859 (int) s->estimated_stack_size);
860 dump_ipa_call_summary (f, indent + 2, callee, info);
863 for (edge = node->indirect_calls; edge; edge = edge->next_callee)
865 struct ipa_call_summary *es = ipa_call_summaries->get_create (edge);
866 fprintf (f, "%*sindirect call loop depth:%2i freq:%4.2f size:%2i"
867 " time: %2i",
868 indent, "",
869 es->loop_depth,
870 edge->sreal_frequency ().to_double (), es->call_stmt_size,
871 es->call_stmt_time);
872 if (es->predicate)
874 fprintf (f, "predicate: ");
875 es->predicate->dump (f, info->conds);
877 else
878 fprintf (f, "\n");
883 void
884 ipa_dump_fn_summary (FILE *f, struct cgraph_node *node)
886 if (node->definition)
888 struct ipa_fn_summary *s = ipa_fn_summaries->get_create (node);
889 size_time_entry *e;
890 int i;
891 fprintf (f, "IPA function summary for %s/%i", node->name (),
892 node->order);
893 if (DECL_DISREGARD_INLINE_LIMITS (node->decl))
894 fprintf (f, " always_inline");
895 if (s->inlinable)
896 fprintf (f, " inlinable");
897 if (s->fp_expressions)
898 fprintf (f, " fp_expression");
899 fprintf (f, "\n global time: %f\n", s->time.to_double ());
900 fprintf (f, " self size: %i\n", s->self_size);
901 fprintf (f, " global size: %i\n", s->size);
902 fprintf (f, " min size: %i\n", s->min_size);
903 fprintf (f, " self stack: %i\n",
904 (int) s->estimated_self_stack_size);
905 fprintf (f, " global stack: %i\n", (int) s->estimated_stack_size);
906 if (s->growth)
907 fprintf (f, " estimated growth:%i\n", (int) s->growth);
908 if (s->scc_no)
909 fprintf (f, " In SCC: %i\n", (int) s->scc_no);
910 for (i = 0; vec_safe_iterate (s->size_time_table, i, &e); i++)
912 fprintf (f, " size:%f, time:%f",
913 (double) e->size / ipa_fn_summary::size_scale,
914 e->time.to_double ());
915 if (e->exec_predicate != true)
917 fprintf (f, ", executed if:");
918 e->exec_predicate.dump (f, s->conds, 0);
920 if (e->exec_predicate != e->nonconst_predicate)
922 fprintf (f, ", nonconst if:");
923 e->nonconst_predicate.dump (f, s->conds, 0);
925 fprintf (f, "\n");
927 if (s->loop_iterations)
929 fprintf (f, " loop iterations:");
930 s->loop_iterations->dump (f, s->conds);
932 if (s->loop_stride)
934 fprintf (f, " loop stride:");
935 s->loop_stride->dump (f, s->conds);
937 if (s->array_index)
939 fprintf (f, " array index:");
940 s->array_index->dump (f, s->conds);
942 fprintf (f, " calls:\n");
943 dump_ipa_call_summary (f, 4, node, s);
944 fprintf (f, "\n");
948 DEBUG_FUNCTION void
949 ipa_debug_fn_summary (struct cgraph_node *node)
951 ipa_dump_fn_summary (stderr, node);
954 void
955 ipa_dump_fn_summaries (FILE *f)
957 struct cgraph_node *node;
959 FOR_EACH_DEFINED_FUNCTION (node)
960 if (!node->global.inlined_to)
961 ipa_dump_fn_summary (f, node);
964 /* Callback of walk_aliased_vdefs. Flags that it has been invoked to the
965 boolean variable pointed to by DATA. */
967 static bool
968 mark_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef ATTRIBUTE_UNUSED,
969 void *data)
971 bool *b = (bool *) data;
972 *b = true;
973 return true;
976 /* If OP refers to value of function parameter, return the corresponding
977 parameter. If non-NULL, the size of the memory load (or the SSA_NAME of the
978 PARM_DECL) will be stored to *SIZE_P in that case too. */
980 static tree
981 unmodified_parm_1 (gimple *stmt, tree op, HOST_WIDE_INT *size_p)
983 /* SSA_NAME referring to parm default def? */
984 if (TREE_CODE (op) == SSA_NAME
985 && SSA_NAME_IS_DEFAULT_DEF (op)
986 && TREE_CODE (SSA_NAME_VAR (op)) == PARM_DECL)
988 if (size_p)
989 *size_p = tree_to_shwi (TYPE_SIZE (TREE_TYPE (op)));
990 return SSA_NAME_VAR (op);
992 /* Non-SSA parm reference? */
993 if (TREE_CODE (op) == PARM_DECL)
995 bool modified = false;
997 ao_ref refd;
998 ao_ref_init (&refd, op);
999 walk_aliased_vdefs (&refd, gimple_vuse (stmt), mark_modified, &modified,
1000 NULL);
1001 if (!modified)
1003 if (size_p)
1004 *size_p = tree_to_shwi (TYPE_SIZE (TREE_TYPE (op)));
1005 return op;
1008 return NULL_TREE;
1011 /* If OP refers to value of function parameter, return the corresponding
1012 parameter. Also traverse chains of SSA register assignments. If non-NULL,
1013 the size of the memory load (or the SSA_NAME of the PARM_DECL) will be
1014 stored to *SIZE_P in that case too. */
1016 static tree
1017 unmodified_parm (gimple *stmt, tree op, HOST_WIDE_INT *size_p)
1019 tree res = unmodified_parm_1 (stmt, op, size_p);
1020 if (res)
1021 return res;
1023 if (TREE_CODE (op) == SSA_NAME
1024 && !SSA_NAME_IS_DEFAULT_DEF (op)
1025 && gimple_assign_single_p (SSA_NAME_DEF_STMT (op)))
1026 return unmodified_parm (SSA_NAME_DEF_STMT (op),
1027 gimple_assign_rhs1 (SSA_NAME_DEF_STMT (op)),
1028 size_p);
1029 return NULL_TREE;
1032 /* If OP refers to a value of a function parameter or value loaded from an
1033 aggregate passed to a parameter (either by value or reference), return TRUE
1034 and store the number of the parameter to *INDEX_P, the access size into
1035 *SIZE_P, and information whether and how it has been loaded from an
1036 aggregate into *AGGPOS. INFO describes the function parameters, STMT is the
1037 statement in which OP is used or loaded. */
1039 static bool
1040 unmodified_parm_or_parm_agg_item (struct ipa_func_body_info *fbi,
1041 gimple *stmt, tree op, int *index_p,
1042 HOST_WIDE_INT *size_p,
1043 struct agg_position_info *aggpos)
1045 tree res = unmodified_parm_1 (stmt, op, size_p);
1047 gcc_checking_assert (aggpos);
1048 if (res)
1050 *index_p = ipa_get_param_decl_index (fbi->info, res);
1051 if (*index_p < 0)
1052 return false;
1053 aggpos->agg_contents = false;
1054 aggpos->by_ref = false;
1055 return true;
1058 if (TREE_CODE (op) == SSA_NAME)
1060 if (SSA_NAME_IS_DEFAULT_DEF (op)
1061 || !gimple_assign_single_p (SSA_NAME_DEF_STMT (op)))
1062 return false;
1063 stmt = SSA_NAME_DEF_STMT (op);
1064 op = gimple_assign_rhs1 (stmt);
1065 if (!REFERENCE_CLASS_P (op))
1066 return unmodified_parm_or_parm_agg_item (fbi, stmt, op, index_p, size_p,
1067 aggpos);
1070 aggpos->agg_contents = true;
1071 return ipa_load_from_parm_agg (fbi, fbi->info->descriptors,
1072 stmt, op, index_p, &aggpos->offset,
1073 size_p, &aggpos->by_ref);
1076 /* See if statement might disappear after inlining.
1077 0 - means not eliminated
1078 1 - half of statements goes away
1079 2 - for sure it is eliminated.
1080 We are not terribly sophisticated, basically looking for simple abstraction
1081 penalty wrappers. */
1083 static int
1084 eliminated_by_inlining_prob (gimple *stmt)
1086 enum gimple_code code = gimple_code (stmt);
1087 enum tree_code rhs_code;
1089 if (!optimize)
1090 return 0;
1092 switch (code)
1094 case GIMPLE_RETURN:
1095 return 2;
1096 case GIMPLE_ASSIGN:
1097 if (gimple_num_ops (stmt) != 2)
1098 return 0;
1100 rhs_code = gimple_assign_rhs_code (stmt);
1102 /* Casts of parameters, loads from parameters passed by reference
1103 and stores to return value or parameters are often free after
1104 inlining due to SRA and further combining.
1105 Assume that half of statements goes away. */
1106 if (CONVERT_EXPR_CODE_P (rhs_code)
1107 || rhs_code == VIEW_CONVERT_EXPR
1108 || rhs_code == ADDR_EXPR
1109 || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
1111 tree rhs = gimple_assign_rhs1 (stmt);
1112 tree lhs = gimple_assign_lhs (stmt);
1113 tree inner_rhs = get_base_address (rhs);
1114 tree inner_lhs = get_base_address (lhs);
1115 bool rhs_free = false;
1116 bool lhs_free = false;
1118 if (!inner_rhs)
1119 inner_rhs = rhs;
1120 if (!inner_lhs)
1121 inner_lhs = lhs;
1123 /* Reads of parameter are expected to be free. */
1124 if (unmodified_parm (stmt, inner_rhs, NULL))
1125 rhs_free = true;
1126 /* Match expressions of form &this->field. Those will most likely
1127 combine with something upstream after inlining. */
1128 else if (TREE_CODE (inner_rhs) == ADDR_EXPR)
1130 tree op = get_base_address (TREE_OPERAND (inner_rhs, 0));
1131 if (TREE_CODE (op) == PARM_DECL)
1132 rhs_free = true;
1133 else if (TREE_CODE (op) == MEM_REF
1134 && unmodified_parm (stmt, TREE_OPERAND (op, 0), NULL))
1135 rhs_free = true;
1138 /* When parameter is not SSA register because its address is taken
1139 and it is just copied into one, the statement will be completely
1140 free after inlining (we will copy propagate backward). */
1141 if (rhs_free && is_gimple_reg (lhs))
1142 return 2;
1144 /* Reads of parameters passed by reference
1145 expected to be free (i.e. optimized out after inlining). */
1146 if (TREE_CODE (inner_rhs) == MEM_REF
1147 && unmodified_parm (stmt, TREE_OPERAND (inner_rhs, 0), NULL))
1148 rhs_free = true;
1150 /* Copying parameter passed by reference into gimple register is
1151 probably also going to copy propagate, but we can't be quite
1152 sure. */
1153 if (rhs_free && is_gimple_reg (lhs))
1154 lhs_free = true;
1156 /* Writes to parameters, parameters passed by value and return value
1157 (either directly or passed via invisible reference) are free.
1159 TODO: We ought to handle testcase like
1160 struct a {int a,b;};
1161 struct a
1162 returnstruct (void)
1164 struct a a ={1,2};
1165 return a;
1168 This translates into:
1170 returnstruct ()
1172 int a$b;
1173 int a$a;
1174 struct a a;
1175 struct a D.2739;
1177 <bb 2>:
1178 D.2739.a = 1;
1179 D.2739.b = 2;
1180 return D.2739;
1183 For that we either need to copy ipa-split logic detecting writes
1184 to return value. */
1185 if (TREE_CODE (inner_lhs) == PARM_DECL
1186 || TREE_CODE (inner_lhs) == RESULT_DECL
1187 || (TREE_CODE (inner_lhs) == MEM_REF
1188 && (unmodified_parm (stmt, TREE_OPERAND (inner_lhs, 0), NULL)
1189 || (TREE_CODE (TREE_OPERAND (inner_lhs, 0)) == SSA_NAME
1190 && SSA_NAME_VAR (TREE_OPERAND (inner_lhs, 0))
1191 && TREE_CODE (SSA_NAME_VAR (TREE_OPERAND
1192 (inner_lhs,
1193 0))) == RESULT_DECL))))
1194 lhs_free = true;
1195 if (lhs_free
1196 && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
1197 rhs_free = true;
1198 if (lhs_free && rhs_free)
1199 return 1;
1201 return 0;
1202 default:
1203 return 0;
1208 /* If BB ends by a conditional we can turn into predicates, attach corresponding
1209 predicates to the CFG edges. */
1211 static void
1212 set_cond_stmt_execution_predicate (struct ipa_func_body_info *fbi,
1213 struct ipa_fn_summary *summary,
1214 basic_block bb)
1216 gimple *last;
1217 tree op;
1218 int index;
1219 HOST_WIDE_INT size;
1220 struct agg_position_info aggpos;
1221 enum tree_code code, inverted_code;
1222 edge e;
1223 edge_iterator ei;
1224 gimple *set_stmt;
1225 tree op2;
1227 last = last_stmt (bb);
1228 if (!last || gimple_code (last) != GIMPLE_COND)
1229 return;
1230 if (!is_gimple_ip_invariant (gimple_cond_rhs (last)))
1231 return;
1232 op = gimple_cond_lhs (last);
1233 /* TODO: handle conditionals like
1234 var = op0 < 4;
1235 if (var != 0). */
1236 if (unmodified_parm_or_parm_agg_item (fbi, last, op, &index, &size, &aggpos))
/* The condition tests a parameter value: attach the comparison (or its
   inversion) as the predicate of each outgoing edge.  */
1238 code = gimple_cond_code (last);
1239 inverted_code = invert_tree_comparison (code, HONOR_NANS (op));
1241 FOR_EACH_EDGE (e, ei, bb->succs)
1243 enum tree_code this_code = (e->flags & EDGE_TRUE_VALUE
1244 ? code : inverted_code);
1245 /* invert_tree_comparison will return ERROR_MARK on FP
1246 comparisons that are not EQ/NE instead of returning proper
1247 unordered one. Be sure it is not confused with NON_CONSTANT. */
1248 if (this_code != ERROR_MARK)
1250 predicate p
1251 = add_condition (summary, index, size, &aggpos, this_code,
1252 unshare_expr_without_location
1253 (gimple_cond_rhs (last)));
1254 e->aux = edge_predicate_pool.allocate ();
1255 *(predicate *) e->aux = p;
1260 if (TREE_CODE (op) != SSA_NAME)
1261 return;
1262 /* Special case
1263 if (builtin_constant_p (op))
1264 constant_code
1265 else
1266 nonconstant_code.
1267 Here we can predicate nonconstant_code. We can't
1268 really handle constant_code since we have no predicate
1269 for this and also the constant code is not known to be
1270 optimized away when inliner doesn't see operand is constant.
1271 Other optimizers might think otherwise. */
1272 if (gimple_cond_code (last) != NE_EXPR
1273 || !integer_zerop (gimple_cond_rhs (last)))
1274 return;
1275 set_stmt = SSA_NAME_DEF_STMT (op);
1276 if (!gimple_call_builtin_p (set_stmt, BUILT_IN_CONSTANT_P)
1277 || gimple_call_num_args (set_stmt) != 1)
1278 return;
1279 op2 = gimple_call_arg (set_stmt, 0);
1280 if (!unmodified_parm_or_parm_agg_item (fbi, set_stmt, op2, &index, &size,
1281 &aggpos))
1282 return;
/* Attach is_not_constant to the else edge: that path is only taken when
   the operand cannot be proven constant.  */
1283 FOR_EACH_EDGE (e, ei, bb->succs) if (e->flags & EDGE_FALSE_VALUE)
1285 predicate p = add_condition (summary, index, size, &aggpos,
1286 predicate::is_not_constant, NULL_TREE);
1287 e->aux = edge_predicate_pool.allocate ();
1288 *(predicate *) e->aux = p;
1293 /* If BB ends by a switch we can turn into predicates, attach corresponding
1294 predicates to the CFG edges. */
1296 static void
1297 set_switch_stmt_execution_predicate (struct ipa_func_body_info *fbi,
1298 struct ipa_fn_summary *summary,
1299 basic_block bb)
1301 gimple *lastg;
1302 tree op;
1303 int index;
1304 HOST_WIDE_INT size;
1305 struct agg_position_info aggpos;
1306 edge e;
1307 edge_iterator ei;
1308 size_t n;
1309 size_t case_idx;
1311 lastg = last_stmt (bb);
1312 if (!lastg || gimple_code (lastg) != GIMPLE_SWITCH)
1313 return;
1314 gswitch *last = as_a <gswitch *> (lastg);
1315 op = gimple_switch_index (last);
1316 if (!unmodified_parm_or_parm_agg_item (fbi, last, op, &index, &size, &aggpos))
1317 return;
/* Start with a false predicate on every outgoing edge; case ranges below
   are OR-ed into the edge leading to their label.  */
1319 FOR_EACH_EDGE (e, ei, bb->succs)
1321 e->aux = edge_predicate_pool.allocate ();
1322 *(predicate *) e->aux = false;
1324 n = gimple_switch_num_labels (last);
1325 for (case_idx = 0; case_idx < n; ++case_idx)
1327 tree cl = gimple_switch_label (last, case_idx);
1328 tree min, max;
1329 predicate p;
1331 e = find_edge (bb, label_to_block (CASE_LABEL (cl)));
1332 min = CASE_LOW (cl);
1333 max = CASE_HIGH (cl);
1335 /* For default we might want to construct predicate that none
1336 of cases is met, but it is a bit hard to do without having
1337 negations of conditionals handy. */
1338 if (!min && !max)
1339 p = true;
1340 else if (!max)
1341 p = add_condition (summary, index, size, &aggpos, EQ_EXPR,
1342 unshare_expr_without_location (min))
1343 else
/* Range case label: the operand must fall within [min, max].  */
1345 predicate p1, p2;
1346 p1 = add_condition (summary, index, size, &aggpos, GE_EXPR,
1347 unshare_expr_without_location (min));
1348 p2 = add_condition (summary, index, size, &aggpos, LE_EXPR,
1349 unshare_expr_without_location (max));
1350 p = p1 & p2;
1352 *(struct predicate *) e->aux
1353 = p.or_with (summary->conds, *(struct predicate *) e->aux);
1358 /* For each BB in NODE attach to its AUX pointer predicate under
1359 which it is executable. */
1361 static void
1362 compute_bb_predicates (struct ipa_func_body_info *fbi,
1363 struct cgraph_node *node,
1364 struct ipa_fn_summary *summary)
1366 struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
1367 bool done = false;
1368 basic_block bb;
1370 FOR_EACH_BB_FN (bb, my_function)
1372 set_cond_stmt_execution_predicate (fbi, summary, bb);
1373 set_switch_stmt_execution_predicate (fbi, summary, bb);
1376 /* Entry block is always executable. */
1377 ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
1378 = edge_predicate_pool.allocate ();
1379 *(predicate *) ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux = true;
1381 /* A simple dataflow propagation of predicates forward in the CFG.
1382 TODO: work in reverse postorder. */
1383 while (!done)
1385 done = true;
1386 FOR_EACH_BB_FN (bb, my_function)
/* BB is executable when any predecessor is reached and its edge taken:
   OR over predecessors of (predecessor predicate AND edge predicate).  */
1388 predicate p = false;
1389 edge e;
1390 edge_iterator ei;
1391 FOR_EACH_EDGE (e, ei, bb->preds)
1393 if (e->src->aux)
1395 predicate this_bb_predicate
1396 = *(predicate *) e->src->aux;
1397 if (e->aux)
1398 this_bb_predicate &= (*(struct predicate *) e->aux);
1399 p = p.or_with (summary->conds, this_bb_predicate);
1400 if (p == true)
1401 break;
1404 if (p == false)
1405 gcc_checking_assert (!bb->aux);
1406 else
1408 if (!bb->aux)
1410 done = false;
1411 bb->aux = edge_predicate_pool.allocate ();
1412 *((predicate *) bb->aux) = p;
1414 else if (p != *(predicate *) bb->aux)
1416 /* This OR operation is needed to ensure monotone data flow
1417 in the case we hit the limit on number of clauses and the
1418 and/or operations above give approximate answers. */
1419 p = p.or_with (summary->conds, *(predicate *)bb->aux);
1420 if (p != *(predicate *) bb->aux)
1422 done = false;
1423 *((predicate *) bb->aux) = p;
1432 /* Return predicate specifying when the STMT might have result that is not
1433 a compile time constant. */
1435 static predicate
1436 will_be_nonconstant_expr_predicate (struct ipa_node_params *info,
1437 struct ipa_fn_summary *summary,
1438 tree expr,
1439 vec<predicate> nonconstant_names)
1441 tree parm;
1442 int index;
1443 HOST_WIDE_INT size;
/* Strip unary operations; they do not affect constancy of the value.  */
1445 while (UNARY_CLASS_P (expr))
1446 expr = TREE_OPERAND (expr, 0);
1448 parm = unmodified_parm (NULL, expr, &size);
1449 if (parm && (index = ipa_get_param_decl_index (info, parm)) >= 0)
1450 return add_condition (summary, index, size, NULL, predicate::changed,
1451 NULL_TREE);
1452 if (is_gimple_min_invariant (expr))
1453 return false;
1454 if (TREE_CODE (expr) == SSA_NAME)
1455 return nonconstant_names[SSA_NAME_VERSION (expr)];
1456 if (BINARY_CLASS_P (expr) || COMPARISON_CLASS_P (expr))
/* Binary/comparison: nonconstant when either operand is.  */
1458 predicate p1 = will_be_nonconstant_expr_predicate
1459 (info, summary, TREE_OPERAND (expr, 0),
1460 nonconstant_names);
1461 if (p1 == true)
1462 return p1;
1464 predicate p2;
1465 p2 = will_be_nonconstant_expr_predicate (info, summary,
1466 TREE_OPERAND (expr, 1),
1467 nonconstant_names);
1468 return p1.or_with (summary->conds, p2);
1470 else if (TREE_CODE (expr) == COND_EXPR)
/* COND_EXPR: nonconstant when the condition or either arm is.  */
1472 predicate p1 = will_be_nonconstant_expr_predicate
1473 (info, summary, TREE_OPERAND (expr, 0),
1474 nonconstant_names);
1475 if (p1 == true)
1476 return p1;
1478 predicate p2;
1479 p2 = will_be_nonconstant_expr_predicate (info, summary,
1480 TREE_OPERAND (expr, 1),
1481 nonconstant_names);
1482 if (p2 == true)
1483 return p2;
1484 p1 = p1.or_with (summary->conds, p2);
1485 p2 = will_be_nonconstant_expr_predicate (info, summary,
1486 TREE_OPERAND (expr, 2),
1487 nonconstant_names);
1488 return p2.or_with (summary->conds, p1);
1490 else if (TREE_CODE (expr) == CALL_EXPR)
1491 return true;
1492 else
1494 debug_tree (expr);
1495 gcc_unreachable ();
1497 return false;
1501 /* Return predicate specifying when the STMT might have result that is not
1502 a compile time constant. */
1504 static predicate
1505 will_be_nonconstant_predicate (struct ipa_func_body_info *fbi,
1506 struct ipa_fn_summary *summary,
1507 gimple *stmt,
1508 vec<predicate> nonconstant_names)
1510 predicate p = true;
1511 ssa_op_iter iter;
1512 tree use;
1513 predicate op_non_const;
1514 bool is_load;
1515 int base_index;
1516 HOST_WIDE_INT size;
1517 struct agg_position_info aggpos;
1519 /* What statements might be optimized away
1520 when their arguments are constant. */
1521 if (gimple_code (stmt) != GIMPLE_ASSIGN
1522 && gimple_code (stmt) != GIMPLE_COND
1523 && gimple_code (stmt) != GIMPLE_SWITCH
1524 && (gimple_code (stmt) != GIMPLE_CALL
1525 || !(gimple_call_flags (stmt) & ECF_CONST)))
1526 return p;
1528 /* Stores will stay anyway. */
1529 if (gimple_store_p (stmt))
1530 return p;
1532 is_load = gimple_assign_load_p (stmt);
1534 /* Loads can be optimized when the value is known. */
1535 if (is_load)
1537 tree op;
1538 gcc_assert (gimple_assign_single_p (stmt));
1539 op = gimple_assign_rhs1 (stmt);
1540 if (!unmodified_parm_or_parm_agg_item (fbi, stmt, op, &base_index, &size,
1541 &aggpos))
1542 return p;
1544 else
1545 base_index = -1;
1547 /* See if we understand all operands before we start
1548 adding conditionals. */
1549 FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
1551 tree parm = unmodified_parm (stmt, use, NULL);
1552 /* For arguments we can build a condition. */
1553 if (parm && ipa_get_param_decl_index (fbi->info, parm) >= 0)
1554 continue;
1555 if (TREE_CODE (use) != SSA_NAME)
1556 return p;
1557 /* If we know when operand is constant,
1558 we still can say something useful. */
1559 if (nonconstant_names[SSA_NAME_VERSION (use)] != true)
1560 continue;
1561 return p;
/* Build the result: nonconstant when the loaded aggregate slot changed or
   any SSA operand is nonconstant.  */
1564 if (is_load)
1565 op_non_const =
1566 add_condition (summary, base_index, size, &aggpos, predicate::changed,
1567 NULL);
1568 else
1569 op_non_const = false;
1570 FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
1572 HOST_WIDE_INT size;
1573 tree parm = unmodified_parm (stmt, use, &size);
1574 int index;
1576 if (parm && (index = ipa_get_param_decl_index (fbi->info, parm)) >= 0)
1578 if (index != base_index)
1579 p = add_condition (summary, index, size, NULL, predicate::changed,
1580 NULL_TREE);
1581 else
1582 continue;
1584 else
1585 p = nonconstant_names[SSA_NAME_VERSION (use)];
1586 op_non_const = p.or_with (summary->conds, op_non_const);
/* Remember the predicate for the statement's SSA result so later uses can
   pick it up from NONCONSTANT_NAMES.  */
1588 if ((gimple_code (stmt) == GIMPLE_ASSIGN || gimple_code (stmt) == GIMPLE_CALL)
1589 && gimple_op (stmt, 0)
1590 && TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
1591 nonconstant_names[SSA_NAME_VERSION (gimple_op (stmt, 0))]
1592 = op_non_const;
1593 return op_non_const;
/* Closure passed to record_modified via walk_aliased_vdefs.  */
1596 struct record_modified_bb_info
/* The tracked parameter value (used for dump output).  */
1598 tree op;
/* Indices of basic blocks where the value may be set.  */
1599 bitmap bb_set;
/* The using statement; its own vdef is ignored by the walker.  */
1600 gimple *stmt;
1603 /* Value is initialized in INIT_BB and used in USE_BB. We want to compute
1604 probability how often it changes between USE_BB.
1605 INIT_BB->count/USE_BB->count is an estimate, but if INIT_BB
1606 is in different loop nest, we can do better.
1607 This is all just estimate. In theory we look for minimal cut separating
1608 INIT_BB and USE_BB, but we only want to anticipate loop invariant motion
1609 anyway. */
1611 static basic_block
1612 get_minimal_bb (basic_block init_bb, basic_block use_bb)
1614 struct loop *l = find_common_loop (init_bb->loop_father, use_bb->loop_father);
/* Prefer the common loop's header when it runs less often than INIT_BB:
   loop invariant motion would hoist the init there.  */
1615 if (l && l->header->count < init_bb->count)
1616 return l->header;
1617 return init_bb;
1620 /* Callback of walk_aliased_vdefs. Records basic blocks where the value may be
1621 set except for info->stmt. */
1623 static bool
1624 record_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef, void *data)
1626 struct record_modified_bb_info *info =
1627 (struct record_modified_bb_info *) data;
1628 if (SSA_NAME_DEF_STMT (vdef) == info->stmt)
1629 return false;
1630 if (gimple_clobber_p (SSA_NAME_DEF_STMT (vdef)))
1631 return false;
/* Default definitions are attributed to the entry block.  */
1632 bitmap_set_bit (info->bb_set,
1633 SSA_NAME_IS_DEFAULT_DEF (vdef)
1634 ? ENTRY_BLOCK_PTR_FOR_FN (cfun)->index
1635 : get_minimal_bb
1636 (gimple_bb (SSA_NAME_DEF_STMT (vdef)),
1637 gimple_bb (info->stmt))->index);
1638 if (dump_file)
1640 fprintf (dump_file, " Param ");
1641 print_generic_expr (dump_file, info->op, TDF_SLIM);
1642 fprintf (dump_file, " changed at bb %i, minimal: %i stmt: ",
1643 gimple_bb (SSA_NAME_DEF_STMT (vdef))->index,
1644 get_minimal_bb
1645 (gimple_bb (SSA_NAME_DEF_STMT (vdef)),
1646 gimple_bb (info->stmt))->index);
1647 print_gimple_stmt (dump_file, SSA_NAME_DEF_STMT (vdef), 0);
/* Return false so the walk continues over all aliased vdefs.  */
1649 return false;
1652 /* Return probability (based on REG_BR_PROB_BASE) that I-th parameter of STMT
1653 will change since last invocation of STMT.
1655 Value 0 is reserved for compile time invariants.
1656 For common parameters it is REG_BR_PROB_BASE. For loop invariants it
1657 ought to be REG_BR_PROB_BASE / estimated_iters. */
1659 static int
1660 param_change_prob (gimple *stmt, int i)
1662 tree op = gimple_call_arg (stmt, i);
1663 basic_block bb = gimple_bb (stmt);
1665 if (TREE_CODE (op) == WITH_SIZE_EXPR)
1666 op = TREE_OPERAND (op, 0);
1668 tree base = get_base_address (op);
1670 /* Global invariants never change. */
1671 if (is_gimple_min_invariant (base))
1672 return 0;
1674 /* We would have to do non-trivial analysis to really work out what
1675 is the probability of value to change (i.e. when init statement
1676 is in a sibling loop of the call).
1678 We do a conservative estimate: when call is executed N times more often
1679 than the statement defining value, we take the frequency 1/N. */
1680 if (TREE_CODE (base) == SSA_NAME)
1682 profile_count init_count;
1684 if (!bb->count.nonzero_p ())
1685 return REG_BR_PROB_BASE;
1687 if (SSA_NAME_IS_DEFAULT_DEF (base))
1688 init_count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
1689 else
1690 init_count = get_minimal_bb
1691 (gimple_bb (SSA_NAME_DEF_STMT (base)),
1692 gimple_bb (stmt))->count;
1694 if (init_count < bb->count)
1695 return MAX ((init_count.to_sreal_scale (bb->count)
1696 * REG_BR_PROB_BASE).to_int (), 1);
1697 return REG_BR_PROB_BASE;
1699 else
/* Memory operand: walk aliased vdefs to find where it may be set.  */
1701 ao_ref refd;
1702 profile_count max = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
1703 struct record_modified_bb_info info;
1704 tree init = ctor_for_folding (base);
1706 if (init != error_mark_node)
1707 return 0;
1708 if (!bb->count.nonzero_p ())
1709 return REG_BR_PROB_BASE;
1710 if (dump_file)
1712 fprintf (dump_file, " Analyzing param change probablity of ");
1713 print_generic_expr (dump_file, op, TDF_SLIM);
1714 fprintf (dump_file, "\n");
1716 ao_ref_init (&refd, op);
1717 info.op = op;
1718 info.stmt = stmt;
1719 info.bb_set = BITMAP_ALLOC (NULL);
1720 walk_aliased_vdefs (&refd, gimple_vuse (stmt), record_modified, &info,
1721 NULL);
1722 if (bitmap_bit_p (info.bb_set, bb->index))
1724 if (dump_file)
1725 fprintf (dump_file, " Set in same BB as used.\n");
1726 BITMAP_FREE (info.bb_set);
1727 return REG_BR_PROB_BASE;
1730 bitmap_iterator bi;
1731 unsigned index;
1732 /* Lookup the most frequent update of the value and believe that
1733 it dominates all the other; precise analysis here is difficult. */
1734 EXECUTE_IF_SET_IN_BITMAP (info.bb_set, 0, index, bi)
1735 max = max.max (BASIC_BLOCK_FOR_FN (cfun, index)->count);
1736 if (dump_file)
1738 fprintf (dump_file, " Set with count ");
1739 max.dump (dump_file);
1740 fprintf (dump_file, " and used with count ");
1741 bb->count.dump (dump_file);
1742 fprintf (dump_file, " freq %f\n",
1743 max.to_sreal_scale (bb->count).to_double ());
1746 BITMAP_FREE (info.bb_set);
1747 if (max < bb->count)
1748 return MAX ((max.to_sreal_scale (bb->count)
1749 * REG_BR_PROB_BASE).to_int (), 1);
1750 return REG_BR_PROB_BASE;
1754 /* Find whether a basic block BB is the final block of a (half) diamond CFG
1755 sub-graph and if the predicate the condition depends on is known. If so,
1756 return true and store the predicate in *P. */
1758 static bool
1759 phi_result_unknown_predicate (struct ipa_node_params *info,
1760 ipa_fn_summary *summary, basic_block bb,
1761 predicate *p,
1762 vec<predicate> nonconstant_names)
1764 edge e;
1765 edge_iterator ei;
1766 basic_block first_bb = NULL;
1767 gimple *stmt;
1769 if (single_pred_p (bb))
1771 *p = false;
1772 return true;
/* Require every predecessor to be either the condition block itself or a
   forwarder block whose single predecessor is that common block.  */
1775 FOR_EACH_EDGE (e, ei, bb->preds)
1777 if (single_succ_p (e->src))
1779 if (!single_pred_p (e->src))
1780 return false;
1781 if (!first_bb)
1782 first_bb = single_pred (e->src);
1783 else if (single_pred (e->src) != first_bb)
1784 return false;
1786 else
1788 if (!first_bb)
1789 first_bb = e->src;
1790 else if (e->src != first_bb)
1791 return false;
1795 if (!first_bb)
1796 return false;
1798 stmt = last_stmt (first_bb);
1799 if (!stmt
1800 || gimple_code (stmt) != GIMPLE_COND
1801 || !is_gimple_ip_invariant (gimple_cond_rhs (stmt)))
1802 return false;
1804 *p = will_be_nonconstant_expr_predicate (info, summary,
1805 gimple_cond_lhs (stmt),
1806 nonconstant_names);
1807 if (*p == true)
1808 return false;
1809 else
1810 return true;
1813 /* Given a PHI statement in a function described by inline properties SUMMARY
1814 and *P being the predicate describing whether the selected PHI argument is
1815 known, store a predicate for the result of the PHI statement into
1816 NONCONSTANT_NAMES, if possible. */
1818 static void
1819 predicate_for_phi_result (struct ipa_fn_summary *summary, gphi *phi,
1820 predicate *p,
1821 vec<predicate> nonconstant_names)
1823 unsigned i;
1825 for (i = 0; i < gimple_phi_num_args (phi); i++)
1827 tree arg = gimple_phi_arg (phi, i)->def;
1828 if (!is_gimple_min_invariant (arg))
1830 gcc_assert (TREE_CODE (arg) == SSA_NAME);
1831 *p = p->or_with (summary->conds,
1832 nonconstant_names[SSA_NAME_VERSION (arg)]);
/* Once the predicate degenerates to always-true, nothing stronger can
   be recorded; give up early.  */
1833 if (*p == true)
1834 return;
1838 if (dump_file && (dump_flags & TDF_DETAILS))
1840 fprintf (dump_file, "\t\tphi predicate: ");
1841 p->dump (dump_file, summary->conds);
1843 nonconstant_names[SSA_NAME_VERSION (gimple_phi_result (phi))] = *p;
1846 /* Return predicate specifying when array index in access OP becomes non-constant. */
1848 static predicate
1849 array_index_predicate (ipa_fn_summary *info,
1850 vec< predicate> nonconstant_names, tree op)
1852 predicate p = false;
/* Walk the chain of handled components and OR in the nonconstancy
   predicate of every SSA name used as an array index.  */
1853 while (handled_component_p (op))
1855 if (TREE_CODE (op) == ARRAY_REF || TREE_CODE (op) == ARRAY_RANGE_REF)
1857 if (TREE_CODE (TREE_OPERAND (op, 1)) == SSA_NAME)
1858 p = p.or_with (info->conds,
1859 nonconstant_names[SSA_NAME_VERSION
1860 (TREE_OPERAND (op, 1))]);
1862 op = TREE_OPERAND (op, 0);
1864 return p;
1867 /* For a typical usage of __builtin_expect (a<b, 1), we
1868 may introduce an extra relation stmt:
1869 With the builtin, we have
1870 t1 = a <= b;
1871 t2 = (long int) t1;
1872 t3 = __builtin_expect (t2, 1);
1873 if (t3 != 0)
1874 goto ...
1875 Without the builtin, we have
1876 if (a<=b)
1877 goto...
1878 This affects the size/time estimation and may have
1879 an impact on the earlier inlining.
1880 Here find this pattern and fix it up later. */
1882 static gimple *
1883 find_foldable_builtin_expect (basic_block bb)
1885 gimple_stmt_iterator bsi;
1887 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1889 gimple *stmt = gsi_stmt (bsi);
1890 if (gimple_call_builtin_p (stmt, BUILT_IN_EXPECT)
1891 || gimple_call_internal_p (stmt, IFN_BUILTIN_EXPECT))
1893 tree var = gimple_call_lhs (stmt);
1894 tree arg = gimple_call_arg (stmt, 0);
1895 use_operand_p use_p;
1896 gimple *use_stmt;
1897 bool match = false;
1898 bool done = false;
1900 if (!var || !arg)
1901 continue;
1902 gcc_assert (TREE_CODE (var) == SSA_NAME);
/* Walk backwards through conversions to see whether the expected value
   ultimately comes from a comparison.  */
1904 while (TREE_CODE (arg) == SSA_NAME)
1906 gimple *stmt_tmp = SSA_NAME_DEF_STMT (arg);
1907 if (!is_gimple_assign (stmt_tmp))
1908 break;
1909 switch (gimple_assign_rhs_code (stmt_tmp))
1911 case LT_EXPR:
1912 case LE_EXPR:
1913 case GT_EXPR:
1914 case GE_EXPR:
1915 case EQ_EXPR:
1916 case NE_EXPR:
1917 match = true;
1918 done = true;
1919 break;
1920 CASE_CONVERT:
1921 break;
1922 default:
1923 done = true;
1924 break;
1926 if (done)
1927 break;
1928 arg = gimple_assign_rhs1 (stmt_tmp);
/* The pattern is foldable only when the builtin's single use is the
   controlling GIMPLE_COND.  */
1931 if (match && single_imm_use (var, &use_p, &use_stmt)
1932 && gimple_code (use_stmt) == GIMPLE_COND)
1933 return use_stmt;
1936 return NULL;
1939 /* Return true when the basic blocks contains only clobbers followed by RESX.
1940 Such BBs are kept around to make removal of dead stores possible with
1941 presence of EH and will be optimized out by optimize_clobbers later in the
1942 game.
1944 NEED_EH is used to recurse in case the clobber has non-EH predecessors
1945 that can be clobber only, too. When it is false, the RESX is not necessary
1946 on the end of basic block. */
1948 static bool
1949 clobber_only_eh_bb_p (basic_block bb, bool need_eh = true)
1951 gimple_stmt_iterator gsi = gsi_last_bb (bb);
1952 edge_iterator ei;
1953 edge e;
1955 if (need_eh)
1957 if (gsi_end_p (gsi))
1958 return false;
1959 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_RESX)
1960 return false;
1961 gsi_prev (&gsi);
1963 else if (!single_succ_p (bb))
1964 return false;
/* Scan backwards: everything else in the block must be a debug stmt,
   a clobber, or the block's label.  */
1966 for (; !gsi_end_p (gsi); gsi_prev (&gsi))
1968 gimple *stmt = gsi_stmt (gsi);
1969 if (is_gimple_debug (stmt))
1970 continue;
1971 if (gimple_clobber_p (stmt))
1972 continue;
1973 if (gimple_code (stmt) == GIMPLE_LABEL)
1974 break;
1975 return false;
1978 /* See if all predecessors are either throws or clobber only BBs. */
1979 FOR_EACH_EDGE (e, ei, bb->preds)
1980 if (!(e->flags & EDGE_EH)
1981 && !clobber_only_eh_bb_p (e->src, false))
1982 return false;
1984 return true;
1987 /* Return true if STMT computes a floating point expression that may be affected
1988 by -ffast-math and similar flags. */
1990 static bool
1991 fp_expression_p (gimple *stmt)
1993 ssa_op_iter i;
1994 tree op;
/* A statement is an FP expression if any of its SSA defs or uses has
   floating point type.  */
1996 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF|SSA_OP_USE)
1997 if (FLOAT_TYPE_P (TREE_TYPE (op)))
1998 return true;
1999 return false;
2002 /* Analyze function body for NODE.
2003 EARLY indicates run from early optimization pipeline. */
2005 static void
2006 analyze_function_body (struct cgraph_node *node, bool early)
2008 sreal time = 0;
2009 /* Estimate static overhead for function prologue/epilogue and alignment. */
2010 int size = 2;
2011 /* Benefits are scaled by probability of elimination that is in range
2012 <0,2>. */
2013 basic_block bb;
2014 struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
2015 sreal freq;
2016 struct ipa_fn_summary *info = ipa_fn_summaries->get_create (node);
2017 predicate bb_predicate;
2018 struct ipa_func_body_info fbi;
2019 vec<predicate> nonconstant_names = vNULL;
2020 int nblocks, n;
2021 int *order;
2022 predicate array_index = true;
2023 gimple *fix_builtin_expect_stmt;
2025 gcc_assert (my_function && my_function->cfg);
2026 gcc_assert (cfun == my_function);
2028 memset(&fbi, 0, sizeof(fbi));
2029 info->conds = NULL;
2030 info->size_time_table = NULL;
2032 /* When optimizing and analyzing for IPA inliner, initialize loop optimizer
2033 so we can produce proper inline hints.
2035 When optimizing and analyzing for early inliner, initialize node params
2036 so we can produce correct BB predicates. */
2038 if (opt_for_fn (node->decl, optimize))
2040 calculate_dominance_info (CDI_DOMINATORS);
2041 if (!early)
2042 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
2043 else
2045 ipa_check_create_node_params ();
2046 ipa_initialize_node_params (node);
2049 if (ipa_node_params_sum)
2051 fbi.node = node;
2052 fbi.info = IPA_NODE_REF (node);
2053 fbi.bb_infos = vNULL;
2054 fbi.bb_infos.safe_grow_cleared (last_basic_block_for_fn (cfun));
2055 fbi.param_count = count_formal_params(node->decl);
2056 nonconstant_names.safe_grow_cleared
2057 (SSANAMES (my_function)->length ());
2061 if (dump_file)
2062 fprintf (dump_file, "\nAnalyzing function body size: %s\n",
2063 node->name ());
2065 /* When we run into maximal number of entries, we assign everything to the
2066 constant truth case. Be sure to have it in list. */
2067 bb_predicate = true;
2068 info->account_size_time (0, 0, bb_predicate, bb_predicate);
2070 bb_predicate = predicate::not_inlined ();
2071 info->account_size_time (2 * ipa_fn_summary::size_scale, 0, bb_predicate,
2072 bb_predicate);
2074 if (fbi.info)
2075 compute_bb_predicates (&fbi, node, info);
2076 order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
2077 nblocks = pre_and_rev_post_order_compute (NULL, order, false);
2078 for (n = 0; n < nblocks; n++)
2080 bb = BASIC_BLOCK_FOR_FN (cfun, order[n]);
2081 freq = bb->count.to_sreal_scale (ENTRY_BLOCK_PTR_FOR_FN (cfun)->count);
2082 if (clobber_only_eh_bb_p (bb))
2084 if (dump_file && (dump_flags & TDF_DETAILS))
2085 fprintf (dump_file, "\n Ignoring BB %i;"
2086 " it will be optimized away by cleanup_clobbers\n",
2087 bb->index);
2088 continue;
2091 /* TODO: Obviously predicates can be propagated down across CFG. */
2092 if (fbi.info)
2094 if (bb->aux)
2095 bb_predicate = *(predicate *) bb->aux;
2096 else
2097 bb_predicate = false;
2099 else
2100 bb_predicate = true;
2102 if (dump_file && (dump_flags & TDF_DETAILS))
2104 fprintf (dump_file, "\n BB %i predicate:", bb->index);
2105 bb_predicate.dump (dump_file, info->conds);
2108 if (fbi.info && nonconstant_names.exists ())
2110 predicate phi_predicate;
2111 bool first_phi = true;
2113 for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi);
2114 gsi_next (&bsi))
2116 if (first_phi
2117 && !phi_result_unknown_predicate (fbi.info, info, bb,
2118 &phi_predicate,
2119 nonconstant_names))
2120 break;
2121 first_phi = false;
2122 if (dump_file && (dump_flags & TDF_DETAILS))
2124 fprintf (dump_file, " ");
2125 print_gimple_stmt (dump_file, gsi_stmt (bsi), 0);
2127 predicate_for_phi_result (info, bsi.phi (), &phi_predicate,
2128 nonconstant_names);
2132 fix_builtin_expect_stmt = find_foldable_builtin_expect (bb);
2134 for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi);
2135 gsi_next (&bsi))
2137 gimple *stmt = gsi_stmt (bsi);
2138 int this_size = estimate_num_insns (stmt, &eni_size_weights);
2139 int this_time = estimate_num_insns (stmt, &eni_time_weights);
2140 int prob;
2141 predicate will_be_nonconstant;
2143 /* This relation stmt should be folded after we remove
2144 buildin_expect call. Adjust the cost here. */
2145 if (stmt == fix_builtin_expect_stmt)
2147 this_size--;
2148 this_time--;
2151 if (dump_file && (dump_flags & TDF_DETAILS))
2153 fprintf (dump_file, " ");
2154 print_gimple_stmt (dump_file, stmt, 0);
2155 fprintf (dump_file, "\t\tfreq:%3.2f size:%3i time:%3i\n",
2156 freq.to_double (), this_size,
2157 this_time);
2160 if (gimple_assign_load_p (stmt) && nonconstant_names.exists ())
2162 predicate this_array_index;
2163 this_array_index =
2164 array_index_predicate (info, nonconstant_names,
2165 gimple_assign_rhs1 (stmt));
2166 if (this_array_index != false)
2167 array_index &= this_array_index;
2169 if (gimple_store_p (stmt) && nonconstant_names.exists ())
2171 predicate this_array_index;
2172 this_array_index =
2173 array_index_predicate (info, nonconstant_names,
2174 gimple_get_lhs (stmt));
2175 if (this_array_index != false)
2176 array_index &= this_array_index;
2180 if (is_gimple_call (stmt)
2181 && !gimple_call_internal_p (stmt))
2183 struct cgraph_edge *edge = node->get_edge (stmt);
2184 ipa_call_summary *es = ipa_call_summaries->get_create (edge);
2186 /* Special case: results of BUILT_IN_CONSTANT_P will be always
2187 resolved as constant. We however don't want to optimize
2188 out the cgraph edges. */
2189 if (nonconstant_names.exists ()
2190 && gimple_call_builtin_p (stmt, BUILT_IN_CONSTANT_P)
2191 && gimple_call_lhs (stmt)
2192 && TREE_CODE (gimple_call_lhs (stmt)) == SSA_NAME)
2194 predicate false_p = false;
2195 nonconstant_names[SSA_NAME_VERSION (gimple_call_lhs (stmt))]
2196 = false_p;
2198 if (ipa_node_params_sum)
2200 int count = gimple_call_num_args (stmt);
2201 int i;
2203 if (count)
2204 es->param.safe_grow_cleared (count);
2205 for (i = 0; i < count; i++)
2207 int prob = param_change_prob (stmt, i);
2208 gcc_assert (prob >= 0 && prob <= REG_BR_PROB_BASE);
2209 es->param[i].change_prob = prob;
2213 es->call_stmt_size = this_size;
2214 es->call_stmt_time = this_time;
2215 es->loop_depth = bb_loop_depth (bb);
2216 edge_set_predicate (edge, &bb_predicate);
2219 /* TODO: When conditional jump or swithc is known to be constant, but
2220 we did not translate it into the predicates, we really can account
2221 just maximum of the possible paths. */
2222 if (fbi.info)
2223 will_be_nonconstant
2224 = will_be_nonconstant_predicate (&fbi, info,
2225 stmt, nonconstant_names);
2226 else
2227 will_be_nonconstant = true;
2228 if (this_time || this_size)
2230 sreal final_time = (sreal)this_time * freq;
2232 prob = eliminated_by_inlining_prob (stmt);
2233 if (prob == 1 && dump_file && (dump_flags & TDF_DETAILS))
2234 fprintf (dump_file,
2235 "\t\t50%% will be eliminated by inlining\n");
2236 if (prob == 2 && dump_file && (dump_flags & TDF_DETAILS))
2237 fprintf (dump_file, "\t\tWill be eliminated by inlining\n");
2239 struct predicate p = bb_predicate & will_be_nonconstant;
2241 /* We can ignore statement when we proved it is never going
2242 to happen, but we can not do that for call statements
2243 because edges are accounted specially. */
2245 if (*(is_gimple_call (stmt) ? &bb_predicate : &p) != false)
2247 time += final_time;
2248 size += this_size;
2251 /* We account everything but the calls. Calls have their own
2252 size/time info attached to cgraph edges. This is necessary
2253 in order to make the cost disappear after inlining. */
2254 if (!is_gimple_call (stmt))
2256 if (prob)
2258 predicate ip = bb_predicate & predicate::not_inlined ();
2259 info->account_size_time (this_size * prob,
2260 (this_time * prob) / 2, ip,
2263 if (prob != 2)
2264 info->account_size_time (this_size * (2 - prob),
2265 (this_time * (2 - prob) / 2),
2266 bb_predicate,
2270 if (!info->fp_expressions && fp_expression_p (stmt))
2272 info->fp_expressions = true;
2273 if (dump_file)
2274 fprintf (dump_file, " fp_expression set\n");
2277 gcc_assert (time >= 0);
2278 gcc_assert (size >= 0);
2282 set_hint_predicate (&ipa_fn_summaries->get_create (node)->array_index,
2283 array_index);
2284 free (order);
2286 if (nonconstant_names.exists () && !early)
2288 struct loop *loop;
2289 predicate loop_iterations = true;
2290 predicate loop_stride = true;
2292 if (dump_file && (dump_flags & TDF_DETAILS))
2293 flow_loops_dump (dump_file, NULL, 0);
2294 scev_initialize ();
2295 FOR_EACH_LOOP (loop, 0)
2297 vec<edge> exits;
2298 edge ex;
2299 unsigned int j;
2300 struct tree_niter_desc niter_desc;
2301 bb_predicate = *(predicate *) loop->header->aux;
2303 exits = get_loop_exit_edges (loop);
2304 FOR_EACH_VEC_ELT (exits, j, ex)
2305 if (number_of_iterations_exit (loop, ex, &niter_desc, false)
2306 && !is_gimple_min_invariant (niter_desc.niter))
2308 predicate will_be_nonconstant
2309 = will_be_nonconstant_expr_predicate (fbi.info, info,
2310 niter_desc.niter,
2311 nonconstant_names);
2312 if (will_be_nonconstant != true)
2313 will_be_nonconstant = bb_predicate & will_be_nonconstant;
2314 if (will_be_nonconstant != true
2315 && will_be_nonconstant != false)
2316 /* This is slightly inprecise. We may want to represent each
2317 loop with independent predicate. */
2318 loop_iterations &= will_be_nonconstant;
2320 exits.release ();
2323 /* To avoid quadratic behavior we analyze stride predicates only
2324 with respect to the containing loop. Thus we simply iterate
2325 over all defs in the outermost loop body. */
2326 for (loop = loops_for_fn (cfun)->tree_root->inner;
2327 loop != NULL; loop = loop->next)
2329 basic_block *body = get_loop_body (loop);
2330 for (unsigned i = 0; i < loop->num_nodes; i++)
2332 gimple_stmt_iterator gsi;
2333 bb_predicate = *(predicate *) body[i]->aux;
2334 for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi);
2335 gsi_next (&gsi))
2337 gimple *stmt = gsi_stmt (gsi);
2339 if (!is_gimple_assign (stmt))
2340 continue;
2342 tree def = gimple_assign_lhs (stmt);
2343 if (TREE_CODE (def) != SSA_NAME)
2344 continue;
2346 affine_iv iv;
2347 if (!simple_iv (loop_containing_stmt (stmt),
2348 loop_containing_stmt (stmt),
2349 def, &iv, true)
2350 || is_gimple_min_invariant (iv.step))
2351 continue;
2353 predicate will_be_nonconstant
2354 = will_be_nonconstant_expr_predicate (fbi.info, info,
2355 iv.step,
2356 nonconstant_names);
2357 if (will_be_nonconstant != true)
2358 will_be_nonconstant = bb_predicate & will_be_nonconstant;
2359 if (will_be_nonconstant != true
2360 && will_be_nonconstant != false)
2361 /* This is slightly inprecise. We may want to represent
2362 each loop with independent predicate. */
2363 loop_stride = loop_stride & will_be_nonconstant;
2366 free (body);
2368 ipa_fn_summary *s = ipa_fn_summaries->get_create (node);
2369 set_hint_predicate (&s->loop_iterations, loop_iterations);
2370 set_hint_predicate (&s->loop_stride, loop_stride);
2371 scev_finalize ();
2373 FOR_ALL_BB_FN (bb, my_function)
2375 edge e;
2376 edge_iterator ei;
2378 if (bb->aux)
2379 edge_predicate_pool.remove ((predicate *)bb->aux);
2380 bb->aux = NULL;
2381 FOR_EACH_EDGE (e, ei, bb->succs)
2383 if (e->aux)
2384 edge_predicate_pool.remove ((predicate *) e->aux);
2385 e->aux = NULL;
2388 ipa_fn_summary *s = ipa_fn_summaries->get_create (node);
2389 s->time = time;
2390 s->self_size = size;
2391 nonconstant_names.release ();
2392 ipa_release_body_info (&fbi);
2393 if (opt_for_fn (node->decl, optimize))
2395 if (!early)
2396 loop_optimizer_finalize ();
2397 else if (!ipa_edge_args_sum)
2398 ipa_free_all_node_params ();
2399 free_dominance_info (CDI_DOMINATORS);
2401 if (dump_file)
2403 fprintf (dump_file, "\n");
2404 ipa_dump_fn_summary (dump_file, node);
/* Compute function summary.
   EARLY is true when we compute parameters during early opts.

   Resets and recomputes NODE's ipa_fn_summary: stack-frame estimate,
   inlinability, signature-change permission and (via
   analyze_function_body) size/time tables.  Thunks get a synthetic
   summary instead of a body walk.  */

void
compute_fn_summary (struct cgraph_node *node, bool early)
{
  HOST_WIDE_INT self_stack_size;
  struct cgraph_edge *e;
  struct ipa_fn_summary *info;

  /* Summaries are computed for the offline body only; inline clones
     inherit theirs from the merge machinery.  */
  gcc_assert (!node->global.inlined_to);

  if (!ipa_fn_summaries)
    ipa_fn_summary_alloc ();

  /* Create a fresh (reset) summary for NODE.  */
  info = ipa_fn_summaries->get_create (node);
  info->reset (node);

  /* Estimate the stack size for the function if we're optimizing.  */
  self_stack_size = optimize && !node->thunk.thunk_p
		    ? estimated_stack_frame_size (node) : 0;
  info->estimated_self_stack_size = self_stack_size;
  info->estimated_stack_size = self_stack_size;
  info->stack_frame_offset = 0;

  if (node->thunk.thunk_p)
    {
      /* Thunks have no analyzable body: model them as one call
	 (node->callees is the single edge to the thunk target) plus a
	 small fixed overhead.  */
      ipa_call_summary *es = ipa_call_summaries->get_create (node->callees);
      predicate t = true;

      node->local.can_change_signature = false;
      es->call_stmt_size = eni_size_weights.call_cost;
      es->call_stmt_time = eni_time_weights.call_cost;
      /* Unconditional base cost ... */
      info->account_size_time (ipa_fn_summary::size_scale * 2, 2, t, t);
      /* ... plus extra size charged only when the thunk stays
	 out of line.  */
      t = predicate::not_inlined ();
      info->account_size_time (2 * ipa_fn_summary::size_scale, 0, t, t);
      ipa_update_overall_fn_summary (node);
      info->self_size = info->size;
      /* We can not inline instrumentation clones.  */
      if (node->thunk.add_pointer_bounds_args)
	{
	  info->inlinable = false;
	  node->callees->inline_failed = CIF_CHKP;
	}
      else if (stdarg_p (TREE_TYPE (node->decl)))
	{
	  info->inlinable = false;
	  node->callees->inline_failed = CIF_VARIADIC_THUNK;
	}
      else
	info->inlinable = true;
    }
  else
    {
      /* Even is_gimple_min_invariant rely on current_function_decl.  */
      push_cfun (DECL_STRUCT_FUNCTION (node->decl));

      /* Can this function be inlined at all?  */
      if (!opt_for_fn (node->decl, optimize)
	  && !lookup_attribute ("always_inline",
				DECL_ATTRIBUTES (node->decl)))
	info->inlinable = false;
      else
	info->inlinable = tree_inlinable_function_p (node->decl);

      /* Type attributes can use parameter indices to describe them.  */
      if (TYPE_ATTRIBUTES (TREE_TYPE (node->decl))
	  /* Likewise for #pragma omp declare simd functions or functions
	     with simd attribute.  */
	  || lookup_attribute ("omp declare simd",
			       DECL_ATTRIBUTES (node->decl)))
	node->local.can_change_signature = false;
      else
	{
	  /* Otherwise, inlinable functions always can change signature.  */
	  if (info->inlinable)
	    node->local.can_change_signature = true;
	  else
	    {
	      /* Functions calling builtin_apply can not change signature.  */
	      for (e = node->callees; e; e = e->next_callee)
		{
		  tree cdecl = e->callee->decl;
		  if (DECL_BUILT_IN (cdecl)
		      && DECL_BUILT_IN_CLASS (cdecl) == BUILT_IN_NORMAL
		      && (DECL_FUNCTION_CODE (cdecl) == BUILT_IN_APPLY_ARGS
			  || DECL_FUNCTION_CODE (cdecl) == BUILT_IN_VA_START))
		    break;
		}
	      /* E is non-NULL iff the loop above found such a call.  */
	      node->local.can_change_signature = !e;
	    }
	}
      /* Functions called by instrumentation thunk can't change signature
	 because instrumentation thunk modification is not supported.  */
      if (node->local.can_change_signature)
	for (e = node->callers; e; e = e->next_caller)
	  if (e->caller->thunk.thunk_p
	      && e->caller->thunk.add_pointer_bounds_args)
	    {
	      node->local.can_change_signature = false;
	      break;
	    }
      analyze_function_body (node, early);
      pop_cfun ();
    }
  /* Record whether NODE calls any comdat-local symbol.  */
  for (e = node->callees; e; e = e->next_callee)
    if (e->callee->comdat_local_p ())
      break;
  node->calls_comdat_local = (e != NULL);

  /* Inlining characteristics are maintained by the cgraph_mark_inline.  */
  info->size = info->self_size;
  info->stack_frame_offset = 0;
  info->estimated_stack_size = info->estimated_self_stack_size;

  /* Code above should compute exactly the same result as
     ipa_update_overall_fn_summary but because computation happens in
     different order the roundoff errors result in slight changes.  */
  ipa_update_overall_fn_summary (node);
  gcc_assert (info->size == info->self_size);
}
2532 /* Compute parameters of functions used by inliner using
2533 current_function_decl. */
2535 static unsigned int
2536 compute_fn_summary_for_current (void)
2538 compute_fn_summary (cgraph_node::get (current_function_decl), true);
2539 return 0;
2542 /* Estimate benefit devirtualizing indirect edge IE, provided KNOWN_VALS,
2543 KNOWN_CONTEXTS and KNOWN_AGGS. */
2545 static bool
2546 estimate_edge_devirt_benefit (struct cgraph_edge *ie,
2547 int *size, int *time,
2548 vec<tree> known_vals,
2549 vec<ipa_polymorphic_call_context> known_contexts,
2550 vec<ipa_agg_jump_function_p> known_aggs)
2552 tree target;
2553 struct cgraph_node *callee;
2554 struct ipa_fn_summary *isummary;
2555 enum availability avail;
2556 bool speculative;
2558 if (!known_vals.exists () && !known_contexts.exists ())
2559 return false;
2560 if (!opt_for_fn (ie->caller->decl, flag_indirect_inlining))
2561 return false;
2563 target = ipa_get_indirect_edge_target (ie, known_vals, known_contexts,
2564 known_aggs, &speculative);
2565 if (!target || speculative)
2566 return false;
2568 /* Account for difference in cost between indirect and direct calls. */
2569 *size -= (eni_size_weights.indirect_call_cost - eni_size_weights.call_cost);
2570 *time -= (eni_time_weights.indirect_call_cost - eni_time_weights.call_cost);
2571 gcc_checking_assert (*time >= 0);
2572 gcc_checking_assert (*size >= 0);
2574 callee = cgraph_node::get (target);
2575 if (!callee || !callee->definition)
2576 return false;
2577 callee = callee->function_symbol (&avail);
2578 if (avail < AVAIL_AVAILABLE)
2579 return false;
2580 isummary = ipa_fn_summaries->get_create (callee);
2581 return isummary->inlinable;
/* Increase SIZE, MIN_SIZE (if non-NULL) and TIME for size and time needed to
   handle edge E with probability PROB.
   Set HINTS if edge may be devirtualized.
   KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS describe context of the call
   site.  */

static inline void
estimate_edge_size_and_time (struct cgraph_edge *e, int *size, int *min_size,
			     sreal *time,
			     int prob,
			     vec<tree> known_vals,
			     vec<ipa_polymorphic_call_context> known_contexts,
			     vec<ipa_agg_jump_function_p> known_aggs,
			     ipa_hints *hints)
{
  struct ipa_call_summary *es = ipa_call_summaries->get_create (e);
  int call_size = es->call_stmt_size;
  int call_time = es->call_stmt_time;
  int cur_size;
  /* For an indirect edge, try to devirtualize using the known context;
     on success call_size/call_time were reduced to direct-call cost and
     a hint is recorded for hot edges.  */
  if (!e->callee
      && estimate_edge_devirt_benefit (e, &call_size, &call_time,
				       known_vals, known_contexts, known_aggs)
      && hints && e->maybe_hot_p ())
    *hints |= INLINE_HINT_indirect_call;
  /* Sizes are kept scaled by size_scale throughout the summaries.  */
  cur_size = call_size * ipa_fn_summary::size_scale;
  *size += cur_size;
  if (min_size)
    *min_size += cur_size;
  if (prob == REG_BR_PROB_BASE)
    *time += ((sreal)call_time) * e->sreal_frequency ();
  else
    /* NOTE(review): this branch scales by PROB without dividing by
       REG_BR_PROB_BASE; the only caller visible here always passes
       REG_BR_PROB_BASE, so this path appears unexercised — confirm the
       intended normalization before passing other probabilities.  */
    *time += ((sreal)call_time * prob) * e->sreal_frequency ();
}
/* Increase SIZE, MIN_SIZE and TIME for size and time needed to handle all
   calls in NODE.  POSSIBLE_TRUTHS, KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
   describe context of the call site.

   Edges already inlined into NODE are walked recursively so their own
   calls are accounted; not-yet-inlined edges contribute their call
   statement cost.  Only edges whose predicate evaluates to possibly-true
   under POSSIBLE_TRUTHS are counted.  */

static void
estimate_calls_size_and_time (struct cgraph_node *node, int *size,
			      int *min_size, sreal *time,
			      ipa_hints *hints,
			      clause_t possible_truths,
			      vec<tree> known_vals,
			      vec<ipa_polymorphic_call_context> known_contexts,
			      vec<ipa_agg_jump_function_p> known_aggs)
{
  struct cgraph_edge *e;
  for (e = node->callees; e; e = e->next_callee)
    {
      struct ipa_call_summary *es = ipa_call_summaries->get_create (e);

      /* Do not care about zero sized builtins.  */
      if (e->inline_failed && !es->call_stmt_size)
	{
	  gcc_checking_assert (!es->call_stmt_time);
	  continue;
	}
      if (!es->predicate
	  || es->predicate->evaluate (possible_truths))
	{
	  if (e->inline_failed)
	    {
	      /* Predicates of calls shall not use NOT_CHANGED codes,
		 so we do not need to compute probabilities.  */
	      estimate_edge_size_and_time (e, size,
					   es->predicate ? NULL : min_size,
					   time, REG_BR_PROB_BASE,
					   known_vals, known_contexts,
					   known_aggs, hints);
	    }
	  else
	    /* The edge was inlined: account the callee's own calls
	       recursively instead of the call itself.  */
	    estimate_calls_size_and_time (e->callee, size, min_size, time,
					  hints,
					  possible_truths,
					  known_vals, known_contexts,
					  known_aggs);
	}
    }
  for (e = node->indirect_calls; e; e = e->next_callee)
    {
      struct ipa_call_summary *es = ipa_call_summaries->get_create (e);
      if (!es->predicate
	  || es->predicate->evaluate (possible_truths))
	estimate_edge_size_and_time (e, size,
				     es->predicate ? NULL : min_size,
				     time, REG_BR_PROB_BASE,
				     known_vals, known_contexts, known_aggs,
				     hints);
    }
}
/* Estimate size and time needed to execute NODE assuming
   POSSIBLE_TRUTHS clause, and KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
   information about NODE's arguments.  If non-NULL use also probability
   information present in INLINE_PARAM_SUMMARY vector.
   Additionally determine hints determined by the context.  Finally compute
   minimal size needed for the call that is independent on the call context and
   can be used for fast estimates.  Return the values in RET_SIZE,
   RET_MIN_SIZE, RET_TIME and RET_HINTS.  */

void
estimate_node_size_and_time (struct cgraph_node *node,
			     clause_t possible_truths,
			     clause_t nonspec_possible_truths,
			     vec<tree> known_vals,
			     vec<ipa_polymorphic_call_context> known_contexts,
			     vec<ipa_agg_jump_function_p> known_aggs,
			     int *ret_size, int *ret_min_size,
			     sreal *ret_time,
			     sreal *ret_nonspecialized_time,
			     ipa_hints *ret_hints,
			     vec<inline_param_summary>
			     inline_param_summary)
{
  struct ipa_fn_summary *info = ipa_fn_summaries->get_create (node);
  size_time_entry *e;
  int size = 0;
  sreal time = 0;
  int min_size = 0;
  ipa_hints hints = 0;
  int i;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      bool found = false;
      fprintf (dump_file, "   Estimating body: %s/%i\n"
	       "   Known to be false: ", node->name (),
	       node->order);

      /* Dump every condition that POSSIBLE_TRUTHS says cannot hold.  */
      for (i = predicate::not_inlined_condition;
	   i < (predicate::first_dynamic_condition
		+ (int) vec_safe_length (info->conds)); i++)
	if (!(possible_truths & (1 << i)))
	  {
	    if (found)
	      fprintf (dump_file, ", ");
	    found = true;
	    dump_condition (dump_file, info->conds, i);
	  }
    }

  /* Start with the cost of the calls; then add the body's
     size/time table entries.  */
  estimate_calls_size_and_time (node, &size, &min_size, &time, &hints, possible_truths,
				known_vals, known_contexts, known_aggs);
  sreal nonspecialized_time = time;

  for (i = 0; vec_safe_iterate (info->size_time_table, i, &e); i++)
    {
      bool exec = e->exec_predicate.evaluate (nonspec_possible_truths);

      /* Because predicates are conservative, it can happen that nonconst is 1
	 but exec is 0.  */
      if (exec)
	{
	  bool nonconst = e->nonconst_predicate.evaluate (possible_truths);

	  gcc_checking_assert (e->time >= 0);
	  gcc_checking_assert (time >= 0);

	  /* We compute specialized size only because size of nonspecialized
	     copy is context independent.

	     The difference between nonspecialized execution and specialized is
	     that nonspecialized is not going to have optimized out computations
	     known to be constant in a specialized setting.  */
	  if (nonconst)
	    size += e->size;
	  nonspecialized_time += e->time;
	  if (!nonconst)
	    ;
	  else if (!inline_param_summary.exists ())
	    {
	      /* No per-parameter probabilities: count the time fully.  */
	      if (nonconst)
		time += e->time;
	    }
	  else
	    {
	      /* Scale the entry's time by the probability that its
		 nonconst predicate actually holds.  */
	      int prob = e->nonconst_predicate.probability
			     (info->conds, possible_truths,
			      inline_param_summary);
	      gcc_checking_assert (prob >= 0);
	      gcc_checking_assert (prob <= REG_BR_PROB_BASE);
	      time += e->time * prob / REG_BR_PROB_BASE;
	    }
	  gcc_checking_assert (time >= 0);
	}
    }
  /* Entry 0 of the table is unconditional by construction; its size is
     the context-independent minimum.  */
  gcc_checking_assert ((*info->size_time_table)[0].exec_predicate == true);
  gcc_checking_assert ((*info->size_time_table)[0].nonconst_predicate == true);
  min_size = (*info->size_time_table)[0].size;
  gcc_checking_assert (size >= 0);
  gcc_checking_assert (time >= 0);
  /* nonspecialized_time should be always bigger than specialized time.
     Roundoff issues however may get into the way.  */
  gcc_checking_assert ((nonspecialized_time - time * 0.99) >= -1);

  /* Roundoff issues may make specialized time bigger than nonspecialized
     time.  We do not really want that to happen because some heuristics
     may get confused by seeing negative speedups.  */
  if (time > nonspecialized_time)
    time = nonspecialized_time;

  /* Hints fire when the corresponding predicate is known not to hold
     in this context (i.e. the cost will be optimized away).  */
  if (info->loop_iterations
      && !info->loop_iterations->evaluate (possible_truths))
    hints |= INLINE_HINT_loop_iterations;
  if (info->loop_stride
      && !info->loop_stride->evaluate (possible_truths))
    hints |= INLINE_HINT_loop_stride;
  if (info->array_index
      && !info->array_index->evaluate (possible_truths))
    hints |= INLINE_HINT_array_index;
  if (info->scc_no)
    hints |= INLINE_HINT_in_scc;
  if (DECL_DECLARED_INLINE_P (node->decl))
    hints |= INLINE_HINT_declared_inline;

  /* Convert internally-scaled sizes back to units.  */
  size = RDIV (size, ipa_fn_summary::size_scale);
  min_size = RDIV (min_size, ipa_fn_summary::size_scale);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\n   size:%i time:%f nonspec time:%f\n", (int) size,
	     time.to_double (), nonspecialized_time.to_double ());
  if (ret_time)
    *ret_time = time;
  if (ret_nonspecialized_time)
    *ret_nonspecialized_time = nonspecialized_time;
  if (ret_size)
    *ret_size = size;
  if (ret_min_size)
    *ret_min_size = min_size;
  if (ret_hints)
    *ret_hints = hints;
  return;
}
2823 /* Estimate size and time needed to execute callee of EDGE assuming that
2824 parameters known to be constant at caller of EDGE are propagated.
2825 KNOWN_VALS and KNOWN_CONTEXTS are vectors of assumed known constant values
2826 and types for parameters. */
2828 void
2829 estimate_ipcp_clone_size_and_time (struct cgraph_node *node,
2830 vec<tree> known_vals,
2831 vec<ipa_polymorphic_call_context>
2832 known_contexts,
2833 vec<ipa_agg_jump_function_p> known_aggs,
2834 int *ret_size, sreal *ret_time,
2835 sreal *ret_nonspec_time,
2836 ipa_hints *hints)
2838 clause_t clause, nonspec_clause;
2840 evaluate_conditions_for_known_args (node, false, known_vals, known_aggs,
2841 &clause, &nonspec_clause);
2842 estimate_node_size_and_time (node, clause, nonspec_clause,
2843 known_vals, known_contexts,
2844 known_aggs, ret_size, NULL, ret_time,
2845 ret_nonspec_time, hints, vNULL);
/* Update summary information of inline clones after inlining.
   Compute peak stack usage.

   NODE is the freshly inlined callee; DEPTH is the loop depth of the
   inlined call site, added to the loop depth of every call inside the
   inlined body.  Recurses into already-inlined callees of NODE.  */

static void
inline_update_callee_summaries (struct cgraph_node *node, int depth)
{
  struct cgraph_edge *e;
  ipa_fn_summary *callee_info = ipa_fn_summaries->get_create (node);
  ipa_fn_summary *caller_info
    = ipa_fn_summaries->get_create (node->callers->caller);
  HOST_WIDE_INT peak;

  /* The inlined body's frame sits after the caller's frame; the peak
     is propagated to the outermost function the clone belongs to.  */
  callee_info->stack_frame_offset
    = caller_info->stack_frame_offset
      + caller_info->estimated_self_stack_size;
  peak = callee_info->stack_frame_offset
	 + callee_info->estimated_self_stack_size;

  ipa_fn_summary *s = ipa_fn_summaries->get_create (node->global.inlined_to);
  if (s->estimated_stack_size < peak)
    s->estimated_stack_size = peak;
  ipa_propagate_frequency (node);
  for (e = node->callees; e; e = e->next_callee)
    {
      /* Recurse into edges that were themselves inlined.  */
      if (!e->inline_failed)
	inline_update_callee_summaries (e->callee, depth);
      ipa_call_summaries->get_create (e)->loop_depth += depth;
    }
  for (e = node->indirect_calls; e; e = e->next_callee)
    ipa_call_summaries->get_create (e)->loop_depth += depth;
}
/* Update change_prob of EDGE after INLINED_EDGE has been inlined.
   When function A is inlined in B and A calls C with parameter that
   changes with probability PROB1 and C is known to be pass-through
   of argument of B that changes with probability PROB2, the probability
   of change is now PROB1*PROB2.  */

static void
remap_edge_change_prob (struct cgraph_edge *inlined_edge,
			struct cgraph_edge *edge)
{
  if (ipa_node_params_sum)
    {
      int i;
      struct ipa_edge_args *args = IPA_EDGE_REF (edge);
      struct ipa_call_summary *es = ipa_call_summaries->get_create (edge);
      struct ipa_call_summary *inlined_es
	= ipa_call_summaries->get_create (inlined_edge);

      for (i = 0; i < ipa_get_cs_argument_count (args); i++)
	{
	  struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i);
	  /* Only direct forwarding of a caller parameter (possibly via
	     an ancestor adjustment) lets us combine probabilities.  */
	  if (jfunc->type == IPA_JF_PASS_THROUGH
	      || jfunc->type == IPA_JF_ANCESTOR)
	    {
	      int id = jfunc->type == IPA_JF_PASS_THROUGH
		       ? ipa_get_jf_pass_through_formal_id (jfunc)
		       : ipa_get_jf_ancestor_formal_id (jfunc);
	      if (id < (int) inlined_es->param.length ())
		{
		  int prob1 = es->param[i].change_prob;
		  int prob2 = inlined_es->param[id].change_prob;
		  int prob = combine_probabilities (prob1, prob2);

		  /* Avoid rounding a nonzero combined probability all
		     the way down to zero.  */
		  if (prob1 && prob2 && !prob)
		    prob = 1;

		  es->param[i].change_prob = prob;
		}
	    }
	}
    }
}
/* Update edge summaries of NODE after INLINED_EDGE has been inlined.

   Remap predicates of callees of NODE.  Rest of arguments match
   remap_predicate.

   Also update change probabilities.  */

static void
remap_edge_summaries (struct cgraph_edge *inlined_edge,
		      struct cgraph_node *node,
		      struct ipa_fn_summary *info,
		      struct ipa_fn_summary *callee_info,
		      vec<int> operand_map,
		      vec<int> offset_map,
		      clause_t possible_truths,
		      predicate *toplev_predicate)
{
  struct cgraph_edge *e, *next;
  for (e = node->callees; e; e = next)
    {
      struct ipa_call_summary *es = ipa_call_summaries->get_create (e);
      predicate p;
      /* edge_set_predicate may invalidate E; fetch the successor
	 before touching it.  */
      next = e->next_callee;

      if (e->inline_failed)
	{
	  remap_edge_change_prob (inlined_edge, e);

	  /* Translate the edge predicate into the caller's terms; an
	     edge without its own predicate inherits the call site's
	     top-level predicate.  */
	  if (es->predicate)
	    {
	      p = es->predicate->remap_after_inlining
				     (info, callee_info, operand_map,
				      offset_map, possible_truths,
				      *toplev_predicate);
	      edge_set_predicate (e, &p);
	    }
	  else
	    edge_set_predicate (e, toplev_predicate);
	}
      else
	/* Already-inlined edges carry the summaries of their bodies;
	   recurse to remap those too.  */
	remap_edge_summaries (inlined_edge, e->callee, info, callee_info,
			      operand_map, offset_map, possible_truths,
			      toplev_predicate);
    }
  for (e = node->indirect_calls; e; e = next)
    {
      struct ipa_call_summary *es = ipa_call_summaries->get_create (e);
      predicate p;
      next = e->next_callee;

      remap_edge_change_prob (inlined_edge, e);
      if (es->predicate)
	{
	  p = es->predicate->remap_after_inlining
				 (info, callee_info, operand_map, offset_map,
				  possible_truths, *toplev_predicate);
	  edge_set_predicate (e, &p);
	}
      else
	edge_set_predicate (e, toplev_predicate);
    }
}
2987 /* Same as remap_predicate, but set result into hint *HINT. */
2989 static void
2990 remap_hint_predicate (struct ipa_fn_summary *info,
2991 struct ipa_fn_summary *callee_info,
2992 predicate **hint,
2993 vec<int> operand_map,
2994 vec<int> offset_map,
2995 clause_t possible_truths,
2996 predicate *toplev_predicate)
2998 predicate p;
3000 if (!*hint)
3001 return;
3002 p = (*hint)->remap_after_inlining
3003 (info, callee_info,
3004 operand_map, offset_map,
3005 possible_truths, *toplev_predicate);
3006 if (p != false && p != true)
3008 if (!*hint)
3009 set_hint_predicate (hint, p);
3010 else
3011 **hint &= p;
/* We inlined EDGE.  Update summary of the function we inlined into.

   Remaps the callee's size/time table, edge predicates and hint
   predicates into the terms of the (toplevel) caller, scales times by
   edge frequency and nonconst probability, and updates stack usage.  */

void
ipa_merge_fn_summary_after_inlining (struct cgraph_edge *edge)
{
  ipa_fn_summary *callee_info = ipa_fn_summaries->get_create (edge->callee);
  struct cgraph_node *to = (edge->caller->global.inlined_to
			    ? edge->caller->global.inlined_to : edge->caller);
  struct ipa_fn_summary *info = ipa_fn_summaries->get_create (to);
  clause_t clause = 0;	/* not_inline is known to be false.  */
  size_time_entry *e;
  vec<int> operand_map = vNULL;
  vec<int> offset_map = vNULL;
  int i;
  predicate toplev_predicate;
  predicate true_p = true;
  struct ipa_call_summary *es = ipa_call_summaries->get_create (edge);

  /* The callee's predicates are valid only under the predicate of the
     call site itself.  */
  if (es->predicate)
    toplev_predicate = *es->predicate;
  else
    toplev_predicate = true;

  info->fp_expressions |= callee_info->fp_expressions;

  if (callee_info->conds)
    evaluate_properties_for_edge (edge, true, &clause, NULL, NULL, NULL, NULL);
  if (ipa_node_params_sum && callee_info->conds)
    {
      /* Build maps from callee parameter indices to caller parameter
	 indices (and aggregate offsets) using the jump functions.  */
      struct ipa_edge_args *args = IPA_EDGE_REF (edge);
      int count = ipa_get_cs_argument_count (args);
      int i;

      if (count)
	{
	  operand_map.safe_grow_cleared (count);
	  offset_map.safe_grow_cleared (count);
	}
      for (i = 0; i < count; i++)
	{
	  struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i);
	  int map = -1;

	  /* TODO: handle non-NOPs when merging.  */
	  if (jfunc->type == IPA_JF_PASS_THROUGH)
	    {
	      if (ipa_get_jf_pass_through_operation (jfunc) == NOP_EXPR)
		map = ipa_get_jf_pass_through_formal_id (jfunc);
	      if (!ipa_get_jf_pass_through_agg_preserved (jfunc))
		offset_map[i] = -1;
	    }
	  else if (jfunc->type == IPA_JF_ANCESTOR)
	    {
	      HOST_WIDE_INT offset = ipa_get_jf_ancestor_offset (jfunc);
	      if (offset >= 0 && offset < INT_MAX)
		{
		  map = ipa_get_jf_ancestor_formal_id (jfunc);
		  if (!ipa_get_jf_ancestor_agg_preserved (jfunc))
		    offset = -1;
		  offset_map[i] = offset;
		}
	    }
	  operand_map[i] = map;
	  gcc_assert (map < ipa_get_param_count (IPA_NODE_REF (to)));
	}
    }
  /* Merge the callee's size/time entries, remapping both predicates of
     each entry into the caller's condition space.  */
  for (i = 0; vec_safe_iterate (callee_info->size_time_table, i, &e); i++)
    {
      predicate p;
      p = e->exec_predicate.remap_after_inlining
			     (info, callee_info, operand_map,
			      offset_map, clause,
			      toplev_predicate);
      predicate nonconstp;
      nonconstp = e->nonconst_predicate.remap_after_inlining
		     (info, callee_info, operand_map,
		      offset_map, clause,
		      toplev_predicate);
      if (p != false && nonconstp != false)
	{
	  /* Scale time by call frequency and by the probability that
	     the entry's nonconst predicate really fires.  */
	  sreal add_time = ((sreal)e->time * edge->sreal_frequency ());
	  int prob = e->nonconst_predicate.probability (callee_info->conds,
							clause, es->param);
	  add_time = add_time * prob / REG_BR_PROB_BASE;
	  if (prob != REG_BR_PROB_BASE
	      && dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "\t\tScaling time by probability:%f\n",
		       (double) prob / REG_BR_PROB_BASE);
	    }
	  info->account_size_time (e->size, add_time, p, nonconstp);
	}
    }
  remap_edge_summaries (edge, edge->callee, info, callee_info, operand_map,
			offset_map, clause, &toplev_predicate);
  remap_hint_predicate (info, callee_info,
			&callee_info->loop_iterations,
			operand_map, offset_map, clause, &toplev_predicate);
  remap_hint_predicate (info, callee_info,
			&callee_info->loop_stride,
			operand_map, offset_map, clause, &toplev_predicate);
  remap_hint_predicate (info, callee_info,
			&callee_info->array_index,
			operand_map, offset_map, clause, &toplev_predicate);

  ipa_call_summary *s = ipa_call_summaries->get_create (edge);
  inline_update_callee_summaries (edge->callee, s->loop_depth);

  /* We do not maintain predicates of inlined edges, free it.  */
  edge_set_predicate (edge, &true_p);
  /* Similarly remove param summaries.  */
  es->param.release ();
  operand_map.release ();
  offset_map.release ();
}
3131 /* For performance reasons ipa_merge_fn_summary_after_inlining is not updating overall size
3132 and time. Recompute it. */
3134 void
3135 ipa_update_overall_fn_summary (struct cgraph_node *node)
3137 struct ipa_fn_summary *info = ipa_fn_summaries->get_create (node);
3138 size_time_entry *e;
3139 int i;
3141 info->size = 0;
3142 info->time = 0;
3143 for (i = 0; vec_safe_iterate (info->size_time_table, i, &e); i++)
3145 info->size += e->size;
3146 info->time += e->time;
3148 estimate_calls_size_and_time (node, &info->size, &info->min_size,
3149 &info->time, NULL,
3150 ~(clause_t) (1 << predicate::false_condition),
3151 vNULL, vNULL, vNULL);
3152 info->size = (info->size + ipa_fn_summary::size_scale / 2) / ipa_fn_summary::size_scale;
3156 /* This function performs intraprocedural analysis in NODE that is required to
3157 inline indirect calls. */
3159 static void
3160 inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
3162 ipa_analyze_node (node);
3163 if (dump_file && (dump_flags & TDF_DETAILS))
3165 ipa_print_node_params (dump_file, node);
3166 ipa_print_node_jump_functions (dump_file, node);
3171 /* Note function body size. */
3173 void
3174 inline_analyze_function (struct cgraph_node *node)
3176 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
3178 if (dump_file)
3179 fprintf (dump_file, "\nAnalyzing function: %s/%u\n",
3180 node->name (), node->order);
3181 if (opt_for_fn (node->decl, optimize) && !node->thunk.thunk_p)
3182 inline_indirect_intraprocedural_analysis (node);
3183 compute_fn_summary (node, false);
3184 if (!optimize)
3186 struct cgraph_edge *e;
3187 for (e = node->callees; e; e = e->next_callee)
3188 e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
3189 for (e = node->indirect_calls; e; e = e->next_callee)
3190 e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
3193 pop_cfun ();
/* Called when new function is inserted to callgraph late.  */

void
ipa_fn_summary_t::insert (struct cgraph_node *node, ipa_fn_summary *)
{
  /* Late-inserted bodies get the same analysis as any other function.  */
  inline_analyze_function (node);
}
3205 /* Note function body size. */
3207 static void
3208 ipa_fn_summary_generate (void)
3210 struct cgraph_node *node;
3212 FOR_EACH_DEFINED_FUNCTION (node)
3213 if (DECL_STRUCT_FUNCTION (node->decl))
3214 node->local.versionable = tree_versionable_function_p (node->decl);
3216 ipa_fn_summary_alloc ();
3218 ipa_fn_summaries->enable_insertion_hook ();
3220 ipa_register_cgraph_hooks ();
3222 FOR_EACH_DEFINED_FUNCTION (node)
3223 if (!node->alias
3224 && (flag_generate_lto || flag_generate_offload|| flag_wpa
3225 || opt_for_fn (node->decl, optimize)))
3226 inline_analyze_function (node);
/* Read inline summary for edge E from IB.  (The original comment said
   "Write ... to OB" — a copy-paste from the streamer-out counterpart.)  */

static void
read_ipa_call_summary (struct lto_input_block *ib, struct cgraph_edge *e)
{
  struct ipa_call_summary *es = ipa_call_summaries->get_create (e);
  predicate p;
  int length, i;

  /* Fields must be read in exactly the order the writer emitted them.  */
  es->call_stmt_size = streamer_read_uhwi (ib);
  es->call_stmt_time = streamer_read_uhwi (ib);
  es->loop_depth = streamer_read_uhwi (ib);

  bitpack_d bp = streamer_read_bitpack (ib);
  es->is_return_callee_uncaptured = bp_unpack_value (&bp, 1);

  p.stream_in (ib);
  edge_set_predicate (e, &p);
  /* Per-parameter change probabilities, preceded by their count.  */
  length = streamer_read_uhwi (ib);
  if (length)
    {
      es->param.safe_grow_cleared (length);
      for (i = 0; i < length; i++)
	es->param[i].change_prob = streamer_read_uhwi (ib);
    }
}
3258 /* Stream in inline summaries from the section. */
3260 static void
3261 inline_read_section (struct lto_file_decl_data *file_data, const char *data,
3262 size_t len)
3264 const struct lto_function_header *header =
3265 (const struct lto_function_header *) data;
3266 const int cfg_offset = sizeof (struct lto_function_header);
3267 const int main_offset = cfg_offset + header->cfg_size;
3268 const int string_offset = main_offset + header->main_size;
3269 struct data_in *data_in;
3270 unsigned int i, count2, j;
3271 unsigned int f_count;
3273 lto_input_block ib ((const char *) data + main_offset, header->main_size,
3274 file_data->mode_table);
3276 data_in =
3277 lto_data_in_create (file_data, (const char *) data + string_offset,
3278 header->string_size, vNULL);
3279 f_count = streamer_read_uhwi (&ib);
3280 for (i = 0; i < f_count; i++)
3282 unsigned int index;
3283 struct cgraph_node *node;
3284 struct ipa_fn_summary *info;
3285 lto_symtab_encoder_t encoder;
3286 struct bitpack_d bp;
3287 struct cgraph_edge *e;
3288 predicate p;
3290 index = streamer_read_uhwi (&ib);
3291 encoder = file_data->symtab_node_encoder;
3292 node = dyn_cast<cgraph_node *> (lto_symtab_encoder_deref (encoder,
3293 index));
3294 info = ipa_fn_summaries->get_create (node);
3296 info->estimated_stack_size
3297 = info->estimated_self_stack_size = streamer_read_uhwi (&ib);
3298 info->size = info->self_size = streamer_read_uhwi (&ib);
3299 info->time = sreal::stream_in (&ib);
3301 bp = streamer_read_bitpack (&ib);
3302 info->inlinable = bp_unpack_value (&bp, 1);
3303 info->fp_expressions = bp_unpack_value (&bp, 1);
3305 count2 = streamer_read_uhwi (&ib);
3306 gcc_assert (!info->conds);
3307 for (j = 0; j < count2; j++)
3309 struct condition c;
3310 c.operand_num = streamer_read_uhwi (&ib);
3311 c.size = streamer_read_uhwi (&ib);
3312 c.code = (enum tree_code) streamer_read_uhwi (&ib);
3313 c.val = stream_read_tree (&ib, data_in);
3314 bp = streamer_read_bitpack (&ib);
3315 c.agg_contents = bp_unpack_value (&bp, 1);
3316 c.by_ref = bp_unpack_value (&bp, 1);
3317 if (c.agg_contents)
3318 c.offset = streamer_read_uhwi (&ib);
3319 vec_safe_push (info->conds, c);
3321 count2 = streamer_read_uhwi (&ib);
3322 gcc_assert (!info->size_time_table);
3323 for (j = 0; j < count2; j++)
3325 struct size_time_entry e;
3327 e.size = streamer_read_uhwi (&ib);
3328 e.time = sreal::stream_in (&ib);
3329 e.exec_predicate.stream_in (&ib);
3330 e.nonconst_predicate.stream_in (&ib);
3332 vec_safe_push (info->size_time_table, e);
3335 p.stream_in (&ib);
3336 set_hint_predicate (&info->loop_iterations, p);
3337 p.stream_in (&ib);
3338 set_hint_predicate (&info->loop_stride, p);
3339 p.stream_in (&ib);
3340 set_hint_predicate (&info->array_index, p);
3341 for (e = node->callees; e; e = e->next_callee)
3342 read_ipa_call_summary (&ib, e);
3343 for (e = node->indirect_calls; e; e = e->next_callee)
3344 read_ipa_call_summary (&ib, e);
3347 lto_free_section_data (file_data, LTO_section_ipa_fn_summary, NULL, data,
3348 len);
3349 lto_data_in_delete (data_in);
3353 /* Read inline summary. Jump functions are shared among ipa-cp
3354 and inliner, so when ipa-cp is active, we don't need to write them
3355 twice. */
3357 static void
3358 ipa_fn_summary_read (void)
3360 struct lto_file_decl_data **file_data_vec = lto_get_file_decl_data ();
3361 struct lto_file_decl_data *file_data;
3362 unsigned int j = 0;
3364 ipa_fn_summary_alloc ();
3366 while ((file_data = file_data_vec[j++]))
3368 size_t len;
3369 const char *data = lto_get_section_data (file_data,
3370 LTO_section_ipa_fn_summary,
3371 NULL, &len);
3372 if (data)
3373 inline_read_section (file_data, data, len);
3374 else
3375 /* Fatal error here. We do not want to support compiling ltrans units
3376 with different version of compiler or different flags than the WPA
3377 unit, so this should never happen. */
3378 fatal_error (input_location,
3379 "ipa inline summary is missing in input file");
3381 ipa_register_cgraph_hooks ();
3382 if (!flag_ipa_cp)
3383 ipa_prop_read_jump_functions ();
3385 gcc_assert (ipa_fn_summaries);
3386 ipa_fn_summaries->enable_insertion_hook ();
3390 /* Write inline summary for edge E to OB. */
3392 static void
3393 write_ipa_call_summary (struct output_block *ob, struct cgraph_edge *e)
3395 struct ipa_call_summary *es = ipa_call_summaries->get_create (e);
3396 int i;
3398 streamer_write_uhwi (ob, es->call_stmt_size);
3399 streamer_write_uhwi (ob, es->call_stmt_time);
3400 streamer_write_uhwi (ob, es->loop_depth);
3402 bitpack_d bp = bitpack_create (ob->main_stream);
3403 bp_pack_value (&bp, es->is_return_callee_uncaptured, 1);
3404 streamer_write_bitpack (&bp);
3406 if (es->predicate)
3407 es->predicate->stream_out (ob);
3408 else
3409 streamer_write_uhwi (ob, 0);
3410 streamer_write_uhwi (ob, es->param.length ());
3411 for (i = 0; i < (int) es->param.length (); i++)
3412 streamer_write_uhwi (ob, es->param[i].change_prob);
3416 /* Write inline summary for node in SET.
3417 Jump functions are shared among ipa-cp and inliner, so when ipa-cp is
3418 active, we don't need to write them twice. */
3420 static void
3421 ipa_fn_summary_write (void)
3423 struct output_block *ob = create_output_block (LTO_section_ipa_fn_summary);
3424 lto_symtab_encoder_t encoder = ob->decl_state->symtab_node_encoder;
3425 unsigned int count = 0;
3426 int i;
3428 for (i = 0; i < lto_symtab_encoder_size (encoder); i++)
3430 symtab_node *snode = lto_symtab_encoder_deref (encoder, i);
3431 cgraph_node *cnode = dyn_cast <cgraph_node *> (snode);
3432 if (cnode && cnode->definition && !cnode->alias)
3433 count++;
3435 streamer_write_uhwi (ob, count);
3437 for (i = 0; i < lto_symtab_encoder_size (encoder); i++)
3439 symtab_node *snode = lto_symtab_encoder_deref (encoder, i);
3440 cgraph_node *cnode = dyn_cast <cgraph_node *> (snode);
3441 if (cnode && cnode->definition && !cnode->alias)
3443 struct ipa_fn_summary *info = ipa_fn_summaries->get_create (cnode);
3444 struct bitpack_d bp;
3445 struct cgraph_edge *edge;
3446 int i;
3447 size_time_entry *e;
3448 struct condition *c;
3450 streamer_write_uhwi (ob, lto_symtab_encoder_encode (encoder, cnode));
3451 streamer_write_hwi (ob, info->estimated_self_stack_size);
3452 streamer_write_hwi (ob, info->self_size);
3453 info->time.stream_out (ob);
3454 bp = bitpack_create (ob->main_stream);
3455 bp_pack_value (&bp, info->inlinable, 1);
3456 bp_pack_value (&bp, false, 1);
3457 bp_pack_value (&bp, info->fp_expressions, 1);
3458 streamer_write_bitpack (&bp);
3459 streamer_write_uhwi (ob, vec_safe_length (info->conds));
3460 for (i = 0; vec_safe_iterate (info->conds, i, &c); i++)
3462 streamer_write_uhwi (ob, c->operand_num);
3463 streamer_write_uhwi (ob, c->size);
3464 streamer_write_uhwi (ob, c->code);
3465 stream_write_tree (ob, c->val, true);
3466 bp = bitpack_create (ob->main_stream);
3467 bp_pack_value (&bp, c->agg_contents, 1);
3468 bp_pack_value (&bp, c->by_ref, 1);
3469 streamer_write_bitpack (&bp);
3470 if (c->agg_contents)
3471 streamer_write_uhwi (ob, c->offset);
3473 streamer_write_uhwi (ob, vec_safe_length (info->size_time_table));
3474 for (i = 0; vec_safe_iterate (info->size_time_table, i, &e); i++)
3476 streamer_write_uhwi (ob, e->size);
3477 e->time.stream_out (ob);
3478 e->exec_predicate.stream_out (ob);
3479 e->nonconst_predicate.stream_out (ob);
3481 if (info->loop_iterations)
3482 info->loop_iterations->stream_out (ob);
3483 else
3484 streamer_write_uhwi (ob, 0);
3485 if (info->loop_stride)
3486 info->loop_stride->stream_out (ob);
3487 else
3488 streamer_write_uhwi (ob, 0);
3489 if (info->array_index)
3490 info->array_index->stream_out (ob);
3491 else
3492 streamer_write_uhwi (ob, 0);
3493 for (edge = cnode->callees; edge; edge = edge->next_callee)
3494 write_ipa_call_summary (ob, edge);
3495 for (edge = cnode->indirect_calls; edge; edge = edge->next_callee)
3496 write_ipa_call_summary (ob, edge);
3499 streamer_write_char_stream (ob->main_stream, 0);
3500 produce_asm (ob, NULL);
3501 destroy_output_block (ob);
3503 if (!flag_ipa_cp)
3504 ipa_prop_write_jump_functions ();
3508 /* Release inline summary. */
3510 void
3511 ipa_free_fn_summary (void)
3513 struct cgraph_node *node;
3514 if (!ipa_call_summaries)
3515 return;
3516 FOR_EACH_DEFINED_FUNCTION (node)
3517 if (!node->alias)
3518 ipa_fn_summaries->get_create (node)->reset (node);
3519 ipa_fn_summaries->release ();
3520 ipa_fn_summaries = NULL;
3521 ipa_call_summaries->release ();
3522 delete ipa_call_summaries;
3523 ipa_call_summaries = NULL;
3524 edge_predicate_pool.release ();
3527 namespace {
3529 const pass_data pass_data_local_fn_summary =
3531 GIMPLE_PASS, /* type */
3532 "local-fnsummary", /* name */
3533 OPTGROUP_INLINE, /* optinfo_flags */
3534 TV_INLINE_PARAMETERS, /* tv_id */
3535 0, /* properties_required */
3536 0, /* properties_provided */
3537 0, /* properties_destroyed */
3538 0, /* todo_flags_start */
3539 0, /* todo_flags_finish */
3542 class pass_local_fn_summary : public gimple_opt_pass
3544 public:
3545 pass_local_fn_summary (gcc::context *ctxt)
3546 : gimple_opt_pass (pass_data_local_fn_summary, ctxt)
3549 /* opt_pass methods: */
3550 opt_pass * clone () { return new pass_local_fn_summary (m_ctxt); }
3551 virtual unsigned int execute (function *)
3553 return compute_fn_summary_for_current ();
3556 }; // class pass_local_fn_summary
3558 } // anon namespace
3560 gimple_opt_pass *
3561 make_pass_local_fn_summary (gcc::context *ctxt)
3563 return new pass_local_fn_summary (ctxt);
3567 /* Free inline summary. */
3569 namespace {
3571 const pass_data pass_data_ipa_free_fn_summary =
3573 SIMPLE_IPA_PASS, /* type */
3574 "free-fnsummary", /* name */
3575 OPTGROUP_NONE, /* optinfo_flags */
3576 TV_IPA_FREE_INLINE_SUMMARY, /* tv_id */
3577 0, /* properties_required */
3578 0, /* properties_provided */
3579 0, /* properties_destroyed */
3580 0, /* todo_flags_start */
3581 0, /* todo_flags_finish */
3584 class pass_ipa_free_fn_summary : public simple_ipa_opt_pass
3586 public:
3587 pass_ipa_free_fn_summary (gcc::context *ctxt)
3588 : simple_ipa_opt_pass (pass_data_ipa_free_fn_summary, ctxt),
3589 small_p (false)
3592 /* opt_pass methods: */
3593 opt_pass *clone () { return new pass_ipa_free_fn_summary (m_ctxt); }
3594 void set_pass_param (unsigned int n, bool param)
3596 gcc_assert (n == 0);
3597 small_p = param;
3599 virtual bool gate (function *) { return small_p || !flag_wpa; }
3600 virtual unsigned int execute (function *)
3602 ipa_free_fn_summary ();
3603 /* Early optimizations may make function unreachable. We can not
3604 remove unreachable functions as part of the early opts pass because
3605 TODOs are run before subpasses. Do it here. */
3606 return small_p ? TODO_remove_functions | TODO_dump_symtab : 0;
3609 private:
3610 bool small_p;
3611 }; // class pass_ipa_free_fn_summary
3613 } // anon namespace
3615 simple_ipa_opt_pass *
3616 make_pass_ipa_free_fn_summary (gcc::context *ctxt)
3618 return new pass_ipa_free_fn_summary (ctxt);
3621 namespace {
3623 const pass_data pass_data_ipa_fn_summary =
3625 IPA_PASS, /* type */
3626 "fnsummary", /* name */
3627 OPTGROUP_INLINE, /* optinfo_flags */
3628 TV_IPA_FNSUMMARY, /* tv_id */
3629 0, /* properties_required */
3630 0, /* properties_provided */
3631 0, /* properties_destroyed */
3632 0, /* todo_flags_start */
3633 ( TODO_dump_symtab ), /* todo_flags_finish */
3636 class pass_ipa_fn_summary : public ipa_opt_pass_d
3638 public:
3639 pass_ipa_fn_summary (gcc::context *ctxt)
3640 : ipa_opt_pass_d (pass_data_ipa_fn_summary, ctxt,
3641 ipa_fn_summary_generate, /* generate_summary */
3642 ipa_fn_summary_write, /* write_summary */
3643 ipa_fn_summary_read, /* read_summary */
3644 NULL, /* write_optimization_summary */
3645 NULL, /* read_optimization_summary */
3646 NULL, /* stmt_fixup */
3647 0, /* function_transform_todo_flags_start */
3648 NULL, /* function_transform */
3649 NULL) /* variable_transform */
3652 /* opt_pass methods: */
3653 virtual unsigned int execute (function *) { return 0; }
3655 }; // class pass_ipa_fn_summary
3657 } // anon namespace
3659 ipa_opt_pass_d *
3660 make_pass_ipa_fn_summary (gcc::context *ctxt)
3662 return new pass_ipa_fn_summary (ctxt);
3665 /* Reset all state within ipa-fnsummary.c so that we can rerun the compiler
3666 within the same process. For use by toplev::finalize. */
3668 void
3669 ipa_fnsummary_c_finalize (void)
3671 ipa_free_fn_summary ();