1 /* Function summary pass.
2 Copyright (C) 2003-2017 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* Analysis of function bodies used by inter-procedural passes
23 We estimate for each function
24 - function body size and size after specializing into given context
25 - average function execution time in a given context
28 - call statement size, time and how often the parameters change
30 ipa_fn_summary data structures store above information locally (i.e.
31 parameters of the function itself) and globally (i.e. parameters of
32 the function created by applying all the inline decisions already
33 present in the callgraph).
35 We provide access to the ipa_fn_summary data structure and
36 basic logic updating the parameters when inlining is performed.
38 The summaries are context sensitive. Context means
39 1) partial assignment of known constant values of operands
40 2) whether function is inlined into the call or not.
41 It is easy to add more variants. To represent function size and time
42 that depends on context (i.e. it is known to be optimized away when
43 context is known either by inlining or from IP-CP and cloning),
46 estimate_edge_size_and_time can be used to query
47 function size/time in the given context. ipa_merge_fn_summary_after_inlining merges
48 properties of caller and callee after inlining.
50 Finally pass_inline_parameters is exported. This is used to drive
51 computation of function parameters used by the early inliner. IPA
52 inliner performs analysis via its analyze_function method. */
56 #include "coretypes.h"
60 #include "alloc-pool.h"
61 #include "tree-pass.h"
63 #include "tree-streamer.h"
65 #include "diagnostic.h"
66 #include "fold-const.h"
67 #include "print-tree.h"
68 #include "tree-inline.h"
69 #include "gimple-pretty-print.h"
72 #include "gimple-iterator.h"
74 #include "tree-ssa-loop-niter.h"
75 #include "tree-ssa-loop.h"
76 #include "symbol-summary.h"
78 #include "ipa-fnsummary.h"
80 #include "tree-scalar-evolution.h"
81 #include "ipa-utils.h"
83 #include "cfgexpand.h"
/* Per-node function summaries; allocated in ipa_fn_summary_alloc.  */
87 function_summary
<ipa_fn_summary
*> *ipa_fn_summaries
;
/* Per-call-edge summaries; allocated in ipa_fn_summary_alloc.  */
88 call_summary
<ipa_call_summary
*> *ipa_call_summaries
;
90 /* Edge predicates go here. */
91 static object_allocator
<predicate
> edge_predicate_pool ("edge predicates");
96 ipa_dump_hints (FILE *f
, ipa_hints hints
)
100 fprintf (f
, "IPA hints:");
101 if (hints
& INLINE_HINT_indirect_call
)
103 hints
&= ~INLINE_HINT_indirect_call
;
104 fprintf (f
, " indirect_call");
106 if (hints
& INLINE_HINT_loop_iterations
)
108 hints
&= ~INLINE_HINT_loop_iterations
;
109 fprintf (f
, " loop_iterations");
111 if (hints
& INLINE_HINT_loop_stride
)
113 hints
&= ~INLINE_HINT_loop_stride
;
114 fprintf (f
, " loop_stride");
116 if (hints
& INLINE_HINT_same_scc
)
118 hints
&= ~INLINE_HINT_same_scc
;
119 fprintf (f
, " same_scc");
121 if (hints
& INLINE_HINT_in_scc
)
123 hints
&= ~INLINE_HINT_in_scc
;
124 fprintf (f
, " in_scc");
126 if (hints
& INLINE_HINT_cross_module
)
128 hints
&= ~INLINE_HINT_cross_module
;
129 fprintf (f
, " cross_module");
131 if (hints
& INLINE_HINT_declared_inline
)
133 hints
&= ~INLINE_HINT_declared_inline
;
134 fprintf (f
, " declared_inline");
136 if (hints
& INLINE_HINT_array_index
)
138 hints
&= ~INLINE_HINT_array_index
;
139 fprintf (f
, " array_index");
141 if (hints
& INLINE_HINT_known_hot
)
143 hints
&= ~INLINE_HINT_known_hot
;
144 fprintf (f
, " known_hot");
150 /* Record SIZE and TIME to SUMMARY.
151 The accounted code will be executed when EXEC_PRED is true.
152 When NONCONST_PRED is false the code will evaluate to constant and
153 will get optimized out in specialized clones of the function. */
156 ipa_fn_summary::account_size_time (int size
, sreal time
,
157 const predicate
&exec_pred
,
158 const predicate
&nonconst_pred_in
)
163 predicate nonconst_pred
;
165 if (exec_pred
== false)
168 nonconst_pred
= nonconst_pred_in
& exec_pred
;
170 if (nonconst_pred
== false)
173 /* We need to create initial empty unconditional clause, but otherwise
174 we don't need to account empty times and sizes. */
175 if (!size
&& time
== 0 && size_time_table
)
178 gcc_assert (time
>= 0);
180 for (i
= 0; vec_safe_iterate (size_time_table
, i
, &e
); i
++)
181 if (e
->exec_predicate
== exec_pred
182 && e
->nonconst_predicate
== nonconst_pred
)
191 e
= &(*size_time_table
)[0];
192 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
194 "\t\tReached limit on number of entries, "
195 "ignoring the predicate.");
197 if (dump_file
&& (dump_flags
& TDF_DETAILS
) && (time
!= 0 || size
))
200 "\t\tAccounting size:%3.2f, time:%3.2f on %spredicate exec:",
201 ((double) size
) / ipa_fn_summary::size_scale
,
202 (time
.to_double ()), found
? "" : "new ");
203 exec_pred
.dump (dump_file
, conds
, 0);
204 if (exec_pred
!= nonconst_pred
)
206 fprintf (dump_file
, " nonconst:");
207 nonconst_pred
.dump (dump_file
, conds
);
210 fprintf (dump_file
, "\n");
214 struct size_time_entry new_entry
;
215 new_entry
.size
= size
;
216 new_entry
.time
= time
;
217 new_entry
.exec_predicate
= exec_pred
;
218 new_entry
.nonconst_predicate
= nonconst_pred
;
219 vec_safe_push (size_time_table
, new_entry
);
228 /* We proved E to be unreachable, redirect it to __builtin_unreachable. */
230 static struct cgraph_edge
*
231 redirect_to_unreachable (struct cgraph_edge
*e
)
/* If the edge was already inlined, remember the callee so its body and
   inline clones can be removed after the redirection below.  */
233 struct cgraph_node
*callee
= !e
->inline_failed
? e
->callee
: NULL
;
234 struct cgraph_node
*target
= cgraph_node::get_create
235 (builtin_decl_implicit (BUILT_IN_UNREACHABLE
));
/* Speculative edges are first resolved to a direct call to the target.  */
238 e
= e
->resolve_speculation (target
->decl
);
240 e
->make_direct (target
);
242 e
->redirect_callee (target
);
243 struct ipa_call_summary
*es
= ipa_call_summaries
->get (e
);
244 e
->inline_failed
= CIF_UNREACHABLE
;
/* A call to __builtin_unreachable is free.  */
247 es
->call_stmt_size
= 0;
248 es
->call_stmt_time
= 0;
250 callee
->remove_symbol_and_inline_clones ();
254 /* Set predicate for edge E. */
257 edge_set_predicate (struct cgraph_edge
*e
, predicate
*predicate
)
259 /* If the edge is determined to be never executed, redirect it
260 to BUILTIN_UNREACHABLE to make it clear to IPA passes the call will
262 if (predicate
&& *predicate
== false
263 /* When handling speculative edges, we need to do the redirection
264 just once. Do it always on the direct edge, so we do not
265 attempt to resolve speculation while duplicating the edge. */
266 && (!e
->speculative
|| e
->callee
))
267 e
= redirect_to_unreachable (e
);
269 struct ipa_call_summary
*es
= ipa_call_summaries
->get (e
);
/* A non-trivially-true predicate is stored in the predicate pool;
   a trivially true one is represented by a NULL pointer (see below).  */
270 if (predicate
&& *predicate
!= true)
273 es
->predicate
= edge_predicate_pool
.allocate ();
274 *es
->predicate
= *predicate
;
/* Otherwise drop any previously allocated predicate back to the pool.  */
279 edge_predicate_pool
.remove (es
->predicate
);
280 es
->predicate
= NULL
;
284 /* Set predicate for hint *P. */
287 set_hint_predicate (predicate
**p
, predicate new_predicate
)
/* Trivial predicates (always false / always true) carry no useful
   information for hints, so the slot is freed instead of stored.  */
289 if (new_predicate
== false || new_predicate
== true)
292 edge_predicate_pool
.remove (*p
);
/* Non-trivial predicate: allocate a pool slot to hold it.  */
298 *p
= edge_predicate_pool
.allocate ();
304 /* Compute what conditions may or may not hold given information about
305 parameters. RET_CLAUSE returns truths that may hold in a specialized copy,
306 while RET_NONSPEC_CLAUSE returns truths that may hold in a nonspecialized
307 copy when called in a given context. It is a bitmask of conditions. Bit
308 0 means that condition is known to be false, while bit 1 means that condition
309 may or may not be true. These differs - for example NOT_INLINED condition
310 is always false in the second and also builtin_constant_p tests can not use
311 the fact that parameter is indeed a constant.
313 KNOWN_VALS is partial mapping of parameters of NODE to constant values.
314 KNOWN_AGGS is a vector of aggregate jump functions for each parameter.
315 Return clause of possible truths. When INLINE_P is true, assume that we are
318 ERROR_MARK means compile time invariant. */
321 evaluate_conditions_for_known_args (struct cgraph_node
*node
,
323 vec
<tree
> known_vals
,
324 vec
<ipa_agg_jump_function_p
>
326 clause_t
*ret_clause
,
327 clause_t
*ret_nonspec_clause
)
329 clause_t clause
= inline_p
? 0 : 1 << predicate::not_inlined_condition
;
330 clause_t nonspec_clause
= 1 << predicate::not_inlined_condition
;
331 struct ipa_fn_summary
*info
= ipa_fn_summaries
->get (node
);
335 for (i
= 0; vec_safe_iterate (info
->conds
, i
, &c
); i
++)
340 /* We allow call stmt to have fewer arguments than the callee function
341 (especially for K&R style programs). So bound check here (we assume
342 known_aggs vector, if non-NULL, has the same length as
344 gcc_checking_assert (!known_aggs
.exists ()
345 || (known_vals
.length () == known_aggs
.length ()));
346 if (c
->operand_num
>= (int) known_vals
.length ())
348 clause
|= 1 << (i
+ predicate::first_dynamic_condition
);
349 nonspec_clause
|= 1 << (i
+ predicate::first_dynamic_condition
);
355 struct ipa_agg_jump_function
*agg
;
357 if (c
->code
== predicate::changed
359 && (known_vals
[c
->operand_num
] == error_mark_node
))
362 if (known_aggs
.exists ())
364 agg
= known_aggs
[c
->operand_num
];
365 val
= ipa_find_agg_cst_for_param (agg
, known_vals
[c
->operand_num
],
366 c
->offset
, c
->by_ref
);
373 val
= known_vals
[c
->operand_num
];
374 if (val
== error_mark_node
&& c
->code
!= predicate::changed
)
380 clause
|= 1 << (i
+ predicate::first_dynamic_condition
);
381 nonspec_clause
|= 1 << (i
+ predicate::first_dynamic_condition
);
384 if (c
->code
== predicate::changed
)
386 nonspec_clause
|= 1 << (i
+ predicate::first_dynamic_condition
);
390 if (tree_to_shwi (TYPE_SIZE (TREE_TYPE (val
))) != c
->size
)
392 clause
|= 1 << (i
+ predicate::first_dynamic_condition
);
393 nonspec_clause
|= 1 << (i
+ predicate::first_dynamic_condition
);
396 if (c
->code
== predicate::is_not_constant
)
398 nonspec_clause
|= 1 << (i
+ predicate::first_dynamic_condition
);
402 val
= fold_unary (VIEW_CONVERT_EXPR
, TREE_TYPE (c
->val
), val
);
404 ? fold_binary_to_constant (c
->code
, boolean_type_node
, val
, c
->val
)
407 if (res
&& integer_zerop (res
))
410 clause
|= 1 << (i
+ predicate::first_dynamic_condition
);
411 nonspec_clause
|= 1 << (i
+ predicate::first_dynamic_condition
);
413 *ret_clause
= clause
;
414 if (ret_nonspec_clause
)
415 *ret_nonspec_clause
= nonspec_clause
;
419 /* Work out what conditions might be true at invocation of E. */
422 evaluate_properties_for_edge (struct cgraph_edge
*e
, bool inline_p
,
423 clause_t
*clause_ptr
,
424 clause_t
*nonspec_clause_ptr
,
425 vec
<tree
> *known_vals_ptr
,
426 vec
<ipa_polymorphic_call_context
>
428 vec
<ipa_agg_jump_function_p
> *known_aggs_ptr
)
430 struct cgraph_node
*callee
= e
->callee
->ultimate_alias_target ();
431 struct ipa_fn_summary
*info
= ipa_fn_summaries
->get (callee
);
432 vec
<tree
> known_vals
= vNULL
;
433 vec
<ipa_agg_jump_function_p
> known_aggs
= vNULL
;
436 *clause_ptr
= inline_p
? 0 : 1 << predicate::not_inlined_condition
;
438 known_vals_ptr
->create (0);
439 if (known_contexts_ptr
)
440 known_contexts_ptr
->create (0);
442 if (ipa_node_params_sum
443 && !e
->call_stmt_cannot_inline_p
444 && ((clause_ptr
&& info
->conds
) || known_vals_ptr
|| known_contexts_ptr
))
446 struct ipa_node_params
*parms_info
;
447 struct ipa_edge_args
*args
= IPA_EDGE_REF (e
);
448 struct ipa_call_summary
*es
= ipa_call_summaries
->get (e
);
449 int i
, count
= ipa_get_cs_argument_count (args
);
451 if (e
->caller
->global
.inlined_to
)
452 parms_info
= IPA_NODE_REF (e
->caller
->global
.inlined_to
);
454 parms_info
= IPA_NODE_REF (e
->caller
);
456 if (count
&& (info
->conds
|| known_vals_ptr
))
457 known_vals
.safe_grow_cleared (count
);
458 if (count
&& (info
->conds
|| known_aggs_ptr
))
459 known_aggs
.safe_grow_cleared (count
);
460 if (count
&& known_contexts_ptr
)
461 known_contexts_ptr
->safe_grow_cleared (count
);
463 for (i
= 0; i
< count
; i
++)
465 struct ipa_jump_func
*jf
= ipa_get_ith_jump_func (args
, i
);
466 tree cst
= ipa_value_from_jfunc (parms_info
, jf
);
468 if (!cst
&& e
->call_stmt
469 && i
< (int)gimple_call_num_args (e
->call_stmt
))
471 cst
= gimple_call_arg (e
->call_stmt
, i
);
472 if (!is_gimple_min_invariant (cst
))
477 gcc_checking_assert (TREE_CODE (cst
) != TREE_BINFO
);
478 if (known_vals
.exists ())
481 else if (inline_p
&& !es
->param
[i
].change_prob
)
482 known_vals
[i
] = error_mark_node
;
484 if (known_contexts_ptr
)
485 (*known_contexts_ptr
)[i
] = ipa_context_from_jfunc (parms_info
, e
,
487 /* TODO: When IPA-CP starts propagating and merging aggregate jump
488 functions, use its knowledge of the caller too, just like the
489 scalar case above. */
490 known_aggs
[i
] = &jf
->agg
;
493 else if (e
->call_stmt
&& !e
->call_stmt_cannot_inline_p
494 && ((clause_ptr
&& info
->conds
) || known_vals_ptr
))
496 int i
, count
= (int)gimple_call_num_args (e
->call_stmt
);
498 if (count
&& (info
->conds
|| known_vals_ptr
))
499 known_vals
.safe_grow_cleared (count
);
500 for (i
= 0; i
< count
; i
++)
502 tree cst
= gimple_call_arg (e
->call_stmt
, i
);
503 if (!is_gimple_min_invariant (cst
))
510 evaluate_conditions_for_known_args (callee
, inline_p
,
511 known_vals
, known_aggs
, clause_ptr
,
515 *known_vals_ptr
= known_vals
;
517 known_vals
.release ();
520 *known_aggs_ptr
= known_aggs
;
522 known_aggs
.release ();
526 /* Allocate the function summary. */
529 ipa_fn_summary_alloc (void)
/* The summaries must not have been allocated already.  */
531 gcc_checking_assert (!ipa_fn_summaries
);
/* Function summaries are GGC allocated (create_ggc); call summaries
   are heap allocated.  */
532 ipa_fn_summaries
= ipa_fn_summary_t::create_ggc (symtab
);
533 ipa_call_summaries
= new ipa_call_summary_t (symtab
, false);
536 /* We are called multiple times for given function; clear
537 data from previous run so they are not cumulated. */
540 ipa_call_summary::reset ()
542 call_stmt_size
= call_stmt_time
= 0;
/* Release the edge predicate back to the pool.  */
544 edge_predicate_pool
.remove (predicate
);
549 /* We are called multiple times for given function; clear
550 data from previous run so they are not cumulated. */
553 ipa_fn_summary::reset (struct cgraph_node
*node
)
555 struct cgraph_edge
*e
;
558 estimated_stack_size
= 0;
559 estimated_self_stack_size
= 0;
560 stack_frame_offset
= 0;
/* Free the hint predicates back to the pool and clear the pointers.  */
567 edge_predicate_pool
.remove (loop_iterations
);
568 loop_iterations
= NULL
;
572 edge_predicate_pool
.remove (loop_stride
);
577 edge_predicate_pool
.remove (array_index
);
581 vec_free (size_time_table
);
/* Also reset the call summaries of all outgoing direct and indirect
   edges of NODE.  */
582 for (e
= node
->callees
; e
; e
= e
->next_callee
)
583 ipa_call_summaries
->get (e
)->reset ();
584 for (e
= node
->indirect_calls
; e
; e
= e
->next_callee
)
585 ipa_call_summaries
->get (e
)->reset ();
586 fp_expressions
= false;
589 /* Hook that is called by cgraph.c when a node is removed. */
/* NODE is the symbol being removed; INFO is its summary being torn down.  */
592 ipa_fn_summary_t::remove (cgraph_node
*node
, ipa_fn_summary
*info
)
597 /* Same as remap_predicate_after_duplication but handle hint predicate *P.
598 Additionally care about allocating new memory slot for updated predicate
599 and set it to NULL when it becomes true or false (and thus uninteresting).
603 remap_hint_predicate_after_duplication (predicate
**p
,
604 clause_t possible_truths
)
606 predicate new_predicate
;
/* Specialize the predicate against the clause of POSSIBLE_TRUTHS known
   to hold in the clone.  */
611 new_predicate
= (*p
)->remap_after_duplication (possible_truths
);
612 /* We do not want to free previous predicate; it is used by node origin. */
614 set_hint_predicate (p
, new_predicate
)
);
618 /* Hook that is called by cgraph.c when a node is duplicated. */
620 ipa_fn_summary_t::duplicate (cgraph_node
*src
,
623 ipa_fn_summary
*info
)
625 memcpy (info
, ipa_fn_summaries
->get (src
), sizeof (ipa_fn_summary
));
626 /* TODO: as an optimization, we may avoid copying conditions
627 that are known to be false or true. */
628 info
->conds
= vec_safe_copy (info
->conds
);
630 /* When there are any replacements in the function body, see if we can figure
631 out that something was optimized out. */
632 if (ipa_node_params_sum
&& dst
->clone
.tree_map
)
634 vec
<size_time_entry
, va_gc
> *entry
= info
->size_time_table
;
635 /* Use SRC parm info since it may not be copied yet. */
636 struct ipa_node_params
*parms_info
= IPA_NODE_REF (src
);
637 vec
<tree
> known_vals
= vNULL
;
638 int count
= ipa_get_param_count (parms_info
);
640 clause_t possible_truths
;
641 predicate true_pred
= true;
643 int optimized_out_size
= 0;
644 bool inlined_to_p
= false;
645 struct cgraph_edge
*edge
, *next
;
647 info
->size_time_table
= 0;
648 known_vals
.safe_grow_cleared (count
);
649 for (i
= 0; i
< count
; i
++)
651 struct ipa_replace_map
*r
;
653 for (j
= 0; vec_safe_iterate (dst
->clone
.tree_map
, j
, &r
); j
++)
655 if (((!r
->old_tree
&& r
->parm_num
== i
)
656 || (r
->old_tree
&& r
->old_tree
== ipa_get_param (parms_info
, i
)))
657 && r
->replace_p
&& !r
->ref_p
)
659 known_vals
[i
] = r
->new_tree
;
664 evaluate_conditions_for_known_args (dst
, false,
668 /* We are going to specialize,
669 so ignore nonspec truths. */
671 known_vals
.release ();
673 info
->account_size_time (0, 0, true_pred
, true_pred
);
675 /* Remap size_time vectors.
676 Simplify the predicate by pruning out alternatives that are known
678 TODO: as an optimization, we can also eliminate conditions known
680 for (i
= 0; vec_safe_iterate (entry
, i
, &e
); i
++)
682 predicate new_exec_pred
;
683 predicate new_nonconst_pred
;
684 new_exec_pred
= e
->exec_predicate
.remap_after_duplication
686 new_nonconst_pred
= e
->nonconst_predicate
.remap_after_duplication
688 if (new_exec_pred
== false || new_nonconst_pred
== false)
689 optimized_out_size
+= e
->size
;
691 info
->account_size_time (e
->size
, e
->time
, new_exec_pred
,
695 /* Remap edge predicates with the same simplification as above.
696 Also copy constantness arrays. */
697 for (edge
= dst
->callees
; edge
; edge
= next
)
699 predicate new_predicate
;
700 struct ipa_call_summary
*es
= ipa_call_summaries
->get (edge
);
701 next
= edge
->next_callee
;
703 if (!edge
->inline_failed
)
707 new_predicate
= es
->predicate
->remap_after_duplication
709 if (new_predicate
== false && *es
->predicate
!= false)
710 optimized_out_size
+= es
->call_stmt_size
* ipa_fn_summary::size_scale
;
711 edge_set_predicate (edge
, &new_predicate
);
714 /* Remap indirect edge predicates with the same simplification as above.
715 Also copy constantness arrays. */
716 for (edge
= dst
->indirect_calls
; edge
; edge
= next
)
718 predicate new_predicate
;
719 struct ipa_call_summary
*es
= ipa_call_summaries
->get (edge
);
720 next
= edge
->next_callee
;
722 gcc_checking_assert (edge
->inline_failed
);
725 new_predicate
= es
->predicate
->remap_after_duplication
727 if (new_predicate
== false && *es
->predicate
!= false)
728 optimized_out_size
+= es
->call_stmt_size
* ipa_fn_summary::size_scale
;
729 edge_set_predicate (edge
, &new_predicate
);
731 remap_hint_predicate_after_duplication (&info
->loop_iterations
,
733 remap_hint_predicate_after_duplication (&info
->loop_stride
,
735 remap_hint_predicate_after_duplication (&info
->array_index
,
738 /* If inliner or someone after inliner will ever start producing
739 non-trivial clones, we will get trouble with lack of information
740 about updating self sizes, because size vectors already contains
741 sizes of the callees. */
742 gcc_assert (!inlined_to_p
|| !optimized_out_size
);
746 info
->size_time_table
= vec_safe_copy (info
->size_time_table
);
747 if (info
->loop_iterations
)
749 predicate p
= *info
->loop_iterations
;
750 info
->loop_iterations
= NULL
;
751 set_hint_predicate (&info
->loop_iterations
, p
);
753 if (info
->loop_stride
)
755 predicate p
= *info
->loop_stride
;
756 info
->loop_stride
= NULL
;
757 set_hint_predicate (&info
->loop_stride
, p
);
759 if (info
->array_index
)
761 predicate p
= *info
->array_index
;
762 info
->array_index
= NULL
;
763 set_hint_predicate (&info
->array_index
, p
);
766 if (!dst
->global
.inlined_to
)
767 ipa_update_overall_fn_summary (dst
);
771 /* Hook that is called by cgraph.c when an edge is duplicated. */
774 ipa_call_summary_t::duplicate (struct cgraph_edge
*src
,
775 struct cgraph_edge
*dst
,
776 struct ipa_call_summary
*srcinfo
,
777 struct ipa_call_summary
*info
)
/* Copy the predicate and per-parameter info from SRCINFO to INFO.  */
780 info
->predicate
= NULL
;
781 edge_set_predicate (dst
, srcinfo
->predicate
);
782 info
->param
= srcinfo
->param
.copy ();
/* When an indirect edge (unknown callee) is duplicated as a direct one,
   remove the extra cost charged for indirect calls.  */
783 if (!dst
->indirect_unknown_callee
&& src
->indirect_unknown_callee
)
785 info
->call_stmt_size
-= (eni_size_weights
.indirect_call_cost
786 - eni_size_weights
.call_cost
);
787 info
->call_stmt_time
-= (eni_time_weights
.indirect_call_cost
788 - eni_time_weights
.call_cost
);
793 /* Keep edge cache consistent across edge removal. */
/* SUM is the summary of the edge being removed.  */
796 ipa_call_summary_t::remove (struct cgraph_edge
*,
797 struct ipa_call_summary
*sum
)
803 /* Dump edge summaries associated to NODE and recursively to all clones.
807 dump_ipa_call_summary (FILE *f
, int indent
, struct cgraph_node
*node
,
808 struct ipa_fn_summary
*info
)
810 struct cgraph_edge
*edge
;
811 for (edge
= node
->callees
; edge
; edge
= edge
->next_callee
)
813 struct ipa_call_summary
*es
= ipa_call_summaries
->get (edge
);
814 struct cgraph_node
*callee
= edge
->callee
->ultimate_alias_target ();
818 "%*s%s/%i %s\n%*s loop depth:%2i freq:%4i size:%2i"
819 " time: %2i callee size:%2i stack:%2i",
820 indent
, "", callee
->name (), callee
->order
,
822 ? "inlined" : cgraph_inline_failed_string (edge
-> inline_failed
),
823 indent
, "", es
->loop_depth
, edge
->frequency
,
824 es
->call_stmt_size
, es
->call_stmt_time
,
825 (int) ipa_fn_summaries
->get (callee
)->size
/ ipa_fn_summary::size_scale
,
826 (int) ipa_fn_summaries
->get (callee
)->estimated_stack_size
);
830 fprintf (f
, " predicate: ");
831 es
->predicate
->dump (f
, info
->conds
);
835 if (es
->param
.exists ())
836 for (i
= 0; i
< (int) es
->param
.length (); i
++)
838 int prob
= es
->param
[i
].change_prob
;
841 fprintf (f
, "%*s op%i is compile time invariant\n",
843 else if (prob
!= REG_BR_PROB_BASE
)
844 fprintf (f
, "%*s op%i change %f%% of time\n", indent
+ 2, "", i
,
845 prob
* 100.0 / REG_BR_PROB_BASE
);
847 if (!edge
->inline_failed
)
849 fprintf (f
, "%*sStack frame offset %i, callee self size %i,"
852 (int) ipa_fn_summaries
->get (callee
)->stack_frame_offset
,
853 (int) ipa_fn_summaries
->get (callee
)->estimated_self_stack_size
,
854 (int) ipa_fn_summaries
->get (callee
)->estimated_stack_size
);
855 dump_ipa_call_summary (f
, indent
+ 2, callee
, info
);
858 for (edge
= node
->indirect_calls
; edge
; edge
= edge
->next_callee
)
860 struct ipa_call_summary
*es
= ipa_call_summaries
->get (edge
);
861 fprintf (f
, "%*sindirect call loop depth:%2i freq:%4i size:%2i"
865 edge
->frequency
, es
->call_stmt_size
, es
->call_stmt_time
);
868 fprintf (f
, "predicate: ");
869 es
->predicate
->dump (f
, info
->conds
);
878 ipa_dump_fn_summary (FILE *f
, struct cgraph_node
*node
)
880 if (node
->definition
)
882 struct ipa_fn_summary
*s
= ipa_fn_summaries
->get (node
);
885 fprintf (f
, "IPA function summary for %s/%i", node
->name (),
887 if (DECL_DISREGARD_INLINE_LIMITS (node
->decl
))
888 fprintf (f
, " always_inline");
890 fprintf (f
, " inlinable");
891 if (s
->contains_cilk_spawn
)
892 fprintf (f
, " contains_cilk_spawn");
893 if (s
->fp_expressions
)
894 fprintf (f
, " fp_expression");
895 fprintf (f
, "\n global time: %f\n", s
->time
.to_double ());
896 fprintf (f
, " self size: %i\n", s
->self_size
);
897 fprintf (f
, " global size: %i\n", s
->size
);
898 fprintf (f
, " min size: %i\n", s
->min_size
);
899 fprintf (f
, " self stack: %i\n",
900 (int) s
->estimated_self_stack_size
);
901 fprintf (f
, " global stack: %i\n", (int) s
->estimated_stack_size
);
903 fprintf (f
, " estimated growth:%i\n", (int) s
->growth
);
905 fprintf (f
, " In SCC: %i\n", (int) s
->scc_no
);
906 for (i
= 0; vec_safe_iterate (s
->size_time_table
, i
, &e
); i
++)
908 fprintf (f
, " size:%f, time:%f",
909 (double) e
->size
/ ipa_fn_summary::size_scale
,
910 e
->time
.to_double ());
911 if (e
->exec_predicate
!= true)
913 fprintf (f
, ", executed if:");
914 e
->exec_predicate
.dump (f
, s
->conds
, 0);
916 if (e
->exec_predicate
!= e
->nonconst_predicate
)
918 fprintf (f
, ", nonconst if:");
919 e
->nonconst_predicate
.dump (f
, s
->conds
, 0);
923 if (s
->loop_iterations
)
925 fprintf (f
, " loop iterations:");
926 s
->loop_iterations
->dump (f
, s
->conds
);
930 fprintf (f
, " loop stride:");
931 s
->loop_stride
->dump (f
, s
->conds
);
935 fprintf (f
, " array index:");
936 s
->array_index
->dump (f
, s
->conds
);
938 fprintf (f
, " calls:\n");
939 dump_ipa_call_summary (f
, 4, node
, s
);
/* Dump the function summary of NODE to stderr.  */
945 ipa_debug_fn_summary (struct cgraph_node
*node
)
947 ipa_dump_fn_summary (stderr
, node
);
/* Dump summaries of all defined functions to file F.  */
951 ipa_dump_fn_summaries (FILE *f
)
953 struct cgraph_node
*node
;
/* Skip nodes already inlined into something else; only dump
   top-level functions.  */
955 FOR_EACH_DEFINED_FUNCTION (node
)
956 if (!node
->global
.inlined_to
)
957 ipa_dump_fn_summary (f
, node
);
960 /* Callback of walk_aliased_vdefs. Flags that it has been invoked to the
961 boolean variable pointed to by DATA. */
964 mark_modified (ao_ref
*ao ATTRIBUTE_UNUSED
, tree vdef ATTRIBUTE_UNUSED
,
/* DATA points to a bool that records whether any aliasing vdef was seen.  */
967 bool *b
= (bool *) data
;
972 /* If OP refers to value of function parameter, return the corresponding
973 parameter. If non-NULL, the size of the memory load (or the SSA_NAME of the
974 PARM_DECL) will be stored to *SIZE_P in that case too. */
977 unmodified_parm_1 (gimple
*stmt
, tree op
, HOST_WIDE_INT
*size_p
)
979 /* SSA_NAME referring to parm default def? */
980 if (TREE_CODE (op
) == SSA_NAME
981 && SSA_NAME_IS_DEFAULT_DEF (op
)
982 && TREE_CODE (SSA_NAME_VAR (op
)) == PARM_DECL
)
985 *size_p
= tree_to_shwi (TYPE_SIZE (TREE_TYPE (op
)));
986 return SSA_NAME_VAR (op
);
988 /* Non-SSA parm reference? */
989 if (TREE_CODE (op
) == PARM_DECL
)
991 bool modified
= false;
/* Walk the virtual defs reaching STMT to check that the parameter
   was not modified before this use.  */
994 ao_ref_init (&refd
, op
);
995 walk_aliased_vdefs (&refd
, gimple_vuse (stmt
), mark_modified
, &modified
,
1000 *size_p
= tree_to_shwi (TYPE_SIZE (TREE_TYPE (op
)));
1007 /* If OP refers to value of function parameter, return the corresponding
1008 parameter. Also traverse chains of SSA register assignments. If non-NULL,
1009 the size of the memory load (or the SSA_NAME of the PARM_DECL) will be
1010 stored to *SIZE_P in that case too. */
1013 unmodified_parm (gimple
*stmt
, tree op
, HOST_WIDE_INT
*size_p
)
1015 tree res
= unmodified_parm_1 (stmt
, op
, size_p
);
/* Follow a chain of single-rhs SSA assignments back toward the
   parameter, recursing on the defining statement.  */
1019 if (TREE_CODE (op
) == SSA_NAME
1020 && !SSA_NAME_IS_DEFAULT_DEF (op
)
1021 && gimple_assign_single_p (SSA_NAME_DEF_STMT (op
)))
1022 return unmodified_parm (SSA_NAME_DEF_STMT (op
),
1023 gimple_assign_rhs1 (SSA_NAME_DEF_STMT (op
)),
1028 /* If OP refers to a value of a function parameter or value loaded from an
1029 aggregate passed to a parameter (either by value or reference), return TRUE
1030 and store the number of the parameter to *INDEX_P, the access size into
1031 *SIZE_P, and information whether and how it has been loaded from an
1032 aggregate into *AGGPOS. INFO describes the function parameters, STMT is the
1033 statement in which OP is used or loaded. */
1036 unmodified_parm_or_parm_agg_item (struct ipa_func_body_info
*fbi
,
1037 gimple
*stmt
, tree op
, int *index_p
,
1038 HOST_WIDE_INT
*size_p
,
1039 struct agg_position_info
*aggpos
)
1041 tree res
= unmodified_parm_1 (stmt
, op
, size_p
);
1043 gcc_checking_assert (aggpos
);
/* Direct (non-aggregate) parameter reference found by unmodified_parm_1.  */
1046 *index_p
= ipa_get_param_decl_index (fbi
->info
, res
);
1049 aggpos
->agg_contents
= false;
1050 aggpos
->by_ref
= false;
/* Otherwise follow single-rhs SSA assignments back to a memory
   reference and retry.  */
1054 if (TREE_CODE (op
) == SSA_NAME
)
1056 if (SSA_NAME_IS_DEFAULT_DEF (op
)
1057 || !gimple_assign_single_p (SSA_NAME_DEF_STMT (op
)))
1059 stmt
= SSA_NAME_DEF_STMT (op
);
1060 op
= gimple_assign_rhs1 (stmt
);
1061 if (!REFERENCE_CLASS_P (op
))
1062 return unmodified_parm_or_parm_agg_item (fbi
, stmt
, op
, index_p
, size_p
,
/* OP is a memory reference: check for a load from a parameter-passed
   aggregate.  */
1066 aggpos
->agg_contents
= true;
1067 return ipa_load_from_parm_agg (fbi
, fbi
->info
->descriptors
,
1068 stmt
, op
, index_p
, &aggpos
->offset
,
1069 size_p
, &aggpos
->by_ref
);
1072 /* See if statement might disappear after inlining.
1073 0 - means not eliminated
1074 1 - half of statements goes away
1075 2 - for sure it is eliminated.
1076 We are not terribly sophisticated, basically looking for simple abstraction
1077 penalty wrappers. */
1080 eliminated_by_inlining_prob (gimple
*stmt
)
1082 enum gimple_code code
= gimple_code (stmt
);
1083 enum tree_code rhs_code
;
1093 if (gimple_num_ops (stmt
) != 2)
1096 rhs_code
= gimple_assign_rhs_code (stmt
);
1098 /* Casts of parameters, loads from parameters passed by reference
1099 and stores to return value or parameters are often free after
1100 inlining due to SRA and further combining.
1101 Assume that half of statements goes away. */
1102 if (CONVERT_EXPR_CODE_P (rhs_code
)
1103 || rhs_code
== VIEW_CONVERT_EXPR
1104 || rhs_code
== ADDR_EXPR
1105 || gimple_assign_rhs_class (stmt
) == GIMPLE_SINGLE_RHS
)
1107 tree rhs
= gimple_assign_rhs1 (stmt
);
1108 tree lhs
= gimple_assign_lhs (stmt
);
1109 tree inner_rhs
= get_base_address (rhs
);
1110 tree inner_lhs
= get_base_address (lhs
);
1111 bool rhs_free
= false;
1112 bool lhs_free
= false;
1119 /* Reads of parameter are expected to be free. */
1120 if (unmodified_parm (stmt
, inner_rhs
, NULL
))
1122 /* Match expressions of form &this->field. Those will most likely
1123 combine with something upstream after inlining. */
1124 else if (TREE_CODE (inner_rhs
) == ADDR_EXPR
)
1126 tree op
= get_base_address (TREE_OPERAND (inner_rhs
, 0));
1127 if (TREE_CODE (op
) == PARM_DECL
)
1129 else if (TREE_CODE (op
) == MEM_REF
1130 && unmodified_parm (stmt
, TREE_OPERAND (op
, 0), NULL
))
1134 /* When parameter is not SSA register because its address is taken
1135 and it is just copied into one, the statement will be completely
1136 free after inlining (we will copy propagate backward). */
1137 if (rhs_free
&& is_gimple_reg (lhs
))
1140 /* Reads of parameters passed by reference
1141 expected to be free (i.e. optimized out after inlining). */
1142 if (TREE_CODE (inner_rhs
) == MEM_REF
1143 && unmodified_parm (stmt
, TREE_OPERAND (inner_rhs
, 0), NULL
))
1146 /* Copying parameter passed by reference into gimple register is
1147 probably also going to copy propagate, but we can't be quite
1149 if (rhs_free
&& is_gimple_reg (lhs
))
1152 /* Writes to parameters, parameters passed by value and return value
1153 (either directly or passed via invisible reference) are free.
1155 TODO: We ought to handle testcase like
1156 struct a {int a,b;};
1158 retrurnsturct (void)
1164 This translate into:
1179 For that we either need to copy ipa-split logic detecting writes
1181 if (TREE_CODE (inner_lhs
) == PARM_DECL
1182 || TREE_CODE (inner_lhs
) == RESULT_DECL
1183 || (TREE_CODE (inner_lhs
) == MEM_REF
1184 && (unmodified_parm (stmt
, TREE_OPERAND (inner_lhs
, 0), NULL
)
1185 || (TREE_CODE (TREE_OPERAND (inner_lhs
, 0)) == SSA_NAME
1186 && SSA_NAME_VAR (TREE_OPERAND (inner_lhs
, 0))
1187 && TREE_CODE (SSA_NAME_VAR (TREE_OPERAND
1189 0))) == RESULT_DECL
))))
1192 && (is_gimple_reg (rhs
) || is_gimple_min_invariant (rhs
)))
1194 if (lhs_free
&& rhs_free
)
1204 /* If BB ends by a conditional we can turn into predicates, attach corresponding
1205 predicates to the CFG edges. */
1208 set_cond_stmt_execution_predicate (struct ipa_func_body_info
*fbi
,
1209 struct ipa_fn_summary
*summary
,
1216 struct agg_position_info aggpos
;
1217 enum tree_code code
, inverted_code
;
1223 last
= last_stmt (bb
);
1224 if (!last
|| gimple_code (last
) != GIMPLE_COND
)
1226 if (!is_gimple_ip_invariant (gimple_cond_rhs (last
)))
1228 op
= gimple_cond_lhs (last
);
1229 /* TODO: handle conditionals like
1232 if (unmodified_parm_or_parm_agg_item (fbi
, last
, op
, &index
, &size
, &aggpos
))
1234 code
= gimple_cond_code (last
);
1235 inverted_code
= invert_tree_comparison (code
, HONOR_NANS (op
));
1237 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
1239 enum tree_code this_code
= (e
->flags
& EDGE_TRUE_VALUE
1240 ? code
: inverted_code
);
1241 /* invert_tree_comparison will return ERROR_MARK on FP
1242 comparsions that are not EQ/NE instead of returning proper
1243 unordered one. Be sure it is not confused with NON_CONSTANT. */
1244 if (this_code
!= ERROR_MARK
)
1247 = add_condition (summary
, index
, size
, &aggpos
, this_code
,
1248 unshare_expr_without_location
1249 (gimple_cond_rhs (last
)));
1250 e
->aux
= edge_predicate_pool
.allocate ();
1251 *(predicate
*) e
->aux
= p
;
1256 if (TREE_CODE (op
) != SSA_NAME
)
1259 if (builtin_constant_p (op))
1263 Here we can predicate nonconstant_code. We can't
1264 really handle constant_code since we have no predicate
1265 for this and also the constant code is not known to be
1266 optimized away when inliner doen't see operand is constant.
1267 Other optimizers might think otherwise. */
1268 if (gimple_cond_code (last
) != NE_EXPR
1269 || !integer_zerop (gimple_cond_rhs (last
)))
1271 set_stmt
= SSA_NAME_DEF_STMT (op
);
1272 if (!gimple_call_builtin_p (set_stmt
, BUILT_IN_CONSTANT_P
)
1273 || gimple_call_num_args (set_stmt
) != 1)
1275 op2
= gimple_call_arg (set_stmt
, 0);
1276 if (!unmodified_parm_or_parm_agg_item (fbi
, set_stmt
, op2
, &index
, &size
,
1279 FOR_EACH_EDGE (e
, ei
, bb
->succs
) if (e
->flags
& EDGE_FALSE_VALUE
)
1281 predicate p
= add_condition (summary
, index
, size
, &aggpos
,
1282 predicate::is_not_constant
, NULL_TREE
);
1283 e
->aux
= edge_predicate_pool
.allocate ();
1284 *(predicate
*) e
->aux
= p
;
1289 /* If BB ends by a switch we can turn into predicates, attach corresponding
1290 predicates to the CFG edges. */
1293 set_switch_stmt_execution_predicate (struct ipa_func_body_info
*fbi
,
1294 struct ipa_fn_summary
*summary
,
1301 struct agg_position_info aggpos
;
1307 lastg
= last_stmt (bb
);
1308 if (!lastg
|| gimple_code (lastg
) != GIMPLE_SWITCH
)
1310 gswitch
*last
= as_a
<gswitch
*> (lastg
);
1311 op
= gimple_switch_index (last
);
1312 if (!unmodified_parm_or_parm_agg_item (fbi
, last
, op
, &index
, &size
, &aggpos
))
1315 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
1317 e
->aux
= edge_predicate_pool
.allocate ();
1318 *(predicate
*) e
->aux
= false;
1320 n
= gimple_switch_num_labels (last
);
1321 for (case_idx
= 0; case_idx
< n
; ++case_idx
)
1323 tree cl
= gimple_switch_label (last
, case_idx
);
1327 e
= find_edge (bb
, label_to_block (CASE_LABEL (cl
)));
1328 min
= CASE_LOW (cl
);
1329 max
= CASE_HIGH (cl
);
1331 /* For default we might want to construct predicate that none
1332 of cases is met, but it is bit hard to do not having negations
1333 of conditionals handy. */
1337 p
= add_condition (summary
, index
, size
, &aggpos
, EQ_EXPR
,
1338 unshare_expr_without_location (min
));
1342 p1
= add_condition (summary
, index
, size
, &aggpos
, GE_EXPR
,
1343 unshare_expr_without_location (min
));
1344 p2
= add_condition (summary
, index
, size
, &aggpos
, LE_EXPR
,
1345 unshare_expr_without_location (max
));
1348 *(struct predicate
*) e
->aux
1349 = p
.or_with (summary
->conds
, *(struct predicate
*) e
->aux
);
1354 /* For each BB in NODE attach to its AUX pointer predicate under
1355 which it is executable. */
1358 compute_bb_predicates (struct ipa_func_body_info
*fbi
,
1359 struct cgraph_node
*node
,
1360 struct ipa_fn_summary
*summary
)
1362 struct function
*my_function
= DECL_STRUCT_FUNCTION (node
->decl
);
1366 FOR_EACH_BB_FN (bb
, my_function
)
1368 set_cond_stmt_execution_predicate (fbi
, summary
, bb
);
1369 set_switch_stmt_execution_predicate (fbi
, summary
, bb
);
1372 /* Entry block is always executable. */
1373 ENTRY_BLOCK_PTR_FOR_FN (my_function
)->aux
1374 = edge_predicate_pool
.allocate ();
1375 *(predicate
*) ENTRY_BLOCK_PTR_FOR_FN (my_function
)->aux
= true;
1377 /* A simple dataflow propagation of predicates forward in the CFG.
1378 TODO: work in reverse postorder. */
1382 FOR_EACH_BB_FN (bb
, my_function
)
1384 predicate p
= false;
1387 FOR_EACH_EDGE (e
, ei
, bb
->preds
)
1391 predicate this_bb_predicate
1392 = *(predicate
*) e
->src
->aux
;
1394 this_bb_predicate
&= (*(struct predicate
*) e
->aux
);
1395 p
= p
.or_with (summary
->conds
, this_bb_predicate
);
1401 gcc_checking_assert (!bb
->aux
);
1407 bb
->aux
= edge_predicate_pool
.allocate ();
1408 *((predicate
*) bb
->aux
) = p
;
1410 else if (p
!= *(predicate
*) bb
->aux
)
1412 /* This OR operation is needed to ensure monotonous data flow
1413 in the case we hit the limit on number of clauses and the
1414 and/or operations above give approximate answers. */
1415 p
= p
.or_with (summary
->conds
, *(predicate
*)bb
->aux
);
1416 if (p
!= *(predicate
*) bb
->aux
)
1419 *((predicate
*) bb
->aux
) = p
;
1428 /* Return predicate specifying when the STMT might have result that is not
1429 a compile time constant. */
1432 will_be_nonconstant_expr_predicate (struct ipa_node_params
*info
,
1433 struct ipa_fn_summary
*summary
,
1435 vec
<predicate
> nonconstant_names
)
1441 while (UNARY_CLASS_P (expr
))
1442 expr
= TREE_OPERAND (expr
, 0);
1444 parm
= unmodified_parm (NULL
, expr
, &size
);
1445 if (parm
&& (index
= ipa_get_param_decl_index (info
, parm
)) >= 0)
1446 return add_condition (summary
, index
, size
, NULL
, predicate::changed
,
1448 if (is_gimple_min_invariant (expr
))
1450 if (TREE_CODE (expr
) == SSA_NAME
)
1451 return nonconstant_names
[SSA_NAME_VERSION (expr
)];
1452 if (BINARY_CLASS_P (expr
) || COMPARISON_CLASS_P (expr
))
1454 predicate p1
= will_be_nonconstant_expr_predicate
1455 (info
, summary
, TREE_OPERAND (expr
, 0),
1461 p2
= will_be_nonconstant_expr_predicate (info
, summary
,
1462 TREE_OPERAND (expr
, 1),
1464 return p1
.or_with (summary
->conds
, p2
);
1466 else if (TREE_CODE (expr
) == COND_EXPR
)
1468 predicate p1
= will_be_nonconstant_expr_predicate
1469 (info
, summary
, TREE_OPERAND (expr
, 0),
1475 p2
= will_be_nonconstant_expr_predicate (info
, summary
,
1476 TREE_OPERAND (expr
, 1),
1480 p1
= p1
.or_with (summary
->conds
, p2
);
1481 p2
= will_be_nonconstant_expr_predicate (info
, summary
,
1482 TREE_OPERAND (expr
, 2),
1484 return p2
.or_with (summary
->conds
, p1
);
1495 /* Return predicate specifying when the STMT might have result that is not
1496 a compile time constant. */
1499 will_be_nonconstant_predicate (struct ipa_func_body_info
*fbi
,
1500 struct ipa_fn_summary
*summary
,
1502 vec
<predicate
> nonconstant_names
)
1507 predicate op_non_const
;
1511 struct agg_position_info aggpos
;
1513 /* What statments might be optimized away
1514 when their arguments are constant. */
1515 if (gimple_code (stmt
) != GIMPLE_ASSIGN
1516 && gimple_code (stmt
) != GIMPLE_COND
1517 && gimple_code (stmt
) != GIMPLE_SWITCH
1518 && (gimple_code (stmt
) != GIMPLE_CALL
1519 || !(gimple_call_flags (stmt
) & ECF_CONST
)))
1522 /* Stores will stay anyway. */
1523 if (gimple_store_p (stmt
))
1526 is_load
= gimple_assign_load_p (stmt
);
1528 /* Loads can be optimized when the value is known. */
1532 gcc_assert (gimple_assign_single_p (stmt
));
1533 op
= gimple_assign_rhs1 (stmt
);
1534 if (!unmodified_parm_or_parm_agg_item (fbi
, stmt
, op
, &base_index
, &size
,
1541 /* See if we understand all operands before we start
1542 adding conditionals. */
1543 FOR_EACH_SSA_TREE_OPERAND (use
, stmt
, iter
, SSA_OP_USE
)
1545 tree parm
= unmodified_parm (stmt
, use
, NULL
);
1546 /* For arguments we can build a condition. */
1547 if (parm
&& ipa_get_param_decl_index (fbi
->info
, parm
) >= 0)
1549 if (TREE_CODE (use
) != SSA_NAME
)
1551 /* If we know when operand is constant,
1552 we still can say something useful. */
1553 if (nonconstant_names
[SSA_NAME_VERSION (use
)] != true)
1560 add_condition (summary
, base_index
, size
, &aggpos
, predicate::changed
,
1563 op_non_const
= false;
1564 FOR_EACH_SSA_TREE_OPERAND (use
, stmt
, iter
, SSA_OP_USE
)
1567 tree parm
= unmodified_parm (stmt
, use
, &size
);
1570 if (parm
&& (index
= ipa_get_param_decl_index (fbi
->info
, parm
)) >= 0)
1572 if (index
!= base_index
)
1573 p
= add_condition (summary
, index
, size
, NULL
, predicate::changed
,
1579 p
= nonconstant_names
[SSA_NAME_VERSION (use
)];
1580 op_non_const
= p
.or_with (summary
->conds
, op_non_const
);
1582 if ((gimple_code (stmt
) == GIMPLE_ASSIGN
|| gimple_code (stmt
) == GIMPLE_CALL
)
1583 && gimple_op (stmt
, 0)
1584 && TREE_CODE (gimple_op (stmt
, 0)) == SSA_NAME
)
1585 nonconstant_names
[SSA_NAME_VERSION (gimple_op (stmt
, 0))]
1587 return op_non_const
;
1590 struct record_modified_bb_info
1596 /* Value is initialized in INIT_BB and used in USE_BB. We want to compute
1597 probability how often it changes between USE_BB.
1598 INIT_BB->frequency/USE_BB->frequency is an estimate, but if INIT_BB
1599 is in different loop nest, we can do better.
1600 This is all just estimate. In theory we look for minimal cut separating
1601 INIT_BB and USE_BB, but we only want to anticipate loop invariant motion
1605 get_minimal_bb (basic_block init_bb
, basic_block use_bb
)
1607 struct loop
*l
= find_common_loop (init_bb
->loop_father
, use_bb
->loop_father
);
1608 if (l
&& l
->header
->frequency
< init_bb
->frequency
)
1613 /* Callback of walk_aliased_vdefs. Records basic blocks where the value may be
1614 set except for info->stmt. */
1617 record_modified (ao_ref
*ao ATTRIBUTE_UNUSED
, tree vdef
, void *data
)
1619 struct record_modified_bb_info
*info
=
1620 (struct record_modified_bb_info
*) data
;
1621 if (SSA_NAME_DEF_STMT (vdef
) == info
->stmt
)
1623 bitmap_set_bit (info
->bb_set
,
1624 SSA_NAME_IS_DEFAULT_DEF (vdef
)
1625 ? ENTRY_BLOCK_PTR_FOR_FN (cfun
)->index
1627 (gimple_bb (SSA_NAME_DEF_STMT (vdef
)),
1628 gimple_bb (info
->stmt
))->index
);
1632 /* Return probability (based on REG_BR_PROB_BASE) that I-th parameter of STMT
1633 will change since last invocation of STMT.
1635 Value 0 is reserved for compile time invariants.
1636 For common parameters it is REG_BR_PROB_BASE. For loop invariants it
1637 ought to be REG_BR_PROB_BASE / estimated_iters. */
1640 param_change_prob (gimple
*stmt
, int i
)
1642 tree op
= gimple_call_arg (stmt
, i
);
1643 basic_block bb
= gimple_bb (stmt
);
1645 if (TREE_CODE (op
) == WITH_SIZE_EXPR
)
1646 op
= TREE_OPERAND (op
, 0);
1648 tree base
= get_base_address (op
);
1650 /* Global invariants never change. */
1651 if (is_gimple_min_invariant (base
))
1654 /* We would have to do non-trivial analysis to really work out what
1655 is the probability of value to change (i.e. when init statement
1656 is in a sibling loop of the call).
1658 We do an conservative estimate: when call is executed N times more often
1659 than the statement defining value, we take the frequency 1/N. */
1660 if (TREE_CODE (base
) == SSA_NAME
)
1665 return REG_BR_PROB_BASE
;
1667 if (SSA_NAME_IS_DEFAULT_DEF (base
))
1668 init_freq
= ENTRY_BLOCK_PTR_FOR_FN (cfun
)->frequency
;
1670 init_freq
= get_minimal_bb
1671 (gimple_bb (SSA_NAME_DEF_STMT (base
)),
1672 gimple_bb (stmt
))->frequency
;
1676 if (init_freq
< bb
->frequency
)
1677 return MAX (GCOV_COMPUTE_SCALE (init_freq
, bb
->frequency
), 1);
1679 return REG_BR_PROB_BASE
;
1685 struct record_modified_bb_info info
;
1688 tree init
= ctor_for_folding (base
);
1690 if (init
!= error_mark_node
)
1693 return REG_BR_PROB_BASE
;
1694 ao_ref_init (&refd
, op
);
1696 info
.bb_set
= BITMAP_ALLOC (NULL
);
1697 walk_aliased_vdefs (&refd
, gimple_vuse (stmt
), record_modified
, &info
,
1699 if (bitmap_bit_p (info
.bb_set
, bb
->index
))
1701 BITMAP_FREE (info
.bb_set
);
1702 return REG_BR_PROB_BASE
;
1705 /* Assume that every memory is initialized at entry.
1706 TODO: Can we easilly determine if value is always defined
1707 and thus we may skip entry block? */
1708 if (ENTRY_BLOCK_PTR_FOR_FN (cfun
)->frequency
)
1709 max
= ENTRY_BLOCK_PTR_FOR_FN (cfun
)->frequency
;
1713 EXECUTE_IF_SET_IN_BITMAP (info
.bb_set
, 0, index
, bi
)
1714 max
= MIN (max
, BASIC_BLOCK_FOR_FN (cfun
, index
)->frequency
);
1716 BITMAP_FREE (info
.bb_set
);
1717 if (max
< bb
->frequency
)
1718 return MAX (GCOV_COMPUTE_SCALE (max
, bb
->frequency
), 1);
1720 return REG_BR_PROB_BASE
;
1724 /* Find whether a basic block BB is the final block of a (half) diamond CFG
1725 sub-graph and if the predicate the condition depends on is known. If so,
1726 return true and store the pointer to the predicate in *P. */
1729 phi_result_unknown_predicate (struct ipa_node_params
*info
,
1730 ipa_fn_summary
*summary
, basic_block bb
,
1732 vec
<predicate
> nonconstant_names
)
1736 basic_block first_bb
= NULL
;
1739 if (single_pred_p (bb
))
1745 FOR_EACH_EDGE (e
, ei
, bb
->preds
)
1747 if (single_succ_p (e
->src
))
1749 if (!single_pred_p (e
->src
))
1752 first_bb
= single_pred (e
->src
);
1753 else if (single_pred (e
->src
) != first_bb
)
1760 else if (e
->src
!= first_bb
)
1768 stmt
= last_stmt (first_bb
);
1770 || gimple_code (stmt
) != GIMPLE_COND
1771 || !is_gimple_ip_invariant (gimple_cond_rhs (stmt
)))
1774 *p
= will_be_nonconstant_expr_predicate (info
, summary
,
1775 gimple_cond_lhs (stmt
),
1783 /* Given a PHI statement in a function described by inline properties SUMMARY
1784 and *P being the predicate describing whether the selected PHI argument is
1785 known, store a predicate for the result of the PHI statement into
1786 NONCONSTANT_NAMES, if possible. */
1789 predicate_for_phi_result (struct ipa_fn_summary
*summary
, gphi
*phi
,
1791 vec
<predicate
> nonconstant_names
)
1795 for (i
= 0; i
< gimple_phi_num_args (phi
); i
++)
1797 tree arg
= gimple_phi_arg (phi
, i
)->def
;
1798 if (!is_gimple_min_invariant (arg
))
1800 gcc_assert (TREE_CODE (arg
) == SSA_NAME
);
1801 *p
= p
->or_with (summary
->conds
,
1802 nonconstant_names
[SSA_NAME_VERSION (arg
)]);
1808 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1810 fprintf (dump_file
, "\t\tphi predicate: ");
1811 p
->dump (dump_file
, summary
->conds
);
1813 nonconstant_names
[SSA_NAME_VERSION (gimple_phi_result (phi
))] = *p
;
1816 /* Return predicate specifying when array index in access OP becomes non-constant. */
1819 array_index_predicate (ipa_fn_summary
*info
,
1820 vec
< predicate
> nonconstant_names
, tree op
)
1822 predicate p
= false;
1823 while (handled_component_p (op
))
1825 if (TREE_CODE (op
) == ARRAY_REF
|| TREE_CODE (op
) == ARRAY_RANGE_REF
)
1827 if (TREE_CODE (TREE_OPERAND (op
, 1)) == SSA_NAME
)
1828 p
= p
.or_with (info
->conds
,
1829 nonconstant_names
[SSA_NAME_VERSION
1830 (TREE_OPERAND (op
, 1))]);
1832 op
= TREE_OPERAND (op
, 0);
1837 /* For a typical usage of __builtin_expect (a<b, 1), we
1838 may introduce an extra relation stmt:
1839 With the builtin, we have
1842 t3 = __builtin_expect (t2, 1);
1845 Without the builtin, we have
1848 This affects the size/time estimation and may have
1849 an impact on the earlier inlining.
1850 Here find this pattern and fix it up later. */
1853 find_foldable_builtin_expect (basic_block bb
)
1855 gimple_stmt_iterator bsi
;
1857 for (bsi
= gsi_start_bb (bb
); !gsi_end_p (bsi
); gsi_next (&bsi
))
1859 gimple
*stmt
= gsi_stmt (bsi
);
1860 if (gimple_call_builtin_p (stmt
, BUILT_IN_EXPECT
)
1861 || gimple_call_internal_p (stmt
, IFN_BUILTIN_EXPECT
))
1863 tree var
= gimple_call_lhs (stmt
);
1864 tree arg
= gimple_call_arg (stmt
, 0);
1865 use_operand_p use_p
;
1872 gcc_assert (TREE_CODE (var
) == SSA_NAME
);
1874 while (TREE_CODE (arg
) == SSA_NAME
)
1876 gimple
*stmt_tmp
= SSA_NAME_DEF_STMT (arg
);
1877 if (!is_gimple_assign (stmt_tmp
))
1879 switch (gimple_assign_rhs_code (stmt_tmp
))
1898 arg
= gimple_assign_rhs1 (stmt_tmp
);
1901 if (match
&& single_imm_use (var
, &use_p
, &use_stmt
)
1902 && gimple_code (use_stmt
) == GIMPLE_COND
)
1909 /* Return true when the basic block contains only clobbers followed by RESX.
1910 Such BBs are kept around to make removal of dead stores possible with
1911 presence of EH and will be optimized out by optimize_clobbers later in the
1914 NEED_EH is used to recurse in case the clobber has non-EH predecessors
1915 that can be clobber-only, too. When it is false, the RESX is not necessary
1916 on the end of basic block. */
1919 clobber_only_eh_bb_p (basic_block bb
, bool need_eh
= true)
1921 gimple_stmt_iterator gsi
= gsi_last_bb (bb
);
1927 if (gsi_end_p (gsi
))
1929 if (gimple_code (gsi_stmt (gsi
)) != GIMPLE_RESX
)
1933 else if (!single_succ_p (bb
))
1936 for (; !gsi_end_p (gsi
); gsi_prev (&gsi
))
1938 gimple
*stmt
= gsi_stmt (gsi
);
1939 if (is_gimple_debug (stmt
))
1941 if (gimple_clobber_p (stmt
))
1943 if (gimple_code (stmt
) == GIMPLE_LABEL
)
1948 /* See if all predecestors are either throws or clobber only BBs. */
1949 FOR_EACH_EDGE (e
, ei
, bb
->preds
)
1950 if (!(e
->flags
& EDGE_EH
)
1951 && !clobber_only_eh_bb_p (e
->src
, false))
1957 /* Return true if STMT compute a floating point expression that may be affected
1958 by -ffast-math and similar flags. */
1961 fp_expression_p (gimple
*stmt
)
1966 FOR_EACH_SSA_TREE_OPERAND (op
, stmt
, i
, SSA_OP_DEF
|SSA_OP_USE
)
1967 if (FLOAT_TYPE_P (TREE_TYPE (op
)))
1972 /* Analyze function body for NODE.
1973 EARLY indicates run from early optimization pipeline. */
1976 analyze_function_body (struct cgraph_node
*node
, bool early
)
1979 /* Estimate static overhead for function prologue/epilogue and alignment. */
1981 /* Benefits are scaled by probability of elimination that is in range
1984 struct function
*my_function
= DECL_STRUCT_FUNCTION (node
->decl
);
1986 struct ipa_fn_summary
*info
= ipa_fn_summaries
->get (node
);
1987 predicate bb_predicate
;
1988 struct ipa_func_body_info fbi
;
1989 vec
<predicate
> nonconstant_names
= vNULL
;
1992 predicate array_index
= true;
1993 gimple
*fix_builtin_expect_stmt
;
1995 gcc_assert (my_function
&& my_function
->cfg
);
1996 gcc_assert (cfun
== my_function
);
1998 memset(&fbi
, 0, sizeof(fbi
));
2000 info
->size_time_table
= NULL
;
2002 /* When optimizing and analyzing for IPA inliner, initialize loop optimizer
2003 so we can produce proper inline hints.
2005 When optimizing and analyzing for early inliner, initialize node params
2006 so we can produce correct BB predicates. */
2008 if (opt_for_fn (node
->decl
, optimize
))
2010 calculate_dominance_info (CDI_DOMINATORS
);
2012 loop_optimizer_init (LOOPS_NORMAL
| LOOPS_HAVE_RECORDED_EXITS
);
2015 ipa_check_create_node_params ();
2016 ipa_initialize_node_params (node
);
2019 if (ipa_node_params_sum
)
2022 fbi
.info
= IPA_NODE_REF (node
);
2023 fbi
.bb_infos
= vNULL
;
2024 fbi
.bb_infos
.safe_grow_cleared (last_basic_block_for_fn (cfun
));
2025 fbi
.param_count
= count_formal_params(node
->decl
);
2026 nonconstant_names
.safe_grow_cleared
2027 (SSANAMES (my_function
)->length ());
2032 fprintf (dump_file
, "\nAnalyzing function body size: %s\n",
2035 /* When we run into maximal number of entries, we assign everything to the
2036 constant truth case. Be sure to have it in list. */
2037 bb_predicate
= true;
2038 info
->account_size_time (0, 0, bb_predicate
, bb_predicate
);
2040 bb_predicate
= predicate::not_inlined ();
2041 info
->account_size_time (2 * ipa_fn_summary::size_scale
, 0, bb_predicate
,
2045 compute_bb_predicates (&fbi
, node
, info
);
2046 order
= XNEWVEC (int, n_basic_blocks_for_fn (cfun
));
2047 nblocks
= pre_and_rev_post_order_compute (NULL
, order
, false);
2048 for (n
= 0; n
< nblocks
; n
++)
2050 bb
= BASIC_BLOCK_FOR_FN (cfun
, order
[n
]);
2051 freq
= compute_call_stmt_bb_frequency (node
->decl
, bb
);
2052 if (clobber_only_eh_bb_p (bb
))
2054 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2055 fprintf (dump_file
, "\n Ignoring BB %i;"
2056 " it will be optimized away by cleanup_clobbers\n",
2061 /* TODO: Obviously predicates can be propagated down across CFG. */
2065 bb_predicate
= *(predicate
*) bb
->aux
;
2067 bb_predicate
= false;
2070 bb_predicate
= true;
2072 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2074 fprintf (dump_file
, "\n BB %i predicate:", bb
->index
);
2075 bb_predicate
.dump (dump_file
, info
->conds
);
2078 if (fbi
.info
&& nonconstant_names
.exists ())
2080 predicate phi_predicate
;
2081 bool first_phi
= true;
2083 for (gphi_iterator bsi
= gsi_start_phis (bb
); !gsi_end_p (bsi
);
2087 && !phi_result_unknown_predicate (fbi
.info
, info
, bb
,
2092 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2094 fprintf (dump_file
, " ");
2095 print_gimple_stmt (dump_file
, gsi_stmt (bsi
), 0);
2097 predicate_for_phi_result (info
, bsi
.phi (), &phi_predicate
,
2102 fix_builtin_expect_stmt
= find_foldable_builtin_expect (bb
);
2104 for (gimple_stmt_iterator bsi
= gsi_start_bb (bb
); !gsi_end_p (bsi
);
2107 gimple
*stmt
= gsi_stmt (bsi
);
2108 int this_size
= estimate_num_insns (stmt
, &eni_size_weights
);
2109 int this_time
= estimate_num_insns (stmt
, &eni_time_weights
);
2111 predicate will_be_nonconstant
;
2113 /* This relation stmt should be folded after we remove
2114 buildin_expect call. Adjust the cost here. */
2115 if (stmt
== fix_builtin_expect_stmt
)
2121 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2123 fprintf (dump_file
, " ");
2124 print_gimple_stmt (dump_file
, stmt
, 0);
2125 fprintf (dump_file
, "\t\tfreq:%3.2f size:%3i time:%3i\n",
2126 ((double) freq
) / CGRAPH_FREQ_BASE
, this_size
,
2130 if (gimple_assign_load_p (stmt
) && nonconstant_names
.exists ())
2132 predicate this_array_index
;
2134 array_index_predicate (info
, nonconstant_names
,
2135 gimple_assign_rhs1 (stmt
));
2136 if (this_array_index
!= false)
2137 array_index
&= this_array_index
;
2139 if (gimple_store_p (stmt
) && nonconstant_names
.exists ())
2141 predicate this_array_index
;
2143 array_index_predicate (info
, nonconstant_names
,
2144 gimple_get_lhs (stmt
));
2145 if (this_array_index
!= false)
2146 array_index
&= this_array_index
;
2150 if (is_gimple_call (stmt
)
2151 && !gimple_call_internal_p (stmt
))
2153 struct cgraph_edge
*edge
= node
->get_edge (stmt
);
2154 struct ipa_call_summary
*es
= ipa_call_summaries
->get (edge
);
2156 /* Special case: results of BUILT_IN_CONSTANT_P will be always
2157 resolved as constant. We however don't want to optimize
2158 out the cgraph edges. */
2159 if (nonconstant_names
.exists ()
2160 && gimple_call_builtin_p (stmt
, BUILT_IN_CONSTANT_P
)
2161 && gimple_call_lhs (stmt
)
2162 && TREE_CODE (gimple_call_lhs (stmt
)) == SSA_NAME
)
2164 predicate false_p
= false;
2165 nonconstant_names
[SSA_NAME_VERSION (gimple_call_lhs (stmt
))]
2168 if (ipa_node_params_sum
)
2170 int count
= gimple_call_num_args (stmt
);
2174 es
->param
.safe_grow_cleared (count
);
2175 for (i
= 0; i
< count
; i
++)
2177 int prob
= param_change_prob (stmt
, i
);
2178 gcc_assert (prob
>= 0 && prob
<= REG_BR_PROB_BASE
);
2179 es
->param
[i
].change_prob
= prob
;
2183 es
->call_stmt_size
= this_size
;
2184 es
->call_stmt_time
= this_time
;
2185 es
->loop_depth
= bb_loop_depth (bb
);
2186 edge_set_predicate (edge
, &bb_predicate
);
2189 /* TODO: When conditional jump or swithc is known to be constant, but
2190 we did not translate it into the predicates, we really can account
2191 just maximum of the possible paths. */
2194 = will_be_nonconstant_predicate (&fbi
, info
,
2195 stmt
, nonconstant_names
);
2197 will_be_nonconstant
= true;
2198 if (this_time
|| this_size
)
2202 prob
= eliminated_by_inlining_prob (stmt
);
2203 if (prob
== 1 && dump_file
&& (dump_flags
& TDF_DETAILS
))
2205 "\t\t50%% will be eliminated by inlining\n");
2206 if (prob
== 2 && dump_file
&& (dump_flags
& TDF_DETAILS
))
2207 fprintf (dump_file
, "\t\tWill be eliminated by inlining\n");
2209 struct predicate p
= bb_predicate
& will_be_nonconstant
;
2211 /* We can ignore statement when we proved it is never going
2212 to happen, but we can not do that for call statements
2213 because edges are accounted specially. */
2215 if (*(is_gimple_call (stmt
) ? &bb_predicate
: &p
) != false)
2221 /* We account everything but the calls. Calls have their own
2222 size/time info attached to cgraph edges. This is necessary
2223 in order to make the cost disappear after inlining. */
2224 if (!is_gimple_call (stmt
))
2228 predicate ip
= bb_predicate
& predicate::not_inlined ();
2229 info
->account_size_time (this_size
* prob
,
2230 (sreal
)(this_time
* prob
)
2231 / (CGRAPH_FREQ_BASE
* 2), ip
,
2235 info
->account_size_time (this_size
* (2 - prob
),
2236 (sreal
)(this_time
* (2 - prob
))
2237 / (CGRAPH_FREQ_BASE
* 2),
2242 if (!info
->fp_expressions
&& fp_expression_p (stmt
))
2244 info
->fp_expressions
= true;
2246 fprintf (dump_file
, " fp_expression set\n");
2249 gcc_assert (time
>= 0);
2250 gcc_assert (size
>= 0);
2254 set_hint_predicate (&ipa_fn_summaries
->get (node
)->array_index
, array_index
);
2255 time
= time
/ CGRAPH_FREQ_BASE
;
2258 if (nonconstant_names
.exists () && !early
)
2261 predicate loop_iterations
= true;
2262 predicate loop_stride
= true;
2264 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2265 flow_loops_dump (dump_file
, NULL
, 0);
2267 FOR_EACH_LOOP (loop
, 0)
2272 struct tree_niter_desc niter_desc
;
2273 bb_predicate
= *(predicate
*) loop
->header
->aux
;
2275 exits
= get_loop_exit_edges (loop
);
2276 FOR_EACH_VEC_ELT (exits
, j
, ex
)
2277 if (number_of_iterations_exit (loop
, ex
, &niter_desc
, false)
2278 && !is_gimple_min_invariant (niter_desc
.niter
))
2280 predicate will_be_nonconstant
2281 = will_be_nonconstant_expr_predicate (fbi
.info
, info
,
2284 if (will_be_nonconstant
!= true)
2285 will_be_nonconstant
= bb_predicate
& will_be_nonconstant
;
2286 if (will_be_nonconstant
!= true
2287 && will_be_nonconstant
!= false)
2288 /* This is slightly inprecise. We may want to represent each
2289 loop with independent predicate. */
2290 loop_iterations
&= will_be_nonconstant
;
2295 /* To avoid quadratic behavior we analyze stride predicates only
2296 with respect to the containing loop. Thus we simply iterate
2297 over all defs in the outermost loop body. */
2298 for (loop
= loops_for_fn (cfun
)->tree_root
->inner
;
2299 loop
!= NULL
; loop
= loop
->next
)
2301 basic_block
*body
= get_loop_body (loop
);
2302 for (unsigned i
= 0; i
< loop
->num_nodes
; i
++)
2304 gimple_stmt_iterator gsi
;
2305 bb_predicate
= *(predicate
*) body
[i
]->aux
;
2306 for (gsi
= gsi_start_bb (body
[i
]); !gsi_end_p (gsi
);
2309 gimple
*stmt
= gsi_stmt (gsi
);
2311 if (!is_gimple_assign (stmt
))
2314 tree def
= gimple_assign_lhs (stmt
);
2315 if (TREE_CODE (def
) != SSA_NAME
)
2319 if (!simple_iv (loop_containing_stmt (stmt
),
2320 loop_containing_stmt (stmt
),
2322 || is_gimple_min_invariant (iv
.step
))
2325 predicate will_be_nonconstant
2326 = will_be_nonconstant_expr_predicate (fbi
.info
, info
,
2329 if (will_be_nonconstant
!= true)
2330 will_be_nonconstant
= bb_predicate
& will_be_nonconstant
;
2331 if (will_be_nonconstant
!= true
2332 && will_be_nonconstant
!= false)
2333 /* This is slightly inprecise. We may want to represent
2334 each loop with independent predicate. */
2335 loop_stride
= loop_stride
& will_be_nonconstant
;
2340 set_hint_predicate (&ipa_fn_summaries
->get (node
)->loop_iterations
,
2342 set_hint_predicate (&ipa_fn_summaries
->get (node
)->loop_stride
,
2346 FOR_ALL_BB_FN (bb
, my_function
)
2352 edge_predicate_pool
.remove ((predicate
*)bb
->aux
);
2354 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
2357 edge_predicate_pool
.remove ((predicate
*) e
->aux
);
2361 ipa_fn_summaries
->get (node
)->time
= time
;
2362 ipa_fn_summaries
->get (node
)->self_size
= size
;
2363 nonconstant_names
.release ();
2364 ipa_release_body_info (&fbi
);
2365 if (opt_for_fn (node
->decl
, optimize
))
2368 loop_optimizer_finalize ();
2369 else if (!ipa_edge_args_sum
)
2370 ipa_free_all_node_params ();
2371 free_dominance_info (CDI_DOMINATORS
);
2375 fprintf (dump_file
, "\n");
2376 ipa_dump_fn_summary (dump_file
, node
);
2381 /* Compute function summary.
2382 EARLY is true when we compute parameters during early opts. */
2385 compute_fn_summary (struct cgraph_node
*node
, bool early
)
2387 HOST_WIDE_INT self_stack_size
;
2388 struct cgraph_edge
*e
;
2389 struct ipa_fn_summary
*info
;
2391 gcc_assert (!node
->global
.inlined_to
);
2393 if (!ipa_fn_summaries
)
2394 ipa_fn_summary_alloc ();
2396 info
= ipa_fn_summaries
->get (node
);
2399 /* Estimate the stack size for the function if we're optimizing. */
2400 self_stack_size
= optimize
&& !node
->thunk
.thunk_p
2401 ? estimated_stack_frame_size (node
) : 0;
2402 info
->estimated_self_stack_size
= self_stack_size
;
2403 info
->estimated_stack_size
= self_stack_size
;
2404 info
->stack_frame_offset
= 0;
2406 if (node
->thunk
.thunk_p
)
2408 struct ipa_call_summary
*es
= ipa_call_summaries
->get (node
->callees
);
2411 node
->local
.can_change_signature
= false;
2412 es
->call_stmt_size
= eni_size_weights
.call_cost
;
2413 es
->call_stmt_time
= eni_time_weights
.call_cost
;
2414 info
->account_size_time (ipa_fn_summary::size_scale
* 2, 2, t
, t
);
2415 t
= predicate::not_inlined ();
2416 info
->account_size_time (2 * ipa_fn_summary::size_scale
, 0, t
, t
);
2417 ipa_update_overall_fn_summary (node
);
2418 info
->self_size
= info
->size
;
2419 /* We can not inline instrumentation clones. */
2420 if (node
->thunk
.add_pointer_bounds_args
)
2422 info
->inlinable
= false;
2423 node
->callees
->inline_failed
= CIF_CHKP
;
2426 info
->inlinable
= true;
2430 /* Even is_gimple_min_invariant rely on current_function_decl. */
2431 push_cfun (DECL_STRUCT_FUNCTION (node
->decl
));
2433 /* Can this function be inlined at all? */
2434 if (!opt_for_fn (node
->decl
, optimize
)
2435 && !lookup_attribute ("always_inline",
2436 DECL_ATTRIBUTES (node
->decl
)))
2437 info
->inlinable
= false;
2439 info
->inlinable
= tree_inlinable_function_p (node
->decl
);
2441 info
->contains_cilk_spawn
= fn_contains_cilk_spawn_p (cfun
);
2443 /* Type attributes can use parameter indices to describe them. */
2444 if (TYPE_ATTRIBUTES (TREE_TYPE (node
->decl
)))
2445 node
->local
.can_change_signature
= false;
2448 /* Otherwise, inlinable functions always can change signature. */
2449 if (info
->inlinable
)
2450 node
->local
.can_change_signature
= true;
2453 /* Functions calling builtin_apply can not change signature. */
2454 for (e
= node
->callees
; e
; e
= e
->next_callee
)
2456 tree
cdecl = e
->callee
->decl
;
2457 if (DECL_BUILT_IN (cdecl)
2458 && DECL_BUILT_IN_CLASS (cdecl) == BUILT_IN_NORMAL
2459 && (DECL_FUNCTION_CODE (cdecl) == BUILT_IN_APPLY_ARGS
2460 || DECL_FUNCTION_CODE (cdecl) == BUILT_IN_VA_START
))
2463 node
->local
.can_change_signature
= !e
;
2466 /* Functions called by instrumentation thunk can't change signature
2467 because instrumentation thunk modification is not supported. */
2468 if (node
->local
.can_change_signature
)
2469 for (e
= node
->callers
; e
; e
= e
->next_caller
)
2470 if (e
->caller
->thunk
.thunk_p
2471 && e
->caller
->thunk
.add_pointer_bounds_args
)
2473 node
->local
.can_change_signature
= false;
2476 analyze_function_body (node
, early
);
2479 for (e
= node
->callees
; e
; e
= e
->next_callee
)
2480 if (e
->callee
->comdat_local_p ())
2482 node
->calls_comdat_local
= (e
!= NULL
);
2484 /* Inlining characteristics are maintained by the cgraph_mark_inline. */
2485 info
->size
= info
->self_size
;
2486 info
->stack_frame_offset
= 0;
2487 info
->estimated_stack_size
= info
->estimated_self_stack_size
;
2489 /* Code above should compute exactly the same result as
2490 ipa_update_overall_fn_summary but because computation happens in
2491 different order the roundoff errors result in slight changes. */
2492 ipa_update_overall_fn_summary (node
);
2493 gcc_assert (info
->size
== info
->self_size
);
2497 /* Compute parameters of functions used by inliner using
2498 current_function_decl. */
2501 compute_fn_summary_for_current (void)
2503 compute_fn_summary (cgraph_node::get (current_function_decl
), true);
2507 /* Estimate benefit devirtualizing indirect edge IE, provided KNOWN_VALS,
2508 KNOWN_CONTEXTS and KNOWN_AGGS. */
2511 estimate_edge_devirt_benefit (struct cgraph_edge
*ie
,
2512 int *size
, int *time
,
2513 vec
<tree
> known_vals
,
2514 vec
<ipa_polymorphic_call_context
> known_contexts
,
2515 vec
<ipa_agg_jump_function_p
> known_aggs
)
2518 struct cgraph_node
*callee
;
2519 struct ipa_fn_summary
*isummary
;
2520 enum availability avail
;
2523 if (!known_vals
.exists () && !known_contexts
.exists ())
2525 if (!opt_for_fn (ie
->caller
->decl
, flag_indirect_inlining
))
2528 target
= ipa_get_indirect_edge_target (ie
, known_vals
, known_contexts
,
2529 known_aggs
, &speculative
);
2530 if (!target
|| speculative
)
2533 /* Account for difference in cost between indirect and direct calls. */
2534 *size
-= (eni_size_weights
.indirect_call_cost
- eni_size_weights
.call_cost
);
2535 *time
-= (eni_time_weights
.indirect_call_cost
- eni_time_weights
.call_cost
);
2536 gcc_checking_assert (*time
>= 0);
2537 gcc_checking_assert (*size
>= 0);
2539 callee
= cgraph_node::get (target
);
2540 if (!callee
|| !callee
->definition
)
2542 callee
= callee
->function_symbol (&avail
);
2543 if (avail
< AVAIL_AVAILABLE
)
2545 isummary
= ipa_fn_summaries
->get (callee
);
2546 return isummary
->inlinable
;
2549 /* Increase SIZE, MIN_SIZE (if non-NULL) and TIME for size and time needed to
2550 handle edge E with probability PROB.
2551 Set HINTS if edge may be devirtualized.
2552 KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS describe context of the call
2556 estimate_edge_size_and_time (struct cgraph_edge
*e
, int *size
, int *min_size
,
2559 vec
<tree
> known_vals
,
2560 vec
<ipa_polymorphic_call_context
> known_contexts
,
2561 vec
<ipa_agg_jump_function_p
> known_aggs
,
2564 struct ipa_call_summary
*es
= ipa_call_summaries
->get (e
);
2565 int call_size
= es
->call_stmt_size
;
2566 int call_time
= es
->call_stmt_time
;
2569 && estimate_edge_devirt_benefit (e
, &call_size
, &call_time
,
2570 known_vals
, known_contexts
, known_aggs
)
2571 && hints
&& e
->maybe_hot_p ())
2572 *hints
|= INLINE_HINT_indirect_call
;
2573 cur_size
= call_size
* ipa_fn_summary::size_scale
;
2576 *min_size
+= cur_size
;
2577 if (prob
== REG_BR_PROB_BASE
)
2578 *time
+= ((sreal
)(call_time
* e
->frequency
)) / CGRAPH_FREQ_BASE
;
2580 *time
+= ((sreal
)call_time
) * (prob
* e
->frequency
)
2581 / (CGRAPH_FREQ_BASE
* REG_BR_PROB_BASE
);
2586 /* Increase SIZE, MIN_SIZE and TIME for size and time needed to handle all
2587 calls in NODE. POSSIBLE_TRUTHS, KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
2588 describe context of the call site. */
2591 estimate_calls_size_and_time (struct cgraph_node
*node
, int *size
,
2592 int *min_size
, sreal
*time
,
2594 clause_t possible_truths
,
2595 vec
<tree
> known_vals
,
2596 vec
<ipa_polymorphic_call_context
> known_contexts
,
2597 vec
<ipa_agg_jump_function_p
> known_aggs
)
2599 struct cgraph_edge
*e
;
2600 for (e
= node
->callees
; e
; e
= e
->next_callee
)
2602 struct ipa_call_summary
*es
= ipa_call_summaries
->get (e
);
2604 /* Do not care about zero sized builtins. */
2605 if (e
->inline_failed
&& !es
->call_stmt_size
)
2607 gcc_checking_assert (!es
->call_stmt_time
);
2611 || es
->predicate
->evaluate (possible_truths
))
2613 if (e
->inline_failed
)
2615 /* Predicates of calls shall not use NOT_CHANGED codes,
2616 sowe do not need to compute probabilities. */
2617 estimate_edge_size_and_time (e
, size
,
2618 es
->predicate
? NULL
: min_size
,
2619 time
, REG_BR_PROB_BASE
,
2620 known_vals
, known_contexts
,
2624 estimate_calls_size_and_time (e
->callee
, size
, min_size
, time
,
2627 known_vals
, known_contexts
,
2631 for (e
= node
->indirect_calls
; e
; e
= e
->next_callee
)
2633 struct ipa_call_summary
*es
= ipa_call_summaries
->get (e
);
2635 || es
->predicate
->evaluate (possible_truths
))
2636 estimate_edge_size_and_time (e
, size
,
2637 es
->predicate
? NULL
: min_size
,
2638 time
, REG_BR_PROB_BASE
,
2639 known_vals
, known_contexts
, known_aggs
,
2645 /* Estimate size and time needed to execute NODE assuming
2646 POSSIBLE_TRUTHS clause, and KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
2647 information about NODE's arguments. If non-NULL use also probability
2648 information present in INLINE_PARAM_SUMMARY vector.
2649 Additionally detemine hints determined by the context. Finally compute
2650 minimal size needed for the call that is independent on the call context and
2651 can be used for fast estimates. Return the values in RET_SIZE,
2652 RET_MIN_SIZE, RET_TIME and RET_HINTS. */
2655 estimate_node_size_and_time (struct cgraph_node
*node
,
2656 clause_t possible_truths
,
2657 clause_t nonspec_possible_truths
,
2658 vec
<tree
> known_vals
,
2659 vec
<ipa_polymorphic_call_context
> known_contexts
,
2660 vec
<ipa_agg_jump_function_p
> known_aggs
,
2661 int *ret_size
, int *ret_min_size
,
2663 sreal
*ret_nonspecialized_time
,
2664 ipa_hints
*ret_hints
,
2665 vec
<inline_param_summary
>
2666 inline_param_summary
)
2668 struct ipa_fn_summary
*info
= ipa_fn_summaries
->get (node
);
2673 ipa_hints hints
= 0;
2676 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2679 fprintf (dump_file
, " Estimating body: %s/%i\n"
2680 " Known to be false: ", node
->name (),
2683 for (i
= predicate::not_inlined_condition
;
2684 i
< (predicate::first_dynamic_condition
2685 + (int) vec_safe_length (info
->conds
)); i
++)
2686 if (!(possible_truths
& (1 << i
)))
2689 fprintf (dump_file
, ", ");
2691 dump_condition (dump_file
, info
->conds
, i
);
2695 estimate_calls_size_and_time (node
, &size
, &min_size
, &time
, &hints
, possible_truths
,
2696 known_vals
, known_contexts
, known_aggs
);
2697 sreal nonspecialized_time
= time
;
2699 for (i
= 0; vec_safe_iterate (info
->size_time_table
, i
, &e
); i
++)
2701 bool exec
= e
->exec_predicate
.evaluate (nonspec_possible_truths
);
2703 /* Because predicates are conservative, it can happen that nonconst is 1
2707 bool nonconst
= e
->nonconst_predicate
.evaluate (possible_truths
);
2709 gcc_checking_assert (e
->time
>= 0);
2710 gcc_checking_assert (time
>= 0);
2712 /* We compute specialized size only because size of nonspecialized
2713 copy is context independent.
2715 The difference between nonspecialized execution and specialized is
2716 that nonspecialized is not going to have optimized out computations
2717 known to be constant in a specialized setting. */
2720 nonspecialized_time
+= e
->time
;
2723 else if (!inline_param_summary
.exists ())
2730 int prob
= e
->nonconst_predicate
.probability
2731 (info
->conds
, possible_truths
,
2732 inline_param_summary
);
2733 gcc_checking_assert (prob
>= 0);
2734 gcc_checking_assert (prob
<= REG_BR_PROB_BASE
);
2735 time
+= e
->time
* prob
/ REG_BR_PROB_BASE
;
2737 gcc_checking_assert (time
>= 0);
2740 gcc_checking_assert ((*info
->size_time_table
)[0].exec_predicate
== true);
2741 gcc_checking_assert ((*info
->size_time_table
)[0].nonconst_predicate
== true);
2742 min_size
= (*info
->size_time_table
)[0].size
;
2743 gcc_checking_assert (size
>= 0);
2744 gcc_checking_assert (time
>= 0);
2745 /* nonspecialized_time should be always bigger than specialized time.
2746 Roundoff issues however may get into the way. */
2747 gcc_checking_assert ((nonspecialized_time
- time
) >= -1);
2749 /* Roundoff issues may make specialized time bigger than nonspecialized
2750 time. We do not really want that to happen because some heurstics
2751 may get confused by seeing negative speedups. */
2752 if (time
> nonspecialized_time
)
2753 time
= nonspecialized_time
;
2755 if (info
->loop_iterations
2756 && !info
->loop_iterations
->evaluate (possible_truths
))
2757 hints
|= INLINE_HINT_loop_iterations
;
2758 if (info
->loop_stride
2759 && !info
->loop_stride
->evaluate (possible_truths
))
2760 hints
|= INLINE_HINT_loop_stride
;
2761 if (info
->array_index
2762 && !info
->array_index
->evaluate (possible_truths
))
2763 hints
|= INLINE_HINT_array_index
;
2765 hints
|= INLINE_HINT_in_scc
;
2766 if (DECL_DECLARED_INLINE_P (node
->decl
))
2767 hints
|= INLINE_HINT_declared_inline
;
2769 size
= RDIV (size
, ipa_fn_summary::size_scale
);
2770 min_size
= RDIV (min_size
, ipa_fn_summary::size_scale
);
2772 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2773 fprintf (dump_file
, "\n size:%i time:%f nonspec time:%f\n", (int) size
,
2774 time
.to_double (), nonspecialized_time
.to_double ());
2777 if (ret_nonspecialized_time
)
2778 *ret_nonspecialized_time
= nonspecialized_time
;
2782 *ret_min_size
= min_size
;
2789 /* Estimate size and time needed to execute callee of EDGE assuming that
2790 parameters known to be constant at caller of EDGE are propagated.
2791 KNOWN_VALS and KNOWN_CONTEXTS are vectors of assumed known constant values
2792 and types for parameters. */
2795 estimate_ipcp_clone_size_and_time (struct cgraph_node
*node
,
2796 vec
<tree
> known_vals
,
2797 vec
<ipa_polymorphic_call_context
>
2799 vec
<ipa_agg_jump_function_p
> known_aggs
,
2800 int *ret_size
, sreal
*ret_time
,
2801 sreal
*ret_nonspec_time
,
2804 clause_t clause
, nonspec_clause
;
2806 evaluate_conditions_for_known_args (node
, false, known_vals
, known_aggs
,
2807 &clause
, &nonspec_clause
);
2808 estimate_node_size_and_time (node
, clause
, nonspec_clause
,
2809 known_vals
, known_contexts
,
2810 known_aggs
, ret_size
, NULL
, ret_time
,
2811 ret_nonspec_time
, hints
, vNULL
);
2815 /* Update summary information of inline clones after inlining.
2816 Compute peak stack usage. */
2819 inline_update_callee_summaries (struct cgraph_node
*node
, int depth
)
2821 struct cgraph_edge
*e
;
2822 struct ipa_fn_summary
*callee_info
= ipa_fn_summaries
->get (node
);
2823 struct ipa_fn_summary
*caller_info
= ipa_fn_summaries
->get (node
->callers
->caller
);
2826 callee_info
->stack_frame_offset
2827 = caller_info
->stack_frame_offset
2828 + caller_info
->estimated_self_stack_size
;
2829 peak
= callee_info
->stack_frame_offset
2830 + callee_info
->estimated_self_stack_size
;
2831 if (ipa_fn_summaries
->get (node
->global
.inlined_to
)->estimated_stack_size
< peak
)
2832 ipa_fn_summaries
->get (node
->global
.inlined_to
)->estimated_stack_size
= peak
;
2833 ipa_propagate_frequency (node
);
2834 for (e
= node
->callees
; e
; e
= e
->next_callee
)
2836 if (!e
->inline_failed
)
2837 inline_update_callee_summaries (e
->callee
, depth
);
2838 ipa_call_summaries
->get (e
)->loop_depth
+= depth
;
2840 for (e
= node
->indirect_calls
; e
; e
= e
->next_callee
)
2841 ipa_call_summaries
->get (e
)->loop_depth
+= depth
;
2844 /* Update change_prob of EDGE after INLINED_EDGE has been inlined.
2845 When functoin A is inlined in B and A calls C with parameter that
2846 changes with probability PROB1 and C is known to be passthroug
2847 of argument if B that change with probability PROB2, the probability
2848 of change is now PROB1*PROB2. */
2851 remap_edge_change_prob (struct cgraph_edge
*inlined_edge
,
2852 struct cgraph_edge
*edge
)
2854 if (ipa_node_params_sum
)
2857 struct ipa_edge_args
*args
= IPA_EDGE_REF (edge
);
2858 struct ipa_call_summary
*es
= ipa_call_summaries
->get (edge
);
2859 struct ipa_call_summary
*inlined_es
2860 = ipa_call_summaries
->get (inlined_edge
);
2862 for (i
= 0; i
< ipa_get_cs_argument_count (args
); i
++)
2864 struct ipa_jump_func
*jfunc
= ipa_get_ith_jump_func (args
, i
);
2865 if (jfunc
->type
== IPA_JF_PASS_THROUGH
2866 || jfunc
->type
== IPA_JF_ANCESTOR
)
2868 int id
= jfunc
->type
== IPA_JF_PASS_THROUGH
2869 ? ipa_get_jf_pass_through_formal_id (jfunc
)
2870 : ipa_get_jf_ancestor_formal_id (jfunc
);
2871 if (id
< (int) inlined_es
->param
.length ())
2873 int prob1
= es
->param
[i
].change_prob
;
2874 int prob2
= inlined_es
->param
[id
].change_prob
;
2875 int prob
= combine_probabilities (prob1
, prob2
);
2877 if (prob1
&& prob2
&& !prob
)
2880 es
->param
[i
].change_prob
= prob
;
2887 /* Update edge summaries of NODE after INLINED_EDGE has been inlined.
2889 Remap predicates of callees of NODE. Rest of arguments match
2892 Also update change probabilities. */
2895 remap_edge_summaries (struct cgraph_edge
*inlined_edge
,
2896 struct cgraph_node
*node
,
2897 struct ipa_fn_summary
*info
,
2898 struct ipa_fn_summary
*callee_info
,
2899 vec
<int> operand_map
,
2900 vec
<int> offset_map
,
2901 clause_t possible_truths
,
2902 predicate
*toplev_predicate
)
2904 struct cgraph_edge
*e
, *next
;
2905 for (e
= node
->callees
; e
; e
= next
)
2907 struct ipa_call_summary
*es
= ipa_call_summaries
->get (e
);
2909 next
= e
->next_callee
;
2911 if (e
->inline_failed
)
2913 remap_edge_change_prob (inlined_edge
, e
);
2917 p
= es
->predicate
->remap_after_inlining
2918 (info
, callee_info
, operand_map
,
2919 offset_map
, possible_truths
,
2921 edge_set_predicate (e
, &p
);
2924 edge_set_predicate (e
, toplev_predicate
);
2927 remap_edge_summaries (inlined_edge
, e
->callee
, info
, callee_info
,
2928 operand_map
, offset_map
, possible_truths
,
2931 for (e
= node
->indirect_calls
; e
; e
= next
)
2933 struct ipa_call_summary
*es
= ipa_call_summaries
->get (e
);
2935 next
= e
->next_callee
;
2937 remap_edge_change_prob (inlined_edge
, e
);
2940 p
= es
->predicate
->remap_after_inlining
2941 (info
, callee_info
, operand_map
, offset_map
,
2942 possible_truths
, *toplev_predicate
);
2943 edge_set_predicate (e
, &p
);
2946 edge_set_predicate (e
, toplev_predicate
);
2950 /* Same as remap_predicate, but set result into hint *HINT. */
2953 remap_hint_predicate (struct ipa_fn_summary
*info
,
2954 struct ipa_fn_summary
*callee_info
,
2956 vec
<int> operand_map
,
2957 vec
<int> offset_map
,
2958 clause_t possible_truths
,
2959 predicate
*toplev_predicate
)
2965 p
= (*hint
)->remap_after_inlining
2967 operand_map
, offset_map
,
2968 possible_truths
, *toplev_predicate
);
2969 if (p
!= false && p
!= true)
2972 set_hint_predicate (hint
, p
);
2978 /* We inlined EDGE. Update summary of the function we inlined into. */
2981 ipa_merge_fn_summary_after_inlining (struct cgraph_edge
*edge
)
2983 struct ipa_fn_summary
*callee_info
= ipa_fn_summaries
->get (edge
->callee
);
2984 struct cgraph_node
*to
= (edge
->caller
->global
.inlined_to
2985 ? edge
->caller
->global
.inlined_to
: edge
->caller
);
2986 struct ipa_fn_summary
*info
= ipa_fn_summaries
->get (to
);
2987 clause_t clause
= 0; /* not_inline is known to be false. */
2989 vec
<int> operand_map
= vNULL
;
2990 vec
<int> offset_map
= vNULL
;
2992 predicate toplev_predicate
;
2993 predicate true_p
= true;
2994 struct ipa_call_summary
*es
= ipa_call_summaries
->get (edge
);
2997 toplev_predicate
= *es
->predicate
;
2999 toplev_predicate
= true;
3001 info
->fp_expressions
|= callee_info
->fp_expressions
;
3003 if (callee_info
->conds
)
3004 evaluate_properties_for_edge (edge
, true, &clause
, NULL
, NULL
, NULL
, NULL
);
3005 if (ipa_node_params_sum
&& callee_info
->conds
)
3007 struct ipa_edge_args
*args
= IPA_EDGE_REF (edge
);
3008 int count
= ipa_get_cs_argument_count (args
);
3013 operand_map
.safe_grow_cleared (count
);
3014 offset_map
.safe_grow_cleared (count
);
3016 for (i
= 0; i
< count
; i
++)
3018 struct ipa_jump_func
*jfunc
= ipa_get_ith_jump_func (args
, i
);
3021 /* TODO: handle non-NOPs when merging. */
3022 if (jfunc
->type
== IPA_JF_PASS_THROUGH
)
3024 if (ipa_get_jf_pass_through_operation (jfunc
) == NOP_EXPR
)
3025 map
= ipa_get_jf_pass_through_formal_id (jfunc
);
3026 if (!ipa_get_jf_pass_through_agg_preserved (jfunc
))
3029 else if (jfunc
->type
== IPA_JF_ANCESTOR
)
3031 HOST_WIDE_INT offset
= ipa_get_jf_ancestor_offset (jfunc
);
3032 if (offset
>= 0 && offset
< INT_MAX
)
3034 map
= ipa_get_jf_ancestor_formal_id (jfunc
);
3035 if (!ipa_get_jf_ancestor_agg_preserved (jfunc
))
3037 offset_map
[i
] = offset
;
3040 operand_map
[i
] = map
;
3041 gcc_assert (map
< ipa_get_param_count (IPA_NODE_REF (to
)));
3044 for (i
= 0; vec_safe_iterate (callee_info
->size_time_table
, i
, &e
); i
++)
3047 p
= e
->exec_predicate
.remap_after_inlining
3048 (info
, callee_info
, operand_map
,
3051 predicate nonconstp
;
3052 nonconstp
= e
->nonconst_predicate
.remap_after_inlining
3053 (info
, callee_info
, operand_map
,
3056 if (p
!= false && nonconstp
!= false)
3058 sreal add_time
= ((sreal
)e
->time
* edge
->frequency
) / CGRAPH_FREQ_BASE
;
3059 int prob
= e
->nonconst_predicate
.probability (callee_info
->conds
,
3061 add_time
= add_time
* prob
/ REG_BR_PROB_BASE
;
3062 if (prob
!= REG_BR_PROB_BASE
3063 && dump_file
&& (dump_flags
& TDF_DETAILS
))
3065 fprintf (dump_file
, "\t\tScaling time by probability:%f\n",
3066 (double) prob
/ REG_BR_PROB_BASE
);
3068 info
->account_size_time (e
->size
, add_time
, p
, nonconstp
);
3071 remap_edge_summaries (edge
, edge
->callee
, info
, callee_info
, operand_map
,
3072 offset_map
, clause
, &toplev_predicate
);
3073 remap_hint_predicate (info
, callee_info
,
3074 &callee_info
->loop_iterations
,
3075 operand_map
, offset_map
, clause
, &toplev_predicate
);
3076 remap_hint_predicate (info
, callee_info
,
3077 &callee_info
->loop_stride
,
3078 operand_map
, offset_map
, clause
, &toplev_predicate
);
3079 remap_hint_predicate (info
, callee_info
,
3080 &callee_info
->array_index
,
3081 operand_map
, offset_map
, clause
, &toplev_predicate
);
3083 inline_update_callee_summaries (edge
->callee
,
3084 ipa_call_summaries
->get (edge
)->loop_depth
);
3086 /* We do not maintain predicates of inlined edges, free it. */
3087 edge_set_predicate (edge
, &true_p
);
3088 /* Similarly remove param summaries. */
3089 es
->param
.release ();
3090 operand_map
.release ();
3091 offset_map
.release ();
3094 /* For performance reasons ipa_merge_fn_summary_after_inlining is not updating overall size
3095 and time. Recompute it. */
3098 ipa_update_overall_fn_summary (struct cgraph_node
*node
)
3100 struct ipa_fn_summary
*info
= ipa_fn_summaries
->get (node
);
3106 for (i
= 0; vec_safe_iterate (info
->size_time_table
, i
, &e
); i
++)
3108 info
->size
+= e
->size
;
3109 info
->time
+= e
->time
;
3111 estimate_calls_size_and_time (node
, &info
->size
, &info
->min_size
,
3113 ~(clause_t
) (1 << predicate::false_condition
),
3114 vNULL
, vNULL
, vNULL
);
3115 info
->size
= (info
->size
+ ipa_fn_summary::size_scale
/ 2) / ipa_fn_summary::size_scale
;
3119 /* This function performs intraprocedural analysis in NODE that is required to
3120 inline indirect calls. */
3123 inline_indirect_intraprocedural_analysis (struct cgraph_node
*node
)
3125 ipa_analyze_node (node
);
3126 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
3128 ipa_print_node_params (dump_file
, node
);
3129 ipa_print_node_jump_functions (dump_file
, node
);
3134 /* Note function body size. */
3137 inline_analyze_function (struct cgraph_node
*node
)
3139 push_cfun (DECL_STRUCT_FUNCTION (node
->decl
));
3142 fprintf (dump_file
, "\nAnalyzing function: %s/%u\n",
3143 node
->name (), node
->order
);
3144 if (opt_for_fn (node
->decl
, optimize
) && !node
->thunk
.thunk_p
)
3145 inline_indirect_intraprocedural_analysis (node
);
3146 compute_fn_summary (node
, false);
3149 struct cgraph_edge
*e
;
3150 for (e
= node
->callees
; e
; e
= e
->next_callee
)
3151 e
->inline_failed
= CIF_FUNCTION_NOT_OPTIMIZED
;
3152 for (e
= node
->indirect_calls
; e
; e
= e
->next_callee
)
3153 e
->inline_failed
= CIF_FUNCTION_NOT_OPTIMIZED
;
3160 /* Called when new function is inserted to callgraph late. */
3163 ipa_fn_summary_t::insert (struct cgraph_node
*node
, ipa_fn_summary
*)
3165 inline_analyze_function (node
);
3168 /* Note function body size. */
3171 ipa_fn_summary_generate (void)
3173 struct cgraph_node
*node
;
3175 FOR_EACH_DEFINED_FUNCTION (node
)
3176 if (DECL_STRUCT_FUNCTION (node
->decl
))
3177 node
->local
.versionable
= tree_versionable_function_p (node
->decl
);
3179 /* When not optimizing, do not bother to analyze. Inlining is still done
3180 because edge redirection needs to happen there. */
3181 if (!optimize
&& !flag_generate_lto
&& !flag_generate_offload
&& !flag_wpa
)
3184 ipa_fn_summary_alloc ();
3186 ipa_fn_summaries
->enable_insertion_hook ();
3188 ipa_register_cgraph_hooks ();
3189 ipa_free_fn_summary ();
3191 FOR_EACH_DEFINED_FUNCTION (node
)
3193 inline_analyze_function (node
);
3197 /* Write inline summary for edge E to OB. */
3200 read_ipa_call_summary (struct lto_input_block
*ib
, struct cgraph_edge
*e
)
3202 struct ipa_call_summary
*es
= ipa_call_summaries
->get (e
);
3206 es
->call_stmt_size
= streamer_read_uhwi (ib
);
3207 es
->call_stmt_time
= streamer_read_uhwi (ib
);
3208 es
->loop_depth
= streamer_read_uhwi (ib
);
3210 edge_set_predicate (e
, &p
);
3211 length
= streamer_read_uhwi (ib
);
3214 es
->param
.safe_grow_cleared (length
);
3215 for (i
= 0; i
< length
; i
++)
3216 es
->param
[i
].change_prob
= streamer_read_uhwi (ib
);
3221 /* Stream in inline summaries from the section. */
3224 inline_read_section (struct lto_file_decl_data
*file_data
, const char *data
,
3227 const struct lto_function_header
*header
=
3228 (const struct lto_function_header
*) data
;
3229 const int cfg_offset
= sizeof (struct lto_function_header
);
3230 const int main_offset
= cfg_offset
+ header
->cfg_size
;
3231 const int string_offset
= main_offset
+ header
->main_size
;
3232 struct data_in
*data_in
;
3233 unsigned int i
, count2
, j
;
3234 unsigned int f_count
;
3236 lto_input_block
ib ((const char *) data
+ main_offset
, header
->main_size
,
3237 file_data
->mode_table
);
3240 lto_data_in_create (file_data
, (const char *) data
+ string_offset
,
3241 header
->string_size
, vNULL
);
3242 f_count
= streamer_read_uhwi (&ib
);
3243 for (i
= 0; i
< f_count
; i
++)
3246 struct cgraph_node
*node
;
3247 struct ipa_fn_summary
*info
;
3248 lto_symtab_encoder_t encoder
;
3249 struct bitpack_d bp
;
3250 struct cgraph_edge
*e
;
3253 index
= streamer_read_uhwi (&ib
);
3254 encoder
= file_data
->symtab_node_encoder
;
3255 node
= dyn_cast
<cgraph_node
*> (lto_symtab_encoder_deref (encoder
,
3257 info
= ipa_fn_summaries
->get (node
);
3259 info
->estimated_stack_size
3260 = info
->estimated_self_stack_size
= streamer_read_uhwi (&ib
);
3261 info
->size
= info
->self_size
= streamer_read_uhwi (&ib
);
3262 info
->time
= sreal::stream_in (&ib
);
3264 bp
= streamer_read_bitpack (&ib
);
3265 info
->inlinable
= bp_unpack_value (&bp
, 1);
3266 info
->contains_cilk_spawn
= bp_unpack_value (&bp
, 1);
3267 info
->fp_expressions
= bp_unpack_value (&bp
, 1);
3269 count2
= streamer_read_uhwi (&ib
);
3270 gcc_assert (!info
->conds
);
3271 for (j
= 0; j
< count2
; j
++)
3274 c
.operand_num
= streamer_read_uhwi (&ib
);
3275 c
.size
= streamer_read_uhwi (&ib
);
3276 c
.code
= (enum tree_code
) streamer_read_uhwi (&ib
);
3277 c
.val
= stream_read_tree (&ib
, data_in
);
3278 bp
= streamer_read_bitpack (&ib
);
3279 c
.agg_contents
= bp_unpack_value (&bp
, 1);
3280 c
.by_ref
= bp_unpack_value (&bp
, 1);
3282 c
.offset
= streamer_read_uhwi (&ib
);
3283 vec_safe_push (info
->conds
, c
);
3285 count2
= streamer_read_uhwi (&ib
);
3286 gcc_assert (!info
->size_time_table
);
3287 for (j
= 0; j
< count2
; j
++)
3289 struct size_time_entry e
;
3291 e
.size
= streamer_read_uhwi (&ib
);
3292 e
.time
= sreal::stream_in (&ib
);
3293 e
.exec_predicate
.stream_in (&ib
);
3294 e
.nonconst_predicate
.stream_in (&ib
);
3296 vec_safe_push (info
->size_time_table
, e
);
3300 set_hint_predicate (&info
->loop_iterations
, p
);
3302 set_hint_predicate (&info
->loop_stride
, p
);
3304 set_hint_predicate (&info
->array_index
, p
);
3305 for (e
= node
->callees
; e
; e
= e
->next_callee
)
3306 read_ipa_call_summary (&ib
, e
);
3307 for (e
= node
->indirect_calls
; e
; e
= e
->next_callee
)
3308 read_ipa_call_summary (&ib
, e
);
3311 lto_free_section_data (file_data
, LTO_section_ipa_fn_summary
, NULL
, data
,
3313 lto_data_in_delete (data_in
);
3317 /* Read inline summary. Jump functions are shared among ipa-cp
3318 and inliner, so when ipa-cp is active, we don't need to write them
3322 ipa_fn_summary_read (void)
3324 struct lto_file_decl_data
**file_data_vec
= lto_get_file_decl_data ();
3325 struct lto_file_decl_data
*file_data
;
3328 ipa_fn_summary_alloc ();
3330 while ((file_data
= file_data_vec
[j
++]))
3333 const char *data
= lto_get_section_data (file_data
,
3334 LTO_section_ipa_fn_summary
,
3337 inline_read_section (file_data
, data
, len
);
3339 /* Fatal error here. We do not want to support compiling ltrans units
3340 with different version of compiler or different flags than the WPA
3341 unit, so this should never happen. */
3342 fatal_error (input_location
,
3343 "ipa inline summary is missing in input file");
3347 ipa_register_cgraph_hooks ();
3349 ipa_prop_read_jump_functions ();
3352 gcc_assert (ipa_fn_summaries
);
3353 ipa_fn_summaries
->enable_insertion_hook ();
3357 /* Write inline summary for edge E to OB. */
3360 write_ipa_call_summary (struct output_block
*ob
, struct cgraph_edge
*e
)
3362 struct ipa_call_summary
*es
= ipa_call_summaries
->get (e
);
3365 streamer_write_uhwi (ob
, es
->call_stmt_size
);
3366 streamer_write_uhwi (ob
, es
->call_stmt_time
);
3367 streamer_write_uhwi (ob
, es
->loop_depth
);
3369 es
->predicate
->stream_out (ob
);
3371 streamer_write_uhwi (ob
, 0);
3372 streamer_write_uhwi (ob
, es
->param
.length ());
3373 for (i
= 0; i
< (int) es
->param
.length (); i
++)
3374 streamer_write_uhwi (ob
, es
->param
[i
].change_prob
);
3378 /* Write inline summary for node in SET.
3379 Jump functions are shared among ipa-cp and inliner, so when ipa-cp is
3380 active, we don't need to write them twice. */
3383 ipa_fn_summary_write (void)
3385 struct output_block
*ob
= create_output_block (LTO_section_ipa_fn_summary
);
3386 lto_symtab_encoder_t encoder
= ob
->decl_state
->symtab_node_encoder
;
3387 unsigned int count
= 0;
3390 for (i
= 0; i
< lto_symtab_encoder_size (encoder
); i
++)
3392 symtab_node
*snode
= lto_symtab_encoder_deref (encoder
, i
);
3393 cgraph_node
*cnode
= dyn_cast
<cgraph_node
*> (snode
);
3394 if (cnode
&& cnode
->definition
&& !cnode
->alias
)
3397 streamer_write_uhwi (ob
, count
);
3399 for (i
= 0; i
< lto_symtab_encoder_size (encoder
); i
++)
3401 symtab_node
*snode
= lto_symtab_encoder_deref (encoder
, i
);
3402 cgraph_node
*cnode
= dyn_cast
<cgraph_node
*> (snode
);
3403 if (cnode
&& cnode
->definition
&& !cnode
->alias
)
3405 struct ipa_fn_summary
*info
= ipa_fn_summaries
->get (cnode
);
3406 struct bitpack_d bp
;
3407 struct cgraph_edge
*edge
;
3410 struct condition
*c
;
3412 streamer_write_uhwi (ob
, lto_symtab_encoder_encode (encoder
, cnode
));
3413 streamer_write_hwi (ob
, info
->estimated_self_stack_size
);
3414 streamer_write_hwi (ob
, info
->self_size
);
3415 info
->time
.stream_out (ob
);
3416 bp
= bitpack_create (ob
->main_stream
);
3417 bp_pack_value (&bp
, info
->inlinable
, 1);
3418 bp_pack_value (&bp
, info
->contains_cilk_spawn
, 1);
3419 bp_pack_value (&bp
, info
->fp_expressions
, 1);
3420 streamer_write_bitpack (&bp
);
3421 streamer_write_uhwi (ob
, vec_safe_length (info
->conds
));
3422 for (i
= 0; vec_safe_iterate (info
->conds
, i
, &c
); i
++)
3424 streamer_write_uhwi (ob
, c
->operand_num
);
3425 streamer_write_uhwi (ob
, c
->size
);
3426 streamer_write_uhwi (ob
, c
->code
);
3427 stream_write_tree (ob
, c
->val
, true);
3428 bp
= bitpack_create (ob
->main_stream
);
3429 bp_pack_value (&bp
, c
->agg_contents
, 1);
3430 bp_pack_value (&bp
, c
->by_ref
, 1);
3431 streamer_write_bitpack (&bp
);
3432 if (c
->agg_contents
)
3433 streamer_write_uhwi (ob
, c
->offset
);
3435 streamer_write_uhwi (ob
, vec_safe_length (info
->size_time_table
));
3436 for (i
= 0; vec_safe_iterate (info
->size_time_table
, i
, &e
); i
++)
3438 streamer_write_uhwi (ob
, e
->size
);
3439 e
->time
.stream_out (ob
);
3440 e
->exec_predicate
.stream_out (ob
);
3441 e
->nonconst_predicate
.stream_out (ob
);
3443 if (info
->loop_iterations
)
3444 info
->loop_iterations
->stream_out (ob
);
3446 streamer_write_uhwi (ob
, 0);
3447 if (info
->loop_stride
)
3448 info
->loop_stride
->stream_out (ob
);
3450 streamer_write_uhwi (ob
, 0);
3451 if (info
->array_index
)
3452 info
->array_index
->stream_out (ob
);
3454 streamer_write_uhwi (ob
, 0);
3455 for (edge
= cnode
->callees
; edge
; edge
= edge
->next_callee
)
3456 write_ipa_call_summary (ob
, edge
);
3457 for (edge
= cnode
->indirect_calls
; edge
; edge
= edge
->next_callee
)
3458 write_ipa_call_summary (ob
, edge
);
3461 streamer_write_char_stream (ob
->main_stream
, 0);
3462 produce_asm (ob
, NULL
);
3463 destroy_output_block (ob
);
3465 if (optimize
&& !flag_ipa_cp
)
3466 ipa_prop_write_jump_functions ();
3470 /* Release inline summary. */
3473 ipa_free_fn_summary (void)
3475 struct cgraph_node
*node
;
3476 if (!ipa_call_summaries
)
3478 FOR_EACH_DEFINED_FUNCTION (node
)
3480 ipa_fn_summaries
->get (node
)->reset (node
);
3481 ipa_fn_summaries
->release ();
3482 ipa_fn_summaries
= NULL
;
3483 ipa_call_summaries
->release ();
3484 delete ipa_call_summaries
;
3485 ipa_call_summaries
= NULL
;
3486 edge_predicate_pool
.release ();
3491 const pass_data pass_data_local_fn_summary
=
3493 GIMPLE_PASS
, /* type */
3494 "local-fnsummary", /* name */
3495 OPTGROUP_INLINE
, /* optinfo_flags */
3496 TV_INLINE_PARAMETERS
, /* tv_id */
3497 0, /* properties_required */
3498 0, /* properties_provided */
3499 0, /* properties_destroyed */
3500 0, /* todo_flags_start */
3501 0, /* todo_flags_finish */
3504 class pass_local_fn_summary
: public gimple_opt_pass
3507 pass_local_fn_summary (gcc::context
*ctxt
)
3508 : gimple_opt_pass (pass_data_local_fn_summary
, ctxt
)
3511 /* opt_pass methods: */
3512 opt_pass
* clone () { return new pass_local_fn_summary (m_ctxt
); }
3513 virtual unsigned int execute (function
*)
3515 return compute_fn_summary_for_current ();
3518 }; // class pass_local_fn_summary
3523 make_pass_local_fn_summary (gcc::context
*ctxt
)
3525 return new pass_local_fn_summary (ctxt
);
3529 /* Free inline summary. */
3533 const pass_data pass_data_ipa_free_fn_summary
=
3535 SIMPLE_IPA_PASS
, /* type */
3536 "free-fnsummary", /* name */
3537 OPTGROUP_NONE
, /* optinfo_flags */
3538 TV_IPA_FREE_INLINE_SUMMARY
, /* tv_id */
3539 0, /* properties_required */
3540 0, /* properties_provided */
3541 0, /* properties_destroyed */
3542 0, /* todo_flags_start */
3543 /* Early optimizations may make function unreachable. We can not
3544 remove unreachable functions as part of the ealry opts pass because
3545 TODOs are run before subpasses. Do it here. */
3546 ( TODO_remove_functions
| TODO_dump_symtab
), /* todo_flags_finish */
3549 class pass_ipa_free_fn_summary
: public simple_ipa_opt_pass
3552 pass_ipa_free_fn_summary (gcc::context
*ctxt
)
3553 : simple_ipa_opt_pass (pass_data_ipa_free_fn_summary
, ctxt
)
3556 /* opt_pass methods: */
3557 virtual unsigned int execute (function
*)
3559 ipa_free_fn_summary ();
3563 }; // class pass_ipa_free_fn_summary
3567 simple_ipa_opt_pass
*
3568 make_pass_ipa_free_fn_summary (gcc::context
*ctxt
)
3570 return new pass_ipa_free_fn_summary (ctxt
);
3575 const pass_data pass_data_ipa_fn_summary
=
3577 IPA_PASS
, /* type */
3578 "fnsummary", /* name */
3579 OPTGROUP_INLINE
, /* optinfo_flags */
3580 TV_IPA_INLINING
, /* tv_id */
3581 0, /* properties_required */
3582 0, /* properties_provided */
3583 0, /* properties_destroyed */
3584 0, /* todo_flags_start */
3585 ( TODO_dump_symtab
), /* todo_flags_finish */
3588 class pass_ipa_fn_summary
: public ipa_opt_pass_d
3591 pass_ipa_fn_summary (gcc::context
*ctxt
)
3592 : ipa_opt_pass_d (pass_data_ipa_fn_summary
, ctxt
,
3593 ipa_fn_summary_generate
, /* generate_summary */
3594 ipa_fn_summary_write
, /* write_summary */
3595 ipa_fn_summary_read
, /* read_summary */
3596 NULL
, /* write_optimization_summary */
3597 NULL
, /* read_optimization_summary */
3598 NULL
, /* stmt_fixup */
3599 0, /* function_transform_todo_flags_start */
3600 NULL
, /* function_transform */
3601 NULL
) /* variable_transform */
3604 /* opt_pass methods: */
3605 virtual unsigned int execute (function
*) { return 0; }
3607 }; // class pass_ipa_fn_summary
3612 make_pass_ipa_fn_summary (gcc::context
*ctxt
)
3614 return new pass_ipa_fn_summary (ctxt
);